//===- ScalarEvolutionExpander.cpp - Scalar Evolution Analysis --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution expander,
// which is used to generate the code corresponding to a given scalar evolution
// expression.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;
/// ReuseOrCreateCast - Arrange for there to be a cast of V to Ty at IP,
/// reusing an existing cast if a suitable one exists, moving an existing
/// cast if a suitable one exists but isn't in the right place, or
/// creating a new one.
Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
                                       Instruction::CastOps Op,
                                       BasicBlock::iterator IP) {
  // This function must be called with the builder having a valid insertion
  // point. It doesn't need to be the actual IP where the uses of the returned
  // cast will be added, but it must dominate such IP.
  // We use this precondition to produce a cast that will dominate all its
  // uses. In particular, this is crucial for the case where the builder's
  // insertion point *is* the point where we were asked to put the cast.
  // Since we don't know the builder's insertion point is actually
  // where the uses will be added (only that it dominates it), we are
  // not allowed to move it.
  BasicBlock::iterator BIP = Builder.GetInsertPoint();

  Instruction *Ret = NULL;

  // Check to see if there is already a cast!
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    User *U = *UI;
    if (U->getType() == Ty)
      if (CastInst *CI = dyn_cast<CastInst>(U))
        if (CI->getOpcode() == Op) {
          // If the cast isn't where we want it, create a new cast at IP.
          // Likewise, do not reuse a cast at BIP because it must dominate
          // instructions that might be inserted before BIP.
          if (BasicBlock::iterator(CI) != IP || BIP == IP) {
            // Create a new cast, and leave the old cast in place in case
            // it is being used as an insert point. Clear its operand
            // so that it doesn't hold anything live.
            Ret = CastInst::Create(Op, V, Ty, "", IP);
            Ret->takeName(CI);
            CI->replaceAllUsesWith(Ret);
            CI->setOperand(0, UndefValue::get(V->getType()));
            break;
          }
          Ret = CI;
          break;
        }
  }

  // Create a new cast.
  if (!Ret)
    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);

  // We assert at the end of the function since IP might point to an
  // instruction with different dominance properties than a cast
  // (an invoke for example) and not dominate BIP (but the cast does).
  assert(SE.DT->dominates(Ret, BIP));

  rememberInstruction(Ret);
  return Ret;
}
/// InsertNoopCastOfTo - Insert a cast of V to the specified type,
/// which must be possible with a noop cast, doing what we can to share
/// the casts.
Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
  Instruction::CastOps Op = CastInst::getCastOpcode(V, false, Ty, false);
  assert((Op == Instruction::BitCast ||
          Op == Instruction::PtrToInt ||
          Op == Instruction::IntToPtr) &&
         "InsertNoopCastOfTo cannot perform non-noop casts!");
  assert(SE.getTypeSizeInBits(V->getType()) == SE.getTypeSizeInBits(Ty) &&
         "InsertNoopCastOfTo cannot change sizes!");

  // Short-circuit unnecessary bitcasts.
  if (Op == Instruction::BitCast) {
    if (V->getType() == Ty)
      return V;
    if (CastInst *CI = dyn_cast<CastInst>(V)) {
      if (CI->getOperand(0)->getType() == Ty)
        return CI->getOperand(0);
    }
  }
  // Short-circuit unnecessary inttoptr<->ptrtoint casts.
  if ((Op == Instruction::PtrToInt || Op == Instruction::IntToPtr) &&
      SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(V->getType())) {
    if (CastInst *CI = dyn_cast<CastInst>(V))
      if ((CI->getOpcode() == Instruction::PtrToInt ||
           CI->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CI->getType()) ==
          SE.getTypeSizeInBits(CI->getOperand(0)->getType()))
        return CI->getOperand(0);
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
      if ((CE->getOpcode() == Instruction::PtrToInt ||
           CE->getOpcode() == Instruction::IntToPtr) &&
          SE.getTypeSizeInBits(CE->getType()) ==
          SE.getTypeSizeInBits(CE->getOperand(0)->getType()))
        return CE->getOperand(0);
  }

  // Fold a cast of a constant.
  if (Constant *C = dyn_cast<Constant>(V))
    return ConstantExpr::getCast(Op, C, Ty);

  // Cast the argument at the beginning of the entry block, after
  // any bitcasts of other arguments.
  if (Argument *A = dyn_cast<Argument>(V)) {
    BasicBlock::iterator IP = A->getParent()->getEntryBlock().begin();
    while ((isa<BitCastInst>(IP) &&
            isa<Argument>(cast<BitCastInst>(IP)->getOperand(0)) &&
            cast<BitCastInst>(IP)->getOperand(0) != A) ||
           isa<DbgInfoIntrinsic>(IP) ||
           isa<LandingPadInst>(IP))
      ++IP;
    return ReuseOrCreateCast(A, Ty, Op, IP);
  }

  // Cast the instruction immediately after the instruction.
  Instruction *I = cast<Instruction>(V);
  BasicBlock::iterator IP = I; ++IP;
  if (InvokeInst *II = dyn_cast<InvokeInst>(I))
    IP = II->getNormalDest()->begin();
  while (isa<PHINode>(IP) || isa<LandingPadInst>(IP))
    ++IP;
  return ReuseOrCreateCast(I, Ty, Op, IP);
}
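// Illustrative sketch (hypothetical IR, not part of the original file): the
// short-circuits above collapse round-trip casts. Given
//   %p = inttoptr i64 %x to i64*
// a request to noop-cast %p back to i64 simply returns %x instead of emitting
// a redundant ptrtoint, assuming (as asserted above) the pointer and integer
// types have the same size so the cast is a true no-op.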
/// InsertBinop - Insert the specified binary operator, doing a small amount
/// of work to avoid inserting an obviously redundant operation.
Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
                                 Value *LHS, Value *RHS) {
  // Fold a binop with constant operands.
  if (Constant *CLHS = dyn_cast<Constant>(LHS))
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      return ConstantExpr::get(Opcode, CLHS, CRHS);

  // Do a quick scan to see if we have this binop nearby. If so, reuse it.
  unsigned ScanLimit = 6;
  BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
  // Scanning starts from the last instruction before the insertion point.
  BasicBlock::iterator IP = Builder.GetInsertPoint();
  if (IP != BlockBegin) {
    --IP;
    for (; ScanLimit; --IP, --ScanLimit) {
      // Don't count dbg.value against the ScanLimit, to avoid perturbing the
      // generated code.
      if (isa<DbgInfoIntrinsic>(IP))
        ScanLimit++;
      if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
          IP->getOperand(1) == RHS)
        return IP;
      if (IP == BlockBegin) break;
    }
  }

  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(LHS) || !L->isLoopInvariant(RHS)) break;
    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // If we haven't found this binop, insert it.
  Instruction *BO = cast<Instruction>(Builder.CreateBinOp(Opcode, LHS, RHS));
  BO->setDebugLoc(SaveInsertPt->getDebugLoc());
  rememberInstruction(BO);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return BO;
}
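// Illustrative sketch (hypothetical IR): if the last few instructions before
// the insertion point already contain
//   %sum = add i64 %a, %b
// then InsertBinop(Instruction::Add, %a, %b) returns %sum rather than
// emitting a duplicate add; otherwise a new binop is created, hoisted out of
// any loops in which both operands are invariant.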
/// FactorOutConstant - Test if S is divisible by Factor, using signed
/// division. If so, update S with Factor divided out and return true.
/// S need not be evenly divisible if a reasonable remainder can be
/// computed.
/// TODO: When ScalarEvolution gets a SCEVSDivExpr, this can be made
/// unnecessary; in its place, just signed-divide Ops[i] by the scale and
/// check to see if the divide was folded.
static bool FactorOutConstant(const SCEV *&S,
                              const SCEV *&Remainder,
                              const SCEV *Factor,
                              ScalarEvolution &SE,
                              const TargetData *TD) {
  // Everything is divisible by one.
  if (Factor->isOne())
    return true;

  // x/x == 1.
  if (S == Factor) {
    S = SE.getConstant(S->getType(), 1);
    return true;
  }

  // For a Constant, check for a multiple of the given factor.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    // 0/x == 0.
    if (C->isZero())
      return true;
    // Check for divisibility.
    if (const SCEVConstant *FC = dyn_cast<SCEVConstant>(Factor)) {
      ConstantInt *CI =
        ConstantInt::get(SE.getContext(),
                         C->getValue()->getValue().sdiv(
                                               FC->getValue()->getValue()));
      // If the quotient is zero and the remainder is non-zero, reject
      // the value at this scale. It will be considered for subsequent
      // smaller scales.
      if (!CI->isZero()) {
        const SCEV *Div = SE.getConstant(CI);
        S = Div;
        Remainder =
          SE.getAddExpr(Remainder,
                        SE.getConstant(C->getValue()->getValue().srem(
                                                  FC->getValue()->getValue())));
        return true;
      }
    }
  }

  // In a Mul, check if there is a constant operand which is a multiple
  // of the given factor.
  if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
    if (TD) {
      // With TargetData, the size is known. Check if there is a constant
      // operand which is a multiple of the given factor. If so, we can
      // factor it.
      const SCEVConstant *FC = cast<SCEVConstant>(Factor);
      if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
        if (!C->getValue()->getValue().srem(FC->getValue()->getValue())) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[0] =
            SE.getConstant(C->getValue()->getValue().sdiv(
                                                   FC->getValue()->getValue()));
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
    } else {
      // Without TargetData, check if Factor can be factored out of any of the
      // Mul's operands. If so, we can just remove it.
      for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
        const SCEV *SOp = M->getOperand(i);
        const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
        if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
            Remainder->isZero()) {
          SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
          NewMulOps[i] = SOp;
          S = SE.getMulExpr(NewMulOps);
          return true;
        }
      }
    }
  }

  // In an AddRec, check if both start and step are divisible.
  if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
    const SCEV *Step = A->getStepRecurrence(SE);
    const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
    if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
      return false;
    if (!StepRem->isZero())
      return false;
    const SCEV *Start = A->getStart();
    if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
      return false;
    // FIXME: can use A->getNoWrapFlags(FlagNW)
    S = SE.getAddRecExpr(Start, Step, A->getLoop(), SCEV::FlagAnyWrap);
    return true;
  }

  return false;
}
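// Worked example (hypothetical values): factoring Factor = 4 out of the
// addrec S = {8,+,12} divides both start and step, leaving S = {2,+,3} with
// a zero remainder. Factoring 4 out of the constant S = 10 yields S = 2 with
// Remainder = 2, since sdiv(10, 4) = 2 and srem(10, 4) = 2.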
/// SimplifyAddOperands - Sort and simplify a list of add operands. NumAddRecs
/// is the number of SCEVAddRecExprs present, which are kept at the end of
/// the list.
///
static void SimplifyAddOperands(SmallVectorImpl<const SCEV *> &Ops,
                                Type *Ty,
                                ScalarEvolution &SE) {
  unsigned NumAddRecs = 0;
  for (unsigned i = Ops.size(); i > 0 && isa<SCEVAddRecExpr>(Ops[i-1]); --i)
    ++NumAddRecs;
  // Group Ops into non-addrecs and addrecs.
  SmallVector<const SCEV *, 8> NoAddRecs(Ops.begin(), Ops.end() - NumAddRecs);
  SmallVector<const SCEV *, 8> AddRecs(Ops.end() - NumAddRecs, Ops.end());
  // Let ScalarEvolution sort and simplify the non-addrecs list.
  const SCEV *Sum = NoAddRecs.empty() ?
                    SE.getConstant(Ty, 0) :
                    SE.getAddExpr(NoAddRecs);
  // If it returned an add, use the operands. Otherwise it simplified
  // the sum into a single value, so just use that.
  Ops.clear();
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Sum))
    Ops.append(Add->op_begin(), Add->op_end());
  else if (!Sum->isZero())
    Ops.push_back(Sum);
  // Then append the addrecs.
  Ops.append(AddRecs.begin(), AddRecs.end());
}
/// SplitAddRecs - Flatten a list of add operands, moving addrec start values
/// out to the top level. For example, convert {a + b,+,c} to a, b, {0,+,c}.
/// This helps expose more opportunities for folding parts of the expressions
/// into GEP indices.
///
static void SplitAddRecs(SmallVectorImpl<const SCEV *> &Ops,
                         Type *Ty,
                         ScalarEvolution &SE) {
  // Find the addrecs.
  SmallVector<const SCEV *, 8> AddRecs;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Ops[i])) {
      const SCEV *Start = A->getStart();
      if (Start->isZero()) break;
      const SCEV *Zero = SE.getConstant(Ty, 0);
      AddRecs.push_back(SE.getAddRecExpr(Zero,
                                         A->getStepRecurrence(SE),
                                         A->getLoop(),
                                         // FIXME: A->getNoWrapFlags(FlagNW)
                                         SCEV::FlagAnyWrap));
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Start)) {
        Ops[i] = Zero;
        Ops.append(Add->op_begin(), Add->op_end());
        e += Add->getNumOperands();
      } else {
        Ops[i] = Start;
      }
    }
  if (!AddRecs.empty()) {
    // Add the addrecs onto the end of the list.
    Ops.append(AddRecs.begin(), AddRecs.end());
    // Resort the operand list, moving any constants to the front.
    SimplifyAddOperands(Ops, Ty, SE);
  }
}
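// Illustrative sketch (hypothetical values): given Ops = [ {a + b,+,c} ], the
// loop above replaces the entry with its start expression and queues a
// zero-start copy of the recurrence, so after SimplifyAddOperands re-sorts
// the list, Ops = [ a, b, {0,+,c} ].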
/// expandAddToGEP - Expand an addition expression with a pointer type into
/// a GEP instead of using ptrtoint+arithmetic+inttoptr. This helps
/// BasicAliasAnalysis and other passes analyze the result. See the rules
/// for getelementptr vs. inttoptr in
/// http://llvm.org/docs/LangRef.html#pointeraliasing
/// for details.
///
/// Design note: The correctness of using getelementptr here depends on
/// ScalarEvolution not recognizing inttoptr and ptrtoint operators, as
/// they may introduce pointer arithmetic which may not be safely converted
/// into getelementptr.
///
/// Design note: It might seem desirable for this function to be more
/// loop-aware. If some of the indices are loop-invariant while others
/// aren't, it might seem desirable to emit multiple GEPs, keeping the
/// loop-invariant portions of the overall computation outside the loop.
/// However, there are a few reasons this is not done here. Hoisting simple
/// arithmetic is a low-level optimization that often isn't very
/// important until late in the optimization process. In fact, passes
/// like InstructionCombining will combine GEPs, even if it means
/// pushing loop-invariant computation down into loops, so even if the
/// GEPs were split here, the work would quickly be undone. The
/// LoopStrengthReduction pass, which is usually run quite late (and
/// after the last InstructionCombining pass), takes care of hoisting
/// loop-invariant portions of expressions, after considering what
/// can be folded using target addressing modes.
///
Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
                                    const SCEV *const *op_end,
                                    PointerType *PTy,
                                    Type *Ty,
                                    Value *V) {
  Type *ElTy = PTy->getElementType();
  SmallVector<Value *, 4> GepIndices;
  SmallVector<const SCEV *, 8> Ops(op_begin, op_end);
  bool AnyNonZeroIndices = false;

  // Split AddRecs up into parts as either of the parts may be usable
  // without the other.
  SplitAddRecs(Ops, Ty, SE);

  // Descend down the pointer's type and attempt to convert the other
  // operands into GEP indices, at each level. The first index in a GEP
  // indexes into the array implied by the pointer operand; the rest of
  // the indices index into the element or field type selected by the
  // preceding index.
  for (;;) {
    // If the scale size is not 0, attempt to factor out a scale for
    // array indexing.
    SmallVector<const SCEV *, 8> ScaledOps;
    if (ElTy->isSized()) {
      const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
      if (!ElSize->isZero()) {
        SmallVector<const SCEV *, 8> NewOps;
        for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
          const SCEV *Op = Ops[i];
          const SCEV *Remainder = SE.getConstant(Ty, 0);
          if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
            // Op now has ElSize factored out.
            ScaledOps.push_back(Op);
            if (!Remainder->isZero())
              NewOps.push_back(Remainder);
            AnyNonZeroIndices = true;
          } else {
            // The operand was not divisible, so add it to the list of operands
            // we'll scan next iteration.
            NewOps.push_back(Ops[i]);
          }
        }
        // If we made any changes, update Ops.
        if (!ScaledOps.empty()) {
          Ops = NewOps;
          SimplifyAddOperands(Ops, Ty, SE);
        }
      }
    }

    // Record the scaled array index for this level of the type. If
    // we didn't find any operands that could be factored, tentatively
    // assume that element zero was selected (since the zero offset
    // would obviously be folded away).
    Value *Scaled = ScaledOps.empty() ?
                    Constant::getNullValue(Ty) :
                    expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
    GepIndices.push_back(Scaled);

    // Collect struct field index operands.
    while (StructType *STy = dyn_cast<StructType>(ElTy)) {
      bool FoundFieldNo = false;
      // An empty struct has no fields.
      if (STy->getNumElements() == 0) break;
      if (SE.TD) {
        // With TargetData, field offsets are known. See if a constant offset
        // falls within any of the struct fields.
        if (Ops.empty()) break;
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
          if (SE.getTypeSizeInBits(C->getType()) <= 64) {
            const StructLayout &SL = *SE.TD->getStructLayout(STy);
            uint64_t FullOffset = C->getValue()->getZExtValue();
            if (FullOffset < SL.getSizeInBytes()) {
              unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
              GepIndices.push_back(
                  ConstantInt::get(Type::getInt32Ty(Ty->getContext()), ElIdx));
              ElTy = STy->getTypeAtIndex(ElIdx);
              Ops[0] =
                SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
            }
          }
      } else {
        // Without TargetData, just check for an offsetof expression of the
        // appropriate struct type.
        for (unsigned i = 0, e = Ops.size(); i != e; ++i)
          if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
            Type *CTy;
            Constant *FieldNo;
            if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
              GepIndices.push_back(FieldNo);
              ElTy =
                STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
              Ops[i] = SE.getConstant(Ty, 0);
              AnyNonZeroIndices = true;
              FoundFieldNo = true;
              break;
            }
          }
      }
      // If no struct field offsets were found, tentatively assume that
      // field zero was selected (since the zero offset would obviously
      // be folded away).
      if (!FoundFieldNo) {
        ElTy = STy->getTypeAtIndex(0u);
        GepIndices.push_back(
          Constant::getNullValue(Type::getInt32Ty(Ty->getContext())));
      }
    }
    if (ArrayType *ATy = dyn_cast<ArrayType>(ElTy))
      ElTy = ATy->getElementType();
    else
      break;
  }

  // If none of the operands were convertible to proper GEP indices, cast
  // the base to i8* and do an ugly getelementptr with that. It's still
  // better than ptrtoint+arithmetic+inttoptr at least.
  if (!AnyNonZeroIndices) {
    // Cast the base to i8*.
    V = InsertNoopCastOfTo(V,
       Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));

    assert(!isa<Instruction>(V) ||
           SE.DT->dominates(cast<Instruction>(V), Builder.GetInsertPoint()));

    // Expand the operands for a plain byte offset.
    Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);

    // Fold a GEP with constant operands.
    if (Constant *CLHS = dyn_cast<Constant>(V))
      if (Constant *CRHS = dyn_cast<Constant>(Idx))
        return ConstantExpr::getGetElementPtr(CLHS, CRHS);

    // Do a quick scan to see if we have this GEP nearby. If so, reuse it.
    unsigned ScanLimit = 6;
    BasicBlock::iterator BlockBegin = Builder.GetInsertBlock()->begin();
    // Scanning starts from the last instruction before the insertion point.
    BasicBlock::iterator IP = Builder.GetInsertPoint();
    if (IP != BlockBegin) {
      --IP;
      for (; ScanLimit; --IP, --ScanLimit) {
        // Don't count dbg.value against the ScanLimit, to avoid perturbing the
        // generated code.
        if (isa<DbgInfoIntrinsic>(IP))
          ScanLimit++;
        if (IP->getOpcode() == Instruction::GetElementPtr &&
            IP->getOperand(0) == V && IP->getOperand(1) == Idx)
          return IP;
        if (IP == BlockBegin) break;
      }
    }

    // Save the original insertion point so we can restore it when we're done.
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

    // Move the insertion point out of as many loops as we can.
    while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
      if (!L->isLoopInvariant(V) || !L->isLoopInvariant(Idx)) break;
      BasicBlock *Preheader = L->getLoopPreheader();
      if (!Preheader) break;

      // Ok, move up a level.
      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
    }

    // Emit a GEP.
    Value *GEP = Builder.CreateGEP(V, Idx, "uglygep");
    rememberInstruction(GEP);

    // Restore the original insert point.
    if (SaveInsertBB)
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);

    return GEP;
  }
  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Move the insertion point out of as many loops as we can.
  while (const Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock())) {
    if (!L->isLoopInvariant(V)) break;

    bool AnyIndexNotLoopInvariant = false;
    for (SmallVectorImpl<Value *>::const_iterator I = GepIndices.begin(),
         E = GepIndices.end(); I != E; ++I)
      if (!L->isLoopInvariant(*I)) {
        AnyIndexNotLoopInvariant = true;
        break;
      }
    if (AnyIndexNotLoopInvariant)
      break;

    BasicBlock *Preheader = L->getLoopPreheader();
    if (!Preheader) break;

    // Ok, move up a level.
    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
  }

  // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
  // because ScalarEvolution may have changed the address arithmetic to
  // compute a value which is beyond the end of the allocated object.
  Value *Casted = V;
  if (V->getType() != PTy)
    Casted = InsertNoopCastOfTo(Casted, PTy);
  Value *GEP = Builder.CreateGEP(Casted,
                                 GepIndices,
                                 "scevgep");
  Ops.push_back(SE.getUnknown(GEP));
  rememberInstruction(GEP);

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return expand(SE.getAddExpr(Ops));
}
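// Illustrative contrast (hypothetical IR, not from the original file):
// expanding %p + 4*%i with an i32* base can become the "pretty" form
//   %scevgep = getelementptr i32* %p, i64 %i
// because the element size 4 divides the offset, whereas an offset that
// cannot be factored falls back to the "ugly" byte-addressed form
//   %uglygep = getelementptr i8* %p8, i64 %n
// where %n is the raw byte offset expanded as ordinary arithmetic.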
/// PickMostRelevantLoop - Given two loops pick the one that's most relevant for
/// SCEV expansion. If they are nested, this is the most nested. If they are
/// neighboring, pick the later.
static const Loop *PickMostRelevantLoop(const Loop *A, const Loop *B,
                                        DominatorTree &DT) {
  if (!A) return B;
  if (!B) return A;
  if (A->contains(B)) return B;
  if (B->contains(A)) return A;
  if (DT.dominates(A->getHeader(), B->getHeader())) return B;
  if (DT.dominates(B->getHeader(), A->getHeader())) return A;
  return A; // Arbitrarily break the tie.
}
/// getRelevantLoop - Get the most relevant loop associated with the given
/// expression, according to PickMostRelevantLoop.
const Loop *SCEVExpander::getRelevantLoop(const SCEV *S) {
  // Test whether we've already computed the most relevant loop for this SCEV.
  std::pair<DenseMap<const SCEV *, const Loop *>::iterator, bool> Pair =
    RelevantLoops.insert(std::make_pair(S, static_cast<const Loop *>(0)));
  if (!Pair.second)
    return Pair.first->second;

  if (isa<SCEVConstant>(S))
    // A constant has no relevant loops.
    return 0;
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (const Instruction *I = dyn_cast<Instruction>(U->getValue()))
      return Pair.first->second = SE.LI->getLoopFor(I->getParent());
    // A non-instruction has no relevant loops.
    return 0;
  }
  if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S)) {
    const Loop *L = 0;
    if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
      L = AR->getLoop();
    for (SCEVNAryExpr::op_iterator I = N->op_begin(), E = N->op_end();
         I != E; ++I)
      L = PickMostRelevantLoop(L, getRelevantLoop(*I), *SE.DT);
    return RelevantLoops[N] = L;
  }
  if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S)) {
    const Loop *Result = getRelevantLoop(C->getOperand());
    return RelevantLoops[C] = Result;
  }
  if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
    const Loop *Result =
      PickMostRelevantLoop(getRelevantLoop(D->getLHS()),
                           getRelevantLoop(D->getRHS()),
                           *SE.DT);
    return RelevantLoops[D] = Result;
  }
  llvm_unreachable("Unexpected SCEV type!");
}
namespace {

/// LoopCompare - Compare loops by PickMostRelevantLoop.
class LoopCompare {
  DominatorTree &DT;
public:
  explicit LoopCompare(DominatorTree &dt) : DT(dt) {}

  bool operator()(std::pair<const Loop *, const SCEV *> LHS,
                  std::pair<const Loop *, const SCEV *> RHS) const {
    // Keep pointer operands sorted at the end.
    if (LHS.second->getType()->isPointerTy() !=
        RHS.second->getType()->isPointerTy())
      return LHS.second->getType()->isPointerTy();

    // Compare loops with PickMostRelevantLoop.
    if (LHS.first != RHS.first)
      return PickMostRelevantLoop(LHS.first, RHS.first, DT) != LHS.first;

    // If one operand is a non-constant negative and the other is not,
    // put the non-constant negative on the right so that a sub can
    // be used instead of a negate and add.
    if (LHS.second->isNonConstantNegative()) {
      if (!RHS.second->isNonConstantNegative())
        return false;
    } else if (RHS.second->isNonConstantNegative())
      return true;

    // Otherwise they are equivalent according to this comparison.
    return false;
  }
};

}
Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the add operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal, and
  // so that pointer operands are inserted first, which the code below relies on
  // to form more involved GEPs.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVAddExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants and
  // pointer operands precede non-pointer operands.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to add all the operands. Hoist as much as possible
  // out of loops, and form meaningful getelementptrs where possible.
  Value *Sum = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const Loop *CurLoop = I->first;
    const SCEV *Op = I->second;
    if (!Sum) {
      // This is the first operand. Just expand it.
      Sum = expand(Op);
      ++I;
    } else if (PointerType *PTy = dyn_cast<PointerType>(Sum->getType())) {
      // The running sum expression is a pointer. Try to form a getelementptr
      // at this level with that as the base.
      SmallVector<const SCEV *, 4> NewOps;
      for (; I != E && I->first == CurLoop; ++I) {
        // If the operand is a SCEVUnknown and not an instruction, peek through
        // it, to enable more of it to be folded into the GEP.
        const SCEV *X = I->second;
        if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(X))
          if (!isa<Instruction>(U->getValue()))
            X = SE.getSCEV(U->getValue());
        NewOps.push_back(X);
      }
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, Sum);
    } else if (PointerType *PTy = dyn_cast<PointerType>(Op->getType())) {
      // The running sum is an integer, and there's a pointer at this level.
      // Try to form a getelementptr. If the running sum is instructions,
      // use a SCEVUnknown to avoid re-analyzing them.
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.push_back(isa<Instruction>(Sum) ? SE.getUnknown(Sum) :
                                               SE.getSCEV(Sum));
      for (++I; I != E && I->first == CurLoop; ++I)
        NewOps.push_back(I->second);
      Sum = expandAddToGEP(NewOps.begin(), NewOps.end(), PTy, Ty, expand(Op));
    } else if (Op->isNonConstantNegative()) {
      // Instead of doing a negate and add, just do a subtract.
      Value *W = expandCodeFor(SE.getNegativeSCEV(Op), Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      Sum = InsertBinop(Instruction::Sub, Sum, W);
      ++I;
    } else {
      // A simple add.
      Value *W = expandCodeFor(Op, Ty);
      Sum = InsertNoopCastOfTo(Sum, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Sum)) std::swap(Sum, W);
      Sum = InsertBinop(Instruction::Add, Sum, W);
      ++I;
    }
  }

  return Sum;
}
Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  // Collect all the mul operands in a loop, along with their associated loops.
  // Iterate in reverse so that constants are emitted last, all else equal.
  SmallVector<std::pair<const Loop *, const SCEV *>, 8> OpsAndLoops;
  for (std::reverse_iterator<SCEVMulExpr::op_iterator> I(S->op_end()),
       E(S->op_begin()); I != E; ++I)
    OpsAndLoops.push_back(std::make_pair(getRelevantLoop(*I), *I));

  // Sort by loop. Use a stable sort so that constants follow non-constants.
  std::stable_sort(OpsAndLoops.begin(), OpsAndLoops.end(), LoopCompare(*SE.DT));

  // Emit instructions to mul all the operands. Hoist as much as possible
  // out of loops.
  Value *Prod = 0;
  for (SmallVectorImpl<std::pair<const Loop *, const SCEV *> >::iterator
       I = OpsAndLoops.begin(), E = OpsAndLoops.end(); I != E; ) {
    const SCEV *Op = I->second;
    if (!Prod) {
      // This is the first operand. Just expand it.
      Prod = expand(Op);
      ++I;
    } else if (Op->isAllOnesValue()) {
      // Instead of doing a multiply by negative one, just do a negate.
      Prod = InsertNoopCastOfTo(Prod, Ty);
      Prod = InsertBinop(Instruction::Sub, Constant::getNullValue(Ty), Prod);
      ++I;
    } else {
      // A simple mul.
      Value *W = expandCodeFor(Op, Ty);
      Prod = InsertNoopCastOfTo(Prod, Ty);
      // Canonicalize a constant to the RHS.
      if (isa<Constant>(Prod)) std::swap(Prod, W);
      Prod = InsertBinop(Instruction::Mul, Prod, W);
      ++I;
    }
  }

  return Prod;
}
Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());

  Value *LHS = expandCodeFor(S->getLHS(), Ty);
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(S->getRHS())) {
    const APInt &RHS = SC->getValue()->getValue();
    if (RHS.isPowerOf2())
      return InsertBinop(Instruction::LShr, LHS,
                         ConstantInt::get(Ty, RHS.logBase2()));
  }

  Value *RHS = expandCodeFor(S->getRHS(), Ty);
  return InsertBinop(Instruction::UDiv, LHS, RHS);
}
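// Illustrative sketch: because the division is unsigned, a power-of-two
// divisor becomes a shift; for S = %x /u 8 this emits
//   %t = lshr i64 %x, 3
// rather than a udiv instruction, since 8 = 2^3.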
/// Move parts of Base into Rest to leave Base with the minimal
/// expression that provides a pointer operand suitable for a
/// GEP expansion.
static void ExposePointerBase(const SCEV *&Base, const SCEV *&Rest,
                              ScalarEvolution &SE) {
  while (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(Base)) {
    Base = A->getStart();
    Rest = SE.getAddExpr(Rest,
                         SE.getAddRecExpr(SE.getConstant(A->getType(), 0),
                                          A->getStepRecurrence(SE),
                                          A->getLoop(),
                                          // FIXME: A->getNoWrapFlags(FlagNW)
                                          SCEV::FlagAnyWrap));
  }
  if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
    Base = A->getOperand(A->getNumOperands()-1);
    SmallVector<const SCEV *, 8> NewAddOps(A->op_begin(), A->op_end());
    NewAddOps.back() = Rest;
    Rest = SE.getAddExpr(NewAddOps);
    ExposePointerBase(Base, Rest, SE);
  }
}
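// Illustrative sketch (hypothetical values): for Base = {p,+,4}<L> this
// rewrites Base = p and folds the stripped recurrence into Rest, giving
// Rest = Rest0 + {0,+,4}<L>; the bare pointer p can then serve as the base
// operand of a GEP while Rest supplies the indices.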
/// Determine if this is a well-behaved chain of instructions leading back to
/// the PHI. If so, it may be reused by expanded expressions.
bool SCEVExpander::isNormalAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                         const Loop *L) {
  if (IncV->getNumOperands() == 0 || isa<PHINode>(IncV) ||
      (isa<CastInst>(IncV) && !isa<BitCastInst>(IncV)))
    return false;
  // If any of the operands don't dominate the insert position, bail.
  // Addrec operands are always loop-invariant, so this can only happen
  // if there are instructions which haven't been hoisted.
  if (L == IVIncInsertLoop) {
    for (User::op_iterator OI = IncV->op_begin()+1,
           OE = IncV->op_end(); OI != OE; ++OI)
      if (Instruction *OInst = dyn_cast<Instruction>(OI))
        if (!SE.DT->dominates(OInst, IVIncInsertPos))
          return false;
  }
  // Advance to the next instruction.
  IncV = dyn_cast<Instruction>(IncV->getOperand(0));
  if (!IncV)
    return false;

  if (IncV->mayHaveSideEffects())
    return false;

  if (IncV != PN)
    return true;

  return isNormalAddRecExprPHI(PN, IncV, L);
}
/// getIVIncOperand returns an induction variable increment's induction
/// variable operand.
///
/// If allowScale is set, any type of GEP is allowed as long as the nonIV
/// operands dominate InsertPos.
///
/// If allowScale is not set, ensure that a GEP increment conforms to one of the
/// simple patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP. If the pattern isn't recognized, return NULL.
Instruction *SCEVExpander::getIVIncOperand(Instruction *IncV,
                                           Instruction *InsertPos,
                                           bool allowScale) {
  if (IncV == InsertPos)
    return NULL;

  switch (IncV->getOpcode()) {
  default:
    return NULL;
  // Check for a simple Add/Sub or GEP of a loop invariant step.
  case Instruction::Add:
  case Instruction::Sub: {
    Instruction *OInst = dyn_cast<Instruction>(IncV->getOperand(1));
    if (!OInst || SE.DT->dominates(OInst, InsertPos))
      return dyn_cast<Instruction>(IncV->getOperand(0));
    return NULL;
  }
  case Instruction::BitCast:
    return dyn_cast<Instruction>(IncV->getOperand(0));
  case Instruction::GetElementPtr:
    for (Instruction::op_iterator I = IncV->op_begin()+1, E = IncV->op_end();
         I != E; ++I) {
      if (isa<Constant>(*I))
        continue;
      if (Instruction *OInst = dyn_cast<Instruction>(*I)) {
        if (!SE.DT->dominates(OInst, InsertPos))
          return NULL;
      }
      if (allowScale) {
        // allow any kind of GEP as long as it can be hoisted.
        continue;
      }
      // This must be a pointer addition of constants (pretty), which is already
      // handled, or some number of address-size elements (ugly). Ugly geps
      // have 2 operands. i1* is used by the expander to represent an
      // address-size element.
      if (IncV->getNumOperands() != 2)
        return NULL;
      unsigned AS = cast<PointerType>(IncV->getType())->getAddressSpace();
      if (IncV->getType() != Type::getInt1PtrTy(SE.getContext(), AS)
          && IncV->getType() != Type::getInt8PtrTy(SE.getContext(), AS))
        return NULL;
      break;
    }
    return dyn_cast<Instruction>(IncV->getOperand(0));
  }
}
/// hoistIVInc - Attempt to hoist a simple IV increment above InsertPos to make
/// it available to other uses in this loop. Recursively hoist any operands,
/// until we reach a value that dominates InsertPos.
bool SCEVExpander::hoistIVInc(Instruction *IncV, Instruction *InsertPos) {
  if (SE.DT->dominates(IncV, InsertPos))
    return true;

  // InsertPos must itself dominate IncV so that IncV's new position satisfies
  // its existing users.
  if (!SE.DT->dominates(InsertPos->getParent(), IncV->getParent()))
    return false;

  // Check that the chain of IV operands leading back to Phi can be hoisted.
  SmallVector<Instruction*, 4> IVIncs;
  for(;;) {
    Instruction *Oper = getIVIncOperand(IncV, InsertPos, /*allowScale*/true);
    if (!Oper)
      return false;
    // IncV is safe to hoist.
    IVIncs.push_back(IncV);
    IncV = Oper;
    if (SE.DT->dominates(IncV, InsertPos))
      break;
  }
  for (SmallVectorImpl<Instruction*>::reverse_iterator I = IVIncs.rbegin(),
         E = IVIncs.rend(); I != E; ++I) {
    (*I)->moveBefore(InsertPos);
  }
  return true;
}
/// Determine if this cyclic phi is in a form that would have been generated by
/// LSR. We don't care if the phi was actually expanded in this pass, as long
/// as it is in a low-cost form, for example, no implied multiplication. This
/// should match any patterns generated by getAddRecExprPHILiterally and
/// expandAddToGEP.
bool SCEVExpander::isExpandedAddRecExprPHI(PHINode *PN, Instruction *IncV,
                                           const Loop *L) {
  for(Instruction *IVOper = IncV;
      (IVOper = getIVIncOperand(IVOper, L->getLoopPreheader()->getTerminator(),
                                /*allowScale=*/false));) {
    if (IVOper == PN)
      return true;
  }
  return false;
}
/// expandIVInc - Expand an IV increment at Builder's current InsertPos.
/// Typically this is the LatchBlock terminator or IVIncInsertPos, but we may
/// need to materialize IV increments elsewhere to handle difficult situations.
Value *SCEVExpander::expandIVInc(PHINode *PN, Value *StepV, const Loop *L,
                                 Type *ExpandTy, Type *IntTy,
                                 bool useSubtract) {
  Value *IncV;
  // If the PHI is a pointer, use a GEP, otherwise use an add or sub.
  if (ExpandTy->isPointerTy()) {
    PointerType *GEPPtrTy = cast<PointerType>(ExpandTy);
    // If the step isn't constant, don't use an implicitly scaled GEP, because
    // that would require a multiply inside the loop.
    if (!isa<ConstantInt>(StepV))
      GEPPtrTy = PointerType::get(Type::getInt1Ty(SE.getContext()),
                                  GEPPtrTy->getAddressSpace());
    const SCEV *const StepArray[1] = { SE.getSCEV(StepV) };
    IncV = expandAddToGEP(StepArray, StepArray+1, GEPPtrTy, IntTy, PN);
    if (IncV->getType() != PN->getType()) {
      IncV = Builder.CreateBitCast(IncV, PN->getType());
      rememberInstruction(IncV);
    }
  } else {
    IncV = useSubtract ?
      Builder.CreateSub(PN, StepV, Twine(IVName) + ".iv.next") :
      Builder.CreateAdd(PN, StepV, Twine(IVName) + ".iv.next");
    rememberInstruction(IncV);
  }
  return IncV;
}
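// Illustrative sketch (hypothetical IR): for an integer PHI %x.iv with a
// positive step this emits "%x.iv.next = add i64 %x.iv, %step" (or a sub for
// a negative non-constant step); for a pointer PHI with a non-constant step
// it emits a GEP over i1*, so the step is treated as a raw address-size
// element count and no multiply is introduced inside the loop.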
/// getAddRecExprPHILiterally - Helper for expandAddRecExprLiterally. Expand
/// the base addrec, which is the addrec without any non-loop-dominating
/// values, and return the PHI.
PHINode *
SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
                                        const Loop *L,
                                        Type *ExpandTy,
                                        Type *IntTy) {
  assert((!IVIncInsertLoop||IVIncInsertPos) && "Uninitialized insert position");

  // Reuse a previously-inserted PHI, if present.
  BasicBlock *LatchBlock = L->getLoopLatch();
  if (LatchBlock) {
    for (BasicBlock::iterator I = L->getHeader()->begin();
         PHINode *PN = dyn_cast<PHINode>(I); ++I) {
      if (!SE.isSCEVable(PN->getType()) ||
          (SE.getEffectiveSCEVType(PN->getType()) !=
           SE.getEffectiveSCEVType(Normalized->getType())) ||
          SE.getSCEV(PN) != Normalized)
        continue;

      Instruction *IncV =
        cast<Instruction>(PN->getIncomingValueForBlock(LatchBlock));

      if (LSRMode) {
        if (!isExpandedAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop && !hoistIVInc(IncV, IVIncInsertPos))
          continue;
      }
      else {
        if (!isNormalAddRecExprPHI(PN, IncV, L))
          continue;
        if (L == IVIncInsertLoop)
          do {
            if (SE.DT->dominates(IncV, IVIncInsertPos))
              break;
            // Make sure the increment is where we want it. But don't move it
            // down past a potential existing post-inc user.
            IncV->moveBefore(IVIncInsertPos);
            IVIncInsertPos = IncV;
            IncV = cast<Instruction>(IncV->getOperand(0));
          } while (IncV != PN);
      }
      // Ok, the add recurrence looks usable.
      // Remember this PHI, even in post-inc mode.
      InsertedValues.insert(PN);
      // Remember the increment.
      rememberInstruction(IncV);
      return PN;
    }
  }
  // Save the original insertion point so we can restore it when we're done.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();

  // Another AddRec may need to be recursively expanded below. For example, if
  // this AddRec is quadratic, the StepV may itself be an AddRec in this
  // loop. Remove this loop from the PostIncLoops set before expanding such
  // AddRecs. Otherwise, we cannot find a valid position for the step
  // (i.e. StepV can never dominate its loop header). Ideally, we could do
  // SavedIncLoops.swap(PostIncLoops), but we generally have a single element,
  // so it's not worth implementing SmallPtrSet::swap.
  PostIncLoopSet SavedPostIncLoops = PostIncLoops;
  PostIncLoops.clear();

  // Expand code for the start value.
  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
                                L->getHeader()->begin());

  // StartV must be hoisted into L's preheader to dominate the new phi.
  assert(!isa<Instruction>(StartV) ||
         SE.DT->properlyDominates(cast<Instruction>(StartV)->getParent(),
                                  L->getHeader()));

  // Expand code for the step value. Do this before creating the PHI so that
  // PHI reuse code doesn't see an incomplete PHI.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  // If the stride is negative, insert a sub instead of an add for the
  // increment (unless it's a constant, because subtracts of constants are
  // canonicalized to adds).
  bool useSubtract = !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
  if (useSubtract)
    Step = SE.getNegativeSCEV(Step);
  // Expand the step somewhere that dominates the loop header.
  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());

  // Create the PHI.
  BasicBlock *Header = L->getHeader();
  Builder.SetInsertPoint(Header, Header->begin());
  pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
  PHINode *PN = Builder.CreatePHI(ExpandTy, std::distance(HPB, HPE),
                                  Twine(IVName) + ".iv");
  rememberInstruction(PN);

  // Create the step instructions and populate the PHI.
  for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
    BasicBlock *Pred = *HPI;

    // Add a start value.
    if (!L->contains(Pred)) {
      PN->addIncoming(StartV, Pred);
      continue;
    }

    // Create a step value and add it to the PHI.
    // If IVIncInsertLoop is non-null and equal to the addrec's loop, insert
    // the instructions at IVIncInsertPos.
    Instruction *InsertPos = L == IVIncInsertLoop ?
      IVIncInsertPos : Pred->getTerminator();
    Builder.SetInsertPoint(InsertPos);
    Value *IncV = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);

    PN->addIncoming(IncV, Pred);
  }

  // Restore the original insert point.
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  // After expanding subexpressions, restore the PostIncLoops set so the caller
  // can ensure that IVIncrement dominates the current uses.
  PostIncLoops = SavedPostIncLoops;

  // Remember this PHI, even in post-inc mode.
  InsertedValues.insert(PN);

  return PN;
}
Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
  Type *STy = S->getType();
  Type *IntTy = SE.getEffectiveSCEVType(STy);
  const Loop *L = S->getLoop();

  // Determine a normalized form of this expression, which is the expression
  // before any post-inc adjustment is made.
  const SCEVAddRecExpr *Normalized = S;
  if (PostIncLoops.count(L)) {
    PostIncLoopSet Loops;
    Loops.insert(L);
    Normalized =
      cast<SCEVAddRecExpr>(TransformForPostIncUse(Normalize, S, 0, 0,
                                                  Loops, SE, *SE.DT));
  }

  // Strip off any non-loop-dominating component from the addrec start.
  const SCEV *Start = Normalized->getStart();
  const SCEV *PostLoopOffset = 0;
  if (!SE.properlyDominates(Start, L->getHeader())) {
    PostLoopOffset = Start;
    Start = SE.getConstant(Normalized->getType(), 0);
    Normalized = cast<SCEVAddRecExpr>(
      SE.getAddRecExpr(Start, Normalized->getStepRecurrence(SE),
                       Normalized->getLoop(),
                       // FIXME: Normalized->getNoWrapFlags(FlagNW)
                       SCEV::FlagAnyWrap));
  }

  // Strip off any non-loop-dominating component from the addrec step.
  const SCEV *Step = Normalized->getStepRecurrence(SE);
  const SCEV *PostLoopScale = 0;
  if (!SE.dominates(Step, L->getHeader())) {
    PostLoopScale = Step;
    Step = SE.getConstant(Normalized->getType(), 1);
    Normalized =
      cast<SCEVAddRecExpr>(SE.getAddRecExpr(Start, Step,
                                            Normalized->getLoop(),
                                            // FIXME: Normalized
                                            // ->getNoWrapFlags(FlagNW)
                                            SCEV::FlagAnyWrap));
  }

  // Expand the core addrec. If we need post-loop scaling, force it to
  // expand to an integer type to avoid the need for additional casting.
  Type *ExpandTy = PostLoopScale ? IntTy : STy;
  PHINode *PN = getAddRecExprPHILiterally(Normalized, L, ExpandTy, IntTy);

  // Accommodate post-inc mode, if necessary.
  Value *Result;
  if (!PostIncLoops.count(L))
    Result = PN;
  else {
    // In PostInc mode, use the post-incremented value.
    BasicBlock *LatchBlock = L->getLoopLatch();
    assert(LatchBlock && "PostInc mode requires a unique loop latch!");
    Result = PN->getIncomingValueForBlock(LatchBlock);

    // For an expansion to use the postinc form, the client must call
    // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
    // or dominated by IVIncInsertPos.
    if (isa<Instruction>(Result)
        && !SE.DT->dominates(cast<Instruction>(Result),
                             Builder.GetInsertPoint())) {
      // The induction variable's postinc expansion does not dominate this use.
      // IVUsers tries to prevent this case, so it is rare. However, it can
      // happen when an IVUser outside the loop is not dominated by the latch
      // block. Adjusting IVIncInsertPos before expansion begins cannot handle
      // all cases. Consider a phi outside the loop whose operand is replaced
      // during expansion with the value of the postinc user. Without
      // fundamentally changing the way postinc users are tracked, the only
      // remedy is inserting an extra IV increment. StepV might fold into
      // PostLoopOffset, but hopefully expandCodeFor handles that.
      bool useSubtract =
        !ExpandTy->isPointerTy() && Step->isNonConstantNegative();
      if (useSubtract)
        Step = SE.getNegativeSCEV(Step);
      // Expand the step somewhere that dominates the loop header.
      BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
      BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
      Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
      // Restore the insertion point to the place where the caller has
      // determined dominates all uses.
      restoreInsertPoint(SaveInsertBB, SaveInsertPt);
      Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
    }
  }

  // Re-apply any non-loop-dominating scale.
  if (PostLoopScale) {
    Result = InsertNoopCastOfTo(Result, IntTy);
    Result = Builder.CreateMul(Result,
                               expandCodeFor(PostLoopScale, IntTy));
    rememberInstruction(Result);
  }

  // Re-apply any non-loop-dominating offset.
  if (PostLoopOffset) {
    if (PointerType *PTy = dyn_cast<PointerType>(ExpandTy)) {
      const SCEV *const OffsetArray[1] = { PostLoopOffset };
      Result = expandAddToGEP(OffsetArray, OffsetArray+1, PTy, IntTy, Result);
    } else {
      Result = InsertNoopCastOfTo(Result, IntTy);
      Result = Builder.CreateAdd(Result,
                                 expandCodeFor(PostLoopOffset, IntTy));
      rememberInstruction(Result);
    }
  }

  return Result;
}
Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
  if (!CanonicalMode) return expandAddRecExprLiterally(S);

  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  const Loop *L = S->getLoop();

  // First check for an existing canonical IV in a suitable type.
  PHINode *CanonicalIV = 0;
  if (PHINode *PN = L->getCanonicalInductionVariable())
    if (SE.getTypeSizeInBits(PN->getType()) >= SE.getTypeSizeInBits(Ty))
      CanonicalIV = PN;

  // Rewrite an AddRec in terms of the canonical induction variable, if
  // its type is more narrow.
  if (CanonicalIV &&
      SE.getTypeSizeInBits(CanonicalIV->getType()) >
      SE.getTypeSizeInBits(Ty)) {
    SmallVector<const SCEV *, 4> NewOps(S->getNumOperands());
    for (unsigned i = 0, e = S->getNumOperands(); i != e; ++i)
      NewOps[i] = SE.getAnyExtendExpr(S->op_begin()[i], CanonicalIV->getType());
    Value *V = expand(SE.getAddRecExpr(NewOps, S->getLoop(),
                                       // FIXME: S->getNoWrapFlags(FlagNW)
                                       SCEV::FlagAnyWrap));
    BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
    BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
    BasicBlock::iterator NewInsertPt =
      llvm::next(BasicBlock::iterator(cast<Instruction>(V)));
    while (isa<PHINode>(NewInsertPt) || isa<DbgInfoIntrinsic>(NewInsertPt) ||
           isa<LandingPadInst>(NewInsertPt))
      ++NewInsertPt;
    V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), 0,
                      NewInsertPt);
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);
    return V;
  }

  // {X,+,F} --> X + {0,+,F}
  if (!S->getStart()->isZero()) {
    SmallVector<const SCEV *, 4> NewOps(S->op_begin(), S->op_end());
    NewOps[0] = SE.getConstant(Ty, 0);
    // FIXME: can use S->getNoWrapFlags()
    const SCEV *Rest = SE.getAddRecExpr(NewOps, L, SCEV::FlagAnyWrap);

    // Turn things like ptrtoint+arithmetic+inttoptr into GEP. See the
    // comments on expandAddToGEP for details.
    const SCEV *Base = S->getStart();
    const SCEV *RestArray[1] = { Rest };
    // Dig into the expression to find the pointer base for a GEP.
    ExposePointerBase(Base, RestArray[0], SE);
    // If we found a pointer, expand the AddRec with a GEP.
    if (PointerType *PTy = dyn_cast<PointerType>(Base->getType())) {
      // Make sure the Base isn't something exotic, such as a multiplied
      // or divided pointer value. In those cases, the result type isn't
      // actually a pointer type.
      if (!isa<SCEVMulExpr>(Base) && !isa<SCEVUDivExpr>(Base)) {
        Value *StartV = expand(Base);
        assert(StartV->getType() == PTy && "Pointer type mismatch for GEP!");
        return expandAddToGEP(RestArray, RestArray+1, PTy, Ty, StartV);
      }
    }

    // Just do a normal add. Pre-expand the operands to suppress folding.
    return expand(SE.getAddExpr(SE.getUnknown(expand(S->getStart())),
                                SE.getUnknown(expand(Rest))));
  }

  // If we don't yet have a canonical IV, create one.
  if (!CanonicalIV) {
    // Create and insert the PHI node for the induction variable in the
    // specified loop.
    BasicBlock *Header = L->getHeader();
    pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
    CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
                                  Header->begin());
    rememberInstruction(CanonicalIV);

    Constant *One = ConstantInt::get(Ty, 1);
    for (pred_iterator HPI = HPB; HPI != HPE; ++HPI) {
      BasicBlock *HP = *HPI;
      if (L->contains(HP)) {
        // Insert a unit add instruction right before the terminator
        // corresponding to the back-edge.
        Instruction *Add = BinaryOperator::CreateAdd(CanonicalIV, One,
                                                     "indvar.next",
                                                     HP->getTerminator());
        Add->setDebugLoc(HP->getTerminator()->getDebugLoc());
        rememberInstruction(Add);
        CanonicalIV->addIncoming(Add, HP);
      } else {
        CanonicalIV->addIncoming(Constant::getNullValue(Ty), HP);
      }
    }
  }

  // {0,+,1} --> Insert a canonical induction variable into the loop!
  if (S->isAffine() && S->getOperand(1)->isOne()) {
    assert(Ty == SE.getEffectiveSCEVType(CanonicalIV->getType()) &&
           "IVs with types different from the canonical IV should "
           "already have been handled!");
    return CanonicalIV;
  }

  // {0,+,F} --> {0,+,1} * F

  // If this is a simple linear addrec, emit it now as a special case.
  if (S->isAffine())    // {0,+,F} --> i*F
    return
      expand(SE.getTruncateOrNoop(
        SE.getMulExpr(SE.getUnknown(CanonicalIV),
                      SE.getNoopOrAnyExtend(S->getOperand(1),
                                            CanonicalIV->getType())),
        Ty));

  // If this is a chain of recurrences, turn it into a closed form, using the
  // folders, then expandCodeFor the closed form. This allows the folders to
  // simplify the expression without having to build a bunch of special code
  // into this folder.
  const SCEV *IH = SE.getUnknown(CanonicalIV);   // Get I as a "symbolic" SCEV.

  // Promote S up to the canonical IV type, if the cast is foldable.
  const SCEV *NewS = S;
  const SCEV *Ext = SE.getNoopOrAnyExtend(S, CanonicalIV->getType());
  if (isa<SCEVAddRecExpr>(Ext))
    NewS = Ext;

  const SCEV *V = cast<SCEVAddRecExpr>(NewS)->evaluateAtIteration(IH, SE);
  //cerr << "Evaluated: " << *this << "\n     to: " << *V << "\n";

  // Truncate the result down to the original type, if needed.
  const SCEV *T = SE.getTruncateOrNoop(V, Ty);
  return expand(T);
}
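// Worked example (hypothetical values): with canonical IV i, the quadratic
// addrec {0,+,1,+,1} has the closed form i + i*(i-1)/2, which
// evaluateAtIteration produces symbolically; the result is then expanded as
// ordinary arithmetic and truncated back to the original type if needed.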
Value *SCEVExpander::visitTruncateExpr(const SCEVTruncateExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateTrunc(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitZeroExtendExpr(const SCEVZeroExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateZExt(V, Ty);
  rememberInstruction(I);
  return I;
}

Value *SCEVExpander::visitSignExtendExpr(const SCEVSignExtendExpr *S) {
  Type *Ty = SE.getEffectiveSCEVType(S->getType());
  Value *V = expandCodeFor(S->getOperand(),
                           SE.getEffectiveSCEVType(S->getOperand()->getType()));
  Value *I = Builder.CreateSExt(V, Ty);
  rememberInstruction(I);
  return I;
}
Value *SCEVExpander::visitSMaxExpr(const SCEVSMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpSGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "smax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}

Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
  Value *LHS = expand(S->getOperand(S->getNumOperands()-1));
  Type *Ty = LHS->getType();
  for (int i = S->getNumOperands()-2; i >= 0; --i) {
    // In the case of mixed integer and pointer types, do the
    // rest of the comparisons as integer.
    if (S->getOperand(i)->getType() != Ty) {
      Ty = SE.getEffectiveSCEVType(Ty);
      LHS = InsertNoopCastOfTo(LHS, Ty);
    }
    Value *RHS = expandCodeFor(S->getOperand(i), Ty);
    Value *ICmp = Builder.CreateICmpUGT(LHS, RHS);
    rememberInstruction(ICmp);
    Value *Sel = Builder.CreateSelect(ICmp, LHS, RHS, "umax");
    rememberInstruction(Sel);
    LHS = Sel;
  }
  // In the case of mixed integer and pointer types, cast the
  // final result back to the pointer type.
  if (LHS->getType() != S->getType())
    LHS = InsertNoopCastOfTo(LHS, S->getType());
  return LHS;
}
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                   Instruction *IP) {
  Builder.SetInsertPoint(IP->getParent(), IP);
  return expandCodeFor(SH, Ty);
}

Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
  // Expand the code for this SCEV.
  Value *V = expand(SH);
  if (Ty) {
    assert(SE.getTypeSizeInBits(Ty) == SE.getTypeSizeInBits(SH->getType()) &&
           "non-trivial casts should be done with the SCEVs directly!");
    V = InsertNoopCastOfTo(V, Ty);
  }
  return V;
}
Value *SCEVExpander::expand(const SCEV *S) {
  // Compute an insertion point for this SCEV object. Hoist the instructions
  // as far out in the loop nest as possible.
  Instruction *InsertPt = Builder.GetInsertPoint();
  for (Loop *L = SE.LI->getLoopFor(Builder.GetInsertBlock()); ;
       L = L->getParentLoop())
    if (SE.isLoopInvariant(S, L)) {
      if (!L) break;
      if (BasicBlock *Preheader = L->getLoopPreheader())
        InsertPt = Preheader->getTerminator();
      else {
        // LSR sets the insertion point for AddRec start/step values to the
        // block start to simplify value reuse, even though it's an invalid
        // position. SCEVExpander must correct for this in all cases.
        InsertPt = L->getHeader()->getFirstInsertionPt();
      }
    } else {
      // If the SCEV is computable at this level, insert it into the header
      // after the PHIs (and after any other instructions that we've inserted
      // there) so that it is guaranteed to dominate any user inside the loop.
      if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
        InsertPt = L->getHeader()->getFirstInsertionPt();
      while (InsertPt != Builder.GetInsertPoint()
             && (isInsertedInstruction(InsertPt)
                 || isa<DbgInfoIntrinsic>(InsertPt))) {
        InsertPt = llvm::next(BasicBlock::iterator(InsertPt));
      }
      break;
    }

  // Check to see if we already expanded this here.
  std::map<std::pair<const SCEV *, Instruction *>,
           AssertingVH<Value> >::iterator I =
    InsertedExpressions.find(std::make_pair(S, InsertPt));
  if (I != InsertedExpressions.end())
    return I->second;

  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);

  // Expand the expression into instructions.
  Value *V = visit(S);

  // Remember the expanded value for this SCEV at this location.
  //
  // This is independent of PostIncLoops. The mapped value simply materializes
  // the expression at this insertion point. If the mapped value happened to be
  // a postinc expansion, it could be reused by a non postinc user, but only if
  // its insertion point was already at the head of the loop.
  InsertedExpressions[std::make_pair(S, InsertPt)] = V;

  restoreInsertPoint(SaveInsertBB, SaveInsertPt);
  return V;
}
void SCEVExpander::rememberInstruction(Value *I) {
  if (!PostIncLoops.empty())
    InsertedPostIncValues.insert(I);
  else
    InsertedValues.insert(I);
}

void SCEVExpander::restoreInsertPoint(BasicBlock *BB, BasicBlock::iterator I) {
  Builder.SetInsertPoint(BB, I);
}
/// getOrInsertCanonicalInductionVariable - This method returns the
/// canonical induction variable of the specified type for the specified
/// loop (inserting one if there is none). A canonical induction variable
/// starts at zero and steps by one on each iteration.
PHINode *
SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
                                                    Type *Ty) {
  assert(Ty->isIntegerTy() && "Can only insert integer induction variables!");

  // Build a SCEV for {0,+,1}<L>.
  // Conservatively use FlagAnyWrap for now.
  const SCEV *H = SE.getAddRecExpr(SE.getConstant(Ty, 0),
                                   SE.getConstant(Ty, 1), L, SCEV::FlagAnyWrap);

  // Emit code for it.
  BasicBlock *SaveInsertBB = Builder.GetInsertBlock();
  BasicBlock::iterator SaveInsertPt = Builder.GetInsertPoint();
  PHINode *V = cast<PHINode>(expandCodeFor(H, 0, L->getHeader()->begin()));
  if (SaveInsertBB)
    restoreInsertPoint(SaveInsertBB, SaveInsertPt);

  return V;
}
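// Illustrative result (hypothetical IR): for a loop with preheader %ph and
// latch %latch, expanding {0,+,1}<L> produces
//   %indvar = phi i64 [ 0, %ph ], [ %indvar.next, %latch ]
//   %indvar.next = add i64 %indvar, 1
// with the increment inserted before the latch terminator.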
/// Sort values by integer width for replaceCongruentIVs.
static bool width_descending(Value *lhs, Value *rhs) {
  // Put pointers at the back and make sure pointer < pointer = false.
  if (!lhs->getType()->isIntegerTy() || !rhs->getType()->isIntegerTy())
    return rhs->getType()->isIntegerTy() && !lhs->getType()->isIntegerTy();
  return rhs->getType()->getPrimitiveSizeInBits()
    < lhs->getType()->getPrimitiveSizeInBits();
}
/// replaceCongruentIVs - Check for congruent phis in this loop header and
/// replace them with their most canonical representative. Return the number of
/// phis eliminated.
///
/// This does not depend on any SCEVExpander state but should be used in
/// the same context that SCEVExpander is used.
unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
                                           SmallVectorImpl<WeakVH> &DeadInsts,
                                           const TargetLowering *TLI) {
  // Find integer phis in order of increasing width.
  SmallVector<PHINode*, 8> Phis;
  for (BasicBlock::iterator I = L->getHeader()->begin();
       PHINode *Phi = dyn_cast<PHINode>(I); ++I) {
    Phis.push_back(Phi);
  }
  if (TLI)
    std::sort(Phis.begin(), Phis.end(), width_descending);

  unsigned NumElim = 0;
  DenseMap<const SCEV *, PHINode *> ExprToIVMap;
  // Process phis from wide to narrow. Map wide phis to their truncation
  // so narrow phis can reuse them.
  for (SmallVectorImpl<PHINode*>::const_iterator PIter = Phis.begin(),
         PEnd = Phis.end(); PIter != PEnd; ++PIter) {
    PHINode *Phi = *PIter;

    if (!SE.isSCEVable(Phi->getType()))
      continue;

    PHINode *&OrigPhiRef = ExprToIVMap[SE.getSCEV(Phi)];
    if (!OrigPhiRef) {
      OrigPhiRef = Phi;
      if (Phi->getType()->isIntegerTy() && TLI
          && TLI->isTruncateFree(Phi->getType(), Phis.back()->getType())) {
        // This phi can be freely truncated to the narrowest phi type. Map the
        // truncated expression to it so it will be reused for narrow types.
        const SCEV *TruncExpr =
          SE.getTruncateExpr(SE.getSCEV(Phi), Phis.back()->getType());
        ExprToIVMap[TruncExpr] = Phi;
      }
      continue;
    }
    // Replacing a pointer phi with an integer phi or vice-versa doesn't make
    // sense.
    if (OrigPhiRef->getType()->isPointerTy() != Phi->getType()->isPointerTy())
      continue;

    if (BasicBlock *LatchBlock = L->getLoopLatch()) {
      Instruction *OrigInc =
        cast<Instruction>(OrigPhiRef->getIncomingValueForBlock(LatchBlock));
      Instruction *IsomorphicInc =
        cast<Instruction>(Phi->getIncomingValueForBlock(LatchBlock));

      // If this phi has the same width but is more canonical, replace the
      // original with it. As part of the "more canonical" determination,
      // respect a prior decision to use an IV chain.
      if (OrigPhiRef->getType() == Phi->getType()
          && !(ChainedPhis.count(Phi)
               || isExpandedAddRecExprPHI(OrigPhiRef, OrigInc, L))
          && (ChainedPhis.count(Phi)
              || isExpandedAddRecExprPHI(Phi, IsomorphicInc, L))) {
        std::swap(OrigPhiRef, Phi);
        std::swap(OrigInc, IsomorphicInc);
      }
      // Replacing the congruent phi is sufficient because acyclic redundancy
      // elimination, CSE/GVN, should handle the rest. However, once SCEV proves
      // that a phi is congruent, it's often the head of an IV user cycle that
      // is isomorphic with the original phi. It's worth eagerly cleaning up the
      // common case of a single IV increment so that DeleteDeadPHIs can remove
      // cycles that had postinc uses.
      const SCEV *TruncExpr = SE.getTruncateOrNoop(SE.getSCEV(OrigInc),
                                                   IsomorphicInc->getType());
      if (OrigInc != IsomorphicInc
          && TruncExpr == SE.getSCEV(IsomorphicInc)
          && ((isa<PHINode>(OrigInc) && isa<PHINode>(IsomorphicInc))
              || hoistIVInc(OrigInc, IsomorphicInc))) {
        DEBUG_WITH_TYPE(DebugType, dbgs()
                        << "INDVARS: Eliminated congruent iv.inc: "
                        << *IsomorphicInc << '\n');
        Value *NewInc = OrigInc;
        if (OrigInc->getType() != IsomorphicInc->getType()) {
          Instruction *IP = isa<PHINode>(OrigInc)
            ? (Instruction*)L->getHeader()->getFirstInsertionPt()
            : OrigInc->getNextNode();
          IRBuilder<> Builder(IP);
          Builder.SetCurrentDebugLocation(IsomorphicInc->getDebugLoc());
          NewInc = Builder.
            CreateTruncOrBitCast(OrigInc, IsomorphicInc->getType(), IVName);
        }
        IsomorphicInc->replaceAllUsesWith(NewInc);
        DeadInsts.push_back(IsomorphicInc);
      }
    }
    DEBUG_WITH_TYPE(DebugType, dbgs()
                    << "INDVARS: Eliminated congruent iv: " << *Phi << '\n');
    ++NumElim;
    Value *NewIV = OrigPhiRef;
    if (OrigPhiRef->getType() != Phi->getType()) {
      IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
      Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
      NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
    }
    Phi->replaceAllUsesWith(NewIV);
    DeadInsts.push_back(Phi);
  }
  return NumElim;
}
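// Illustrative sketch (hypothetical values): if a loop header contains two
// PHIs %i and %j whose SCEVs are both {0,+,1}<L>, one is chosen as the
// canonical representative; the other PHI (and, when it is isomorphic, its
// single increment) is replaced via replaceAllUsesWith and queued in
// DeadInsts for cleanup by the caller.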