//===- AddrModeMatcher.cpp - Addressing mode matching facility --*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the target addressing mode matcher class.
//
//===----------------------------------------------------------------------===//
14
#include "llvm/Transforms/Utils/AddrModeMatcher.h"
15
#include "llvm/DerivedTypes.h"
16
#include "llvm/GlobalValue.h"
17
#include "llvm/Instruction.h"
18
#include "llvm/Assembly/Writer.h"
19
#include "llvm/Target/TargetData.h"
20
#include "llvm/Support/Debug.h"
21
#include "llvm/Support/GetElementPtrTypeIterator.h"
22
#include "llvm/Support/PatternMatch.h"
23
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::PatternMatch;

void ExtAddrMode::print(raw_ostream &OS) const {
  bool NeedPlus = false;
  OS << "[";
  if (BaseGV) {
    OS << (NeedPlus ? " + " : "")
       << "GV:";
    WriteAsOperand(OS, BaseGV, /*PrintType=*/false);
    NeedPlus = true;
  }

  if (BaseOffs)
    OS << (NeedPlus ? " + " : "") << BaseOffs, NeedPlus = true;

  if (BaseReg) {
    OS << (NeedPlus ? " + " : "")
       << "Base:";
    WriteAsOperand(OS, BaseReg, /*PrintType=*/false);
    NeedPlus = true;
  }
  if (Scale) {
    OS << (NeedPlus ? " + " : "")
       << Scale << "*";
    WriteAsOperand(OS, ScaledReg, /*PrintType=*/false);
    NeedPlus = true;
  }

  OS << ']';
}

void ExtAddrMode::dump() const {
  print(dbgs());
  dbgs() << '\n';
}
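
// Example (illustrative values): an ExtAddrMode with BaseGV @G, BaseOffs 4,
// BaseReg %base, and ScaledReg %idx at Scale 2 prints as something like
//
//     [GV:@G + 4 + Base:%base + 2*%idx]
//
// The exact spelling of each operand comes from WriteAsOperand.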

/// MatchScaledValue - Try adding ScaleReg*Scale to the current addressing mode.
/// Return true and update AddrMode if this addr mode is legal for the target,
/// false if not.
bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
                                             unsigned Depth) {
  // If Scale is 1, then this is the same as adding ScaleReg to the addressing
  // mode.  Just process that directly.
  if (Scale == 1)
    return MatchAddr(ScaleReg, Depth);

  // If the scale is 0, it takes nothing to add this.
  if (Scale == 0)
    return true;

  // If we already have a scale of this value, we can add to it, otherwise, we
  // need an available scale field.
  if (AddrMode.Scale != 0 && AddrMode.ScaledReg != ScaleReg)
    return false;

  ExtAddrMode TestAddrMode = AddrMode;

  // Add scale to turn X*4+X*3 -> X*7.  This could also do things like
  // [A+B + A*7] -> [B+A*8].
  TestAddrMode.Scale += Scale;
  TestAddrMode.ScaledReg = ScaleReg;

  // If the new address isn't legal, bail out.
  if (!TLI.isLegalAddressingMode(TestAddrMode, AccessTy))
    return false;

  // It was legal, so commit it.
  AddrMode = TestAddrMode;

  // Okay, we decided that we can add ScaleReg+Scale to AddrMode.  Check now
  // to see if ScaleReg is actually X+C.  If so, we can turn this into adding
  // X*Scale + C*Scale to addr mode.
  ConstantInt *CI = 0; Value *AddLHS = 0;
  if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
      match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
    TestAddrMode.ScaledReg = AddLHS;
    TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;

    // If this addressing mode is legal, commit it and remember that we folded
    // this instruction.
    if (TLI.isLegalAddressingMode(TestAddrMode, AccessTy)) {
      AddrModeInsts.push_back(cast<Instruction>(ScaleReg));
      AddrMode = TestAddrMode;
      return true;
    }
  }

  // Otherwise, not (x+c)*scale, just return what we have.
  return true;
}
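
// Example (illustrative IR): when matching "%sum = add i32 %x, 4" as a scaled
// value with Scale = 2, the X+C check above can refine the mode to
// ScaledReg = %x, Scale = 2, BaseOffs += 8, i.e. [%x*2 + 8] rather than
// [(%x+4)*2], provided the target reports the refined mode as legal.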

/// MightBeFoldableInst - This is a little filter, which returns true if an
/// addressing computation involving I might be folded into a load/store
/// accessing it.  This doesn't need to be perfect, but needs to accept at least
/// the set of instructions that MatchOperationAddr can.
static bool MightBeFoldableInst(Instruction *I) {
  switch (I->getOpcode()) {
  case Instruction::BitCast:
    // Don't touch identity bitcasts.
    if (I->getType() == I->getOperand(0)->getType())
      return false;
    return I->getType()->isPointerTy() || I->getType()->isIntegerTy();
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return true;
  case Instruction::IntToPtr:
    // We know the input is intptr_t, so this is foldable.
    return true;
  case Instruction::Add:
    return true;
  case Instruction::Mul:
  case Instruction::Shl:
    // Can only handle X*C and X << C.
    return isa<ConstantInt>(I->getOperand(1));
  case Instruction::GetElementPtr:
    return true;
  default:
    return false;
  }
}

/// MatchOperationAddr - Given an instruction or constant expr, see if we can
/// fold the operation into the addressing mode.  If so, update the addressing
/// mode and return true, otherwise return false without modifying AddrMode.
bool AddressingModeMatcher::MatchOperationAddr(User *AddrInst, unsigned Opcode,
                                               unsigned Depth) {
  // Avoid exponential behavior on extremely deep expression trees.
  if (Depth >= 5) return false;

  switch (Opcode) {
  case Instruction::PtrToInt:
    // PtrToInt is always a noop, as we know that the int type is pointer sized.
    return MatchAddr(AddrInst->getOperand(0), Depth);
  case Instruction::IntToPtr:
    // This inttoptr is a no-op if the integer type is pointer sized.
    if (TLI.getValueType(AddrInst->getOperand(0)->getType()) ==
        TLI.getPointerTy())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::BitCast:
    // BitCast is always a noop, and we can handle it as long as it is
    // int->int or pointer->pointer (we don't want int<->fp or something).
    if ((AddrInst->getOperand(0)->getType()->isPointerTy() ||
         AddrInst->getOperand(0)->getType()->isIntegerTy()) &&
        // Don't touch identity bitcasts.  These were probably put here by LSR,
        // and we don't want to mess around with them.  Assume it knows what it
        // is doing.
        AddrInst->getOperand(0)->getType() != AddrInst->getType())
      return MatchAddr(AddrInst->getOperand(0), Depth);
    return false;
  case Instruction::Add: {
    // Check to see if we can merge in the RHS then the LHS.  If so, we win.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();
    if (MatchAddr(AddrInst->getOperand(1), Depth+1) &&
        MatchAddr(AddrInst->getOperand(0), Depth+1))
      return true;

    // Restore the old addr mode info.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);

    // Otherwise this was over-aggressive.  Try merging in the LHS then the RHS.
    if (MatchAddr(AddrInst->getOperand(0), Depth+1) &&
        MatchAddr(AddrInst->getOperand(1), Depth+1))
      return true;

    // Otherwise we definitely can't merge the ADD in.
    AddrMode = BackupAddrMode;
    AddrModeInsts.resize(OldSize);
    break;
  }
  //case Instruction::Or:
  // TODO: We can handle "Or Val, Imm" iff this OR is equivalent to an ADD.
  //break;
  case Instruction::Mul:
  case Instruction::Shl: {
    // Can only handle X*C and X << C.
    ConstantInt *RHS = dyn_cast<ConstantInt>(AddrInst->getOperand(1));
    if (!RHS) return false;
    int64_t Scale = RHS->getSExtValue();
    if (Opcode == Instruction::Shl)
      Scale = 1LL << Scale;   // A shift by C scales by 2^C.

    return MatchScaledValue(AddrInst->getOperand(0), Scale, Depth);
  }
  case Instruction::GetElementPtr: {
    // Scan the GEP, checking that it contains constant offsets and at most
    // one variable offset.
    int VariableOperand = -1;
    unsigned VariableScale = 0;

    int64_t ConstantOffset = 0;
    const TargetData *TD = TLI.getTargetData();
    gep_type_iterator GTI = gep_type_begin(AddrInst);
    for (unsigned i = 1, e = AddrInst->getNumOperands(); i != e; ++i, ++GTI) {
      if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = TD->getStructLayout(STy);
        unsigned Idx =
          cast<ConstantInt>(AddrInst->getOperand(i))->getZExtValue();
        ConstantOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t TypeSize = TD->getTypeAllocSize(GTI.getIndexedType());
        if (ConstantInt *CI = dyn_cast<ConstantInt>(AddrInst->getOperand(i))) {
          ConstantOffset += CI->getSExtValue()*TypeSize;
        } else if (TypeSize) {  // Scales of zero don't do anything.
          // We only allow one variable index at the moment.
          if (VariableOperand != -1)
            return false;

          // Remember the variable index.
          VariableOperand = i;
          VariableScale = TypeSize;
        }
      }
    }

    // A common case is for the GEP to only do a constant offset.  In this case,
    // just add it to the disp field and check validity.
    if (VariableOperand == -1) {
      AddrMode.BaseOffs += ConstantOffset;
      if (ConstantOffset == 0 || TLI.isLegalAddressingMode(AddrMode, AccessTy)){
        // Check to see if we can fold the base pointer in too.
        if (MatchAddr(AddrInst->getOperand(0), Depth+1))
          return true;
      }
      AddrMode.BaseOffs -= ConstantOffset;
      return false;
    }

    // Save the valid addressing mode in case we can't match.
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // See if the scale and offset amount is valid for this target.
    AddrMode.BaseOffs += ConstantOffset;

    // Match the base operand of the GEP.
    if (!MatchAddr(AddrInst->getOperand(0), Depth+1)) {
      // If it couldn't be matched, just stuff the value in a register.
      if (AddrMode.HasBaseReg) {
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
    }

    // Match the remaining variable portion of the GEP.
    if (!MatchScaledValue(AddrInst->getOperand(VariableOperand), VariableScale,
                          Depth)) {
      // If it couldn't be matched, try stuffing the base into a register
      // instead of matching it, and retrying the match of the scale.
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
      if (AddrMode.HasBaseReg)
        return false;
      AddrMode.HasBaseReg = true;
      AddrMode.BaseReg = AddrInst->getOperand(0);
      AddrMode.BaseOffs += ConstantOffset;
      if (!MatchScaledValue(AddrInst->getOperand(VariableOperand),
                            VariableScale, Depth)) {
        // If even that didn't work, bail.
        AddrMode = BackupAddrMode;
        AddrModeInsts.resize(OldSize);
        return false;
      }
    }

    return true;
  }
  }
  return false;
}
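
// Example (illustrative IR, assuming a 32-bit i32 layout): for
//
//     %a = getelementptr { i32, [10 x i32] }* %p, i32 0, i32 1, i32 %i
//
// the GEP case above folds the struct field into ConstantOffset (4) and
// records %i as the single variable index with VariableScale = 4, aiming for
// an addressing mode of the form [%p + 4 + %i*4] if the target allows it.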

/// MatchAddr - If we can, try to add the value of 'Addr' into the current
/// addressing mode.  If Addr can't be added to AddrMode this returns false and
/// leaves AddrMode unmodified.  This assumes that Addr is either a pointer type
/// or intptr_t for the target.
bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Addr)) {
    // Fold in immediates if legal for the target.
    AddrMode.BaseOffs += CI->getSExtValue();
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.BaseOffs -= CI->getSExtValue();
  } else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
    // If this is a global variable, try to fold it into the addressing mode.
    if (AddrMode.BaseGV == 0) {
      AddrMode.BaseGV = GV;
      if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
        return true;
      AddrMode.BaseGV = 0;
    }
  } else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
    ExtAddrMode BackupAddrMode = AddrMode;
    unsigned OldSize = AddrModeInsts.size();

    // Check to see if it is possible to fold this operation.
    if (MatchOperationAddr(I, I->getOpcode(), Depth)) {
      // Okay, it's possible to fold this.  Check to see if it is actually
      // *profitable* to do so.  We use a simple cost model to avoid increasing
      // register pressure too much.
      if (I->hasOneUse() ||
          IsProfitableToFoldIntoAddressingMode(I, BackupAddrMode, AddrMode)) {
        AddrModeInsts.push_back(I);
        return true;
      }

      // It isn't profitable to do this, roll back.
      //cerr << "NOT FOLDING: " << *I;
      AddrMode = BackupAddrMode;
      AddrModeInsts.resize(OldSize);
    }
  } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Addr)) {
    if (MatchOperationAddr(CE, CE->getOpcode(), Depth))
      return true;
  } else if (isa<ConstantPointerNull>(Addr)) {
    // Null pointer gets folded without affecting the addressing mode.
    return true;
  }

  // Worst case, the target should support [reg] addressing modes. :)
  if (!AddrMode.HasBaseReg) {
    AddrMode.HasBaseReg = true;
    AddrMode.BaseReg = Addr;
    // Still check for legality in case the target supports [imm] but not [i+r].
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.HasBaseReg = false;
    AddrMode.BaseReg = 0;
  }

  // If the base register is already taken, see if we can do [r+r].
  if (AddrMode.Scale == 0) {
    AddrMode.Scale = 1;
    AddrMode.ScaledReg = Addr;
    if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
      return true;
    AddrMode.Scale = 0;
    AddrMode.ScaledReg = 0;
  }

  // Couldn't match.
  return false;
}
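
// Example: matching a bare pointer %p against an empty addressing mode takes
// the fallback paths above: first [reg] (BaseReg = %p), and if a base
// register is already occupied, [r + r] (ScaledReg = %p with Scale = 1).
// Whether either form survives depends on isLegalAddressingMode.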

/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  std::vector<InlineAsm::ConstraintInfo>
  Constraints = IA->ParseConstraints();

  unsigned ArgNo = 1;   // The next CallInst operand; operand 0 is the asm callee.
  for (unsigned i = 0, e = Constraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo OpInfo(Constraints[i]);

    // Compute the value type for each operand.
    switch (OpInfo.Type) {
    case InlineAsm::isOutput:
      if (OpInfo.isIndirect)
        OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isInput:
      OpInfo.CallOperandVal = CI->getOperand(ArgNo++);
      break;
    case InlineAsm::isClobber:
      // Nothing to do.
      break;
    }

    // Compute the constraint code and ConstraintType to use.
    TLI.ComputeConstraintToUse(OpInfo, SDValue(),
                             OpInfo.ConstraintType == TargetLowering::C_Memory);

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
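
// Example (illustrative constraints): in a call such as
//
//     call void asm "movl $1, $0", "=*m,r"(i32* %addr, i32 %val)
//
// an "=*m" operand is an indirect memory operand, so an address feeding it
// can still be folded; if OpVal instead shows up under a register constraint
// like "r", the check above rejects the fold.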

/// FindAllMemoryUses - Recursively walk all the uses of I until we find a
/// memory use.  If we find an obviously non-foldable instruction, return true.
/// Add the ultimately found memory instructions to MemoryUses.
static bool FindAllMemoryUses(Instruction *I,
                SmallVectorImpl<std::pair<Instruction*,unsigned> > &MemoryUses,
                              SmallPtrSet<Instruction*, 16> &ConsideredInsts,
                              const TargetLowering &TLI) {
  // If we already considered this instruction, we're done.
  if (!ConsideredInsts.insert(I))
    return false;

  // If this is an obviously unfoldable instruction, bail out.
  if (!MightBeFoldableInst(I))
    return true;

  // Loop over all the uses, recursively processing them.
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
       UI != E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      MemoryUses.push_back(std::make_pair(LI, UI.getOperandNo()));
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(*UI)) {
      if (UI.getOperandNo() == 0) return true; // Storing addr, not into addr.
      MemoryUses.push_back(std::make_pair(SI, UI.getOperandNo()));
      continue;
    }

    if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
      if (IA == 0) return true;

      // If this is a memory operand, we're cool, otherwise bail out.
      if (!IsOperandAMemoryOperand(CI, IA, I, TLI))
        return true;
      continue;
    }

    if (FindAllMemoryUses(cast<Instruction>(*UI), MemoryUses, ConsideredInsts,
                          TLI))
      return true;
  }

  return false;
}

/// ValueAlreadyLiveAtInst - Return true if Val is already known to be live at
/// the use site that we're folding it into.  If so, there is no cost to
/// include it in the addressing mode.  KnownLive1 and KnownLive2 are two values
/// that we know are live at the instruction already.
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
                                                   Value *KnownLive2) {
  // If Val is either of the known-live values, we know it is live!
  if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
    return true;

  // All values other than instructions and arguments (e.g. constants) are live.
  if (!isa<Instruction>(Val) && !isa<Argument>(Val)) return true;

  // If Val is a constant-sized alloca in the entry block, it is live: it is
  // just a reference to the stack/frame pointer, which is live for the whole
  // function.
  if (AllocaInst *AI = dyn_cast<AllocaInst>(Val))
    if (AI->isStaticAlloca())
      return true;

  // Check to see if this value is already used in the memory instruction's
  // block.  If so, it's already live into the block at the very least, so we
  // can reasonably fold it.
  BasicBlock *MemBB = MemoryInst->getParent();
  for (Value::use_iterator UI = Val->use_begin(), E = Val->use_end();
       UI != E; ++UI)
    // We know that uses of arguments and instructions have to be instructions.
    if (cast<Instruction>(*UI)->getParent() == MemBB)
      return true;

  return false;
}
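
// Example: a static alloca, or a value that already has some use in
// MemoryInst's block, is considered free to fold here; a value defined and
// used only in other blocks fails this check and counts as a lifetime
// extension.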

/// IsProfitableToFoldIntoAddressingMode - It is possible for the addressing
/// mode of the machine to fold the specified instruction into a load or store
/// that ultimately uses it.  However, the specified instruction has multiple
/// uses.  Given this, it may actually increase register pressure to fold it
/// into the load.  For example, consider this code:
///
///     X = ...
///     Y = X+1
///     use(Y)   -> nonload/store
///     Z = Y+1
///     load Z
///
/// In this case, Y has multiple uses, and can be folded into the load of Z
/// (yielding load [X+2]).  However, doing this will cause both "X" and "X+1" to
/// be live at the use(Y) line.  If we don't fold Y into load Z, we use one
/// fewer register.  Since Y can't be folded into "use(Y)" we don't increase the
/// number of computations either.
///
/// Note that this (like most of CodeGenPrepare) is just a rough heuristic.  If
/// X was live across 'load Z' for other reasons, we actually *would* want to
/// fold the addressing mode in the Z case.  This would make Y die earlier.
bool AddressingModeMatcher::
IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
                                     ExtAddrMode &AMAfter) {
  if (IgnoreProfitability) return true;

  // AMBefore is the addressing mode before this instruction was folded into it,
  // and AMAfter is the addressing mode after the instruction was folded.  Get
  // the set of registers referenced by AMAfter and subtract out those
  // referenced by AMBefore: this is the set of values which folding in this
  // address extends the lifetime of.
  //
  // Note that there are only two potential values being referenced here,
  // BaseReg and ScaleReg (global addresses are always available, as are any
  // folded immediates).
  Value *BaseReg = AMAfter.BaseReg, *ScaledReg = AMAfter.ScaledReg;

  // If the BaseReg or ScaledReg was referenced by the previous addrmode, their
  // lifetime wasn't extended by adding this instruction.
  if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    BaseReg = 0;
  if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
    ScaledReg = 0;

  // If folding this instruction (and its subexprs) didn't extend any live
  // ranges, we're ok with it.
  if (BaseReg == 0 && ScaledReg == 0)
    return true;

  // If all uses of this instruction are ultimately load/store/inlineasm's,
  // check to see if their addressing modes will include this instruction.  If
  // so, we can fold it into all uses, so it doesn't matter if it has multiple
  // uses.
  SmallVector<std::pair<Instruction*,unsigned>, 16> MemoryUses;
  SmallPtrSet<Instruction*, 16> ConsideredInsts;
  if (FindAllMemoryUses(I, MemoryUses, ConsideredInsts, TLI))
    return false;  // Has a non-memory, non-foldable use!

  // Now that we know that all uses of this instruction are part of a chain of
  // computation involving only operations that could theoretically be folded
  // into a memory use, loop over each of these uses and see if they could
  // *actually* fold the instruction.
  SmallVector<Instruction*, 32> MatchedAddrModeInsts;
  for (unsigned i = 0, e = MemoryUses.size(); i != e; ++i) {
    Instruction *User = MemoryUses[i].first;
    unsigned OpNo = MemoryUses[i].second;

    // Get the access type of this use.  If the use isn't a pointer, we don't
    // know what it accesses.
    Value *Address = User->getOperand(OpNo);
    if (!Address->getType()->isPointerTy())
      return false;
    const Type *AddressAccessTy =
      cast<PointerType>(Address->getType())->getElementType();

    // Do a match against the root of this address, ignoring profitability. This
    // will tell us if the addressing mode for the memory operation will
    // *actually* cover the shared instruction.
    ExtAddrMode Result;
    AddressingModeMatcher Matcher(MatchedAddrModeInsts, TLI, AddressAccessTy,
                                  MemoryInst, Result);
    Matcher.IgnoreProfitability = true;
    bool Success = Matcher.MatchAddr(Address, 0);
    (void)Success; assert(Success && "Couldn't select *anything*?");

    // If the match didn't cover I, then it won't be shared by it.
    if (std::find(MatchedAddrModeInsts.begin(), MatchedAddrModeInsts.end(),
                  I) == MatchedAddrModeInsts.end())
      return false;

    MatchedAddrModeInsts.clear();
  }

  return true;
}