//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
unsigned InlineCostAnalyzer::FunctionInfo::
CountCodeReductionForConstant(Value *V) {
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    User *U = *UI;
    if (isa<BranchInst>(U) || isa<SwitchInst>(U)) {
      // We will be able to eliminate all but one of the successors.
      const TerminatorInst &TI = cast<TerminatorInst>(*U);
      const unsigned NumSucc = TI.getNumSuccessors();
      unsigned Instrs = 0;
      for (unsigned I = 0; I != NumSucc; ++I)
        Instrs += Metrics.NumBBInsts[TI.getSuccessor(I)];
      // We don't know which blocks will be eliminated, so use the average size.
      Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
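      // (For example, folding a switch with four successors totaling 40
      // instructions credits InstrCost*40*3/4: on average, three quarters of
      // the successor code goes away once only one successor survives.)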
    } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
      // Turning an indirect call into a direct call is a BIG win
      if (CI->getCalledValue() == V)
        Reduction += InlineConstants::IndirectCallBonus;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      // Turning an indirect call into a direct call is a BIG win
      if (II->getCalledValue() == V)
        Reduction += InlineConstants::IndirectCallBonus;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(*U);

      // We can't constant propagate instructions which have effects or
      // read memory.
      //
      // FIXME: It would be nice to capture the fact that a load from a
      // pointer-to-constant-global is actually a *really* good thing to zap.
      // Unfortunately, we don't know the pointer that may get propagated here,
      // so we can't make this decision.
      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
          isa<AllocaInst>(Inst))
        continue;

      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += InlineConstants::InstrCost;

        // And any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }
  }

  return Reduction;
}

// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
//
unsigned InlineCostAnalyzer::FunctionInfo::
CountCodeReductionForAlloca(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;  // Not a pointer
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += InlineConstants::InstrCost;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      if (GEP->hasAllConstantIndices())
        Reduction += CountCodeReductionForAlloca(GEP);
    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
      // Track pointer through bitcasts.
      Reduction += CountCodeReductionForAlloca(BCI);
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}
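
// (For instance, an argument whose only uses are a load, a store, and a
// constant-index GEP that itself feeds two loads is credited InstrCost for
// each of the four memory operations via the recursion above.)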

/// callIsSmall - If a call is likely to lower to a single target instruction,
/// or is otherwise deemed small, return true.
/// TODO: Perhaps calls like memcpy, strcpy, etc?
bool llvm::callIsSmall(const Function *F) {
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
  ++NumBlocks;
  unsigned NumInstsBeforeThisBB = NumInsts;
  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isa<PHINode>(II)) continue;           // PHI nodes don't count.

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;  // Debug intrinsics don't count as size.

      ImmutableCallSite CS(cast<Instruction>(II));

      // If this function contains a call to setjmp or _setjmp, never inline
      // it.  This is a hack because we depend on the user marking their local
      // variables as volatile if they are live across a setjmp call, and they
      // probably won't do this in callers.
      if (const Function *F = CS.getCalledFunction()) {
        if (F->isDeclaration() &&
            (F->getName() == "setjmp" || F->getName() == "_setjmp"))
          callsSetJmp = true;

        // If this call is to the function itself, then the function is
        // recursive.  Inlining it into other functions is a bad idea, because
        // this is basically just a form of loop peeling, and our metrics
        // aren't useful for that case.
        if (F == BB->getParent())
          isRecursive = true;
      }

      if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();

        // We don't want inline asm to count as a call - that would prevent loop
        // unrolling. The argument setup cost is still real, though.
        if (!isa<InlineAsm>(CS.getCalledValue()))
          ++NumCalls;
      }
    }

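    // (Intrinsics and the small library calls recognized by callIsSmall are
    // deliberately exempted from both the argument setup cost and the call
    // count, so they never feed the per-call penalty in getInlineCost.)
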
    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
      // Noop casts, including ptr <-> int, don't count.
      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
          isa<PtrToIntInst>(CI))
        continue;
      // Result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions). These are usually
      // nop on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        continue;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
      // If a GEP has all constant indices, it will probably be folded with
      // a load/store.
      if (GEPI->hasAllConstantIndices())
        continue;
    }

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr.  This is
  // incorrect because all the blockaddresses (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function, which is extremely undefined behavior.
  if (isa<IndirectBrInst>(BB->getTerminator()))
    containsIndirectBr = true;
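
  // (Note: the per-block instruction counts recorded below are what
  // CountCodeReductionForConstant consults when estimating how much code a
  // constant-folded branch or switch would eliminate.)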

  // Remember NumInsts for this BB.
  NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F) {
  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB);
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
  Metrics.analyzeFunction(F);

  // A function with exactly one return has it removed during the inlining
  // process (see InlineFunction), so don't count it.
  // FIXME: This knowledge should really be encoded outside of FunctionInfo.
  if (Metrics.NumRets==1)
    --Metrics.NumInsts;

  // Don't bother calculating argument weights if we are never going to inline
  // the function anyway.
  if (NeverInline())
    return;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  ArgumentWeights.reserve(F->arg_size());
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}

/// NeverInline - returns true if the function should never be inlined into
/// any caller.
bool InlineCostAnalyzer::FunctionInfo::NeverInline() {
  return (Metrics.callsSetJmp || Metrics.isRecursive ||
          Metrics.containsIndirectBr);
}

// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
//
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               SmallPtrSet<const Function*, 16> &NeverInline) {
  return getInlineCost(CS, CS.getCalledFunction(), NeverInline);
}

InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               Function *Callee,
                               SmallPtrSet<const Function*, 16> &NeverInline) {
  Instruction *TheCall = CS.getInstruction();
  Function *Caller = TheCall->getParent()->getParent();
  bool isDirectCall = CS.getCalledFunction() == Callee;

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee) ||
      CS.isNoInline())
    return llvm::InlineCost::getNever();

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  //
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  //
  if (Callee->hasLocalLinkage() && Callee->hasOneUse() && isDirectCall)
    InlineCost += InlineConstants::LastCallToStaticBonus;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += InlineConstants::ColdccPenalty;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += InlineConstants::NoreturnPenalty;

  // Get information about the callee.
  FunctionInfo *CalleeFI = &CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI->Metrics.NumBlocks == 0)
    CalleeFI->analyzeFunction(Callee);

  // If we should never inline this, return a huge cost.
  if (CalleeFI->NeverInline())
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI->NeverInline. Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for. This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();

  if (CalleeFI->Metrics.usesDynamicAlloca) {
    // Get information about the caller.
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0) {
      CallerFI.analyzeFunction(Caller);

      // Recompute the CalleeFI pointer, getting Caller could have invalidated
      // it.
      CalleeFI = &CachedFunctionInfo[Callee];
    }
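
    // (CachedFunctionInfo is a DenseMap, so inserting the Caller entry above
    // may rehash and move existing entries; the CalleeFI pointer therefore
    // has to be refetched rather than reused.)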

    // Don't inline a callee with dynamic alloca into a caller without them.
    // Functions containing dynamic alloca's are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  //
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  Measurements show that each argument costs about the same as an
    // instruction.
    InlineCost -= InlineConstants::InstrCost;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion,
    // and scalarization), so encourage the inlining of the function.
    //
    if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI->ArgumentWeights.size())
        InlineCost -= CalleeFI->ArgumentWeights[ArgNo].AllocaWeight;

      // If this is a constant being passed into the function, use the argument
      // weights calculated for the callee to determine how much will be folded
      // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI->ArgumentWeights.size())
        InlineCost -= CalleeFI->ArgumentWeights[ArgNo].ConstantWeight;
    }
  }
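
  // (For example, passing a constant as argument 0 subtracts
  // ArgumentWeights[0].ConstantWeight, the code-size reduction that
  // CountCodeReductionForConstant predicted when the callee was analyzed.)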

  // Now that we have considered all of the factors that make the call site
  // more likely to be inlined, look at factors that make us not want to
  // inline it.

  // Calls usually take a long time, so they make the inlining gain smaller.
  InlineCost += CalleeFI->Metrics.NumCalls * InlineConstants::CallPenalty;

  // Look at the size of the callee.  Each instruction counts as 5.
  InlineCost += CalleeFI->Metrics.NumInsts*InlineConstants::InstrCost;
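
  // (So a callee with 40 instructions and 2 counted calls contributes
  // 40*InstrCost + 2*CallPenalty here; with each instruction counting as 5,
  // that is a base size cost of 200 before the bonuses above are applied.)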

  return llvm::InlineCost::get(InlineCost);
}

// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Get information about the callee.
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  float Factor = 1.0f;
  // Single BB functions are often written to be inlined.
  if (CalleeFI.Metrics.NumBlocks == 1)
    Factor += 0.5f;

  // Be more aggressive if the function contains a good chunk (if it makes up
  // at least 10% of the instructions) of vector instructions.
  if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/2)
    Factor += 2.0f;
  else if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/10)
    Factor += 1.5f;

  return Factor;
}
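
// (The inliner multiplies its base threshold by this factor, so a factor of
// 1.5 lets a call site with a 50% higher cost still be inlined.)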

/// growCachedCostInfo - update the cached cost info for Caller after Callee has
/// been inlined.  If Callee is null, it means a dead call has been eliminated.
void
InlineCostAnalyzer::growCachedCostInfo(Function *Caller, Function *Callee) {
  CodeMetrics &CallerMetrics = CachedFunctionInfo[Caller].Metrics;

  // For small functions we prefer to recalculate the cost for better accuracy.
  if (CallerMetrics.NumBlocks < 10 || CallerMetrics.NumInsts < 1000) {
    resetCachedCostInfo(Caller);
    return;
  }

  // For large functions, we can save a lot of computation time by skipping
  // recalculations.
  if (CallerMetrics.NumCalls > 0)
    --CallerMetrics.NumCalls;

  if (Callee == 0) return;

  CodeMetrics &CalleeMetrics = CachedFunctionInfo[Callee].Metrics;

  // If we don't have metrics for the callee, don't recalculate them just to
  // update an approximation in the caller.  Instead, just recalculate the
  // caller info from scratch.
  if (CalleeMetrics.NumBlocks == 0) {
    resetCachedCostInfo(Caller);
    return;
  }

  // Since CalleeMetrics were already calculated, we know that the CallerMetrics
  // reference isn't invalidated: both were in the DenseMap.
  CallerMetrics.usesDynamicAlloca |= CalleeMetrics.usesDynamicAlloca;

  // FIXME: If any of these three are true for the callee, the callee was
  // not inlined into the caller, so I think they're redundant here.
  CallerMetrics.callsSetJmp |= CalleeMetrics.callsSetJmp;
  CallerMetrics.isRecursive |= CalleeMetrics.isRecursive;
  CallerMetrics.containsIndirectBr |= CalleeMetrics.containsIndirectBr;

  CallerMetrics.NumInsts += CalleeMetrics.NumInsts;
  CallerMetrics.NumBlocks += CalleeMetrics.NumBlocks;
  CallerMetrics.NumCalls += CalleeMetrics.NumCalls;
  CallerMetrics.NumVectorInsts += CalleeMetrics.NumVectorInsts;
  CallerMetrics.NumRets += CalleeMetrics.NumRets;

  // analyzeBasicBlock counts each function argument as an inst.
  if (CallerMetrics.NumInsts >= Callee->arg_size())
    CallerMetrics.NumInsts -= Callee->arg_size();
  else
    CallerMetrics.NumInsts = 0;

  // We are not updating the argument weights.  We have already determined that
  // Caller is a fairly large function, so we accept the loss of precision.
}

/// clear - empty the cache of inline costs
void InlineCostAnalyzer::clear() {
  CachedFunctionInfo.clear();
}