//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallPtrSet.h"

using namespace llvm;

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
unsigned InlineCostAnalyzer::FunctionInfo::
CountCodeReductionForConstant(Value *V) {
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (isa<BranchInst>(*UI) || isa<SwitchInst>(*UI)) {
      // We will be able to eliminate all but one of the successors.
      const TerminatorInst &TI = cast<TerminatorInst>(**UI);
      const unsigned NumSucc = TI.getNumSuccessors();
      unsigned Instrs = 0;
      for (unsigned I = 0; I != NumSucc; ++I)
        Instrs += TI.getSuccessor(I)->size();
      // We don't know which blocks will be eliminated, so use the average size.
      Reduction += InlineConstants::InstrCost*Instrs*(NumSucc-1)/NumSucc;
    } else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win.
      if (CI->getCalledValue() == V)
        Reduction += InlineConstants::IndirectCallBonus;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      // Turning an indirect invoke into a direct invoke is a BIG win.
      if (II->getCalledValue() == V)
        Reduction += InlineConstants::IndirectCallBonus;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(**UI);

      // We can't constant propagate instructions which have effects or
      // read memory.
      //
      // FIXME: It would be nice to capture the fact that a load from a
      // pointer-to-constant-global is actually a *really* good thing to zap.
      // Unfortunately, we don't know the pointer that may get propagated here,
      // so we can't make this decision.
      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
          isa<AllocaInst>(Inst))
        continue;

      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += InlineConstants::InstrCost;

        // ...and any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }

  return Reduction;
}

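// Worked example for CountCodeReductionForConstant (illustrative values, not
// from the original source): suppose V is an i1 argument whose only user is
//     br i1 %V, label %then, label %else
// where %then and %else contain 6 and 4 instructions respectively.  Then
// Instrs = 10, NumSucc = 2, and the branch contributes
// InstrCost * 10 * (2-1) / 2 = 5 * InstrCost: on average we expect to fold
// away about half of the successor code once the branch direction is known.
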
// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
unsigned InlineCostAnalyzer::FunctionInfo::
CountCodeReductionForAlloca(Value *V) {
  if (!V->getType()->isPointerTy()) return 0;  // Not a pointer
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += InlineConstants::InstrCost;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      if (GEP->hasAllConstantIndices())
        Reduction += CountCodeReductionForAlloca(GEP);
    } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(I)) {
      // Track the pointer through bitcasts.
      Reduction += CountCodeReductionForAlloca(BCI);
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}

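// Worked example for CountCodeReductionForAlloca (illustrative IR): if V is a
// pointer argument used only as
//     %f = getelementptr %struct.S* %V, i32 0, i32 1
//     %x = load i32* %f
//     store i32 %y, i32* %f
// then the constant-index GEP recurses into its own users, and the load and
// store each add InstrCost, for a total Reduction of 2 * InstrCost.
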
// callIsSmall - If a call is likely to lower to a single target instruction,
// or is otherwise deemed small, return true.
// TODO: Perhaps calls like memcpy, strcpy, etc?
static bool callIsSmall(const Function *F) {
  if (!F) return false;

  if (F->hasLocalLinkage()) return false;

  if (!F->hasName()) return false;

  StringRef Name = F->getName();

  // These will all likely lower to a single selection DAG node.
  if (Name == "copysign" || Name == "copysignf" ||
      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
      Name == "sin" || Name == "sinf" || Name == "sinl" ||
      Name == "cos" || Name == "cosf" || Name == "cosl" ||
      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl")
    return true;

  // These are all likely to be optimized into something smaller.
  if (Name == "pow" || Name == "powf" || Name == "powl" ||
      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
      Name == "floor" || Name == "floorf" || Name == "ceil" ||
      Name == "round" || Name == "ffs" || Name == "ffsl" ||
      Name == "abs" || Name == "labs" || Name == "llabs")
    return true;

  return false;
}

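// For example, callIsSmall returns true for an external declaration of
// "sqrtf" (it typically lowers to a single FSQRT-style node), but false for
// a function with local linkage even if it happens to share one of these
// names, since a local definition can have an arbitrary body.
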
/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
  ++NumBlocks;

  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isa<PHINode>(II)) continue;           // PHI nodes don't count.

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;  // Debug intrinsics don't count as size.

      CallSite CS = CallSite::get(const_cast<Instruction*>(&*II));

      // If this function contains a call to setjmp or _setjmp, never inline
      // it.  This is a hack because we depend on the user marking their local
      // variables as volatile if they are live across a setjmp call, and they
      // probably won't do this in callers.
      if (Function *F = CS.getCalledFunction())
        if (F->isDeclaration() &&
            (F->getName() == "setjmp" || F->getName() == "_setjmp"))
          NeverInline = true;

      if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
        // Each argument to a call takes on average one instruction to set up.
        NumInsts += CS.arg_size();
        ++NumCalls;
      }
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
      ++NumVectorInsts;

    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
      // Noop casts, including ptr <-> int, don't count.
      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
          isa<PtrToIntInst>(CI))
        continue;
      // The result of a cmp instruction is often extended (to be used by other
      // cmp instructions, logical or return instructions).  These are usually
      // nops on most sane targets.
      if (isa<CmpInst>(CI->getOperand(0)))
        continue;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
      // If a GEP has all constant indices, it will probably be folded with
      // a load/store.
      if (GEPI->hasAllConstantIndices())
        continue;
    }

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr.  This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this indirect
  // jump would jump from the inlined copy of the function into the original
  // function which is extremely undefined behavior.
  if (isa<IndirectBrInst>(BB->getTerminator()))
    NeverInline = true;
}

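// Worked example for analyzeBasicBlock (illustrative block): for a block
// containing a PHI node, a constant-index GEP, a load, and a ret, only the
// load and the ret are counted: the PHI is skipped outright and the foldable
// GEP is expected to merge into the load/store, so the block contributes
// NumInsts += 2, NumRets += 1, and NumBlocks += 1.
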
/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F) {
  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB);
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
  Metrics.analyzeFunction(F);

  // A function with exactly one return has it removed during the inlining
  // process (see InlineFunction), so don't count it.
  // FIXME: This knowledge should really be encoded outside of FunctionInfo.
  if (Metrics.NumRets == 1)
    --Metrics.NumInsts;

  // Don't bother calculating argument weights if we are never going to inline
  // the function anyway.
  if (Metrics.NeverInline)
    return;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  ArgumentWeights.reserve(F->arg_size());
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}

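// For instance (illustrative), a callee whose first parameter feeds a switch
// gets a nonzero ConstantWeight in ArgumentWeights[0]; getInlineCost below
// subtracts that weight from the running cost whenever the actual argument
// at a particular call site is a Constant.
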
// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               SmallPtrSet<const Function *, 16> &NeverInline) {
  Instruction *TheCall = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  Function *Caller = TheCall->getParent()->getParent();

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee))
    return llvm::InlineCost::getNever();

  // InlineCost - This value measures how good of an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  if (Callee->hasLocalLinkage() && Callee->hasOneUse())
    InlineCost += InlineConstants::LastCallToStaticBonus;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += InlineConstants::ColdccPenalty;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += InlineConstants::NoreturnPenalty;

  // Get information about the callee.
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  // If we should never inline this, return a huge cost.
  if (CalleeFI.Metrics.NeverInline)
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI.NeverInline.  Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for.  This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();

  if (CalleeFI.Metrics.usesDynamicAlloca) {
    // Get information about the caller.
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0)
      CallerFI.analyzeFunction(Caller);

    // Don't inline a callee with dynamic allocas into a caller without them.
    // Functions containing dynamic allocas are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  Measurements show that each argument costs about the same as an
    // instruction.
    InlineCost -= InlineConstants::InstrCost;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion and
    // scalarization), so encourage the inlining of the function.
    if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
    }
  }

  // Now that we have considered all of the factors that make the call site
  // more likely to be inlined, look at factors that make us not want to
  // inline it.

  // Calls usually take a long time, so they make the inlining gain smaller.
  InlineCost += CalleeFI.Metrics.NumCalls * InlineConstants::CallPenalty;

  // Don't inline into something too big, which would make it bigger.
  // "size" here is the number of basic blocks, not instructions.
  InlineCost += Caller->size()/15;

  // Look at the size of the callee.  Each instruction counts as 5.
  InlineCost += CalleeFI.Metrics.NumInsts*InlineConstants::InstrCost;

  return llvm::InlineCost::get(InlineCost);
}

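// Worked example for getInlineCost (illustrative numbers; InstrCost is 5 per
// the comment above, and CallPenalty is assumed here to be 25 as defined in
// InlineConstants): a callee with 20 instructions and 1 call, invoked with
// 2 arguments from a caller with 30 basic blocks and with no bonuses
// applying, scores roughly
//     -2*5 (arguments) + 1*25 (call penalty) + 30/15 (caller size)
//     + 20*5 (callee size) = 117,
// which the inliner then compares against its inlining threshold.
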
// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Get information about the callee.
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  float Factor = 1.0f;
  // Single BB functions are often written to be inlined.
  if (CalleeFI.Metrics.NumBlocks == 1)
    Factor += 0.5f;

  // Be more aggressive if the function contains a good chunk (if it makes up
  // at least 10% of the instructions) of vector instructions.
  if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/2)
    Factor += 2.0f;
  else if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/10)
    Factor += 1.5f;

  return Factor;
}
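
// The inliner scales its base threshold by this factor before comparing it
// against getInlineCost's result, so (illustratively) a fudge factor of 1.5
// lets a callee up to 50% over the normal size threshold still be inlined.
// The threshold comparison itself lives in the inliner pass, not in this file.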