//===- InstCombineCalls.cpp -----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitCall and visitInvoke functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
using namespace llvm;

/// getPromotedType - Return the specified type promoted as it would be to pass
/// through a va_arg area.
static const Type *getPromotedType(const Type *Ty) {
  if (const IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
    if (ITy->getBitWidth() < 32)
      return Type::getInt32Ty(Ty->getContext());
  }
  return Ty;
}

/// EnforceKnownAlignment - If the specified pointer points to an object that
/// we control, modify the object's alignment to PrefAlign. This isn't
/// often possible though. If alignment is important, a more reliable approach
/// is to simply align all global variables and allocation instructions to
/// their preferred alignment from the beginning.
///
static unsigned EnforceKnownAlignment(Value *V,
                                      unsigned Align, unsigned PrefAlign) {

  User *U = dyn_cast<User>(V);
  if (!U) return Align;

  switch (Operator::getOpcode(U)) {
  default: break;
  case Instruction::BitCast:
    return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
  case Instruction::GetElementPtr: {
    // If all indexes are zero, it is just the alignment of the base pointer.
    bool AllZeroOperands = true;
    for (User::op_iterator i = U->op_begin() + 1, e = U->op_end(); i != e; ++i)
      if (!isa<Constant>(*i) ||
          !cast<Constant>(*i)->isNullValue()) {
        AllZeroOperands = false;
        break;
      }

    if (AllZeroOperands) {
      // Treat this like a bitcast.
      return EnforceKnownAlignment(U->getOperand(0), Align, PrefAlign);
    }
    break;
  }
  }

  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
    // If there is a large requested alignment and we can, bump up the alignment
    // of the global.
    if (!GV->isDeclaration()) {
      if (GV->getAlignment() >= PrefAlign)
        Align = GV->getAlignment();
      else {
        GV->setAlignment(PrefAlign);
        Align = PrefAlign;
      }
    }
  } else if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
    // If there is a requested alignment and if this is an alloca, round up.
    if (AI->getAlignment() >= PrefAlign)
      Align = AI->getAlignment();
    else {
      AI->setAlignment(PrefAlign);
      Align = PrefAlign;
    }
  }

  return Align;
}
/// GetOrEnforceKnownAlignment - If the specified pointer has an alignment that
91
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
92
/// and it is more than the alignment of the ultimate object, see if we can
93
/// increase the alignment of the ultimate object, making this check succeed.
94
unsigned InstCombiner::GetOrEnforceKnownAlignment(Value *V,
96
unsigned BitWidth = TD ? TD->getTypeSizeInBits(V->getType()) :
97
sizeof(PrefAlign) * CHAR_BIT;
98
APInt Mask = APInt::getAllOnesValue(BitWidth);
99
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
100
ComputeMaskedBits(V, Mask, KnownZero, KnownOne);
101
unsigned TrailZ = KnownZero.countTrailingOnes();
102
unsigned Align = 1u << std::min(BitWidth - 1, TrailZ);
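  // KnownZero.countTrailingOnes() counts the low bits proved zero, and a
  // value whose low N bits are zero is a multiple of 2^N: e.g. four known
  // zero low bits mean the pointer is at least 16-byte aligned.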

  if (PrefAlign > Align)
    Align = EnforceKnownAlignment(V, Align, PrefAlign);

  // We don't need to make any adjustment.
  return Align;
}
Instruction *InstCombiner::SimplifyMemTransfer(MemIntrinsic *MI) {
112
unsigned DstAlign = GetOrEnforceKnownAlignment(MI->getOperand(1));
113
unsigned SrcAlign = GetOrEnforceKnownAlignment(MI->getOperand(2));
114
unsigned MinAlign = std::min(DstAlign, SrcAlign);
115
unsigned CopyAlign = MI->getAlignment();
117
if (CopyAlign < MinAlign) {
118
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
123
// If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
125
ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getOperand(3));
126
if (MemOpLength == 0) return 0;
128
// Source and destination pointer types are always "i8*" for intrinsic. See
129
// if the size is something we can handle with a single primitive load/store.
130
// A single load+store correctly handles overlapping memory in the memmove
132
unsigned Size = MemOpLength->getZExtValue();
133
if (Size == 0) return MI; // Delete this mem transfer.
135
if (Size > 8 || (Size&(Size-1)))
136
return 0; // If not 1/2/4/8 bytes, exit.
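  // (At this point Size is 1, 2, 4 or 8: "Size & (Size-1)" clears the lowest
  // set bit, so it is zero exactly when Size is a power of two.)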

  // Use an integer load+store unless we can find something better.
  const Type *NewPtrTy =
    PointerType::getUnqual(IntegerType::get(MI->getContext(), Size<<3));

  // Memcpy forces the use of i8* for the source and destination.  That means
  // that if you're using memcpy to move one double around, you'll get a cast
  // from double* to i8*.  We'd much rather use a double load+store rather than
  // an i64 load+store, here because this improves the odds that the source or
  // dest address will be promotable.  See if we can find a better type than the
  // integer datatype.
  Value *StrippedDest = MI->getOperand(1)->stripPointerCasts();
  if (StrippedDest != MI->getOperand(1)) {
    const Type *SrcETy = cast<PointerType>(StrippedDest->getType())
                           ->getElementType();
    if (TD && SrcETy->isSized() && TD->getTypeStoreSize(SrcETy) == Size) {
      // The SrcETy might be something like {{{double}}} or [1 x double].  Rip
      // down through these levels if so.
      while (!SrcETy->isSingleValueType()) {
        if (const StructType *STy = dyn_cast<StructType>(SrcETy)) {
          if (STy->getNumElements() == 1)
            SrcETy = STy->getElementType(0);
          else
            break;
        } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcETy)) {
          if (ATy->getNumElements() == 1)
            SrcETy = ATy->getElementType();
          else
            break;
        } else
          break;
      }

      if (SrcETy->isSingleValueType())
        NewPtrTy = PointerType::getUnqual(SrcETy);
    }
  }

  // If the memcpy/memmove provides better alignment info than we can
  // infer, use it.
  SrcAlign = std::max(SrcAlign, CopyAlign);
  DstAlign = std::max(DstAlign, CopyAlign);

  Value *Src = Builder->CreateBitCast(MI->getOperand(2), NewPtrTy);
  Value *Dest = Builder->CreateBitCast(MI->getOperand(1), NewPtrTy);
  Instruction *L = new LoadInst(Src, "tmp", false, SrcAlign);
  InsertNewInstBefore(L, *MI);
  InsertNewInstBefore(new StoreInst(L, Dest, false, DstAlign), *MI);

  // Set the size of the copy to 0, it will be deleted on the next iteration.
  MI->setOperand(3, Constant::getNullValue(MemOpLength->getType()));
  return MI;
}
Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
193
unsigned Alignment = GetOrEnforceKnownAlignment(MI->getDest());
194
if (MI->getAlignment() < Alignment) {
195
MI->setAlignment(ConstantInt::get(MI->getAlignmentType(),
200
// Extract the length and alignment and fill if they are constant.
201
ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
202
ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
203
if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
205
uint64_t Len = LenC->getZExtValue();
206
Alignment = MI->getAlignment();
208
// If the length is zero, this is a no-op
209
if (Len == 0) return MI; // memset(d,c,0,a) -> noop
211
// memset(s,c,n) -> store s, c (for n=1,2,4,8)
212
if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
213
const Type *ITy = IntegerType::get(MI->getContext(), Len*8); // n=1 -> i8.
215
Value *Dest = MI->getDest();
216
Dest = Builder->CreateBitCast(Dest, PointerType::getUnqual(ITy));
218
// Alignment 0 is identity for alignment 1 for memset, but not store.
219
if (Alignment == 0) Alignment = 1;
221
// Extract the fill value and store.
222
uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
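    // Multiplying by 0x0101010101010101 splats the fill byte into every byte
    // lane (e.g. 0xAB -> 0xABABABABABABABAB); ConstantInt::get then truncates
    // the pattern to ITy's width.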
    InsertNewInstBefore(new StoreInst(ConstantInt::get(ITy, Fill),
                                      Dest, false, Alignment), *MI);

    // Set the size of the copy to 0, it will be deleted on the next iteration.
    MI->setLength(Constant::getNullValue(LenC->getType()));
    return MI;
  }

  return 0;
}
/// visitCallInst - CallInst simplification. This mostly only handles folding
235
/// of intrinsic instructions. For normal calls, it allows visitCallSite to do
236
/// the heavy lifting.
238
Instruction *InstCombiner::visitCallInst(CallInst &CI) {
240
return visitFree(CI);
242
// If the caller function is nounwind, mark the call as nounwind, even if the
244
if (CI.getParent()->getParent()->doesNotThrow() &&
245
!CI.doesNotThrow()) {
246
CI.setDoesNotThrow();
250

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
  if (!II) return visitCallSite(&CI);

  // Intrinsics cannot occur in an invoke, so handle them here instead of in
  // visitCallSite.
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(II)) {
    bool Changed = false;

    // memmove/cpy/set of zero bytes is a noop.
    if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
      if (NumBytes->isNullValue()) return EraseInstFromFunction(CI);

      if (ConstantInt *CI = dyn_cast<ConstantInt>(NumBytes))
        if (CI->getZExtValue() == 1) {
          // Replace the instruction with just byte operations.  We would
          // transform other cases to loads/stores, but we don't know if
          // alignment is sufficient.
        }
    }

    // If we have a memmove and the source operation is a constant global,
    // then the source and dest pointers can't alias, so we can change this
    // into a call to memcpy.
    if (MemMoveInst *MMI = dyn_cast<MemMoveInst>(MI)) {
      if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
        if (GVSrc->isConstant()) {
          Module *M = CI.getParent()->getParent()->getParent();
          Intrinsic::ID MemCpyID = Intrinsic::memcpy;
          const Type *Tys[1];
          Tys[0] = CI.getOperand(3)->getType();
          CI.setOperand(0,
                        Intrinsic::getDeclaration(M, MemCpyID, Tys, 1));
          Changed = true;
        }
    }

    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
      // memmove(x,x,size) -> noop.
      if (MTI->getSource() == MTI->getDest())
        return EraseInstFromFunction(CI);
    }

    // If we can determine a pointer alignment that is bigger than currently
    // set, update the alignment.
    if (isa<MemTransferInst>(MI)) {
      if (Instruction *I = SimplifyMemTransfer(MI))
        return I;
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(MI)) {
      if (Instruction *I = SimplifyMemSet(MSI))
        return I;
    }

    if (Changed) return II;
  }

  switch (II->getIntrinsicID()) {
  default: break;
  case Intrinsic::objectsize: {
    // We need target data for just about everything so depend on it.
    if (!TD) break;

    const Type *ReturnTy = CI.getType();
    bool Min = (cast<ConstantInt>(II->getOperand(2))->getZExtValue() == 1);

    // Get to the real allocated thing and offset as fast as possible.
    Value *Op1 = II->getOperand(1)->stripPointerCasts();

    // If we've stripped down to a single global variable that we
    // can know the size of then just return that.
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
      if (GV->hasDefinitiveInitializer()) {
        Constant *C = GV->getInitializer();
        uint64_t GlobalSize = TD->getTypeAllocSize(C->getType());
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, GlobalSize));
      } else {
        // Can't determine size of the GV.
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
      // Get alloca size.
      if (AI->getAllocatedType()->isSized()) {
        uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
        if (AI->isArrayAllocation()) {
          const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
          if (!C) break;
          AllocaSize *= C->getZExtValue();
        }
        return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, AllocaSize));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op1)) {
      // Only handle constant GEPs here.
      if (CE->getOpcode() != Instruction::GetElementPtr) break;
      GEPOperator *GEP = cast<GEPOperator>(CE);

      // Make sure we're not a constant offset from an external
      // global.
      Value *Operand = GEP->getPointerOperand();
      Operand = Operand->stripPointerCasts();
      if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Operand))
        if (!GV->hasDefinitiveInitializer()) break;

      // Get what we're pointing to and its size.
      const PointerType *BaseType =
        cast<PointerType>(Operand->getType());
      uint64_t Size = TD->getTypeAllocSize(BaseType->getElementType());

      // Get the current byte offset into the thing. Use the original
      // operand in case we're looking through a bitcast.
      SmallVector<Value*, 8> Ops(CE->op_begin()+1, CE->op_end());
      const PointerType *OffsetType =
        cast<PointerType>(GEP->getPointerOperand()->getType());
      uint64_t Offset = TD->getIndexedOffset(OffsetType, &Ops[0], Ops.size());

      if (Size < Offset) {
        // Out of bound reference? Negative index normalized to large
        // index? Just return "I don't know".
        Constant *RetVal = ConstantInt::get(ReturnTy, Min ? 0 : -1ULL);
        return ReplaceInstUsesWith(CI, RetVal);
      }

      Constant *RetVal = ConstantInt::get(ReturnTy, Size-Offset);
      return ReplaceInstUsesWith(CI, RetVal);
    }

    // Do not return "I don't know" here. Later optimization passes could
    // make it possible to evaluate objectsize to a constant.
    break;
  }
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getOperand(1)))
      if (Operand->getIntrinsicID() == Intrinsic::bswap)
        return ReplaceInstUsesWith(CI, Operand->getOperand(1));

    // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
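    // The inner bswap moved the bytes that survive the truncate into the low
    // end of the wide value, so shifting the original value right by the
    // width difference (e.g. c = 16 for i32 -> i16) and truncating yields the
    // same bytes in the same order, with no byte swaps at all.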
    if (TruncInst *TI = dyn_cast<TruncInst>(II->getOperand(1))) {
      if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(TI->getOperand(0)))
        if (Operand->getIntrinsicID() == Intrinsic::bswap) {
          unsigned C = Operand->getType()->getPrimitiveSizeInBits() -
                       TI->getType()->getPrimitiveSizeInBits();
          Value *CV = ConstantInt::get(Operand->getType(), C);
          Value *V = Builder->CreateLShr(Operand->getOperand(1), CV);
          return new TruncInst(V, TI->getType());
        }
    }
    break;
  case Intrinsic::powi:
    if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ReplaceInstUsesWith(CI, ConstantFP::get(CI.getType(), 1.0));
      // powi(x, 1) -> x
      if (Power->isOne())
        return ReplaceInstUsesWith(CI, II->getOperand(1));
      // powi(x, -1) -> 1/x
      if (Power->isAllOnesValue())
        return BinaryOperator::CreateFDiv(ConstantFP::get(CI.getType(), 1.0),
                                          II->getOperand(1));
    }
    break;
  case Intrinsic::cttz: {
    // If all bits below the first known one are known zero,
    // this value is constant.
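    // For example, an operand known to have the form ...xxx1000 (low three
    // bits zero, bit 3 one) must have exactly three trailing zeros.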
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned TrailingZeros = KnownOne.countTrailingZeros();
    APInt Mask(APInt::getLowBitsSet(BitWidth, TrailingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, TrailingZeros)));
    }
    break;
  case Intrinsic::ctlz: {
    // If all bits above the first known one are known zero,
    // this value is constant.
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt KnownZero(BitWidth, 0);
    APInt KnownOne(BitWidth, 0);
    ComputeMaskedBits(II->getOperand(1), APInt::getAllOnesValue(BitWidth),
                      KnownZero, KnownOne);
    unsigned LeadingZeros = KnownOne.countLeadingZeros();
    APInt Mask(APInt::getHighBitsSet(BitWidth, LeadingZeros));
    if ((Mask & KnownZero) == Mask)
      return ReplaceInstUsesWith(CI, ConstantInt::get(IT,
                                 APInt(BitWidth, LeadingZeros)));
    }
    break;
  case Intrinsic::uadd_with_overflow: {
    Value *LHS = II->getOperand(1), *RHS = II->getOperand(2);
    const IntegerType *IT = cast<IntegerType>(II->getOperand(1)->getType());
    uint32_t BitWidth = IT->getBitWidth();
    APInt Mask = APInt::getSignBit(BitWidth);
    APInt LHSKnownZero(BitWidth, 0);
    APInt LHSKnownOne(BitWidth, 0);
    ComputeMaskedBits(LHS, Mask, LHSKnownZero, LHSKnownOne);
    bool LHSKnownNegative = LHSKnownOne[BitWidth - 1];
    bool LHSKnownPositive = LHSKnownZero[BitWidth - 1];

    if (LHSKnownNegative || LHSKnownPositive) {
      APInt RHSKnownZero(BitWidth, 0);
      APInt RHSKnownOne(BitWidth, 0);
      ComputeMaskedBits(RHS, Mask, RHSKnownZero, RHSKnownOne);
      bool RHSKnownNegative = RHSKnownOne[BitWidth - 1];
      bool RHSKnownPositive = RHSKnownZero[BitWidth - 1];
      if (LHSKnownNegative && RHSKnownNegative) {
        // The sign bit is set in both cases: this MUST overflow.
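        // (Both operands are at least 2^(BitWidth-1), so their sum is at
        // least 2^BitWidth and cannot fit in BitWidth bits.)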
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getTrue(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }

      if (LHSKnownPositive && RHSKnownPositive) {
        // The sign bit is clear in both cases: this CANNOT overflow.
        // Create a simple add instruction, and insert it into the struct.
        Instruction *Add = BinaryOperator::CreateNUWAdd(LHS, RHS, "", &CI);
        Worklist.Add(Add);
        Constant *V[] = {
          UndefValue::get(LHS->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, Add, 0);
      }
    }
  }
  // FALL THROUGH uadd into sadd
  case Intrinsic::sadd_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X + undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X + 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(0)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // undef - X -> undef
    // X - undef -> undef
    if (isa<UndefValue>(II->getOperand(1)) ||
        isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X - 0 -> {X, false}
      if (RHS->isZero()) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // Canonicalize constants into the RHS.
    if (isa<Constant>(II->getOperand(1)) &&
        !isa<Constant>(II->getOperand(2))) {
      Value *LHS = II->getOperand(1);
      II->setOperand(1, II->getOperand(2));
      II->setOperand(2, LHS);
      return II;
    }

    // X * undef -> undef
    if (isa<UndefValue>(II->getOperand(2)))
      return ReplaceInstUsesWith(CI, UndefValue::get(II->getType()));

    if (ConstantInt *RHSI = dyn_cast<ConstantInt>(II->getOperand(2))) {
      // X * 0 -> {0, false}
      if (RHSI->isZero())
        return ReplaceInstUsesWith(CI, Constant::getNullValue(II->getType()));

      // X * 1 -> {X, false}
      if (RHSI->equalsInt(1)) {
        Constant *V[] = {
          UndefValue::get(II->getOperand(1)->getType()),
          ConstantInt::getFalse(II->getContext())
        };
        Constant *Struct = ConstantStruct::get(II->getContext(), V, 2, false);
        return InsertValueInst::Create(Struct, II->getOperand(1), 0);
      }
    }
    break;
  case Intrinsic::ppc_altivec_lvx:
  case Intrinsic::ppc_altivec_lvxl:
  case Intrinsic::x86_sse_loadu_ps:
  case Intrinsic::x86_sse2_loadu_pd:
  case Intrinsic::x86_sse2_loadu_dq:
    // Turn PPC lvx -> load if the pointer is known aligned.
    // Turn X86 loadups -> load if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1),
                                          PointerType::getUnqual(II->getType()));
      return new LoadInst(Ptr);
    }
    break;
  case Intrinsic::ppc_altivec_stvx:
  case Intrinsic::ppc_altivec_stvxl:
    // Turn stvx -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(2), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(1)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(2), OpPtrTy);
      return new StoreInst(II->getOperand(1), Ptr);
    }
    break;
  case Intrinsic::x86_sse_storeu_ps:
  case Intrinsic::x86_sse2_storeu_pd:
  case Intrinsic::x86_sse2_storeu_dq:
    // Turn X86 storeu -> store if the pointer is known aligned.
    if (GetOrEnforceKnownAlignment(II->getOperand(1), 16) >= 16) {
      const Type *OpPtrTy =
        PointerType::getUnqual(II->getOperand(2)->getType());
      Value *Ptr = Builder->CreateBitCast(II->getOperand(1), OpPtrTy);
      return new StoreInst(II->getOperand(2), Ptr);
    }
    break;
  case Intrinsic::x86_sse_cvttss2si: {
    // These intrinsics only demand the 0th element of their input vector. If
    // we can simplify the input based on that, do so now.
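    // E.g. values inserted into lanes 1-3 of the input vector are never read
    // by the conversion, so the demanded-elements logic can strip them.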
    unsigned VWidth =
      cast<VectorType>(II->getOperand(1)->getType())->getNumElements();
    APInt DemandedElts(VWidth, 1);
    APInt UndefElts(VWidth, 0);
    if (Value *V = SimplifyDemandedVectorElts(II->getOperand(1), DemandedElts,
                                              UndefElts)) {
      II->setOperand(1, V);
      return II;
    }
    break;
  }
  case Intrinsic::ppc_altivec_vperm:
    // Turn vperm(V1,V2,mask) -> shuffle(V1,V2,mask) if mask is a constant.
    if (ConstantVector *Mask = dyn_cast<ConstantVector>(II->getOperand(3))) {
      assert(Mask->getNumOperands() == 16 && "Bad type for intrinsic!");

      // Check that all of the elements are integer constants or undefs.
      bool AllEltsOk = true;
      for (unsigned i = 0; i != 16; ++i) {
        if (!isa<ConstantInt>(Mask->getOperand(i)) &&
            !isa<UndefValue>(Mask->getOperand(i))) {
          AllEltsOk = false;
          break;
        }
      }

      if (AllEltsOk) {
        // Cast the input vectors to byte vectors.
        Value *Op0 = Builder->CreateBitCast(II->getOperand(1), Mask->getType());
        Value *Op1 = Builder->CreateBitCast(II->getOperand(2), Mask->getType());
        Value *Result = UndefValue::get(Op0->getType());

        // Only extract each element once.
        Value *ExtractedElts[32];
        memset(ExtractedElts, 0, sizeof(ExtractedElts));

        for (unsigned i = 0; i != 16; ++i) {
          if (isa<UndefValue>(Mask->getOperand(i)))
            continue;
          unsigned Idx=cast<ConstantInt>(Mask->getOperand(i))->getZExtValue();
          Idx &= 31;  // Match the hardware behavior.
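          // vperm reads only the low 5 bits of each selector byte: indices
          // 0-15 pick a byte from the first source vector and 16-31 from the
          // second, so out-of-range selectors wrap just like the hardware.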
          if (ExtractedElts[Idx] == 0) {
            ExtractedElts[Idx] =
              Builder->CreateExtractElement(Idx < 16 ? Op0 : Op1,
                  ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                   Idx&15, false), "tmp");
          }

          // Insert this value into the result vector.
          Result = Builder->CreateInsertElement(Result, ExtractedElts[Idx],
                         ConstantInt::get(Type::getInt32Ty(II->getContext()),
                                          i, false), "tmp");
        }
        return CastInst::Create(Instruction::BitCast, Result, CI.getType());
      }
    }
    break;
  case Intrinsic::stackrestore: {
    // If the save is right next to the restore, remove the restore.  This can
    // happen when variable allocas are DCE'd.
    if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getOperand(1))) {
      if (SS->getIntrinsicID() == Intrinsic::stacksave) {
        BasicBlock::iterator BI = SS;
        if (&*++BI == II)
          return EraseInstFromFunction(CI);
      }
    }

    // Scan down this block to see if there is another stack restore in the
    // same block without an intervening call/alloca.
    BasicBlock::iterator BI = II;
    TerminatorInst *TI = II->getParent()->getTerminator();
    bool CannotRemove = false;
    for (++BI; &*BI != TI; ++BI) {
      if (isa<AllocaInst>(BI) || isMalloc(BI)) {
        CannotRemove = true;
        break;
      }
      if (CallInst *BCI = dyn_cast<CallInst>(BI)) {
        if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(BCI)) {
          // If there is a stackrestore below this one, remove this one.
          if (II->getIntrinsicID() == Intrinsic::stackrestore)
            return EraseInstFromFunction(CI);
          // Otherwise, ignore the intrinsic.
        } else {
          // If we found a non-intrinsic call, we can't remove the stack
          // restore.
          CannotRemove = true;
          break;
        }
      }
    }

    // If the stack restore is in a return/unwind block and if there are no
    // allocas or calls between the restore and the return, nuke the restore.
    if (!CannotRemove && (isa<ReturnInst>(TI) || isa<UnwindInst>(TI)))
      return EraseInstFromFunction(CI);
    break;
  }
  }

  return visitCallSite(II);
}

// InvokeInst simplification
//
Instruction *InstCombiner::visitInvokeInst(InvokeInst &II) {
  return visitCallSite(&II);
}

/// isSafeToEliminateVarargsCast - If this cast does not affect the value
/// passed through the varargs area, we can eliminate the use of the cast.
static bool isSafeToEliminateVarargsCast(const CallSite CS,
                                         const CastInst * const CI,
                                         const TargetData * const TD,
                                         const int ix) {
  if (!CI->isLosslessCast())
    return false;

  // The size of ByVal arguments is derived from the type, so we
  // can't change to a type with a different size.  If the size were
  // passed explicitly we could avoid this check.
  if (!CS.paramHasAttr(ix, Attribute::ByVal))
    return true;

  const Type* SrcTy =
    cast<PointerType>(CI->getOperand(0)->getType())->getElementType();
  const Type* DstTy = cast<PointerType>(CI->getType())->getElementType();
  if (!SrcTy->isSized() || !DstTy->isSized())
    return false;
  if (!TD || TD->getTypeAllocSize(SrcTy) != TD->getTypeAllocSize(DstTy))
    return false;
  return true;
}

// Try to fold some different type of calls here.
// Currently we're only working with the checking functions, memcpy_chk,
// mempcpy_chk, memmove_chk, memset_chk, strcpy_chk, stpcpy_chk, strncpy_chk,
// strcat_chk and strncat_chk.
Instruction *InstCombiner::tryOptimizeCall(CallInst *CI, const TargetData *TD) {
  if (CI->getCalledFunction() == 0) return 0;

  StringRef Name = CI->getCalledFunction()->getName();
  BasicBlock *BB = CI->getParent();
  IRBuilder<> B(CI->getParent()->getContext());

  // Set the builder to the instruction after the call.
  B.SetInsertPoint(BB, CI);
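
  // The __*_chk functions are the fortified forms emitted for
  // -D_FORTIFY_SOURCE: they carry the number of bytes known to be available
  // in the destination object as a trailing argument, e.g.
  // __memcpy_chk(dst, src, len, dstsize).  A size of -1 (all ones) means the
  // object size is unknown, in which case the checked call behaves like the
  // plain libc routine and can be lowered to it.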

  if (Name == "__memcpy_chk") {
    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
    if (!SizeCI)
      return 0;
    ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
    if (!SizeArg)
      return 0;
    if (SizeCI->isAllOnesValue() ||
        SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
      EmitMemCpy(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
                 1, B, TD);
      return ReplaceInstUsesWith(*CI, CI->getOperand(1));
    }
    return 0;
  }

  // Should be similar to memcpy.
  if (Name == "__mempcpy_chk") {
    return 0;
  }

  if (Name == "__memmove_chk") {
    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
    if (!SizeCI)
      return 0;
    ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
    if (!SizeArg)
      return 0;
    if (SizeCI->isAllOnesValue() ||
        SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
      EmitMemMove(CI->getOperand(1), CI->getOperand(2), CI->getOperand(3),
                  1, B, TD);
      return ReplaceInstUsesWith(*CI, CI->getOperand(1));
    }
    return 0;
  }
if (Name == "__memset_chk") {
796
ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
799
ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
802
if (SizeCI->isAllOnesValue() ||
803
SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
804
Value *Val = B.CreateIntCast(CI->getOperand(2), B.getInt8Ty(),
806
EmitMemSet(CI->getOperand(1), Val, CI->getOperand(3), B, TD);
807
return ReplaceInstUsesWith(*CI, CI->getOperand(1));
812
if (Name == "__strcpy_chk") {
813
ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(3));
816
// If a) we don't have any length information, or b) we know this will
817
// fit then just lower to a plain strcpy. Otherwise we'll keep our
818
// strcpy_chk call which may fail at runtime if the size is too long.
819
// TODO: It might be nice to get a maximum length out of the possible
820
// string lengths for varying.
821
if (SizeCI->isAllOnesValue() ||
822
SizeCI->getZExtValue() >= GetStringLength(CI->getOperand(2))) {
823
Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD);
824
return ReplaceInstUsesWith(*CI, Ret);
829

  // Should be similar to strcpy.
  if (Name == "__stpcpy_chk") {
    return 0;
  }

  if (Name == "__strncpy_chk") {
    ConstantInt *SizeCI = dyn_cast<ConstantInt>(CI->getOperand(4));
    if (!SizeCI)
      return 0;
    ConstantInt *SizeArg = dyn_cast<ConstantInt>(CI->getOperand(3));
    if (!SizeArg)
      return 0;
    if (SizeCI->isAllOnesValue() ||
        SizeCI->getZExtValue() <= SizeArg->getZExtValue()) {
      Value *Ret = EmitStrCpy(CI->getOperand(1), CI->getOperand(2), B, TD);
      return ReplaceInstUsesWith(*CI, Ret);
    }
    return 0;
  }

  if (Name == "__strcat_chk") {
    return 0;
  }

  if (Name == "__strncat_chk") {
    return 0;
  }

  return 0;
}

// visitCallSite - Improvements for call and invoke instructions.
//
Instruction *InstCombiner::visitCallSite(CallSite CS) {
  bool Changed = false;

  // If the callee is a constexpr cast of a function, attempt to move the cast
  // to the arguments of the call/invoke.
  if (transformConstExprCastCall(CS)) return 0;

  Value *Callee = CS.getCalledValue();

  if (Function *CalleeF = dyn_cast<Function>(Callee))
    // If the call and callee calling conventions don't match, this call must
    // be unreachable, as the call is undefined.
    if (CalleeF->getCallingConv() != CS.getCallingConv() &&
        // Only do this for calls to a function with a body.  A prototype may
        // not actually end up matching the implementation's calling conv for a
        // variety of reasons (e.g. it may be written in assembly).
        !CalleeF->isDeclaration()) {
      Instruction *OldCall = CS.getInstruction();
      new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                    UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                    OldCall);
      // If OldCall does not return void then replaceAllUsesWith undef.
      // This allows ValueHandlers and custom metadata to adjust themselves.
      if (!OldCall->getType()->isVoidTy())
        OldCall->replaceAllUsesWith(UndefValue::get(OldCall->getType()));
      if (isa<CallInst>(OldCall))
        return EraseInstFromFunction(*OldCall);

      // We cannot remove an invoke, because it would change the CFG, just
      // change the callee to a null pointer.
      cast<InvokeInst>(OldCall)->setOperand(0,
                                   Constant::getNullValue(CalleeF->getType()));
      return 0;
    }

  if (isa<ConstantPointerNull>(Callee) || isa<UndefValue>(Callee)) {
    // This instruction is not reachable, just remove it.  We insert a store to
    // undef so that we know that this code is not reachable, despite the fact
    // that we can't modify the CFG here.
    new StoreInst(ConstantInt::getTrue(Callee->getContext()),
                  UndefValue::get(Type::getInt1PtrTy(Callee->getContext())),
                  CS.getInstruction());

    // If CS does not return void then replaceAllUsesWith undef.
    // This allows ValueHandlers and custom metadata to adjust themselves.
    if (!CS.getInstruction()->getType()->isVoidTy())
      CS.getInstruction()->
        replaceAllUsesWith(UndefValue::get(CS.getInstruction()->getType()));

    if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
      // Don't break the CFG, insert a dummy cond branch.
      BranchInst::Create(II->getNormalDest(), II->getUnwindDest(),
                         ConstantInt::getTrue(Callee->getContext()), II);
    }
    return EraseInstFromFunction(*CS.getInstruction());
  }

  if (BitCastInst *BC = dyn_cast<BitCastInst>(Callee))
    if (IntrinsicInst *In = dyn_cast<IntrinsicInst>(BC->getOperand(0)))
      if (In->getIntrinsicID() == Intrinsic::init_trampoline)
        return transformCallThroughTrampoline(CS);

  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  if (FTy->isVarArg()) {
    int ix = FTy->getNumParams() + (isa<InvokeInst>(Callee) ? 3 : 1);
    // See if we can optimize any arguments passed through the varargs area of
    // the call.
    for (CallSite::arg_iterator I = CS.arg_begin()+FTy->getNumParams(),
           E = CS.arg_end(); I != E; ++I, ++ix) {
      CastInst *CI = dyn_cast<CastInst>(*I);
      if (CI && isSafeToEliminateVarargsCast(CS, CI, TD, ix)) {
        *I = CI->getOperand(0);
        Changed = true;
      }
    }
  }

  if (isa<InlineAsm>(Callee) && !CS.doesNotThrow()) {
    // Inline asm calls cannot throw - mark them 'nounwind'.
    CS.setDoesNotThrow();
    Changed = true;
  }

  // Try to optimize the call if possible, we require TargetData for most of
  // this.  None of these calls are seen as possibly dead so go ahead and
  // delete the instruction now.
  if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
    Instruction *I = tryOptimizeCall(CI, TD);
    // If we changed something return the result; otherwise let
    // the fallthrough check run.
    if (I) return EraseInstFromFunction(*I);
  }

  return Changed ? CS.getInstruction() : 0;
}

// transformConstExprCastCall - If the callee is a constexpr cast of a function,
// attempt to move the cast to the arguments of the call/invoke.
//
bool InstCombiner::transformConstExprCastCall(CallSite CS) {
  if (!isa<ConstantExpr>(CS.getCalledValue())) return false;
  ConstantExpr *CE = cast<ConstantExpr>(CS.getCalledValue());
  if (CE->getOpcode() != Instruction::BitCast ||
      !isa<Function>(CE->getOperand(0)))
    return false;
  Function *Callee = cast<Function>(CE->getOperand(0));
  Instruction *Caller = CS.getInstruction();
  const AttrListPtr &CallerPAL = CS.getAttributes();

  // Okay, this is a cast from a function to a different type.  Unless doing so
  // would cause a type conversion of one of our arguments, change this call to
  // be a direct call with arguments casted to the appropriate types.
  //
  const FunctionType *FT = Callee->getFunctionType();
  const Type *OldRetTy = Caller->getType();
  const Type *NewRetTy = FT->getReturnType();

  if (NewRetTy->isStructTy())
    return false; // TODO: Handle multiple return values.

  // Check to see if we are changing the return type...
  if (OldRetTy != NewRetTy) {
    if (Callee->isDeclaration() &&
        // Conversion is ok if changing from one pointer type to another or from
        // a pointer to an integer of the same size.
        !((OldRetTy->isPointerTy() || !TD ||
           OldRetTy == TD->getIntPtrType(Caller->getContext())) &&
          (NewRetTy->isPointerTy() || !TD ||
           NewRetTy == TD->getIntPtrType(Caller->getContext()))))
      return false;   // Cannot transform this return value.

    if (!Caller->use_empty() &&
        // void -> non-void is handled specially
        !NewRetTy->isVoidTy() && !CastInst::isCastable(NewRetTy, OldRetTy))
      return false;   // Cannot transform this return value.

    if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
      Attributes RAttrs = CallerPAL.getRetAttributes();
      if (RAttrs & Attribute::typeIncompatible(NewRetTy))
        return false;   // Attribute not compatible with transformed value.
    }

    // If the callsite is an invoke instruction, and the return value is used by
    // a PHI node in a successor, we cannot change the return type of the call
    // because there is no place to put the cast instruction (without breaking
    // the critical edge).  Bail out in this case.
    if (!Caller->use_empty())
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller))
        for (Value::use_iterator UI = II->use_begin(), E = II->use_end();
             UI != E; ++UI)
          if (PHINode *PN = dyn_cast<PHINode>(*UI))
            if (PN->getParent() == II->getNormalDest() ||
                PN->getParent() == II->getUnwindDest())
              return false;
  }

  unsigned NumActualArgs = unsigned(CS.arg_end()-CS.arg_begin());
  unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);

  CallSite::arg_iterator AI = CS.arg_begin();
  for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    const Type *ActTy = (*AI)->getType();

    if (!CastInst::isCastable(ActTy, ParamTy))
      return false;   // Cannot transform this parameter value.

    if (CallerPAL.getParamAttributes(i + 1)
        & Attribute::typeIncompatible(ParamTy))
      return false;   // Attribute not compatible with transformed value.

    // Converting from one pointer type to another or between a pointer and an
    // integer of the same size is safe even if we do not have a body.
    bool isConvertible = ActTy == ParamTy ||
      (TD && ((ParamTy->isPointerTy() ||
               ParamTy == TD->getIntPtrType(Caller->getContext())) &&
              (ActTy->isPointerTy() ||
               ActTy == TD->getIntPtrType(Caller->getContext()))));
    if (Callee->isDeclaration() && !isConvertible) return false;
  }

  if (FT->getNumParams() < NumActualArgs && !FT->isVarArg() &&
      Callee->isDeclaration())
    return false;   // Do not delete arguments unless we have a function body.

  if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
      !CallerPAL.isEmpty())
    // In this case we have more arguments than the new function type, but we
    // won't be dropping them.  Check that these extra arguments have attributes
    // that are compatible with being a vararg call argument.
    for (unsigned i = CallerPAL.getNumSlots(); i; --i) {
      if (CallerPAL.getSlot(i - 1).Index <= FT->getNumParams())
        break;
      Attributes PAttrs = CallerPAL.getSlot(i - 1).Attrs;
      if (PAttrs & Attribute::VarArgsIncompatible)
        return false;
    }

  // Okay, we decided that this is a safe thing to do: go ahead and start
  // inserting cast instructions as necessary...
  std::vector<Value*> Args;
  Args.reserve(NumActualArgs);
  SmallVector<AttributeWithIndex, 8> attrVec;
  attrVec.reserve(NumCommonArgs);

  // Get any return attributes.
  Attributes RAttrs = CallerPAL.getRetAttributes();

  // If the return value is not being used, the type may not be compatible
  // with the existing attributes.  Wipe out any problematic attributes.
  RAttrs &= ~Attribute::typeIncompatible(NewRetTy);

  // Add the new return attributes.
  if (RAttrs)
    attrVec.push_back(AttributeWithIndex::get(0, RAttrs));

  AI = CS.arg_begin();
  for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
    const Type *ParamTy = FT->getParamType(i);
    if ((*AI)->getType() == ParamTy) {
      Args.push_back(*AI);
    } else {
      Instruction::CastOps opcode = CastInst::getCastOpcode(*AI,
          false, ParamTy, false);
      Args.push_back(Builder->CreateCast(opcode, *AI, ParamTy, "tmp"));
    }

    // Add any parameter attributes.
    if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
      attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
  }

  // If the function takes more arguments than the call was taking, add them
  // now.
  for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i)
    Args.push_back(Constant::getNullValue(FT->getParamType(i)));

  // If we are removing arguments to the function, emit an obnoxious warning.
  if (FT->getNumParams() < NumActualArgs) {
    if (!FT->isVarArg()) {
      errs() << "WARNING: While resolving call to function '"
             << Callee->getName() << "' arguments were dropped!\n";
    } else {
      // Add all of the arguments in their promoted form to the arg list.
      for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
        const Type *PTy = getPromotedType((*AI)->getType());
        if (PTy != (*AI)->getType()) {
          // Must promote to pass through va_arg area!
          Instruction::CastOps opcode =
            CastInst::getCastOpcode(*AI, false, PTy, false);
          Args.push_back(Builder->CreateCast(opcode, *AI, PTy, "tmp"));
        } else {
          Args.push_back(*AI);
        }

        // Add any parameter attributes.
        if (Attributes PAttrs = CallerPAL.getParamAttributes(i + 1))
          attrVec.push_back(AttributeWithIndex::get(i + 1, PAttrs));
      }
    }
  }

  if (Attributes FnAttrs = CallerPAL.getFnAttributes())
    attrVec.push_back(AttributeWithIndex::get(~0, FnAttrs));

  if (NewRetTy->isVoidTy())
    Caller->setName("");   // Void type should not have a name.

  const AttrListPtr &NewCallerPAL = AttrListPtr::get(attrVec.begin(),
                                                     attrVec.end());

  Instruction *NC;
  if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
    NC = InvokeInst::Create(Callee, II->getNormalDest(), II->getUnwindDest(),
                            Args.begin(), Args.end(),
                            Caller->getName(), Caller);
    cast<InvokeInst>(NC)->setCallingConv(II->getCallingConv());
    cast<InvokeInst>(NC)->setAttributes(NewCallerPAL);
  } else {
    NC = CallInst::Create(Callee, Args.begin(), Args.end(),
                          Caller->getName(), Caller);
    CallInst *CI = cast<CallInst>(Caller);
    if (CI->isTailCall())
      cast<CallInst>(NC)->setTailCall();
    cast<CallInst>(NC)->setCallingConv(CI->getCallingConv());
    cast<CallInst>(NC)->setAttributes(NewCallerPAL);
  }

  // Insert a cast of the return type as necessary.
  Value *NV = NC;
  if (OldRetTy != NV->getType() && !Caller->use_empty()) {
    if (!NV->getType()->isVoidTy()) {
      Instruction::CastOps opcode = CastInst::getCastOpcode(NC, false,
                                                            OldRetTy, false);
      NV = NC = CastInst::Create(opcode, NC, OldRetTy, "tmp");

      // If this is an invoke instruction, we should insert it after the first
      // non-phi instruction in the normal successor block.
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        BasicBlock::iterator I = II->getNormalDest()->getFirstNonPHI();
        InsertNewInstBefore(NC, *I);
      } else {
        // Otherwise, it's a call, just insert cast right after the call instr
        InsertNewInstBefore(NC, *Caller);
      }
      Worklist.AddUsersToWorkList(*Caller);
    } else {
      NV = UndefValue::get(Caller->getType());
    }
  }

  if (!Caller->use_empty())
    Caller->replaceAllUsesWith(NV);

  EraseInstFromFunction(*Caller);
  return true;
}

// transformCallThroughTrampoline - Turn a call to a function created by the
// init_trampoline intrinsic into a direct call to the underlying function.
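// (A trampoline is a small block of code materialized at runtime by
// llvm.init_trampoline; calling it invokes the underlying function with an
// extra 'nest' argument - typically the static chain of a nested function -
// spliced into the argument list.)
//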
Instruction *InstCombiner::transformCallThroughTrampoline(CallSite CS) {
  Value *Callee = CS.getCalledValue();
  const PointerType *PTy = cast<PointerType>(Callee->getType());
  const FunctionType *FTy = cast<FunctionType>(PTy->getElementType());
  const AttrListPtr &Attrs = CS.getAttributes();

  // If the call already has the 'nest' attribute somewhere then give up -
  // otherwise 'nest' would occur twice after splicing in the chain.
  if (Attrs.hasAttrSomewhere(Attribute::Nest))
    return 0;

  IntrinsicInst *Tramp =
    cast<IntrinsicInst>(cast<BitCastInst>(Callee)->getOperand(0));

  Function *NestF = cast<Function>(Tramp->getOperand(2)->stripPointerCasts());
  const PointerType *NestFPTy = cast<PointerType>(NestF->getType());
  const FunctionType *NestFTy = cast<FunctionType>(NestFPTy->getElementType());

  const AttrListPtr &NestAttrs = NestF->getAttributes();
  if (!NestAttrs.isEmpty()) {
    unsigned NestIdx = 1;
    const Type *NestTy = 0;
    Attributes NestAttr = Attribute::None;

    // Look for a parameter marked with the 'nest' attribute.
    for (FunctionType::param_iterator I = NestFTy->param_begin(),
         E = NestFTy->param_end(); I != E; ++NestIdx, ++I)
      if (NestAttrs.paramHasAttr(NestIdx, Attribute::Nest)) {
        // Record the parameter type and any other attributes.
        NestTy = *I;
        NestAttr = NestAttrs.getParamAttributes(NestIdx);
        break;
      }

    if (NestTy) {
      Instruction *Caller = CS.getInstruction();
      std::vector<Value*> NewArgs;
      NewArgs.reserve(unsigned(CS.arg_end()-CS.arg_begin())+1);

      SmallVector<AttributeWithIndex, 8> NewAttrs;
      NewAttrs.reserve(Attrs.getNumSlots() + 1);

      // Insert the nest argument into the call argument list, which may
      // mean appending it.  Likewise for attributes.

      // Add any result attributes.
      if (Attributes Attr = Attrs.getRetAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(0, Attr));

      {
        unsigned Idx = 1;
        CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
        do {
          if (Idx == NestIdx) {
            // Add the chain argument and attributes.
            Value *NestVal = Tramp->getOperand(3);
            if (NestVal->getType() != NestTy)
              NestVal = new BitCastInst(NestVal, NestTy, "nest", Caller);
            NewArgs.push_back(NestVal);
            NewAttrs.push_back(AttributeWithIndex::get(NestIdx, NestAttr));
          }

          if (I == E)
            break;

          // Add the original argument and attributes.
          NewArgs.push_back(*I);
          if (Attributes Attr = Attrs.getParamAttributes(Idx))
            NewAttrs.push_back
              (AttributeWithIndex::get(Idx + (Idx >= NestIdx), Attr));

          ++Idx, ++I;
        } while (1);
      }

      // Add any function attributes.
      if (Attributes Attr = Attrs.getFnAttributes())
        NewAttrs.push_back(AttributeWithIndex::get(~0, Attr));

      // The trampoline may have been bitcast to a bogus type (FTy).
      // Handle this by synthesizing a new function type, equal to FTy
      // with the chain parameter inserted.

      std::vector<const Type*> NewTypes;
      NewTypes.reserve(FTy->getNumParams()+1);

      // Insert the chain's type into the list of parameter types, which may
      // mean appending it.
      {
        unsigned Idx = 1;
        FunctionType::param_iterator I = FTy->param_begin(),
          E = FTy->param_end();

        do {
          if (Idx == NestIdx)
            // Add the chain's type.
            NewTypes.push_back(NestTy);

          if (I == E)
            break;

          // Add the original type.
          NewTypes.push_back(*I);

          ++Idx, ++I;
        } while (1);
      }

      // Replace the trampoline call with a direct call.  Let the generic
      // code sort out any function type mismatches.
      FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
                                               FTy->isVarArg());
      Constant *NewCallee =
        NestF->getType() == PointerType::getUnqual(NewFTy) ?
        NestF : ConstantExpr::getBitCast(NestF,
                                         PointerType::getUnqual(NewFTy));
      const AttrListPtr &NewPAL = AttrListPtr::get(NewAttrs.begin(),
                                                   NewAttrs.end());

      Instruction *NewCaller;
      if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
        NewCaller = InvokeInst::Create(NewCallee,
                                       II->getNormalDest(), II->getUnwindDest(),
                                       NewArgs.begin(), NewArgs.end(),
                                       Caller->getName(), Caller);
        cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
        cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
      } else {
        NewCaller = CallInst::Create(NewCallee, NewArgs.begin(), NewArgs.end(),
                                     Caller->getName(), Caller);
        if (cast<CallInst>(Caller)->isTailCall())
          cast<CallInst>(NewCaller)->setTailCall();
        cast<CallInst>(NewCaller)->
          setCallingConv(cast<CallInst>(Caller)->getCallingConv());
        cast<CallInst>(NewCaller)->setAttributes(NewPAL);
      }
      if (!Caller->getType()->isVoidTy())
        Caller->replaceAllUsesWith(NewCaller);
      Caller->eraseFromParent();
      Worklist.Remove(Caller);
      return 0;
    }
  }

  // Replace the trampoline call with a direct call.  Since there is no 'nest'
  // parameter, there is no need to adjust the argument list.  Let the generic
  // code sort out any function type mismatches.
  Constant *NewCallee =
    NestF->getType() == PTy ? NestF :
                              ConstantExpr::getBitCast(NestF, PTy);
  CS.setCalledFunction(NewCallee);
  return CS.getInstruction();
}