//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation. This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible). Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs. As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
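//
// For example (illustrative IR only, not from a specific test), given:
//
//   %p = alloca { i32, i32 }
//   %f1 = getelementptr { i32, i32 }* %p, i32 0, i32 1
//   store i32 7, i32* %f1
//
// SRoA first splits %p into two scalar allocas %p.0 and %p.1, and Mem2Reg
// then promotes each of those to a plain SSA value, eliminating the memory
// traffic entirely.
//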
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");
namespace {

struct SROA : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  explicit SROA(signed T = -1) : FunctionPass(&ID) {

  bool runOnFunction(Function &F);

  bool performScalarRepl(Function &F);
  bool performPromotion(Function &F);

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<DominatorTree>();
    AU.addRequired<DominanceFrontier>();
    AU.setPreservesCFG();
  }

private:
  TargetData *TD;
  unsigned SRThreshold;

  /// DeadInsts - Keep track of instructions we have made dead, so that
  /// we can remove them after we are done working.
  SmallVector<Value*, 32> DeadInsts;

  /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
  /// information about the uses. All these fields are initialized to false
  /// and set to true when something is learned.
  struct AllocaInfo {
    /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
    bool isUnsafe : 1;

    /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
    bool isMemCpySrc : 1;

    /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
    bool isMemCpyDst : 1;

    AllocaInfo()
      : isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false) {}
  };
  void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

  bool isSafeAllocaToScalarRepl(AllocaInst *AI);
  void isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                           AllocaInfo &Info);
  void isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t &Offset,
                 AllocaInfo &Info);
  void isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
                       const Type *MemOpType, bool isStore, AllocaInfo &Info);
  bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
  uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset,
                                const Type *&IdxTy);

  void DoScalarReplacement(AllocaInst *AI,
                           std::vector<AllocaInst*> &WorkList);
  void DeleteDeadInstructions();
  AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);
  void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                            SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                      SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                  SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                    AllocaInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

  bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                          bool &SawVec, uint64_t Offset, unsigned AllocaSize);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
  Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                    uint64_t Offset, IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, IRBuilder<> &Builder);
  static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
};

} // end anonymous namespace

char SROA::ID = 0;
static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}
bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA. It must be a struct or array type with a small number of elements.
static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
  const Type *T = AI->getAllocatedType();
  // Do not promote any struct into more than 32 separate vars.
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements() <= 32;
  // Arrays are much less likely to be safe for SROA; only consider
  // them if they are very small.
  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements() <= 8;
  return false;
}
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocaInst*> WorkList;

  // Scan the entry basic block, adding all allocas to the worklist.
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
      WorkList.push_back(A);
  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocaInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially. These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      Changed = true;
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global. If this is the case, we can change all users to use
    // the constant global instead. This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
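    // For instance (illustrative IR only):
    //   @C = constant [4 x i32] [i32 1, i32 2, i32 3, i32 4]
    //   %A = alloca [4 x i32]
    //   %A.cast = bitcast [4 x i32]* %A to i8*
    //   call void @llvm.memcpy...(i8* %A.cast,
    //                             i8* bitcast ([4 x i32]* @C to i8*), ...)
    // If %A is never written to again, every use of %A can be rewritten to
    // use @C directly, and both the alloca and the memcpy disappear.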
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
      DEBUG(dbgs() << "  memcpy = " << *TheCopy << '\n');
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation. We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // If the alloca looks like a good candidate for scalar replacement, and if
    // all its users can be transformed, then split up the aggregate into its
    // separate elements.
    if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
      DoScalarReplacement(AI, WorkList);
      Changed = true;
      continue;
    }
    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    // See if we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself. If so, we don't want to transform it needlessly. Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector. We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts. If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && VectorTy->isVectorTy() && HadAVector) {
        DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
              << *VectorTy << '\n');

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      NewAI->takeName(AI);
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate; do SROA now.
void SROA::DoScalarReplacement(AllocaInst *AI,
                               std::vector<AllocaInst*> &WorkList) {
  DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the new alloca instructions, rewrite all the
  // uses of the old alloca.
  RewriteForScalarRepl(AI, AI, 0, ElementAllocas);

  // Now erase any instructions that were made dead while rewriting the alloca.
  DeleteDeadInstructions();
  AI->eraseFromParent();

  NumReplaced++;
}
/// DeleteDeadInstructions - Erase instructions on the DeadInstrs list,
/// recursively including all their operands that become trivially dead.
void SROA::DeleteDeadInstructions() {
  while (!DeadInsts.empty()) {
    Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        // Zero out the operand and see if it becomes trivially dead.
        // (But, don't add allocas to the dead instruction list -- they are
        // already on the worklist and will be deleted separately.)
        *OI = 0;
        if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
  }
}
/// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
/// performing scalar replacement of alloca AI. The results are flagged in
/// the Info parameter. Offset indicates the position within AI that is
/// referenced by this instruction.
void SROA::isSafeForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                               AllocaInfo &Info) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      isSafeForScalarRepl(BC, AI, Offset, Info);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      uint64_t GEPOffset = Offset;
      isSafeGEP(GEPI, AI, GEPOffset, Info);
      if (!Info.isUnsafe)
        isSafeForScalarRepl(GEPI, AI, GEPOffset, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      if (Length)
        isSafeMemAccess(AI, Offset, Length->getZExtValue(), 0,
                        UI.getOperandNo() == 1, Info);
      else
        MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (!LI->isVolatile()) {
        const Type *LIType = LI->getType();
        isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(LIType),
                        LIType, false, Info);
      } else
        MarkUnsafe(Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (!SI->isVolatile() && SI->getOperand(0) != I) {
        const Type *SIType = SI->getOperand(0)->getType();
        isSafeMemAccess(AI, Offset, TD->getTypeAllocSize(SIType),
                        SIType, true, Info);
      } else
        MarkUnsafe(Info);
    } else {
      DEBUG(errs() << "  Transformation preventing inst: " << *User << '\n');
      MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
/// isSafeGEP - Check if a GEP instruction can be handled for scalar
/// replacement. It is safe when all the indices are constant, in-bounds
/// references, and when the resulting offset corresponds to an element within
/// the alloca type. The results are flagged in the Info parameter. Upon
/// return, Offset is adjusted as specified by the GEP indices.
void SROA::isSafeGEP(GetElementPtrInst *GEPI, AllocaInst *AI,
                     uint64_t &Offset, AllocaInfo &Info) {
  gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
  if (GEPIt == E)
    return;

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; GEPIt != E; ++GEPIt) {
    // Ignore struct elements, no extra checking needed for these.
    if ((*GEPIt)->isStructTy())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
    if (!IdxVal)
      return MarkUnsafe(Info);
  }

  // Compute the offset due to this GEP and check if the alloca has a
  // component element at that offset.
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
                                 &Indices[0], Indices.size());
  if (!TypeHasComponent(AI->getAllocatedType(), Offset, 0))
    MarkUnsafe(Info);
}
/// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
/// alloca or has an offset and size that corresponds to a component element
/// within it. The offset checked here may have been formed from a GEP with a
/// pointer bitcasted to a different type.
void SROA::isSafeMemAccess(AllocaInst *AI, uint64_t Offset, uint64_t MemSize,
                           const Type *MemOpType, bool isStore,
                           AllocaInfo &Info) {
  // Check if this is a load/store of the entire alloca.
  if (Offset == 0 && MemSize == TD->getTypeAllocSize(AI->getAllocatedType())) {
    bool UsesAggregateType = (MemOpType == AI->getAllocatedType());
    // This is safe for MemIntrinsics (where MemOpType is 0), integer types
    // (which are essentially the same as the MemIntrinsics, especially with
    // regard to copying padding between elements), or references using the
    // aggregate type of the alloca.
    if (!MemOpType || MemOpType->isIntegerTy() || UsesAggregateType) {
      if (!UsesAggregateType) {
        if (isStore)
          Info.isMemCpyDst = true;
        else
          Info.isMemCpySrc = true;
      }
      return;
    }
  }
  // Check if the offset/size correspond to a component within the alloca type.
  const Type *T = AI->getAllocatedType();
  if (TypeHasComponent(T, Offset, MemSize))
    return;

  return MarkUnsafe(Info);
}
/// TypeHasComponent - Return true if T has a component type with the
/// specified offset and size. If Size is zero, do not check the size.
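/// For example (illustrative, assuming a typical layout where i32 is 4 bytes
/// and i16 is 2 with no padding): for T = { i32, { i16, i16 } }, Offset = 6,
/// Size = 2, the walk descends into the nested struct at offset 4 (Offset
/// becomes 2), then hits its second i16 field exactly (Offset 0,
/// EltSize == Size), so the result is true.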
bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
  const Type *EltTy;
  uint64_t EltSize;
  if (const StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = TD->getStructLayout(ST);
    unsigned EltIdx = Layout->getElementContainingOffset(Offset);
    EltTy = ST->getContainedType(EltIdx);
    EltSize = TD->getTypeAllocSize(EltTy);
    Offset -= Layout->getElementOffset(EltIdx);
  } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
    EltTy = AT->getElementType();
    EltSize = TD->getTypeAllocSize(EltTy);
    if (Offset >= AT->getNumElements() * EltSize)
      return false;
    Offset %= EltSize;
  } else {
    return false;
  }
  if (Offset == 0 && (Size == 0 || EltSize == Size))
    return true;
  // Check if the component spans multiple elements.
  if (Offset + Size > EltSize)
    return false;
  return TypeHasComponent(EltTy, Offset, Size);
}
/// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
/// the instruction I, which references it, to use the separate elements.
/// Offset indicates the position within AI that is referenced by this
/// instruction.
void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                                SmallVector<AllocaInst*, 32> &NewElts) {
  for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
      RewriteBitCast(BC, AI, Offset, NewElts);
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      RewriteGEP(GEPI, AI, Offset, NewElts);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
      uint64_t MemSize = Length->getZExtValue();
      if (Offset == 0 &&
          MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
        RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
      // Otherwise the intrinsic can only touch a single element and the
      // address operand will be updated, so nothing else needs to be done.
    } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      const Type *LIType = LI->getType();
      if (LIType == AI->getAllocatedType()) {
        // Replace:
        //   %res = load { i32, i32 }* %alloc
        // with:
        //   %load.0 = load i32* %alloc.0
        //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
        //   %load.1 = load i32* %alloc.1
        //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
        // (Also works for arrays instead of structs)
        Value *Insert = UndefValue::get(LIType);
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Load = new LoadInst(NewElts[i], "load", LI);
          Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
        }
        LI->replaceAllUsesWith(Insert);
        DeadInsts.push_back(LI);
      } else if (LIType->isIntegerTy() &&
                 TD->getTypeAllocSize(LIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a load of the entire alloca to an integer, rewrite it.
        RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      const Type *SIType = Val->getType();
      if (SIType == AI->getAllocatedType()) {
        // Replace:
        //   store { i32, i32 } %val, { i32, i32 }* %alloc
        // with:
        //   %val.0 = extractvalue { i32, i32 } %val, 0
        //   store i32 %val.0, i32* %alloc.0
        //   %val.1 = extractvalue { i32, i32 } %val, 1
        //   store i32 %val.1, i32* %alloc.1
        // (Also works for arrays instead of structs)
        for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
          Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
          new StoreInst(Extract, NewElts[i], SI);
        }
        DeadInsts.push_back(SI);
      } else if (SIType->isIntegerTy() &&
                 TD->getTypeAllocSize(SIType) ==
                 TD->getTypeAllocSize(AI->getAllocatedType())) {
        // If this is a store of the entire alloca from an integer, rewrite it.
        RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      }
    }
  }
}
/// RewriteBitCast - Update a bitcast reference to the alloca being replaced
/// and recursively continue updating all of its uses.
void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                          SmallVector<AllocaInst*, 32> &NewElts) {
  RewriteForScalarRepl(BC, AI, Offset, NewElts);
  if (BC->getOperand(0) != AI)
    return;

  // The bitcast references the original alloca. Replace its uses with
  // references to the first new element alloca.
  Instruction *Val = NewElts[0];
  if (Val->getType() != BC->getDestTy()) {
    Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
    Val->takeName(BC);
  }
  BC->replaceAllUsesWith(Val);
  DeadInsts.push_back(BC);
}
/// FindElementAndOffset - Return the index of the element containing Offset
/// within the specified type, which must be either a struct or an array.
/// Sets T to the type of the element and Offset to the offset within that
/// element. IdxTy is set to the type of the index result to be used in a
/// GEP instruction.
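/// For example (illustrative, assuming i64 is 8-byte aligned so the second
/// element of { i32, i64 } starts at byte 8): with T = { i32, i64 } and
/// Offset = 12, this returns index 1 and leaves T = i64, Offset = 4, and
/// IdxTy = i32 (struct indices are i32; array indices use i64).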
uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset,
                                    const Type *&IdxTy) {
  uint64_t Idx = 0;
  if (const StructType *ST = dyn_cast<StructType>(T)) {
    const StructLayout *Layout = TD->getStructLayout(ST);
    Idx = Layout->getElementContainingOffset(Offset);
    T = ST->getContainedType(Idx);
    Offset -= Layout->getElementOffset(Idx);
    IdxTy = Type::getInt32Ty(T->getContext());
    return Idx;
  }

  const ArrayType *AT = cast<ArrayType>(T);
  T = AT->getElementType();
  uint64_t EltSize = TD->getTypeAllocSize(T);
  Idx = Offset / EltSize;
  Offset -= Idx * EltSize;
  IdxTy = Type::getInt64Ty(T->getContext());
  return Idx;
}
/// RewriteGEP - Check if this GEP instruction moves the pointer across
/// elements of the alloca that are being split apart, and if so, rewrite
/// the GEP to be relative to the new element.
void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                      SmallVector<AllocaInst*, 32> &NewElts) {
  uint64_t OldOffset = Offset;
  SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
  Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
                                 &Indices[0], Indices.size());
  RewriteForScalarRepl(GEPI, AI, Offset, NewElts);

  const Type *T = AI->getAllocatedType();
  const Type *IdxTy;
  uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
  if (GEPI->getOperand(0) == AI)
    OldIdx = ~0ULL; // Force the GEP to be rewritten.

  T = AI->getAllocatedType();
  uint64_t EltOffset = Offset;
  uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);

  // If this GEP does not move the pointer across elements of the alloca
  // being split, then it does not need to be rewritten.
  if (Idx == OldIdx)
    return;

  const Type *i32Ty = Type::getInt32Ty(AI->getContext());
  SmallVector<Value*, 8> NewArgs;
  NewArgs.push_back(Constant::getNullValue(i32Ty));
  while (EltOffset != 0) {
    uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
    NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
  }
  Instruction *Val = NewElts[Idx];
  if (NewArgs.size() > 1) {
    Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(),
                                            NewArgs.end(), "", GEPI);
    Val->takeName(GEPI);
  }
  if (Val->getType() != GEPI->getType())
    Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI);
  GEPI->replaceAllUsesWith(Val);
  DeadInsts.push_back(GEPI);
}
/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
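/// For example (illustrative), an 8-byte memcpy from %src over an alloca of
/// type { i32, i32 } that was split into %A.0 and %A.1 becomes, per element:
///   %src.0 = getelementptr { i32, i32 }* %src, i32 0, i32 0
///   %tmp = load i32* %src.0
///   store i32 %tmp, i32* %A.0
/// (and likewise for element 1). Elements that are not single values get a
/// narrower memcpy/memset of their own instead.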
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                        AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type. The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting.
  // For memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
    if (Inst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(Inst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // Remove bitcasts and all-zero GEPs from OtherPtr. This is an
    // optimization, but it's also required to detect the corner case where
    // both pointer operands are referencing the same memory, and where
    // OtherPtr may be a bitcast or GEP that is currently being rewritten.
    // (This function is only called for mem intrinsics that access the whole
    // aggregate, so non-zero GEPs are not an issue here.)
    while (1) {
      if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr)) {
        OtherPtr = BC->getOperand(0);
        continue;
      }
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr)) {
        // All zero GEPs are effectively bitcasts.
        if (GEP->hasAllZeroIndices()) {
          OtherPtr = GEP->getOperand(0);
          continue;
        }
      }
      break;
    }
    // Copying the alloca to itself is a no-op: just delete it.
    if (OtherPtr == AI || OtherPtr == NewElts[0]) {
      // This code will run twice for a no-op memcpy -- once for each operand.
      // Put only one reference to MI on the DeadInsts list.
      for (SmallVector<Value*, 32>::const_iterator I = DeadInsts.begin(),
             E = DeadInsts.end(); I != E; ++I)
        if (*I == MI) return;
      DeadInsts.push_back(MI);
      return;
    }

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }
  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == Inst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                      ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2,
                                              OtherPtr->getName()+"."+Twine(i),
                                                   MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element. If the alignment of
      // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
      // known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }
    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal = 0;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Replicate the byte value into every byte of the element.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }
          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (ValTy->isPointerTy())
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPointTy())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
    // Otherwise, if we're storing a byte variable, use a memset call for
    // this element.

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getName(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getName(), MI);

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        // Align
        ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize),  // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  DeadInsts.push_back(MI);
}
/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation. Extract out the pieces of the stored
/// integer and store them individually.
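/// For example (illustrative, little-endian), storing i64 %V over an alloca
/// of type { i32, i32 } that was split into %A.0 and %A.1 becomes:
///   %e0 = trunc i64 %V to i32         ; bits 0..31, field 0
///   store i32 %e0, i32* %A.0
///   %s1 = lshr i64 %V, 32
///   %e1 = trunc i64 %s1 to i32        ; bits 32..63, field 1
///   store i32 %e1, i32* %A.1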
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
               << '\n');
  // There are two forms here: AI could be an array or struct. Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(), FieldSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }
  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits),"",SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPointTy() ||
                 ArrayEltTy->isVectorTy()) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  DeadInsts.push_back(SI);
}
/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer. Load the individual pieces to form the aggregate value.
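/// For example (illustrative, little-endian), loading i64 from an alloca of
/// type { i32, i32 } that was split into %A.0 and %A.1 becomes:
///   %v0 = load i32* %A.0
///   %z0 = zext i32 %v0 to i64
///   %v1 = load i32* %A.1
///   %z1 = zext i32 %v1 to i64
///   %s1 = shl i64 %z1, 32
///   %res = or i64 %s1, %z0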
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getAllocatedType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
               << '\n');

  // There are two forms here: AI could be an array or struct. Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));
  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca. If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
        !FieldTy->isVectorTy())
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  DeadInsts.push_back(LI);
}
/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
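/// For example (assuming a typical layout where i32 requires 4-byte
/// alignment), { i8, i32 } has three bytes of padding between the fields and
/// { i32, i8 } has three bytes of tail padding; both report true. An i1 also
/// reports true, since its size (1 bit) is smaller than its alloc size
/// (8 bits).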
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
        TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
/// an aggregate can be broken down into elements. Return true if safe,
/// false if not.
bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
  // Loop over the use list of the alloca. We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  isSafeForScalarRepl(AI, AI, 0, Info);
  if (Info.isUnsafe) {
    DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
    return false;
  }

  // Okay, we know all the users are promotable. If the aggregate is a memcpy
  // source and destination, we have to be careful. In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used. In these cases, we refuse to promote the
  // alloca.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getAllocatedType(), *TD))
    return false;

  return true;
}
/// MergeInType - Add the 'In' type to the accumulated vector type (VecTy) so
/// far at the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
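/// For example (illustrative), if a 16-byte alloca sees a load of <4 x float>
/// at offset 0 and a store of float at offset 8, the float access is
/// compatible with the 4-byte element size, so VecTy stays <4 x float>. A
/// store of i8 would not match that element size, and VecTy would degrade to
/// VoidTy, meaning: fall back to one large integer.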
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if it
    // matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector. If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In->isFloatTy() || In->isDoubleTy() ||
               (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset is
      // compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form. We can still turn this into a large integer.
  VecTy = Type::getVoidTy(Context);
}
/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
/// its accesses to a single vector type, return true and set VecTy to
/// the new type. If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy. Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// SawVec flag.
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy,
                  AllocaSize, *TD, V->getContext());
      SawVec |= LI->getType()->isVectorTy();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset,
                  VecTy, AllocaSize, *TD, V->getContext());
      SawVec |= SI->getOperand(0)->getType()->isVectorTy();
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec,Offset+GEPOffset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly. This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right. By the end of this, there should be no uses of Ptr.
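/// For example (illustrative, little-endian), with NewAI of type i32 and a
/// store of i8 %b through a pointer to byte 2 of the alloca (Offset = 16
/// bits), the store is rewritten to a read-modify-write of the whole i32:
///   %in  = load i32* %NewAI
///   %new = <%b zero-extended, shifted to bits 16..23, merged into %in>
///   store i32 %new, i32* %NewAI
/// (the merge itself is emitted by ConvertScalar_InsertValue below).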
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getPointerOperandType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the inserted store
      // overwrote the entire thing.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }
    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);

        // If the load we just inserted is now dead, then the memset overwrote
        // the entire thing.
        if (Old->use_empty())
          Old->eraseFromParent();
      }
      MSI->eraseFromParent();
      continue;
    }
    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both the same alloca, then this is
      // a noop copy-to-self, just delete it. Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject(0));
      if (MTI->getSource()->getUnderlyingObject(0) != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject(0) != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}
/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset. This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
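/// For example (illustrative, little-endian), extracting an i16 that lives at
/// bit offset 16 of an i64 union value emits:
///   %shr = lshr i64 %FromVal, 16
///   %val = trunc i64 %shr to i16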
Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                                        uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  if (FromVal->getType() == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (ToType->isVectorTy())
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }
  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }
  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled with shl below). We do
  // this to support (f.e.) loads off the end of a structure where only some
  // bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                  ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                 -ShAmt), "tmp");

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
      Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                   LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (ToType->isIntegerTy()) {
    // Should be done.
  } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}
/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
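/// For example (illustrative, little-endian), inserting i16 %SV at bit offset
/// 16 of an i64 union value %Old emits roughly:
///   %z   = zext i16 %SV to i64
///   %shl = shl i64 %z, 16
///   %m   = and i64 %Old, -4294901761    ; i.e. ~(0xFFFF << 16)
///   %ins = or i64 %m, %shl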
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type.
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                     ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }
  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }
  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
    SV = Builder.CreateBitCast(SV,
                            IntegerType::get(SV->getContext(),SrcWidth), "tmp");
  else if (SV->getType()->isPointerTy())
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }
  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled with lshr below). We do
  // this to support (f.e.) stores off the end of a structure where only some
  // bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                           ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                            -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }

  // Mask out the bits we are about to insert from the old value, and or
  // them in now.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable. This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca. Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses. If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses. If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer. If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe. Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global. If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}