//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  LLVMContext &Context = V->getContext();

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        APInt Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
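
// For example, i32 0xA5A5A5A5 decomposes to halves 0xA5A5/0xA5A5 and then
// to 0xA5/0xA5, so the loop above returns i8 0xA5, while i16 0x1234 is
// rejected at the first level because 0x12 != 0x34.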

static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0) {
      // A variable index makes the offset meaningless; the caller must check
      // VariableIdxFound before using the returned offset.
      VariableIdxFound = true;
      return 0;
    }
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case Offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they handle some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}
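
// As an illustrative sketch (%S and the value names are invented), given
//   %a = getelementptr %S* %base, i64 %i, i32 1
//   %b = getelementptr %S* %base, i64 %i, i32 0
// the bases and the variable index %i match, so IsPointerOffset(%a, %b, ...)
// succeeds with Offset equal to minus the byte offset of field 1 in %S.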

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
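
// For example, on a target with 4-byte pointers, four i8 stores covering
// 4 bytes give NumPointerStores = 1 and NumByteStores = 0; since 4 > 1, the
// range is lowered to a memset, which the code generator can then emit as a
// single i32 store.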

namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};
} // end anon namespace

/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list. We keep the ranges sorted for
  // simplicity here. This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
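
// For example, adding a store covering [0, 8) to the sorted ranges
// {[1, 2), [4, 6)} lands on [1, 2), extends its start to 0, and the merge
// loop above then absorbs [4, 6), leaving the single range [0, 8).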

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(&ID) {}

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

static RegisterPass<MemCpyOpt> X("memcpyopt",
                                 "MemCpy Optimization");

/// processStore - As we scan forward over instructions, we look for some other
/// patterns to fold away. In particular, this looks for stores to neighboring
/// locations of memory. If it sees enough consecutive ones (currently 4) it
/// attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (ByteVal == 0)
    return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store that can be splatted. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out. We don't even
      // allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);

  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset. We put
    // the memset right before the first instruction that isn't part of this
    // memset block. This ensures that the memset is dominated by any addressing
    // instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Ty = Type::getInt64Ty(Context);
      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr = Type::getInt8PtrTy(Context);
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,   // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i];
          dbgs() << "With: " << *C);
    (void)C; // Silence unused-variable warnings when DEBUG compiles away.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    NumMemSetInfer++;
    MadeChange = true;
  }

  return MadeChange;
}
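
// As a concrete sketch (illustrative IR; the pointer names are invented),
// a run of byte stores such as:
//   store i8 0, i8* %P
//   store i8 0, i8* %P1        ; %P1 = getelementptr i8* %P, i64 1
//   store i8 0, i8* %P2        ; %P2 = getelementptr i8* %P, i64 2
//   store i8 0, i8* %P3        ; %P3 = getelementptr i8* %P, i64 3
// is collapsed into roughly:
//   call void @llvm.memset.i64(i8* %P, i8 0, i64 4, i32 1)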

/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value *cpyDest = cpy->getDest();
  Value *cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  NumMemCpyInstr++;

  return true;
}
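
// In C-like pseudocode, the transformation this function performs is
// (the names here are illustrative):
//   struct S tmp;                    // srcAlloca
//   produce(&tmp);                   // the call C
//   memcpy(dst, &tmp, sizeof(tmp));  // cpy
// =>
//   produce(dst);                    // the call writes straight into dst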

/// processMemCpy - perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());

  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpy's.
  const Type *Ty = M->getLength()->getType();
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), &Ty, 1);

  Value *Args[4] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
  };

  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);

  // If C and M don't interfere, then this is a valid transformation. If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    NumMemCpyInstr++;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}
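
// For example, given:
//   memcpy(Y, X, 64)    ; MDep
//   memcpy(Z, Y, 64)    ; M, where Z doesn't alias X or Y
// M is rewritten to memcpy(Z, X, 64), leaving the first memcpy as a
// candidate for dead store elimination.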

/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query, this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = ~0ULL;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *Ty = M->getLength()->getType();
  M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));

  // MemDep may have overly conservative information about this instruction,
  // just conservatively flush it from the cache.
  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}
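
// Note that the rewrite above only swaps the callee (operand 0 of the call)
// from llvm.memmove to llvm.memcpy; the dest, src, length and align operands
// are left in place, since the two intrinsics share the same signature.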

// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        if (processMemMove(M)) {
          --BI; // Reprocess the new memcpy.
          MadeChange = true;
        }
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  while (1) {
    if (!iterateOnFunction(F))