Teach dead store elimination that certain intrinsics write to memory just like
a store.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@86359 91177308-0d34-0410-b5e6-96231b3b80d8
Nick Lewycky, 2009-11-07 08:34:40 +00:00
commit 4de9b7f51f (parent 60a0063d2a)
3 changed files with 183 additions and 62 deletions
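
For orientation, here is a minimal standalone sketch (not taken from this commit; the function name and constants are ours) of the kind of case this change enables. The new tests added below exercise the same pattern for memcpy, memmove, memset and lifetime.end. With this patch, DSE should delete the store because the following memset rewrites the byte it wrote before anything can read it:

declare void @llvm.memset.i8(i8*, i8, i8, i32)

define void @example(i8* %A) {
  store i8 0, i8* %A                                    ; now recognized as dead
  call void @llvm.memset.i8(i8* %A, i8 1, i8 1, i32 0)  ; overwrites the byte above
  ret void
}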

@@ -78,19 +78,84 @@ static RegisterPass<DSE> X("dse", "Dead Store Elimination");
FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
/// isValueAtLeastAsBigAs - Return true if V1 is greater than or equal to the
/// stored size of V2. This returns false if we don't know.
/// doesClobberMemory - Does this instruction clobber (write without reading)
/// some memory?
static bool doesClobberMemory(Instruction *I) {
if (isa<StoreInst>(I))
return true;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
switch (II->getIntrinsicID()) {
default: return false;
case Intrinsic::memset: case Intrinsic::memmove: case Intrinsic::memcpy:
case Intrinsic::lifetime_end: return true;
}
}
return false;
}
/// isElidable - If the memory this instruction writes to is otherwise unused,
/// may we delete this instruction?
static bool isElidable(Instruction *I) {
assert(doesClobberMemory(I));
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
return II->getIntrinsicID() != Intrinsic::lifetime_end;
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return !SI->isVolatile();
return true;
}
/// getPointerOperand - Return the pointer that is being clobbered.
static Value *getPointerOperand(Instruction *I) {
assert(doesClobberMemory(I));
if (StoreInst *SI = dyn_cast<StoreInst>(I))
return SI->getPointerOperand();
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
return MI->getOperand(1);
assert(cast<IntrinsicInst>(I)->getIntrinsicID() == Intrinsic::lifetime_end);
return cast<IntrinsicInst>(I)->getOperand(2);
}
/// getStoreSize - Return the length in bytes of the write by the clobbering
/// instruction. If variable or unknown, returns -1.
static unsigned getStoreSize(Instruction *I, const TargetData *TD = 0) {
assert(doesClobberMemory(I));
if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
if (!TD) return -1u;
const Type *PTy =
cast<PointerType>(SI->getPointerOperand()->getType())->getElementType();
return TD->getTypeStoreSize(PTy);
}
Value *Len;
if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I)) {
Len = MI->getLength();
} else {
assert(cast<IntrinsicInst>(I)->getIntrinsicID() ==
Intrinsic::lifetime_end);
Len = cast<IntrinsicInst>(I)->getOperand(1);
}
if (ConstantInt *LenCI = dyn_cast<ConstantInt>(Len))
if (!LenCI->isAllOnesValue())
return LenCI->getZExtValue();
return -1u;
}
/// isStoreAtLeastAsWideAs - Return true if the size of the store in I1 is
/// greater than or equal to the store in I2. This returns false if we don't
/// know.
///
static bool isValueAtLeastAsBigAs(Value *V1, Value *V2, const TargetData *TD) {
const Type *V1Ty = V1->getType(), *V2Ty = V2->getType();
static bool isStoreAtLeastAsWideAs(Instruction *I1, Instruction *I2,
const TargetData *TD) {
const Type *I1Ty = getPointerOperand(I1)->getType();
const Type *I2Ty = getPointerOperand(I2)->getType();
// Exactly the same type, must have exactly the same size.
if (V1Ty == V2Ty) return true;
if (I1Ty == I2Ty) return true;
// If we don't have target data, we don't know.
if (TD == 0) return false;
int I1Size = getStoreSize(I1, TD);
int I2Size = getStoreSize(I2, TD);
return TD->getTypeStoreSize(V1Ty) >= TD->getTypeStoreSize(V2Ty);
return I1Size != -1 && I2Size != -1 && I1Size >= I2Size;
}
bool DSE::runOnBasicBlock(BasicBlock &BB) {
@@ -104,14 +169,9 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *Inst = BBI++;
// If we find a store or a free, get its memory dependence.
if (!isa<StoreInst>(Inst) && !isFreeCall(Inst))
if (!doesClobberMemory(Inst) && !isFreeCall(Inst))
continue;
// Don't molest volatile stores or do queries that will return "clobber".
if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
if (SI->isVolatile())
continue;
MemDepResult InstDep = MD.getDependency(Inst);
// Ignore non-local stores.
@@ -124,16 +184,16 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
continue;
}
StoreInst *SI = cast<StoreInst>(Inst);
// If not a definite must-alias dependency, ignore it.
if (!InstDep.isDef())
continue;
// If this is a store-store dependence, then the previous store is dead so
// long as this store is at least as big as it.
if (StoreInst *DepStore = dyn_cast<StoreInst>(InstDep.getInst()))
if (isValueAtLeastAsBigAs(SI->getOperand(0), DepStore->getOperand(0),TD)){
if (doesClobberMemory(InstDep.getInst())) {
Instruction *DepStore = InstDep.getInst();
if (isStoreAtLeastAsWideAs(Inst, DepStore, TD) &&
isElidable(DepStore)){
// Delete the store and now-dead instructions that feed it.
DeleteDeadInstruction(DepStore);
NumFastStores++;
@@ -146,25 +206,31 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
--BBI;
continue;
}
}
if (!isElidable(Inst))
continue;
// If we're storing the same value back to a pointer that we just
// loaded from, then the store can be removed.
if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
SI->getOperand(0) == DepLoad) {
// DeleteDeadInstruction can delete the current instruction. Save BBI
// in case we need it.
WeakVH NextInst(BBI);
DeleteDeadInstruction(SI);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
NumFastStores++;
MadeChange = true;
continue;
if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
SI->getOperand(0) == DepLoad) {
// DeleteDeadInstruction can delete the current instruction. Save BBI
// in case we need it.
WeakVH NextInst(BBI);
DeleteDeadInstruction(SI);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
else if (BBI != BB.begin()) // Revisit this instruction if possible.
--BBI;
NumFastStores++;
MadeChange = true;
continue;
}
}
}
@@ -176,7 +242,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// in case we need it.
WeakVH NextInst(BBI);
DeleteDeadInstruction(SI);
DeleteDeadInstruction(Inst);
if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin();
@@ -202,11 +268,11 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
bool DSE::handleFreeWithNonTrivialDependency(Instruction *F, MemDepResult Dep) {
AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
StoreInst *Dependency = dyn_cast_or_null<StoreInst>(Dep.getInst());
if (!Dependency || Dependency->isVolatile())
Instruction *Dependency = Dep.getInst();
if (!Dependency || !doesClobberMemory(Dependency) || !isElidable(Dependency))
return false;
Value *DepPointer = Dependency->getPointerOperand()->getUnderlyingObject();
Value *DepPointer = getPointerOperand(Dependency)->getUnderlyingObject();
// Check for aliasing.
if (AA.alias(F->getOperand(1), 1, DepPointer, 1) !=
@@ -251,39 +317,28 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
--BBI;
// If we find a store whose pointer is dead.
if (StoreInst* S = dyn_cast<StoreInst>(BBI)) {
if (!S->isVolatile()) {
if (doesClobberMemory(BBI)) {
if (isElidable(BBI)) {
// See through pointer-to-pointer bitcasts
Value* pointerOperand = S->getPointerOperand()->getUnderlyingObject();
Value *pointerOperand = getPointerOperand(BBI)->getUnderlyingObject();
// Alloca'd pointers or byval arguments (which are functionally like
// alloca's) are valid candidates for removal.
if (deadPointers.count(pointerOperand)) {
// DCE instructions only used to calculate that store.
Instruction *Dead = BBI;
BBI++;
DeleteDeadInstruction(S, &deadPointers);
DeleteDeadInstruction(Dead, &deadPointers);
NumFastStores++;
MadeChange = true;
continue;
}
}
continue;
}
// We can also remove memcpy's to local variables at the end of a function.
if (MemCpyInst *M = dyn_cast<MemCpyInst>(BBI)) {
Value *dest = M->getDest()->getUnderlyingObject();
if (deadPointers.count(dest)) {
BBI++;
DeleteDeadInstruction(M, &deadPointers);
NumFastOther++;
MadeChange = true;
// Because a memcpy or memmove is also a load, we can't skip it if we
// didn't remove it.
if (!isa<MemTransferInst>(BBI))
continue;
}
// Because a memcpy is also a load, we can't skip it if we didn't remove
// it.
}
Value* killPointer = 0;
@@ -304,11 +359,11 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
killPointer = L->getPointerOperand();
} else if (VAArgInst* V = dyn_cast<VAArgInst>(BBI)) {
killPointer = V->getOperand(0);
} else if (isa<MemCpyInst>(BBI) &&
isa<ConstantInt>(cast<MemCpyInst>(BBI)->getLength())) {
killPointer = cast<MemCpyInst>(BBI)->getSource();
} else if (isa<MemTransferInst>(BBI) &&
isa<ConstantInt>(cast<MemTransferInst>(BBI)->getLength())) {
killPointer = cast<MemTransferInst>(BBI)->getSource();
killPointerSize = cast<ConstantInt>(
cast<MemCpyInst>(BBI)->getLength())->getZExtValue();
cast<MemTransferInst>(BBI)->getLength())->getZExtValue();
} else if (AllocaInst* A = dyn_cast<AllocaInst>(BBI)) {
deadPointers.erase(A);

@@ -0,0 +1,19 @@
; RUN: opt -S -dse < %s | FileCheck %s
declare void @llvm.lifetime.end(i64, i8*)
declare void @llvm.memset.i8(i8*, i8, i8, i32)
define void @test1() {
; CHECK: @test1
%A = alloca i8
store i8 0, i8* %A ;; Written to by memset
call void @llvm.lifetime.end(i64 1, i8* %A)
; CHECK: lifetime.end
call void @llvm.memset.i8(i8* %A, i8 0, i8 -1, i32 0)
; CHECK-NOT: memset
ret void
; CHECK: ret void
}

@@ -0,0 +1,47 @@
; RUN: opt -S -dse < %s | FileCheck %s
declare void @llvm.memcpy.i8(i8*, i8*, i8, i32)
declare void @llvm.memmove.i8(i8*, i8*, i8, i32)
declare void @llvm.memset.i8(i8*, i8, i8, i32)
define void @test1() {
; CHECK: @test1
%A = alloca i8
%B = alloca i8
store i8 0, i8* %A ;; Written to by memcpy
; CHECK-NOT: store
call void @llvm.memcpy.i8(i8* %A, i8* %B, i8 -1, i32 0)
ret void
; CHECK: ret void
}
define void @test2() {
; CHECK: @test2
%A = alloca i8
%B = alloca i8
store i8 0, i8* %A ;; Written to by memmove
; CHECK-NOT: store
call void @llvm.memmove.i8(i8* %A, i8* %B, i8 -1, i32 0)
ret void
; CHECK: ret void
}
define void @test3() {
; CHECK: @test3
%A = alloca i8
%B = alloca i8
store i8 0, i8* %A ;; Written to by memset
; CHECK-NOT: store
call void @llvm.memset.i8(i8* %A, i8 0, i8 -1, i32 0)
ret void
; CHECK: ret void
}
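
For contrast, here is a standalone sketch (again ours, not part of the committed tests) of a write the pass must still leave alone: isElidable() returns false for volatile stores, so the store below survives even though the following memset clobbers the same byte.

declare void @llvm.memset.i8(i8*, i8, i8, i32)

define void @keep_volatile(i8* %P) {
  volatile store i8 0, i8* %P                           ; never elidable, must remain
  call void @llvm.memset.i8(i8* %P, i8 1, i8 1, i32 0)
  ret void
}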