More detailed dependence test between volatile and non-volatile accesses

Differential Revision: http://reviews.llvm.org/D16857



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@261589 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Krzysztof Parzyszek 2016-02-22 23:07:43 +00:00
parent 2be99e439c
commit 30f366d087
2 changed files with 87 additions and 24 deletions

View File

@ -505,6 +505,22 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
// AliasAnalysis::callCapturesBefore.
OrderedBasicBlock OBB(BB);
// Return "true" if and only if the instruction I is either a non-simple
// load or a non-simple store.
auto isNonSimpleLoadOrStore = [] (Instruction *I) -> bool {
if (auto *LI = dyn_cast<LoadInst>(I))
return !LI->isSimple();
if (auto *SI = dyn_cast<StoreInst>(I))
return !SI->isSimple();
return false;
};
// Return "true" if I is not a load and not a store, but it does access
// memory.
auto isOtherMemAccess = [] (Instruction *I) -> bool {
return !isa<LoadInst>(I) && !isa<StoreInst>(I) && I->mayReadOrWriteMemory();
};
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
Instruction *Inst = &*--ScanIt;
@ -552,24 +568,16 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
return MemDepResult::getClobber(LI);
// Otherwise, volatile doesn't imply any special ordering
}
// Atomic loads have complications involved.
// A Monotonic (or higher) load is OK if the query inst is itself not atomic.
// FIXME: This is overly conservative.
if (LI->isAtomic() && LI->getOrdering() > Unordered) {
if (!QueryInst)
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(LI);
if (LI->getOrdering() != Monotonic)
return MemDepResult::getClobber(LI);
if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
if (!QueryLI->isSimple())
return MemDepResult::getClobber(LI);
} else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
if (!QuerySI->isSimple())
return MemDepResult::getClobber(LI);
} else if (QueryInst->mayReadOrWriteMemory()) {
return MemDepResult::getClobber(LI);
}
}
MemoryLocation LoadLoc = MemoryLocation::get(LI);
@ -630,20 +638,12 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
// Atomic stores have complications involved.
// A Monotonic store is OK if the query inst is itself not atomic.
// FIXME: This is overly conservative.
if (!SI->isUnordered()) {
if (!QueryInst)
if (!SI->isUnordered() && SI->isAtomic()) {
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(SI);
if (SI->getOrdering() != Monotonic)
return MemDepResult::getClobber(SI);
if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst)) {
if (!QueryLI->isSimple())
return MemDepResult::getClobber(SI);
} else if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst)) {
if (!QuerySI->isSimple())
return MemDepResult::getClobber(SI);
} else if (QueryInst->mayReadOrWriteMemory()) {
return MemDepResult::getClobber(SI);
}
}
// FIXME: this is overly conservative.
@ -651,7 +651,9 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
// non-aliasing locations, as normal accesses can for example be reordered
// with volatile accesses.
if (SI->isVolatile())
return MemDepResult::getClobber(SI);
if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(SI);
// If alias analysis can tell that this store is guaranteed to not modify
// the query pointer, ignore it. Use getModRefInfo to handle cases where
@ -963,7 +965,7 @@ getNonLocalPointerDependency(Instruction *QueryInst,
assert(Loc.Ptr->getType()->isPointerTy() &&
"Can't get pointer deps of a non-pointer!");
Result.clear();
// This routine does not expect to deal with volatile instructions.
// Doing so would require piping through the QueryInst all the way through.
// TODO: volatiles can't be elided, but they can be reordered with other

View File

@ -0,0 +1,61 @@
; RUN: opt -tbaa -gvn -S < %s | FileCheck %s
%struct.t = type { i32* }
; The value stored through the loaded pointer and the memory holding the
; pointer itself do not alias (per TBAA), so the second reload of the
; pointer is redundant. Check that it can be eliminated.
; CHECK-LABEL: test1
; CHECK: load
; CHECK-NOT: load
; Both loads of the pointer use TBAA tag !1 (the struct's pointer field),
; while the volatile stores through that pointer use tag !6 ("int").
; The tags only meet at the TBAA root, so the stores cannot modify %m and
; GVN is expected to forward %0 to the second load despite the volatility.
define void @test1(%struct.t* nocapture readonly %p, i32 %v) #0 {
entry:
%m = getelementptr inbounds %struct.t, %struct.t* %p, i32 0, i32 0
%0 = load i32*, i32** %m, align 4, !tbaa !1
store volatile i32 %v, i32* %0, align 4, !tbaa !6
%1 = load i32*, i32** %m, align 4, !tbaa !1
store volatile i32 %v, i32* %1, align 4, !tbaa !6
ret void
}
; The store via the loaded address may overwrite the address itself.
; Make sure that both loads remain.
; CHECK-LABEL: test2
; CHECK: load
; CHECK: store
; CHECK: load
; Unlike test1, the volatile stores here carry the same TBAA tag (!1) as the
; pointer loads, so the store through %0 may overwrite the pointer slot %m
; itself. The second load must therefore not be eliminated — the CHECK lines
; above require load, store, load to all survive.
define void @test2(%struct.t* nocapture readonly %p, i32 %v) #0 {
entry:
%m = getelementptr inbounds %struct.t, %struct.t* %p, i32 0, i32 0
%0 = load i32*, i32** %m, align 4, !tbaa !1
store volatile i32 %v, i32* %0, align 4, !tbaa !1
%1 = load i32*, i32** %m, align 4, !tbaa !1
store volatile i32 %v, i32* %1, align 4, !tbaa !1
ret void
}
; The loads are ordered and non-monotonic. Although they are not aliased to
; the stores, make sure both are preserved.
; CHECK-LABEL: test3
; CHECK: load
; CHECK: store
; CHECK: load
; Same aliasing situation as test1 (stores use !6, loads use !1), but the
; pointer loads are "load atomic ... acquire" — stronger than monotonic.
; Acquire ordering forbids folding the two loads even though TBAA says they
; do not alias the stores; both loads must be preserved.
define void @test3(%struct.t* nocapture readonly %p, i32 %v) #0 {
entry:
%m = getelementptr inbounds %struct.t, %struct.t* %p, i32 0, i32 0
%0 = load atomic i32*, i32** %m acquire, align 4, !tbaa !1
store volatile i32 %v, i32* %0, align 4, !tbaa !6
%1 = load atomic i32*, i32** %m acquire, align 4, !tbaa !1
store volatile i32 %v, i32* %1, align 4, !tbaa !6
ret void
}
attributes #0 = { norecurse nounwind }
; TBAA access tag for the struct's pointer field: base type !2, access type
; !3 ("any pointer"), offset 0. Used on the pointer loads (and test2's stores).
!1 = !{!2, !3, i64 0}
; Anonymous struct type descriptor containing one "any pointer" member at offset 0.
!2 = !{!"", !3, i64 0}
!3 = !{!"any pointer", !4, i64 0}
!4 = !{!"omnipotent char", !5, i64 0}
; TBAA root for the whole hierarchy.
!5 = !{!"Simple C/C++ TBAA"}
; TBAA access tag for plain "int" accesses — deliberately disjoint from !1
; (they share only the char/root ancestry), used by test1/test3's stores.
!6 = !{!7, !7, i64 0}
!7 = !{!"int", !4, i64 0}