mirror of
https://github.com/RPCS3/llvm-mirror.git
synced 2024-12-16 16:16:45 +00:00
[MSSA] Add special handling for invariant/constant loads.
This is a follow-up to r277637. It teaches MemorySSA that invariant loads (and loads of provably constant memory) are always liveOnEntry. llvm-svn: 277640
This commit is contained in:
parent
b18bd3fefa
commit
556bbef48d
@@ -170,6 +170,7 @@ template <> struct DenseMapInfo<MemoryLocOrCall> {
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
namespace {
|
||||
struct UpwardsMemoryQuery {
|
||||
// True if our original query started off as a call
|
||||
@@ -251,6 +252,17 @@ static Reorderability getLoadReorderability(const LoadInst *Use,
|
||||
return Result;
|
||||
}
|
||||
|
||||
/// Returns true if \p I is a load whose clobbering access is trivially
/// liveOnEntry, i.e. a load whose underlying memory provably never changes.
///
/// This holds for loads tagged with !invariant.load metadata and for loads
/// of memory that alias analysis can prove is constant.
///
/// FIXME: We should handle invariant groups, as well. It's a bit harder,
/// because we need to pay close attention to invariant group barriers.
static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // Only loads qualify; stores and calls always need a real walk.
  if (!isa<LoadInst>(I))
    return false;
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return I->getMetadata(LLVMContext::MD_invariant_load) ||
         AA.pointsToConstantMemory(I);
}
|
||||
|
||||
static bool instructionClobbersQuery(MemoryDef *MD,
|
||||
const MemoryLocation &UseLoc,
|
||||
const Instruction *UseInst,
|
||||
@@ -1332,6 +1344,11 @@ void MemorySSA::OptimizeUses::optimizeUsesInBlock(
|
||||
continue;
|
||||
}
|
||||
|
||||
if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
|
||||
MU->setDefiningAccess(MSSA->getLiveOnEntryDef());
|
||||
continue;
|
||||
}
|
||||
|
||||
MemoryLocOrCall UseMLOC(MU);
|
||||
auto &LocInfo = LocStackInfo[UseMLOC];
|
||||
// If the pop epoch changed, it means we've removed stuff from top of
|
||||
@@ -2246,6 +2263,12 @@ MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
|
||||
if (auto *CacheResult = Cache.lookup(StartingAccess, Q.StartingLoc, Q.IsCall))
|
||||
return CacheResult;
|
||||
|
||||
if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
|
||||
MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
|
||||
Cache.insert(StartingAccess, LiveOnEntry, Q.StartingLoc, Q.IsCall);
|
||||
return LiveOnEntry;
|
||||
}
|
||||
|
||||
// Start with the thing we already think clobbers this location
|
||||
MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();
|
||||
|
||||
|
@@ -1,4 +1,3 @@
|
||||
; XFAIL: *
|
||||
; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
|
||||
; RUN: opt -aa-pipeline=basic-aa -passes='print<memoryssa>' -verify-memoryssa -disable-output < %s 2>&1 | FileCheck %s
|
||||
;
|
||||
@@ -12,6 +11,7 @@
|
||||
|
||||
declare void @clobberAllTheThings()
|
||||
|
||||
; CHECK-LABEL: define i32 @foo
|
||||
define i32 @foo() {
|
||||
; CHECK: 1 = MemoryDef(liveOnEntry)
|
||||
; CHECK-NEXT: call void @clobberAllTheThings()
|
||||
@@ -22,4 +22,20 @@ define i32 @foo() {
|
||||
ret i32 %1
|
||||
}
|
||||
|
||||
; CHECK-LABEL: define i32 @bar
define i32 @bar(i32* %a) {
; CHECK: 1 = MemoryDef(liveOnEntry)
; CHECK-NEXT: call void @clobberAllTheThings()
  call void @clobberAllTheThings()

; Even with !invariant.load, an acquire atomic load stays a MemoryDef — the
; invariant-load fast path must not turn it into a liveOnEntry use.
; CHECK: 2 = MemoryDef(1)
; CHECK-NEXT: %1 = load atomic i32
  %1 = load atomic i32, i32* %a acquire, align 4, !invariant.load !0

; CHECK: MemoryUse(2)
; CHECK-NEXT: %2 = load i32
  %2 = load i32, i32* %a, align 4
  ret i32 %2
}

!0 = !{}
|
||||
|
@@ -418,3 +418,36 @@ TEST_F(MemorySSATest, PartialWalkerCacheWithPhis) {
|
||||
MemoryAccess *UseClobber = Walker->getClobberingMemoryAccess(ALoad);
|
||||
EXPECT_EQ(UseClobber, MSSA.getMemoryAccess(FirstStore));
|
||||
}
|
||||
|
||||
// Test that our walker properly handles loads with the invariant group
|
||||
// attribute. It's a bit hacky, since we add the invariant attribute *after*
|
||||
// building MSSA. Otherwise, the use optimizer will optimize it for us, which
|
||||
// isn't what we want.
|
||||
// FIXME: It may be easier/cleaner to just add an 'optimize uses?' flag to MSSA.
|
||||
TEST_F(MemorySSATest, WalkerInvariantLoadOpt) {
|
||||
F = Function::Create(FunctionType::get(B.getVoidTy(), {}, false),
|
||||
GlobalValue::ExternalLinkage, "F", &M);
|
||||
B.SetInsertPoint(BasicBlock::Create(C, "", F));
|
||||
Type *Int8 = Type::getInt8Ty(C);
|
||||
Constant *One = ConstantInt::get(Int8, 1);
|
||||
Value *AllocA = B.CreateAlloca(Int8, One, "");
|
||||
|
||||
Instruction *Store = B.CreateStore(One, AllocA);
|
||||
Instruction *Load = B.CreateLoad(AllocA);
|
||||
|
||||
setupAnalyses();
|
||||
MemorySSA &MSSA = Analyses->MSSA;
|
||||
MemorySSAWalker *Walker = Analyses->Walker;
|
||||
|
||||
auto *LoadMA = cast<MemoryUse>(MSSA.getMemoryAccess(Load));
|
||||
auto *StoreMA = cast<MemoryDef>(MSSA.getMemoryAccess(Store));
|
||||
EXPECT_EQ(LoadMA->getDefiningAccess(), StoreMA);
|
||||
|
||||
// ...At the time of writing, no cache should exist for LoadMA. Be a bit
|
||||
// flexible to future changes.
|
||||
Walker->invalidateInfo(LoadMA);
|
||||
Load->setMetadata(LLVMContext::MD_invariant_load, MDNode::get(C, {}));
|
||||
|
||||
MemoryAccess *LoadClobber = Walker->getClobberingMemoryAccess(LoadMA);
|
||||
EXPECT_EQ(LoadClobber, MSSA.getLiveOnEntryDef());
|
||||
}
|
||||
|
Loading…
Reference in New Issue
Block a user