mirror of https://github.com/RPCS3/llvm-mirror.git
Remove dead TLI arg of isKnownNonNull and propagate deadness. NFC.
This actually uncovered a surprisingly large chain of ultimately unused TLI args. From what I can gather, this argument is a remnant of when isKnownNonNull would look at the TLI directly. The current approach seems to be that InferFunctionAttrs runs early in the pipeline and uses TLI to annotate the TLI-dependent non-null information as return attributes.

This also removes the dependence of functionattrs on TLI altogether.

llvm-svn: 274455
This commit is contained in:
parent d53408cd68
commit 145ca3d2aa
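
To make the division of labor concrete: InferFunctionAttrs consults TLI once, early in the pipeline, and records what it learns on the IR as nonnull return attributes; everything downstream can then answer non-null queries without a TLI handle. Below is a minimal sketch of such an attribute-only check, assuming the 2016-era C++ attribute API; the helper callReturnsNonNull is hypothetical and not part of this patch:

#include "llvm/IR/Attributes.h"
#include "llvm/IR/Instructions.h"

using namespace llvm;

// Hypothetical helper: answers "does this call return a non-null pointer?"
// purely from the IR. The nonnull return attribute was placed earlier by
// InferFunctionAttrs (which did consult TLI), so no TLI argument is needed.
static bool callReturnsNonNull(const CallInst *CI) {
  return CI->getAttributes().hasAttribute(AttributeSet::ReturnIndex,
                                          Attribute::NonNull);
}

This is why the TLI parameter threaded through the queries below turned out to be dead, and the diff simply removes it everywhere.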
include/llvm/Analysis/Loads.h

@@ -29,8 +29,7 @@ class MDNode;
 /// specified instruction.
 bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
                               const Instruction *CtxI = nullptr,
-                              const DominatorTree *DT = nullptr,
-                              const TargetLibraryInfo *TLI = nullptr);
+                              const DominatorTree *DT = nullptr);
 
 /// Returns true if V is always a dereferenceable pointer with alignment
 /// greater or equal than requested. If the context instruction is specified
@@ -39,8 +38,7 @@ bool isDereferenceablePointer(const Value *V, const DataLayout &DL,
 bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                         const DataLayout &DL,
                                         const Instruction *CtxI = nullptr,
-                                        const DominatorTree *DT = nullptr,
-                                        const TargetLibraryInfo *TLI = nullptr);
+                                        const DominatorTree *DT = nullptr);
 
 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
 /// from this value cannot trap.
@@ -54,8 +52,7 @@ bool isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
 bool isSafeToLoadUnconditionally(Value *V, unsigned Align,
                                  const DataLayout &DL,
                                  Instruction *ScanFrom = nullptr,
-                                 const DominatorTree *DT = nullptr,
-                                 const TargetLibraryInfo *TLI = nullptr);
+                                 const DominatorTree *DT = nullptr);
 
 /// DefMaxInstsToScan - the default number of maximum instructions
 /// to scan in the block, used by FindAvailableLoadedValue().
include/llvm/Analysis/ValueTracking.h

@@ -288,8 +288,7 @@ template <typename T> class ArrayRef;
 /// for such instructions, moving them may change the resulting value.
 bool isSafeToSpeculativelyExecute(const Value *V,
                                   const Instruction *CtxI = nullptr,
-                                  const DominatorTree *DT = nullptr,
-                                  const TargetLibraryInfo *TLI = nullptr);
+                                  const DominatorTree *DT = nullptr);
 
 /// Returns true if the result or effects of the given instructions \p I
 /// depend on or influence global memory.
@@ -304,7 +303,7 @@ template <typename T> class ArrayRef;
 /// Return true if this pointer couldn't possibly be null by its definition.
 /// This returns true for allocas, non-extern-weak globals, and byval
 /// arguments.
-bool isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI = nullptr);
+bool isKnownNonNull(const Value *V);
 
 /// Return true if this pointer couldn't possibly be null. If the context
 /// instruction is specified, perform context-sensitive analysis and return
@@ -312,8 +311,7 @@ template <typename T> class ArrayRef;
 /// instruction.
 bool isKnownNonNullAt(const Value *V,
                       const Instruction *CtxI = nullptr,
-                      const DominatorTree *DT = nullptr,
-                      const TargetLibraryInfo *TLI = nullptr);
+                      const DominatorTree *DT = nullptr);
 
 /// Return true if it is valid to use the assumptions provided by an
 /// assume intrinsic, I, at the point in the control-flow identified by the
lib/Analysis/InstructionSimplify.cpp

@@ -1975,7 +1975,7 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
   RHS = RHS->stripPointerCasts();
 
   // A non-null pointer is not equal to a null pointer.
-  if (llvm::isKnownNonNull(LHS, TLI) && isa<ConstantPointerNull>(RHS) &&
+  if (llvm::isKnownNonNull(LHS) && isa<ConstantPointerNull>(RHS) &&
       (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE))
     return ConstantInt::get(GetCompareTy(LHS),
                             !CmpInst::isTrueWhenEqual(Pred));
@@ -2130,10 +2130,9 @@ computePointerICmp(const DataLayout &DL, const TargetLibraryInfo *TLI,
   // cannot be elided. We cannot fold malloc comparison to null. Also, the
   // dynamic allocation call could be either of the operands.
   Value *MI = nullptr;
-  if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT, TLI))
+  if (isAllocLikeFn(LHS, TLI) && llvm::isKnownNonNullAt(RHS, CxtI, DT))
     MI = LHS;
-  else if (isAllocLikeFn(RHS, TLI) &&
-           llvm::isKnownNonNullAt(LHS, CxtI, DT, TLI))
+  else if (isAllocLikeFn(RHS, TLI) && llvm::isKnownNonNullAt(LHS, CxtI, DT))
     MI = RHS;
   // FIXME: We should also fold the compare when the pointer escapes, but the
   // compare dominates the pointer escape
lib/Analysis/Loads.cpp

@@ -54,21 +54,21 @@ static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
 static bool isDereferenceableAndAlignedPointer(
     const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
     const Instruction *CtxI, const DominatorTree *DT,
-    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited) {
+    SmallPtrSetImpl<const Value *> &Visited) {
   // Note that it is not safe to speculate into a malloc'd region because
   // malloc may return null.
 
   // bitcast instructions are no-ops as far as dereferenceability is concerned.
   if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
     return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
-                                              DL, CtxI, DT, TLI, Visited);
+                                              DL, CtxI, DT, Visited);
 
   bool CheckForNonNull = false;
   APInt KnownDerefBytes(Size.getBitWidth(),
                         V->getPointerDereferenceableBytes(DL, CheckForNonNull));
   if (KnownDerefBytes.getBoolValue()) {
     if (KnownDerefBytes.uge(Size))
-      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT, TLI))
+      if (!CheckForNonNull || isKnownNonNullAt(V, CtxI, DT))
         return isAligned(V, Align, DL);
   }
 
@@ -89,17 +89,17 @@ static bool isDereferenceableAndAlignedPointer(
 
     return Visited.insert(Base).second &&
           isDereferenceableAndAlignedPointer(Base, Align, Offset + Size, DL,
-                                             CtxI, DT, TLI, Visited);
+                                             CtxI, DT, Visited);
  }
 
   // For gc.relocate, look through relocations
   if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
     return isDereferenceableAndAlignedPointer(
-        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, TLI, Visited);
+        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);
 
   if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
     return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
-                                              DL, CtxI, DT, TLI, Visited);
+                                              DL, CtxI, DT, Visited);
 
   // If we don't know, assume the worst.
   return false;
@@ -108,8 +108,7 @@ static bool isDereferenceableAndAlignedPointer(
 bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                               const DataLayout &DL,
                                               const Instruction *CtxI,
-                                              const DominatorTree *DT,
-                                              const TargetLibraryInfo *TLI) {
+                                              const DominatorTree *DT) {
   // When dereferenceability information is provided by a dereferenceable
   // attribute, we know exactly how many bytes are dereferenceable. If we can
   // determine the exact offset to the attributed variable, we can use that
@@ -127,14 +126,13 @@ bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
   SmallPtrSet<const Value *, 32> Visited;
   return ::isDereferenceableAndAlignedPointer(
       V, Align, APInt(DL.getTypeSizeInBits(VTy), DL.getTypeStoreSize(Ty)), DL,
-      CtxI, DT, TLI, Visited);
+      CtxI, DT, Visited);
 }
 
 bool llvm::isDereferenceablePointer(const Value *V, const DataLayout &DL,
                                     const Instruction *CtxI,
-                                    const DominatorTree *DT,
-                                    const TargetLibraryInfo *TLI) {
-  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT, TLI);
+                                    const DominatorTree *DT) {
+  return isDereferenceableAndAlignedPointer(V, 1, DL, CtxI, DT);
 }
 
 /// \brief Test if A and B will obviously have the same value.
@@ -182,8 +180,7 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
 bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
                                        const DataLayout &DL,
                                        Instruction *ScanFrom,
-                                       const DominatorTree *DT,
-                                       const TargetLibraryInfo *TLI) {
+                                       const DominatorTree *DT) {
   // Zero alignment means that the load has the ABI alignment for the target
   if (Align == 0)
     Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
@@ -191,7 +188,7 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align,
 
   // If DT is not specified we can't make context-sensitive query
   const Instruction* CtxI = DT ? ScanFrom : nullptr;
-  if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT, TLI))
+  if (isDereferenceableAndAlignedPointer(V, Align, DL, CtxI, DT))
     return true;
 
   int64_t ByteOffset = 0;
lib/Analysis/ValueTracking.cpp

@@ -3052,8 +3052,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
 
 bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                         const Instruction *CtxI,
-                                        const DominatorTree *DT,
-                                        const TargetLibraryInfo *TLI) {
+                                        const DominatorTree *DT) {
   const Operator *Inst = dyn_cast<Operator>(V);
   if (!Inst)
     return false;
@@ -3104,8 +3103,8 @@ bool llvm::isSafeToSpeculativelyExecute(const Value *V,
                                      Attribute::SanitizeAddress))
       return false;
     const DataLayout &DL = LI->getModule()->getDataLayout();
-    return isDereferenceableAndAlignedPointer(
-        LI->getPointerOperand(), LI->getAlignment(), DL, CtxI, DT, TLI);
+    return isDereferenceableAndAlignedPointer(LI->getPointerOperand(),
+                                              LI->getAlignment(), DL, CtxI, DT);
   }
   case Instruction::Call: {
     if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
@@ -3190,7 +3189,7 @@ bool llvm::mayBeMemoryDependent(const Instruction &I) {
 }
 
 /// Return true if we know that the specified value is never null.
-bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
+bool llvm::isKnownNonNull(const Value *V) {
   assert(V->getType()->isPointerTy() && "V must be pointer type");
 
   // Alloca never returns null, malloc might.
@@ -3257,8 +3256,8 @@ static bool isKnownNonNullFromDominatingCondition(const Value *V,
 }
 
 bool llvm::isKnownNonNullAt(const Value *V, const Instruction *CtxI,
-                            const DominatorTree *DT, const TargetLibraryInfo *TLI) {
-  if (isKnownNonNull(V, TLI))
+                            const DominatorTree *DT) {
+  if (isKnownNonNull(V))
     return true;
 
   return CtxI ? ::isKnownNonNullFromDominatingCondition(V, CtxI, DT) : false;
lib/Transforms/IPO/FunctionAttrs.cpp

@@ -782,7 +782,7 @@ static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
 /// \p Speculative based on whether the returned conclusion is a speculative
 /// conclusion due to SCC calls.
 static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
-                            const TargetLibraryInfo &TLI, bool &Speculative) {
+                            bool &Speculative) {
   assert(F->getReturnType()->isPointerTy() &&
          "nonnull only meaningful on pointer types");
   Speculative = false;
@@ -796,7 +796,7 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
     Value *RetVal = FlowsToReturn[i];
 
     // If this value is locally known to be non-null, we're good
-    if (isKnownNonNull(RetVal, &TLI))
+    if (isKnownNonNull(RetVal))
       continue;
 
     // Otherwise, we need to look upwards since we can't make any local
@@ -845,8 +845,7 @@ static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
 }
 
 /// Deduce nonnull attributes for the SCC.
-static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
-                            const TargetLibraryInfo &TLI) {
+static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
   // Speculative that all functions in the SCC return only nonnull
   // pointers. We may refute this as we analyze functions.
   bool SCCReturnsNonNull = true;
@@ -873,7 +872,7 @@ static bool addNonNullAttrs(const SCCNodeSet &SCCNodes,
       continue;
 
     bool Speculative = false;
-    if (isReturnNonNull(F, SCCNodes, TLI, Speculative)) {
+    if (isReturnNonNull(F, SCCNodes, Speculative)) {
       if (!Speculative) {
         // Mark the function eagerly since we may discover a function
         // which prevents us from speculating about the entire SCC
@@ -987,16 +986,9 @@ static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
 
 PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                   CGSCCAnalysisManager &AM) {
-  Module &M = *C.begin()->getFunction().getParent();
-  const ModuleAnalysisManager &MAM =
-      AM.getResult<ModuleAnalysisManagerCGSCCProxy>(C).getManager();
   FunctionAnalysisManager &FAM =
       AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C).getManager();
 
-  // FIXME: Need some way to make it more reasonable to assume that this is
-  // always cached.
-  TargetLibraryInfo &TLI = *MAM.getCachedResult<TargetLibraryAnalysis>(M);
-
   // We pass a lambda into functions to wire them up to the analysis manager
   // for getting function analyses.
   auto AARGetter = [&](Function &F) -> AAResults & {
@@ -1039,7 +1031,7 @@ PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
   // more precise attributes as well.
   if (!HasUnknownCall) {
     Changed |= addNoAliasAttrs(SCCNodes);
-    Changed |= addNonNullAttrs(SCCNodes, TLI);
+    Changed |= addNonNullAttrs(SCCNodes);
     Changed |= removeConvergentAttrs(SCCNodes);
     Changed |= addNoRecurseAttrs(SCCNodes);
   }
@@ -1059,13 +1051,9 @@ struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
   void getAnalysisUsage(AnalysisUsage &AU) const override {
     AU.setPreservesCFG();
     AU.addRequired<AssumptionCacheTracker>();
-    AU.addRequired<TargetLibraryInfoWrapperPass>();
     getAAResultsAnalysisUsage(AU);
     CallGraphSCCPass::getAnalysisUsage(AU);
   }
-
-private:
-  TargetLibraryInfo *TLI;
 };
 }
 
@@ -1074,7 +1062,6 @@ INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                       "Deduce function attributes", false, false)
 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
-INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
 INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                     "Deduce function attributes", false, false)
 
@@ -1083,8 +1070,6 @@ Pass *llvm::createPostOrderFunctionAttrsLegacyPass() { return new PostOrderFunct
 bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
   if (skipSCC(SCC))
     return false;
-
-  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
   bool Changed = false;
 
   // We compute dedicated AA results for each function in the SCC as needed. We
@@ -1123,7 +1108,7 @@ bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
   // more precise attributes as well.
   if (!ExternalNode) {
     Changed |= addNoAliasAttrs(SCCNodes);
-    Changed |= addNonNullAttrs(SCCNodes, *TLI);
+    Changed |= addNonNullAttrs(SCCNodes);
     Changed |= removeConvergentAttrs(SCCNodes);
     Changed |= addNoRecurseAttrs(SCCNodes);
   }
lib/Transforms/InstCombine/InstCombineCalls.cpp

@@ -2320,7 +2320,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
       return replaceInstUsesWith(*II, ConstantPointerNull::get(PT));
 
     // isKnownNonNull -> nonnull attribute
-    if (isKnownNonNullAt(DerivedPtr, II, DT, TLI))
+    if (isKnownNonNullAt(DerivedPtr, II, DT))
       II->addAttribute(AttributeSet::ReturnIndex, Attribute::NonNull);
   }
 
@@ -2483,7 +2483,7 @@ Instruction *InstCombiner::visitCallSite(CallSite CS) {
   for (Value *V : CS.args()) {
     if (V->getType()->isPointerTy() &&
         !CS.paramHasAttr(ArgNo + 1, Attribute::NonNull) &&
-        isKnownNonNullAt(V, CS.getInstruction(), DT, TLI))
+        isKnownNonNullAt(V, CS.getInstruction(), DT))
       Indices.push_back(ArgNo + 1);
     ArgNo++;
   }
lib/Transforms/Scalar/LICM.cpp

@@ -88,7 +88,6 @@ static bool sink(Instruction &I, const LoopInfo *LI, const DominatorTree *DT,
                  const LoopSafetyInfo *SafetyInfo);
 static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                            const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI,
                                            const Loop *CurLoop,
                                            const LoopSafetyInfo *SafetyInfo,
                                            const Instruction *CtxI = nullptr);
@@ -365,7 +364,7 @@ bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
       if (CurLoop->hasLoopInvariantOperands(&I) &&
           canSinkOrHoistInst(I, AA, DT, TLI, CurLoop, CurAST, SafetyInfo) &&
           isSafeToExecuteUnconditionally(
-              I, DT, TLI, CurLoop, SafetyInfo,
+              I, DT, CurLoop, SafetyInfo,
              CurLoop->getLoopPreheader()->getTerminator()))
        Changed |= hoist(I, DT, CurLoop, SafetyInfo);
     }
@@ -490,8 +489,7 @@ bool canSinkOrHoistInst(Instruction &I, AliasAnalysis *AA, DominatorTree *DT,
   // TODO: Plumb the context instruction through to make hoisting and sinking
   // more powerful. Hoisting of loads already works due to the special casing
   // above.
-  return isSafeToExecuteUnconditionally(I, DT, TLI, CurLoop, SafetyInfo,
-                                        nullptr);
+  return isSafeToExecuteUnconditionally(I, DT, CurLoop, SafetyInfo, nullptr);
 }
 
 /// Returns true if a PHINode is a trivially replaceable with an
@@ -724,11 +722,10 @@ static bool hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
 /// or if it is a trapping instruction and is guaranteed to execute.
 static bool isSafeToExecuteUnconditionally(const Instruction &Inst,
                                            const DominatorTree *DT,
-                                           const TargetLibraryInfo *TLI,
                                            const Loop *CurLoop,
                                            const LoopSafetyInfo *SafetyInfo,
                                            const Instruction *CtxI) {
-  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT, TLI))
+  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
     return true;
 
   return isGuaranteedToExecute(Inst, DT, CurLoop, SafetyInfo);
@@ -926,7 +923,7 @@ bool llvm::promoteLoopAccessesToScalars(
 
         if (!GuaranteedToExecute && !CanSpeculateLoad)
           CanSpeculateLoad = isSafeToExecuteUnconditionally(
-              *Load, DT, TLI, CurLoop, SafetyInfo, Preheader->getTerminator());
+              *Load, DT, CurLoop, SafetyInfo, Preheader->getTerminator());
       } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
         // Stores *of* the pointer are not interesting, only stores *to* the
         // pointer.
@@ -959,7 +956,7 @@ bool llvm::promoteLoopAccessesToScalars(
         if (!GuaranteedToExecute && !CanSpeculateLoad) {
           CanSpeculateLoad = isDereferenceableAndAlignedPointer(
               Store->getPointerOperand(), Store->getAlignment(), MDL,
-              Preheader->getTerminator(), DT, TLI);
+              Preheader->getTerminator(), DT);
         }
       } else
         return Changed; // Not a load or store.
test/Transforms/FunctionAttrs/norecurse.ll

@@ -1,5 +1,5 @@
 ; RUN: opt < %s -basicaa -functionattrs -rpo-functionattrs -S | FileCheck %s
-; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<targetlibinfo>,cgscc(function-attrs),rpo-functionattrs' -S | FileCheck %s
+; RUN: opt < %s -aa-pipeline=basic-aa -passes='cgscc(function-attrs),rpo-functionattrs' -S | FileCheck %s
 
 ; CHECK: define i32 @leaf() #0
 define i32 @leaf() {
test/Transforms/FunctionAttrs/readattrs.ll

@@ -1,5 +1,5 @@
 ; RUN: opt < %s -functionattrs -S | FileCheck %s
-; RUN: opt < %s -aa-pipeline=basic-aa -passes='require<targetlibinfo>,cgscc(function-attrs)' -S | FileCheck %s
+; RUN: opt < %s -aa-pipeline=basic-aa -passes='cgscc(function-attrs)' -S | FileCheck %s
 @x = global i32 0
 
 declare void @test1_1(i8* %x1_1, i8* readonly %y1_1, ...)