Revert r255115 until we figure out how to fix the bot failures.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@255117 91177308-0d34-0410-b5e6-96231b3b80d8
commit bdd73bcbd7
parent 69c30d5b6c
@@ -24,7 +24,6 @@
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"
-#include "llvm/Transforms/Utils/LoopUtils.h"

namespace llvm {

@@ -194,10 +193,11 @@ public:
const SmallVectorImpl<Instruction *> &Instrs) const;
};

-MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
-: PSE(PSE), InnermostLoop(L), AccessIdx(0),
+MemoryDepChecker(ScalarEvolution *Se, const Loop *L,
+SCEVUnionPredicate &Preds)
+: SE(Se), InnermostLoop(L), AccessIdx(0),
ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
-RecordDependences(true) {}
+RecordDependences(true), Preds(Preds) {}

/// \brief Register the location (instructions are given increasing numbers)
/// of a write access.
@@ -266,13 +266,7 @@ public:
bool isWrite) const;

private:
-/// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
-/// applies dynamic knowledge to simplify SCEV expressions and convert them
-/// to a more usable form. We need this in case assumptions about SCEV
-/// expressions need to be made in order to avoid unknown dependences. For
-/// example we might assume a unit stride for a pointer in order to prove
-/// that a memory access is strided and doesn't wrap.
-PredicatedScalarEvolution &PSE;
+ScalarEvolution *SE;
const Loop *InnermostLoop;

/// \brief Maps access locations (ptr, read/write) to program order.
@@ -323,6 +317,15 @@ private:
/// \brief Check whether the data dependence could prevent store-load
/// forwarding.
bool couldPreventStoreLoadForward(unsigned Distance, unsigned TypeByteSize);
+
+/// The SCEV predicate containing all the SCEV-related assumptions.
+/// The dependence checker needs this in order to convert SCEVs of pointers
+/// to more accurate expressions in the context of existing assumptions.
+/// We also need this in case assumptions about SCEV expressions need to
+/// be made in order to avoid unknown dependences. For example we might
+/// assume a unit stride for a pointer in order to prove that a memory access
+/// is strided and doesn't wrap.
+SCEVUnionPredicate &Preds;
};

/// \brief Holds information about the memory runtime legality checks to verify
@@ -370,7 +373,7 @@ public:
/// and change \p Preds.
void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
unsigned ASId, const ValueToValueMap &Strides,
-PredicatedScalarEvolution &PSE);
+SCEVUnionPredicate &Preds);

/// \brief No run-time memory checking is necessary.
bool empty() const { return Pointers.empty(); }
@@ -505,8 +508,8 @@ private:
/// ScalarEvolution, we will generate run-time checks by emitting a
/// SCEVUnionPredicate.
///
-/// Checks for both memory dependences and the SCEV predicates contained in the
-/// PSE must be emitted in order for the results of this analysis to be valid.
+/// Checks for both memory dependences and SCEV predicates must be emitted in
+/// order for the results of this analysis to be valid.
class LoopAccessInfo {
public:
LoopAccessInfo(Loop *L, ScalarEvolution *SE, const DataLayout &DL,
@@ -588,12 +591,14 @@ public:
return StoreToLoopInvariantAddress;
}

-/// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
-/// them to a more usable form. All SCEV expressions during the analysis
-/// should be re-written (and therefore simplified) according to PSE.
+/// The SCEV predicate contains all the SCEV-related assumptions.
+/// The is used to keep track of the minimal set of assumptions on SCEV
+/// expressions that the analysis needs to make in order to return a
+/// meaningful result. All SCEV expressions during the analysis should be
+/// re-written (and therefore simplified) according to Preds.
/// A user of LoopAccessAnalysis will need to emit the runtime checks
/// associated with this predicate.
-PredicatedScalarEvolution PSE;
+SCEVUnionPredicate Preds;

private:
/// \brief Analyze the loop. Substitute symbolic strides using Strides.
@@ -614,6 +619,7 @@ private:
MemoryDepChecker DepChecker;

Loop *TheLoop;
+ScalarEvolution *SE;
const DataLayout &DL;
const TargetLibraryInfo *TLI;
AliasAnalysis *AA;
@@ -648,17 +654,18 @@ Value *stripIntegerCast(Value *V);
/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
/// Ptr. \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
-const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
+const SCEV *replaceSymbolicStrideSCEV(ScalarEvolution *SE,
const ValueToValueMap &PtrToStride,
-Value *Ptr, Value *OrigPtr = nullptr);
+SCEVUnionPredicate &Preds, Value *Ptr,
+Value *OrigPtr = nullptr);

/// \brief Check the stride of the pointer and ensure that it does not wrap in
/// the address space, assuming \p Preds is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add a new predicate to \p Preds.
-int isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
-const ValueToValueMap &StridesMap);
+int isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
+const ValueToValueMap &StridesMap, SCEVUnionPredicate &Preds);

/// \brief This analysis provides dependence information for the memory accesses
/// of a loop.

@@ -374,58 +374,6 @@ void computeLICMSafetyInfo(LICMSafetyInfo *, Loop *);

/// \brief Returns the instructions that use values defined in the loop.
SmallVector<Instruction *, 8> findDefsUsedOutsideOfLoop(Loop *L);
-
-/// An interface layer with SCEV used to manage how we see SCEV expressions for
-/// values in the context of existing predicates. We can add new predicates,
-/// but we cannot remove them.
-///
-/// This layer has multiple purposes:
-/// - provides a simple interface for SCEV versioning.
-/// - guarantees that the order of transformations applied on a SCEV
-/// expression for a single Value is consistent across two different
-/// getSCEV calls. This means that, for example, once we've obtained
-/// an AddRec expression for a certain value through expression rewriting,
-/// we will continue to get an AddRec expression for that Value.
-/// - lowers the number of expression rewrites.
-class PredicatedScalarEvolution {
-public:
-PredicatedScalarEvolution(ScalarEvolution &SE);
-const SCEVUnionPredicate &getUnionPredicate() const;
-/// \brief Returns the SCEV expression of V, in the context of the current
-/// SCEV predicate.
-/// The order of transformations applied on the expression of V returned
-/// by ScalarEvolution is guaranteed to be preserved, even when adding new
-/// predicates.
-const SCEV *getSCEV(Value *V);
-/// \brief Adds a new predicate.
-void addPredicate(const SCEVPredicate &Pred);
-/// \brief Returns the ScalarEvolution analysis used.
-ScalarEvolution *getSE() const { return &SE; }
-
-private:
-/// \brief Increments the version number of the predicate.
-/// This needs to be called every time the SCEV predicate changes.
-void updateGeneration();
-/// Holds a SCEV and the version number of the SCEV predicate used to
-/// perform the rewrite of the expression.
-typedef std::pair<unsigned, const SCEV *> RewriteEntry;
-/// Maps a SCEV to the rewrite result of that SCEV at a certain version
-/// number. If this number doesn't match the current Generation, we will
-/// need to do a rewrite. To preserve the transformation order of previous
-/// rewrites, we will rewrite the previous result instead of the original
-/// SCEV.
-DenseMap<const SCEV *, RewriteEntry> RewriteMap;
-/// The ScalarEvolution analysis.
-ScalarEvolution &SE;
-/// The SCEVPredicate that forms our context. We will rewrite all expressions
-/// assuming that this predicate true.
-SCEVUnionPredicate Preds;
-/// Marks the version of the SCEV predicate used. When rewriting a SCEV
-/// expression we mark it with the version of the predicate. We use this to
-/// figure out if the predicate has changed from the last rewrite of the
-/// SCEV. If so, we need to perform a new rewrite.
-unsigned Generation;
-};
}

#endif

@@ -87,10 +87,11 @@ Value *llvm::stripIntegerCast(Value *V) {
return V;
}

-const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
+const SCEV *llvm::replaceSymbolicStrideSCEV(ScalarEvolution *SE,
const ValueToValueMap &PtrToStride,
+SCEVUnionPredicate &Preds,
Value *Ptr, Value *OrigPtr) {
-const SCEV *OrigSCEV = PSE.getSCEV(Ptr);
+const SCEV *OrigSCEV = SE->getSCEV(Ptr);

// If there is an entry in the map return the SCEV of the pointer with the
// symbolic stride replaced by one.
@@ -107,17 +108,16 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
ValueToValueMap RewriteMap;
RewriteMap[StrideVal] = One;

-ScalarEvolution *SE = PSE.getSE();
const auto *U = cast<SCEVUnknown>(SE->getSCEV(StrideVal));
const auto *CT =
static_cast<const SCEVConstant *>(SE->getOne(StrideVal->getType()));

-PSE.addPredicate(*SE->getEqualPredicate(U, CT));
-auto *Expr = PSE.getSCEV(Ptr);
+Preds.add(SE->getEqualPredicate(U, CT));

-DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *Expr
+const SCEV *ByOne = SE->rewriteUsingPredicate(OrigSCEV, Preds);
+DEBUG(dbgs() << "LAA: Replacing SCEV: " << *OrigSCEV << " by: " << *ByOne
<< "\n");
-return Expr;
+return ByOne;
}

// Otherwise, just return the SCEV of the original pointer.
@@ -127,12 +127,11 @@ const SCEV *llvm::replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, bool WritePtr,
unsigned DepSetId, unsigned ASId,
const ValueToValueMap &Strides,
-PredicatedScalarEvolution &PSE) {
+SCEVUnionPredicate &Preds) {
// Get the stride replaced scev.
-const SCEV *Sc = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
+const SCEV *Sc = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Sc);
assert(AR && "Invalid addrec expression");
-ScalarEvolution *SE = PSE.getSE();
const SCEV *Ex = SE->getBackedgeTakenCount(Lp);

const SCEV *ScStart = AR->getStart();
@@ -424,10 +423,9 @@ public:
typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;

AccessAnalysis(const DataLayout &Dl, AliasAnalysis *AA, LoopInfo *LI,
-MemoryDepChecker::DepCandidates &DA,
-PredicatedScalarEvolution &PSE)
+MemoryDepChecker::DepCandidates &DA, SCEVUnionPredicate &Preds)
: DL(Dl), AST(*AA), LI(LI), DepCands(DA), IsRTCheckAnalysisNeeded(false),
-PSE(PSE) {}
+Preds(Preds) {}

/// \brief Register a load and whether it is only read from.
void addLoad(MemoryLocation &Loc, bool IsReadOnly) {
@@ -514,16 +512,16 @@ private:
bool IsRTCheckAnalysisNeeded;

/// The SCEV predicate containing all the SCEV-related assumptions.
-PredicatedScalarEvolution &PSE;
+SCEVUnionPredicate &Preds;
};

} // end anonymous namespace

/// \brief Check whether a pointer can participate in a runtime bounds check.
-static bool hasComputableBounds(PredicatedScalarEvolution &PSE,
+static bool hasComputableBounds(ScalarEvolution *SE,
const ValueToValueMap &Strides, Value *Ptr,
-Loop *L) {
-const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
+Loop *L, SCEVUnionPredicate &Preds) {
+const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
if (!AR)
return false;
@@ -566,11 +564,11 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
else
++NumReadPtrChecks;

-if (hasComputableBounds(PSE, StridesMap, Ptr, TheLoop) &&
+if (hasComputableBounds(SE, StridesMap, Ptr, TheLoop, Preds) &&
// When we run after a failing dependency check we have to make sure
// we don't have wrapping pointers.
(!ShouldCheckStride ||
-isStridedPtr(PSE, Ptr, TheLoop, StridesMap) == 1)) {
+isStridedPtr(SE, Ptr, TheLoop, StridesMap, Preds) == 1)) {
// The id of the dependence set.
unsigned DepId;

@@ -584,7 +582,7 @@ bool AccessAnalysis::canCheckPtrAtRT(RuntimePointerChecking &RtCheck,
// Each access has its own dependence set.
DepId = RunningDepId++;

-RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, PSE);
+RtCheck.insert(TheLoop, Ptr, IsWrite, DepId, ASId, StridesMap, Preds);

DEBUG(dbgs() << "LAA: Found a runtime check ptr:" << *Ptr << '\n');
} else {
@@ -819,8 +817,9 @@ static bool isNoWrapAddRec(Value *Ptr, const SCEVAddRecExpr *AR,
}

/// \brief Check whether the access through \p Ptr has a constant stride.
-int llvm::isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr,
-const Loop *Lp, const ValueToValueMap &StridesMap) {
+int llvm::isStridedPtr(ScalarEvolution *SE, Value *Ptr, const Loop *Lp,
+const ValueToValueMap &StridesMap,
+SCEVUnionPredicate &Preds) {
Type *Ty = Ptr->getType();
assert(Ty->isPointerTy() && "Unexpected non-ptr");

@@ -832,7 +831,7 @@ int llvm::isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr,
return 0;
}

-const SCEV *PtrScev = replaceSymbolicStrideSCEV(PSE, StridesMap, Ptr);
+const SCEV *PtrScev = replaceSymbolicStrideSCEV(SE, StridesMap, Preds, Ptr);

const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrScev);
if (!AR) {
@@ -855,16 +854,16 @@ int llvm::isStridedPtr(PredicatedScalarEvolution &PSE, Value *Ptr,
// to access the pointer value "0" which is undefined behavior in address
// space 0, therefore we can also vectorize this case.
bool IsInBoundsGEP = isInBoundsGep(Ptr);
-bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, PSE.getSE(), Lp);
+bool IsNoWrapAddRec = isNoWrapAddRec(Ptr, AR, SE, Lp);
bool IsInAddressSpaceZero = PtrTy->getAddressSpace() == 0;
if (!IsNoWrapAddRec && !IsInBoundsGEP && !IsInAddressSpaceZero) {
DEBUG(dbgs() << "LAA: Bad stride - Pointer may wrap in the address space "
-<< *Ptr << " SCEV: " << *PtrScev << "\n");
+<< *Ptr << " SCEV: " << *PtrScev << "\n");
return 0;
}

// Check the step is constant.
-const SCEV *Step = AR->getStepRecurrence(*PSE.getSE());
+const SCEV *Step = AR->getStepRecurrence(*SE);

// Calculate the pointer stride and check if it is constant.
const SCEVConstant *C = dyn_cast<SCEVConstant>(Step);
@@ -1047,11 +1046,11 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
BPtr->getType()->getPointerAddressSpace())
return Dependence::Unknown;

-const SCEV *AScev = replaceSymbolicStrideSCEV(PSE, Strides, APtr);
-const SCEV *BScev = replaceSymbolicStrideSCEV(PSE, Strides, BPtr);
+const SCEV *AScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, APtr);
+const SCEV *BScev = replaceSymbolicStrideSCEV(SE, Strides, Preds, BPtr);

-int StrideAPtr = isStridedPtr(PSE, APtr, InnermostLoop, Strides);
-int StrideBPtr = isStridedPtr(PSE, BPtr, InnermostLoop, Strides);
+int StrideAPtr = isStridedPtr(SE, APtr, InnermostLoop, Strides, Preds);
+int StrideBPtr = isStridedPtr(SE, BPtr, InnermostLoop, Strides, Preds);

const SCEV *Src = AScev;
const SCEV *Sink = BScev;
@@ -1068,10 +1067,10 @@ MemoryDepChecker::isDependent(const MemAccessInfo &A, unsigned AIdx,
std::swap(StrideAPtr, StrideBPtr);
}

-const SCEV *Dist = PSE.getSE()->getMinusSCEV(Sink, Src);
+const SCEV *Dist = SE->getMinusSCEV(Sink, Src);

DEBUG(dbgs() << "LAA: Src Scev: " << *Src << "Sink Scev: " << *Sink
-<< "(Induction step: " << StrideAPtr << ")\n");
+<< "(Induction step: " << StrideAPtr << ")\n");
DEBUG(dbgs() << "LAA: Distance for " << *InstMap[AIdx] << " to "
<< *InstMap[BIdx] << ": " << *Dist << "\n");

@@ -1344,10 +1343,10 @@ bool LoopAccessInfo::canAnalyzeLoop() {
}

// ScalarEvolution needs to be able to find the exit count.
-const SCEV *ExitCount = PSE.getSE()->getBackedgeTakenCount(TheLoop);
-if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
-emitAnalysis(LoopAccessReport()
-<< "could not determine number of loop iterations");
+const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
+if (ExitCount == SE->getCouldNotCompute()) {
+emitAnalysis(LoopAccessReport() <<
+"could not determine number of loop iterations");
DEBUG(dbgs() << "LAA: SCEV could not compute the loop exit count.\n");
return false;
}
@@ -1448,7 +1447,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {

MemoryDepChecker::DepCandidates DependentAccesses;
AccessAnalysis Accesses(TheLoop->getHeader()->getModule()->getDataLayout(),
-AA, LI, DependentAccesses, PSE);
+AA, LI, DependentAccesses, Preds);

// Holds the analyzed pointers. We don't want to call GetUnderlyingObjects
// multiple times on the same object. If the ptr is accessed twice, once
@@ -1499,7 +1498,8 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
// read a few words, modify, and write a few words, and some of the
// words may be written to the same address.
bool IsReadOnlyPtr = false;
-if (Seen.insert(Ptr).second || !isStridedPtr(PSE, Ptr, TheLoop, Strides)) {
+if (Seen.insert(Ptr).second ||
+!isStridedPtr(SE, Ptr, TheLoop, Strides, Preds)) {
++NumReads;
IsReadOnlyPtr = true;
}
@@ -1529,7 +1529,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
// Find pointers with computable bounds. We are going to use this information
// to place a runtime bound check.
bool CanDoRTIfNeeded =
-Accesses.canCheckPtrAtRT(PtrRtChecking, PSE.getSE(), TheLoop, Strides);
+Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides);
if (!CanDoRTIfNeeded) {
emitAnalysis(LoopAccessReport() << "cannot identify array bounds");
DEBUG(dbgs() << "LAA: We can't vectorize because we can't find "
@@ -1556,7 +1556,6 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
PtrRtChecking.reset();
PtrRtChecking.Need = true;

-auto *SE = PSE.getSE();
CanDoRTIfNeeded =
Accesses.canCheckPtrAtRT(PtrRtChecking, SE, TheLoop, Strides, true);

@@ -1599,7 +1598,7 @@ void LoopAccessInfo::emitAnalysis(LoopAccessReport &Message) {
}

bool LoopAccessInfo::isUniform(Value *V) const {
-return (PSE.getSE()->isLoopInvariant(PSE.getSE()->getSCEV(V), TheLoop));
+return (SE->isLoopInvariant(SE->getSCEV(V), TheLoop));
}

// FIXME: this function is currently a duplicate of the one in
@@ -1680,7 +1679,7 @@ std::pair<Instruction *, Instruction *> LoopAccessInfo::addRuntimeChecks(
Instruction *Loc,
const SmallVectorImpl<RuntimePointerChecking::PointerCheck> &PointerChecks)
const {
-auto *SE = PSE.getSE();
+
SCEVExpander Exp(*SE, DL, "induction");
auto ExpandedChecks =
expandBounds(PointerChecks, TheLoop, Loc, SE, Exp, PtrRtChecking);
@@ -1750,7 +1749,7 @@ LoopAccessInfo::LoopAccessInfo(Loop *L, ScalarEvolution *SE,
const TargetLibraryInfo *TLI, AliasAnalysis *AA,
DominatorTree *DT, LoopInfo *LI,
const ValueToValueMap &Strides)
-: PSE(*SE), PtrRtChecking(SE), DepChecker(PSE, L), TheLoop(L), DL(DL),
+: PtrRtChecking(SE), DepChecker(SE, L, Preds), TheLoop(L), SE(SE), DL(DL),
TLI(TLI), AA(AA), DT(DT), LI(LI), NumLoads(0), NumStores(0),
MaxSafeDepDistBytes(-1U), CanVecMem(false),
StoreToLoopInvariantAddress(false) {
@@ -1787,7 +1786,7 @@ void LoopAccessInfo::print(raw_ostream &OS, unsigned Depth) const {
<< "found in loop.\n";

OS.indent(Depth) << "SCEV assumptions:\n";
-PSE.getUnionPredicate().print(OS, Depth);
+Preds.print(OS, Depth);
}

const LoopAccessInfo &

@@ -761,7 +761,7 @@ private:
}

// Don't distribute the loop if we need too many SCEV run-time checks.
-const SCEVUnionPredicate &Pred = LAI.PSE.getUnionPredicate();
+const SCEVUnionPredicate &Pred = LAI.Preds;
if (Pred.getComplexity() > DistributeSCEVCheckThreshold) {
DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
return false;
@@ -790,7 +790,7 @@ private:
DEBUG(LAI.getRuntimePointerChecking()->printChecks(dbgs(), Checks));
LoopVersioning LVer(LAI, L, LI, DT, SE, false);
LVer.setAliasChecks(std::move(Checks));
-LVer.setSCEVChecks(LAI.PSE.getUnionPredicate());
+LVer.setSCEVChecks(LAI.Preds);
LVer.versionLoop(DefsUsedOutside);
}

@@ -459,18 +459,17 @@ public:
return false;
}

-if (LAI.PSE.getUnionPredicate().getComplexity() >
-LoadElimSCEVCheckThreshold) {
+if (LAI.Preds.getComplexity() > LoadElimSCEVCheckThreshold) {
DEBUG(dbgs() << "Too many SCEV run-time checks needed.\n");
return false;
}

// Point of no-return, start the transformation. First, version the loop if
// necessary.
-if (!Checks.empty() || !LAI.PSE.getUnionPredicate().isAlwaysTrue()) {
+if (!Checks.empty() || !LAI.Preds.isAlwaysTrue()) {
LoopVersioning LV(LAI, L, LI, DT, SE, false);
LV.setAliasChecks(std::move(Checks));
-LV.setSCEVChecks(LAI.PSE.getUnionPredicate());
+LV.setSCEVChecks(LAI.Preds);
LV.versionLoop();
}

@@ -727,46 +727,3 @@ SmallVector<Instruction *, 8> llvm::findDefsUsedOutsideOfLoop(Loop *L) {

return UsedOutside;
}
-
-PredicatedScalarEvolution::PredicatedScalarEvolution(ScalarEvolution &SE)
-: SE(SE), Generation(0) {}
-
-const SCEV *PredicatedScalarEvolution::getSCEV(Value *V) {
-const SCEV *Expr = SE.getSCEV(V);
-RewriteEntry &Entry = RewriteMap[Expr];
-
-// If we already have an entry and the version matches, return it.
-if (Entry.second && Generation == Entry.first)
-return Entry.second;
-
-// We found an entry but it's stale. Rewrite the stale entry
-// acording to the current predicate.
-if (Entry.second)
-Expr = Entry.second;
-
-const SCEV *NewSCEV = SE.rewriteUsingPredicate(Expr, Preds);
-Entry = {Generation, NewSCEV};
-
-return NewSCEV;
-}
-
-void PredicatedScalarEvolution::addPredicate(const SCEVPredicate &Pred) {
-if (Preds.implies(&Pred))
-return;
-Preds.add(&Pred);
-updateGeneration();
-}
-
-const SCEVUnionPredicate &PredicatedScalarEvolution::getUnionPredicate() const {
-return Preds;
-}
-
-void PredicatedScalarEvolution::updateGeneration() {
-// If the generation number wrapped recompute everything.
-if (++Generation == 0) {
-for (auto &II : RewriteMap) {
-const SCEV *Rewritten = II.second.second;
-II.second = {Generation, SE.rewriteUsingPredicate(Rewritten, Preds)};
-}
-}
-}

@@ -32,7 +32,7 @@ LoopVersioning::LoopVersioning(const LoopAccessInfo &LAI, Loop *L, LoopInfo *LI,
assert(L->getLoopPreheader() && "No preheader");
if (UseLAIChecks) {
setAliasChecks(LAI.getRuntimePointerChecking()->getChecks());
-setSCEVChecks(LAI.PSE.getUnionPredicate());
+setSCEVChecks(LAI.Preds);
}
}

@@ -58,7 +58,7 @@ void LoopVersioning::versionLoop(
LAI.addRuntimeChecks(RuntimeCheckBB->getTerminator(), AliasChecks);
assert(MemRuntimeCheck && "called even though needsAnyChecking = false");

-const SCEVUnionPredicate &Pred = LAI.PSE.getUnionPredicate();
+const SCEVUnionPredicate &Pred = LAI.Preds;
SCEVExpander Exp(*SE, RuntimeCheckBB->getModule()->getDataLayout(),
"scev.check");
SCEVRuntimeCheck =

@@ -310,16 +310,15 @@ static GetElementPtrInst *getGEPInstruction(Value *Ptr) {
/// and reduction variables that were found to a given vectorization factor.
class InnerLoopVectorizer {
public:
-InnerLoopVectorizer(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
-LoopInfo *LI, DominatorTree *DT,
-const TargetLibraryInfo *TLI,
+InnerLoopVectorizer(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
+DominatorTree *DT, const TargetLibraryInfo *TLI,
const TargetTransformInfo *TTI, unsigned VecWidth,
-unsigned UnrollFactor)
-: OrigLoop(OrigLoop), PSE(PSE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
-VF(VecWidth), UF(UnrollFactor), Builder(PSE.getSE()->getContext()),
+unsigned UnrollFactor, SCEVUnionPredicate &Preds)
+: OrigLoop(OrigLoop), SE(SE), LI(LI), DT(DT), TLI(TLI), TTI(TTI),
+VF(VecWidth), UF(UnrollFactor), Builder(SE->getContext()),
Induction(nullptr), OldInduction(nullptr), WidenMap(UnrollFactor),
TripCount(nullptr), VectorTripCount(nullptr), Legal(nullptr),
-AddedSafetyChecks(false) {}
+AddedSafetyChecks(false), Preds(Preds) {}

// Perform the actual loop widening (vectorization).
// MinimumBitWidths maps scalar integer values to the smallest bitwidth they
@@ -487,10 +486,8 @@ protected:

/// The original loop.
Loop *OrigLoop;
-/// A wrapper around ScalarEvolution used to add runtime SCEV checks. Applies
-/// dynamic knowledge to simplify SCEV expressions and converts them to a
-/// more usable form.
-PredicatedScalarEvolution &PSE;
+/// Scev analysis to use.
+ScalarEvolution *SE;
/// Loop Info.
LoopInfo *LI;
/// Dominator Tree.
@@ -554,15 +551,23 @@ protected:

// Record whether runtime check is added.
bool AddedSafetyChecks;
+
+/// The SCEV predicate containing all the SCEV-related assumptions.
+/// The predicate is used to simplify existing expressions in the
+/// context of existing SCEV assumptions. Since legality checking is
+/// not done here, we don't need to use this predicate to record
+/// further assumptions.
+SCEVUnionPredicate &Preds;
};

class InnerLoopUnroller : public InnerLoopVectorizer {
public:
-InnerLoopUnroller(Loop *OrigLoop, PredicatedScalarEvolution &PSE,
-LoopInfo *LI, DominatorTree *DT,
-const TargetLibraryInfo *TLI,
-const TargetTransformInfo *TTI, unsigned UnrollFactor)
-: InnerLoopVectorizer(OrigLoop, PSE, LI, DT, TLI, TTI, 1, UnrollFactor) {}
+InnerLoopUnroller(Loop *OrigLoop, ScalarEvolution *SE, LoopInfo *LI,
+DominatorTree *DT, const TargetLibraryInfo *TLI,
+const TargetTransformInfo *TTI, unsigned UnrollFactor,
+SCEVUnionPredicate &Preds)
+: InnerLoopVectorizer(OrigLoop, SE, LI, DT, TLI, TTI, 1, UnrollFactor,
+Preds) {}

private:
void scalarizeInstruction(Instruction *Instr,
@@ -784,9 +789,9 @@ private:
/// between the member and the group in a map.
class InterleavedAccessInfo {
public:
-InterleavedAccessInfo(PredicatedScalarEvolution &PSE, Loop *L,
-DominatorTree *DT)
-: PSE(PSE), TheLoop(L), DT(DT) {}
+InterleavedAccessInfo(ScalarEvolution *SE, Loop *L, DominatorTree *DT,
+SCEVUnionPredicate &Preds)
+: SE(SE), TheLoop(L), DT(DT), Preds(Preds) {}

~InterleavedAccessInfo() {
SmallSet<InterleaveGroup *, 4> DelSet;
@@ -816,14 +821,17 @@ public:
}

private:
-/// A wrapper around ScalarEvolution, used to add runtime SCEV checks.
-/// Simplifies SCEV expressions in the context of existing SCEV assumptions.
-/// The interleaved access analysis can also add new predicates (for example
-/// by versioning strides of pointers).
-PredicatedScalarEvolution &PSE;
+ScalarEvolution *SE;
Loop *TheLoop;
DominatorTree *DT;

+/// The SCEV predicate containing all the SCEV-related assumptions.
+/// The predicate is used to simplify SCEV expressions in the
+/// context of existing SCEV assumptions. The interleaved access
+/// analysis can also add new predicates (for example by versioning
+/// strides of pointers).
+SCEVUnionPredicate &Preds;
+
/// Holds the relationships between the members and the interleave group.
DenseMap<Instruction *, InterleaveGroup *> InterleaveGroupMap;

@@ -1181,17 +1189,18 @@ static void emitMissedWarning(Function *F, Loop *L,
/// induction variable and the different reduction variables.
class LoopVectorizationLegality {
public:
-LoopVectorizationLegality(Loop *L, PredicatedScalarEvolution &PSE,
-DominatorTree *DT, TargetLibraryInfo *TLI,
-AliasAnalysis *AA, Function *F,
-const TargetTransformInfo *TTI,
+LoopVectorizationLegality(Loop *L, ScalarEvolution *SE, DominatorTree *DT,
+TargetLibraryInfo *TLI, AliasAnalysis *AA,
+Function *F, const TargetTransformInfo *TTI,
LoopAccessAnalysis *LAA,
LoopVectorizationRequirements *R,
-const LoopVectorizeHints *H)
-: NumPredStores(0), TheLoop(L), PSE(PSE), TLI(TLI), TheFunction(F),
-TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr), InterleaveInfo(PSE, L, DT),
-Induction(nullptr), WidestIndTy(nullptr), HasFunNoNaNAttr(false),
-Requirements(R), Hints(H) {}
+const LoopVectorizeHints *H,
+SCEVUnionPredicate &Preds)
+: NumPredStores(0), TheLoop(L), SE(SE), TLI(TLI), TheFunction(F),
+TTI(TTI), DT(DT), LAA(LAA), LAI(nullptr),
+InterleaveInfo(SE, L, DT, Preds), Induction(nullptr),
+WidestIndTy(nullptr), HasFunNoNaNAttr(false), Requirements(R), Hints(H),
+Preds(Preds) {}

/// ReductionList contains the reduction descriptors for all
/// of the reductions that were found in the loop.
@@ -1338,12 +1347,8 @@ private:

/// The loop that we evaluate.
Loop *TheLoop;
-/// A wrapper around ScalarEvolution used to add runtime SCEV checks.
-/// Applies dynamic knowledge to simplify SCEV expressions in the context
-/// of existing SCEV assumptions. The analysis will also add a minimal set
-/// of new predicates if this is required to enable vectorization and
-/// unrolling.
-PredicatedScalarEvolution &PSE;
+/// Scev analysis.
+ScalarEvolution *SE;
/// Target Library Info.
TargetLibraryInfo *TLI;
/// Parent function
@@ -1398,6 +1403,13 @@ private:
/// While vectorizing these instructions we have to generate a
/// call to the appropriate masked intrinsic
SmallPtrSet<const Instruction *, 8> MaskedOp;
+
+/// The SCEV predicate containing all the SCEV-related assumptions.
+/// The predicate is used to simplify SCEV expressions in the
+/// context of existing SCEV assumptions. The analysis will also
+/// add a minimal set of new predicates if this is required to
+/// enable vectorization/unrolling.
+SCEVUnionPredicate &Preds;
};

/// LoopVectorizationCostModel - estimates the expected speedups due to
@@ -1415,7 +1427,8 @@ public:
const TargetLibraryInfo *TLI, DemandedBits *DB,
AssumptionCache *AC, const Function *F,
const LoopVectorizeHints *Hints,
-SmallPtrSetImpl<const Value *> &ValuesToIgnore)
+SmallPtrSetImpl<const Value *> &ValuesToIgnore,
+SCEVUnionPredicate &Preds)
: TheLoop(L), SE(SE), LI(LI), Legal(Legal), TTI(TTI), TLI(TLI), DB(DB),
TheFunction(F), Hints(Hints), ValuesToIgnore(ValuesToIgnore) {}

@@ -1745,12 +1758,12 @@ struct LoopVectorize : public FunctionPass {
}
}

-PredicatedScalarEvolution PSE(*SE);
+SCEVUnionPredicate Preds;

// Check if it is legal to vectorize the loop.
LoopVectorizationRequirements Requirements;
-LoopVectorizationLegality LVL(L, PSE, DT, TLI, AA, F, TTI, LAA,
-&Requirements, &Hints);
+LoopVectorizationLegality LVL(L, SE, DT, TLI, AA, F, TTI, LAA,
+&Requirements, &Hints, Preds);
if (!LVL.canVectorize()) {
DEBUG(dbgs() << "LV: Not vectorizing: Cannot prove legality.\n");
emitMissedWarning(F, L, Hints);
@@ -1768,8 +1781,8 @@ struct LoopVectorize : public FunctionPass {
}

// Use the cost model.
-LoopVectorizationCostModel CM(L, PSE.getSE(), LI, &LVL, *TTI, TLI, DB, AC,
-F, &Hints, ValuesToIgnore);
+LoopVectorizationCostModel CM(L, SE, LI, &LVL, *TTI, TLI, DB, AC, F, &Hints,
+ValuesToIgnore, Preds);

// Check the function attributes to find out if this function should be
// optimized for size.
@@ -1880,7 +1893,7 @@ struct LoopVectorize : public FunctionPass {
assert(IC > 1 && "interleave count should not be 1 or 0");
// If we decided that it is not legal to vectorize the loop then
// interleave it.
-InnerLoopUnroller Unroller(L, PSE, LI, DT, TLI, TTI, IC);
+InnerLoopUnroller Unroller(L, SE, LI, DT, TLI, TTI, IC, Preds);
Unroller.vectorize(&LVL, CM.MinBWs);

emitOptimizationRemark(F->getContext(), LV_NAME, *F, L->getStartLoc(),
@@ -1888,7 +1901,7 @@ struct LoopVectorize : public FunctionPass {
Twine(IC) + ")");
} else {
// If we decided that it is *legal* to vectorize the loop then do it.
-InnerLoopVectorizer LB(L, PSE, LI, DT, TLI, TTI, VF.Width, IC);
+InnerLoopVectorizer LB(L, SE, LI, DT, TLI, TTI, VF.Width, IC, Preds);
LB.vectorize(&LVL, CM.MinBWs);
++LoopsVectorized;

@@ -1989,7 +2002,6 @@ Value *InnerLoopVectorizer::getStepVector(Value *Val, int StartIdx,

int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
assert(Ptr->getType()->isPointerTy() && "Unexpected non-ptr");
-auto *SE = PSE.getSE();
// Make sure that the pointer does not point to structs.
if (Ptr->getType()->getPointerElementType()->isAggregateType())
return 0;
@@ -2019,7 +2031,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {

// Make sure that all of the index operands are loop invariant.
for (unsigned i = 1; i < NumOperands; ++i)
-if (!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
+if (!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
return 0;

InductionDescriptor II = Inductions[Phi];
@@ -2032,14 +2044,14 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// operand.
for (unsigned i = 0; i != NumOperands; ++i)
if (i != InductionOperand &&
-!SE->isLoopInvariant(PSE.getSCEV(Gep->getOperand(i)), TheLoop))
+!SE->isLoopInvariant(SE->getSCEV(Gep->getOperand(i)), TheLoop))
return 0;

// We can emit wide load/stores only if the last non-zero index is the
// induction variable.
const SCEV *Last = nullptr;
if (!Strides.count(Gep))
-Last = PSE.getSCEV(Gep->getOperand(InductionOperand));
+Last = SE->getSCEV(Gep->getOperand(InductionOperand));
else {
// Because of the multiplication by a stride we can have a s/zext cast.
// We are going to replace this stride by 1 so the cast is safe to ignore.
@@ -2050,7 +2062,7 @@ int LoopVectorizationLegality::isConsecutivePtr(Value *Ptr) {
// %idxprom = zext i32 %mul to i64 << Safe cast.
// %arrayidx = getelementptr inbounds i32* %B, i64 %idxprom
//
-Last = replaceSymbolicStrideSCEV(PSE, Strides,
+Last = replaceSymbolicStrideSCEV(SE, Strides, Preds,
Gep->getOperand(InductionOperand), Gep);
if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(Last))
Last =
@@ -2408,9 +2420,8 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
Ptr = Builder.Insert(Gep2);
} else if (Gep) {
setDebugLocFromInst(Builder, Gep);
-assert(PSE.getSE()->isLoopInvariant(PSE.getSCEV(Gep->getPointerOperand()),
-OrigLoop) &&
-"Base ptr must be invariant");
+assert(SE->isLoopInvariant(SE->getSCEV(Gep->getPointerOperand()),
+OrigLoop) && "Base ptr must be invariant");

// The last index does not have to be the induction. It can be
// consecutive and be a function of the index. For example A[I+1];
@@ -2427,8 +2438,7 @@ void InnerLoopVectorizer::vectorizeMemoryInstruction(Instruction *Instr) {
if (i == InductionOperand ||
(GepOperandInst && OrigLoop->contains(GepOperandInst))) {
assert((i == InductionOperand ||
-PSE.getSE()->isLoopInvariant(PSE.getSCEV(GepOperandInst),
-OrigLoop)) &&
+SE->isLoopInvariant(SE->getSCEV(GepOperandInst), OrigLoop)) &&
"Must be last index or loop invariant");

VectorParts &GEPParts = getVectorValue(GepOperand);
@@ -2648,7 +2658,6 @@ Value *InnerLoopVectorizer::getOrCreateTripCount(Loop *L) {

IRBuilder<> Builder(L->getLoopPreheader()->getTerminator());
// Find the loop boundaries.
-ScalarEvolution *SE = PSE.getSE();
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(OrigLoop);
assert(BackedgeTakenCount != SE->getCouldNotCompute() &&
"Invalid loop count");
@@ -2756,10 +2765,8 @@ void InnerLoopVectorizer::emitSCEVChecks(Loop *L, BasicBlock *Bypass) {
// Generate the code to check that the SCEV assumptions that we made.
// We want the new basic block to start at the first instruction in a
// sequence of instructions that form a check.
-SCEVExpander Exp(*PSE.getSE(), Bypass->getModule()->getDataLayout(),
-"scev.check");
-Value *SCEVCheck =
-Exp.expandCodeForPredicate(&PSE.getUnionPredicate(), BB->getTerminator());
+SCEVExpander Exp(*SE, Bypass->getModule()->getDataLayout(), "scev.check");
+Value *SCEVCheck = Exp.expandCodeForPredicate(&Preds, BB->getTerminator());

if (auto *C = dyn_cast<ConstantInt>(SCEVCheck))
if (C->isZero())
@@ -3778,9 +3785,8 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {
// Widen selects.
// If the selector is loop invariant we can create a select
// instruction with a scalar condition. Otherwise, use vector-select.
-auto *SE = PSE.getSE();
-bool InvariantCond =
-SE->isLoopInvariant(PSE.getSCEV(it->getOperand(0)), OrigLoop);
+bool InvariantCond = SE->isLoopInvariant(SE->getSCEV(it->getOperand(0)),
+OrigLoop);
setDebugLocFromInst(Builder, &*it);

// The condition can be loop invariant but still defined inside the
@@ -3961,7 +3967,7 @@ void InnerLoopVectorizer::vectorizeBlockInLoop(BasicBlock *BB, PhiVector *PV) {

void InnerLoopVectorizer::updateAnalysis() {
// Forget the original basic block.
-PSE.getSE()->forgetLoop(OrigLoop);
+SE->forgetLoop(OrigLoop);

// Update the dominator tree information.
assert(DT->properlyDominates(LoopBypassBlocks.front(), LoopExitBlock) &&
@@ -4113,10 +4119,10 @@ bool LoopVectorizationLegality::canVectorize() {
}

// ScalarEvolution needs to be able to find the exit count.
-const SCEV *ExitCount = PSE.getSE()->getBackedgeTakenCount(TheLoop);
-if (ExitCount == PSE.getSE()->getCouldNotCompute()) {
-emitAnalysis(VectorizationReport()
-<< "could not determine number of loop iterations");
+const SCEV *ExitCount = SE->getBackedgeTakenCount(TheLoop);
+if (ExitCount == SE->getCouldNotCompute()) {
+emitAnalysis(VectorizationReport() <<
+"could not determine number of loop iterations");
DEBUG(dbgs() << "LV: SCEV could not compute the loop exit count.\n");
return false;
}
@@ -4156,7 +4162,7 @@ bool LoopVectorizationLegality::canVectorize() {
if (Hints->getForce() == LoopVectorizeHints::FK_Enabled)
SCEVThreshold = PragmaVectorizeSCEVCheckThreshold;

-if (PSE.getUnionPredicate().getComplexity() > SCEVThreshold) {
+if (Preds.getComplexity() > SCEVThreshold) {
emitAnalysis(VectorizationReport()
<< "Too many SCEV assumptions need to be made and checked "
<< "at runtime");
@@ -4262,7 +4268,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
}

InductionDescriptor ID;
-if (InductionDescriptor::isInductionPHI(Phi, PSE.getSE(), ID)) {
+if (InductionDescriptor::isInductionPHI(Phi, SE, ID)) {
Inductions[Phi] = ID;
// Get the widest type.
if (!WidestIndTy)
@@ -4331,8 +4337,7 @@ bool LoopVectorizationLegality::canVectorizeInstrs() {
// second argument is the same (i.e. loop invariant)
if (CI &&
hasVectorInstrinsicScalarOpd(getIntrinsicIDForCall(CI, TLI), 1)) {
-auto *SE = PSE.getSE();
-if (!SE->isLoopInvariant(PSE.getSCEV(CI->getOperand(1)), TheLoop)) {
+if (!SE->isLoopInvariant(SE->getSCEV(CI->getOperand(1)), TheLoop)) {
emitAnalysis(VectorizationReport(&*it)
<< "intrinsic instruction cannot be vectorized");
DEBUG(dbgs() << "LV: Found unvectorizable intrinsic " << *CI << "\n");
@@ -4405,7 +4410,7 @@ void LoopVectorizationLegality::collectStridedAccess(Value *MemAccess) {
else
return;

-Value *Stride = getStrideFromPointer(Ptr, PSE.getSE(), TheLoop);
+Value *Stride = getStrideFromPointer(Ptr, SE, TheLoop);
if (!Stride)
return;

@@ -4469,7 +4474,7 @@ bool LoopVectorizationLegality::canVectorizeMemory() {
}

Requirements->addRuntimePointerChecks(LAI->getNumRuntimePointerChecks());
-PSE.addPredicate(LAI->PSE.getUnionPredicate());
+Preds.add(&LAI->Preds);

return true;
}
@@ -4584,7 +4589,7 @@ void InterleavedAccessInfo::collectConstStridedAccesses(
StoreInst *SI = dyn_cast<StoreInst>(I);

Value *Ptr = LI ? LI->getPointerOperand() : SI->getPointerOperand();
-int Stride = isStridedPtr(PSE, Ptr, TheLoop, Strides);
+int Stride = isStridedPtr(SE, Ptr, TheLoop, Strides, Preds);

// The factor of the corresponding interleave group.
unsigned Factor = std::abs(Stride);
@@ -4593,7 +4598,7 @@ void InterleavedAccessInfo::collectConstStridedAccesses(
if (Factor < 2 || Factor > MaxInterleaveGroupFactor)
continue;

-const SCEV *Scev = replaceSymbolicStrideSCEV(PSE, Strides, Ptr);
+const SCEV *Scev = replaceSymbolicStrideSCEV(SE, Strides, Preds, Ptr);
PointerType *PtrTy = dyn_cast<PointerType>(Ptr->getType());
unsigned Size = DL.getTypeAllocSize(PtrTy->getElementType());

@@ -4680,8 +4685,8 @@ void InterleavedAccessInfo::analyzeInterleaving(
continue;

// Calculate the distance and prepare for the rule 3.
-const SCEVConstant *DistToA = dyn_cast<SCEVConstant>(
-PSE.getSE()->getMinusSCEV(DesB.Scev, DesA.Scev));
+const SCEVConstant *DistToA =
+dyn_cast<SCEVConstant>(SE->getMinusSCEV(DesB.Scev, DesA.Scev));
if (!DistToA)
continue;
