Mirror of https://github.com/RPCSX/llvm.git, synced 2024-11-24 12:19:53 +00:00
Rename some member variables from TD to DL.
TargetData was renamed DataLayout back in r165242.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@201581 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 2678b21a88
commit 39d8dcb53b
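The change is purely mechanical: every member that still carried the old TargetData-era name TD is renamed to DL, along with its initializers and uses, with no change in behavior. A minimal sketch of the pattern, assuming the LLVM headers of this era and using a hypothetical ExamplePass class rather than any of the real classes touched below:

#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
using namespace llvm;

// Hypothetical class, for illustration only; the real diff applies the same
// rename to AliasAnalysis, IVUsers, ScalarEvolution, ExecutionEngine, etc.
class ExamplePass {
  // Previously declared as:  const DataLayout *TD;
  const DataLayout *DL;                          // member renamed TD -> DL
public:
  ExamplePass() : DL(0) {}                       // was: TD(0)
  uint64_t storeSize(Type *Ty) const {
    return DL ? DL->getTypeStoreSize(Ty) : 0;    // was: TD ? TD->...
  }
};

Each hunk below follows this shape: the line holding the old TD spelling is removed (-) and the DL spelling is added (+) immediately after it.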
@@ -55,7 +55,7 @@ class DominatorTree;

class AliasAnalysis {
protected:
-const DataLayout *TD;
+const DataLayout *DL;
const TargetLibraryInfo *TLI;

private:
@@ -75,7 +75,7 @@ protected:

public:
static char ID; // Class identification, replacement for typeinfo
-AliasAnalysis() : TD(0), TLI(0), AA(0) {}
+AliasAnalysis() : DL(0), TLI(0), AA(0) {}
virtual ~AliasAnalysis(); // We want to be subclassed

/// UnknownSize - This is a special value which can be used with the
@@ -86,7 +86,7 @@ public:
/// getDataLayout - Return a pointer to the current DataLayout object, or
/// null if no DataLayout object is available.
///
-const DataLayout *getDataLayout() const { return TD; }
+const DataLayout *getDataLayout() const { return DL; }

/// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
/// object, or null if no TargetLibraryInfo object is available.
@@ -122,7 +122,7 @@ class IVUsers : public LoopPass {
LoopInfo *LI;
DominatorTree *DT;
ScalarEvolution *SE;
-DataLayout *TD;
+DataLayout *DL;
SmallPtrSet<Instruction*,16> Processed;

/// IVUses - A list of all tracked IV uses of induction variable expressions
@@ -99,7 +99,7 @@ public:

/// \brief Cost analyzer used by inliner.
class InlineCostAnalysis : public CallGraphSCCPass {
-const DataLayout *TD;
+const DataLayout *DL;
const TargetTransformInfo *TTI;

public:
@@ -26,7 +26,7 @@ namespace llvm {
/// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
/// information.
class LazyValueInfo : public FunctionPass {
-class DataLayout *TD;
+class DataLayout *DL;
class TargetLibraryInfo *TLI;
void *PImpl;
LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;
@@ -323,7 +323,7 @@ namespace llvm {

/// Current AA implementation, just a cache.
AliasAnalysis *AA;
-DataLayout *TD;
+DataLayout *DL;
DominatorTree *DT;
OwningPtr<PredIteratorCache> PredCache;
public:
@@ -415,7 +415,7 @@ namespace llvm {
int64_t MemLocOffs,
unsigned MemLocSize,
const LoadInst *LI,
-const DataLayout &TD);
+const DataLayout &DL);

private:
MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,
@@ -36,8 +36,8 @@ class PHITransAddr {
/// Addr - The actual address we're analyzing.
Value *Addr;

-/// TD - The target data we are playing with if known, otherwise null.
-const DataLayout *TD;
+/// The DataLayout we are playing with if known, otherwise null.
+const DataLayout *DL;

/// TLI - The target library info if known, otherwise null.
const TargetLibraryInfo *TLI;
@@ -45,7 +45,7 @@ class PHITransAddr {
/// InstInputs - The inputs for our symbolic address.
SmallVector<Instruction*, 4> InstInputs;
public:
-PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) {
+PHITransAddr(Value *addr, const DataLayout *DL) : Addr(addr), DL(DL), TLI(0) {
// If the address is an instruction, the whole thing is considered an input.
if (Instruction *I = dyn_cast<Instruction>(Addr))
InstInputs.push_back(I);
@@ -225,9 +225,9 @@ namespace llvm {
///
LoopInfo *LI;

-/// TD - The target data information for the target we are targeting.
+/// The DataLayout information for the target we are targeting.
///
-DataLayout *TD;
+DataLayout *DL;

/// TLI - The target library information for the target we are targeting.
///
@@ -94,7 +94,7 @@ namespace llvm {
explicit SCEVExpander(ScalarEvolution &se, const char *name)
: SE(se), IVName(name), IVIncInsertLoop(0), IVIncInsertPos(0),
CanonicalMode(true), LSRMode(false),
-Builder(se.getContext(), TargetFolder(se.TD)) {
+Builder(se.getContext(), TargetFolder(se.DL)) {
#ifndef NDEBUG
DebugType = "";
#endif
@@ -24,13 +24,13 @@ namespace llvm {
class DataLayout;

class IntrinsicLowering {
-const DataLayout& TD;
+const DataLayout& DL;


bool Warned;
public:
-explicit IntrinsicLowering(const DataLayout &td) :
-TD(td), Warned(false) {}
+explicit IntrinsicLowering(const DataLayout &DL) :
+DL(DL), Warned(false) {}

/// AddPrototypes - This method, if called, causes all of the prototypes
/// that might be needed by an intrinsic lowering implementation to be
@@ -111,7 +111,7 @@ class ExecutionEngine {
ExecutionEngineState EEState;

/// The target data for the platform for which execution is being performed.
-const DataLayout *TD;
+const DataLayout *DL;

/// Whether lazy JIT compilation is enabled.
bool CompilingLazily;
@@ -130,7 +130,7 @@ protected:
/// optimize for the case where there is only one module.
SmallVector<Module*, 1> Modules;

-void setDataLayout(const DataLayout *td) { TD = td; }
+void setDataLayout(const DataLayout *Val) { DL = Val; }

/// getMemoryforGV - Allocate memory for a global variable.
virtual char *getMemoryForGV(const GlobalVariable *GV);
@@ -238,7 +238,7 @@ public:

//===--------------------------------------------------------------------===//

-const DataLayout *getDataLayout() const { return TD; }
+const DataLayout *getDataLayout() const { return DL; }

/// removeModule - Remove a Module from the list of modules. Returns true if
/// M is found.
@@ -30,18 +30,18 @@ class DataLayout;

/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
-const DataLayout *TD;
+const DataLayout *DL;

/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-if (Constant *CF = ConstantFoldConstantExpression(CE, TD))
+if (Constant *CF = ConstantFoldConstantExpression(CE, DL))
return CF;
return C;
}

public:
-explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
+explicit TargetFolder(const DataLayout *DL) : DL(DL) {}

//===--------------------------------------------------------------------===//
// Binary Operators
@@ -143,7 +143,7 @@ protected:

public:
const TargetMachine &getTargetMachine() const { return TM; }
-const DataLayout *getDataLayout() const { return TD; }
+const DataLayout *getDataLayout() const { return DL; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

bool isBigEndian() const { return !IsLittleEndian; }
@@ -1337,7 +1337,7 @@ public:

private:
const TargetMachine &TM;
-const DataLayout *TD;
+const DataLayout *DL;
const TargetLoweringObjectFile &TLOF;

/// True if this is a little endian target.
@@ -31,10 +31,10 @@ class TargetSelectionDAGInfo {
TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;

-const DataLayout *TD;
+const DataLayout *DL;

protected:
-const DataLayout *getDataLayout() const { return TD; }
+const DataLayout *getDataLayout() const { return DL; }

public:
explicit TargetSelectionDAGInfo(const TargetMachine &TM);
@@ -151,7 +151,7 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0,
-const DataLayout *TD = 0,
+const DataLayout *DL = 0,
Instruction *TheCall = 0);


@@ -159,13 +159,13 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo {
public:
-explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
-: CG(cg), TD(td) {}
+explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *DL = 0)
+: CG(cg), DL(DL) {}

/// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes.
CallGraph *CG;
-const DataLayout *TD;
+const DataLayout *DL;

/// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller.
@@ -416,9 +416,9 @@ AliasAnalysis::ModRefResult
AliasAnalysis::callCapturesBefore(const Instruction *I,
const AliasAnalysis::Location &MemLoc,
DominatorTree *DT) {
-if (!DT || !TD) return AliasAnalysis::ModRef;
+if (!DT || !DL) return AliasAnalysis::ModRef;

-const Value *Object = GetUnderlyingObject(MemLoc.Ptr, TD);
+const Value *Object = GetUnderlyingObject(MemLoc.Ptr, DL);
if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
isa<Constant>(Object))
return AliasAnalysis::ModRef;
@@ -472,7 +472,7 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called.
///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
-TD = P->getAnalysisIfAvailable<DataLayout>();
+DL = P->getAnalysisIfAvailable<DataLayout>();
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>();
}
@@ -487,7 +487,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
/// if known, or a conservative value otherwise.
///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
-return TD ? TD->getTypeStoreSize(Ty) : UnknownSize;
+return DL ? DL->getTypeStoreSize(Ty) : UnknownSize;
}

/// canBasicBlockModify - Return true if it is possible for execution of the
@@ -593,7 +593,7 @@ BasicAliasAnalysis::pointsToConstantMemory(const Location &Loc, bool OrLocal) {
SmallVector<const Value *, 16> Worklist;
Worklist.push_back(Loc.Ptr);
do {
-const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), TD);
+const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
if (!Visited.insert(V)) {
Visited.clear();
return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);
@@ -698,7 +698,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
"AliasAnalysis query involving multiple functions!");

-const Value *Object = GetUnderlyingObject(Loc.Ptr, TD);
+const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

// If this is a tail call and Loc.Ptr points to a stack location, we know that
// the tail call cannot access or modify the local stack.
@@ -805,7 +805,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
// LLVM's vld1 and vst1 intrinsics currently only support a single
// vector register.
uint64_t Size =
-TD ? TD->getTypeStoreSize(II->getType()) : UnknownSize;
+DL ? DL->getTypeStoreSize(II->getType()) : UnknownSize;
if (isNoAlias(Location(II->getArgOperand(0), Size,
II->getMetadata(LLVMContext::MD_tbaa)),
Loc))
@@ -814,7 +814,7 @@ BasicAliasAnalysis::getModRefInfo(ImmutableCallSite CS,
}
case Intrinsic::arm_neon_vst1: {
uint64_t Size =
-TD ? TD->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
+DL ? DL->getTypeStoreSize(II->getArgOperand(1)->getType()) : UnknownSize;
if (isNoAlias(Location(II->getArgOperand(0), Size,
II->getMetadata(LLVMContext::MD_tbaa)),
Loc))
@@ -877,7 +877,7 @@ static bool areVarIndicesEqual(SmallVectorImpl<VariableGEPIndex> &Indices1,

/// aliasGEP - Provide a bunch of ad-hoc rules to disambiguate a GEP instruction
/// against another pointer. We know that V1 is a GEP, but we don't know
-/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, TD),
+/// anything about V2. UnderlyingV1 is GetUnderlyingObject(GEP1, DL),
/// UnderlyingV2 is the same for V2.
///
AliasAnalysis::AliasResult
@@ -911,13 +911,13 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
int64_t GEP2BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
-DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);
const Value *GEP1BasePtr =
-DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);
// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-assert(TD == 0 &&
+assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -937,17 +937,17 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
// exactly, see if the computed offset from the common pointer tells us
// about the relation of the resulting pointer.
const Value *GEP1BasePtr =
-DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);

int64_t GEP2BaseOffset;
SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
const Value *GEP2BasePtr =
-DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
+DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, DL);

// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
-assert(TD == 0 &&
+assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -977,12 +977,12 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
return R;

const Value *GEP1BasePtr =
-DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
+DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, DL);

// DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) {
-assert(TD == 0 &&
+assert(DL == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!");
return MayAlias;
}
@@ -1215,8 +1215,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
return NoAlias; // Scalars cannot alias each other

// Figure out what objects these things are pointing to if we can.
-const Value *O1 = GetUnderlyingObject(V1, TD);
-const Value *O2 = GetUnderlyingObject(V2, TD);
+const Value *O1 = GetUnderlyingObject(V1, DL);
+const Value *O2 = GetUnderlyingObject(V2, DL);

// Null values in the default address space don't point to any object, so they
// don't alias any other pointer.
@@ -1265,9 +1265,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,

// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
-if (TD)
-if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
-(V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
+if (DL)
+if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *DL, *TLI)) ||
+(V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *DL, *TLI)))
return NoAlias;

// Check the cache before climbing up use-def chains. This also terminates
@@ -1319,9 +1319,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If both pointers are pointing into the same object and one of them
// accesses is accessing the entire object, then the accesses must
// overlap in some way.
-if (TD && O1 == O2)
-if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
-(V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
+if (DL && O1 == O2)
+if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *DL, *TLI)) ||
+(V2Size != UnknownSize && isObjectSize(O2, V2Size, *DL, *TLI)))
return AliasCache[Locs] = PartialAlias;

AliasResult Result =
@@ -1203,7 +1203,7 @@ INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",

char InlineCostAnalysis::ID = 0;

-InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), TD(0) {}
+InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID), DL(0) {}

InlineCostAnalysis::~InlineCostAnalysis() {}

@@ -1214,7 +1214,7 @@ void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
}

bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();
TTI = &getAnalysis<TargetTransformInfo>();
return false;
}
@@ -1272,7 +1272,7 @@ InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
DEBUG(llvm::dbgs() << " Analyzing call of " << Callee->getName()
<< "...\n");

-CallAnalyzer CA(TD, *TTI, *Callee, Threshold);
+CallAnalyzer CA(DL, *TTI, *Callee, Threshold);
bool ShouldInline = CA.analyzeCall(CS);

DEBUG(CA.dump());
@@ -123,14 +123,14 @@ bool IVUsers::AddUsersImpl(Instruction *I,
// IVUsers is used by LSR which assumes that all SCEV expressions are safe to
// pass to SCEVExpander. Expressions are not safe to expand if they represent
// operations that are not safe to speculate, namely integer division.
-if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, TD))
+if (!isa<PHINode>(I) && !isSafeToSpeculativelyExecute(I, DL))
return false;

// LSR is not APInt clean, do not touch integers bigger than 64-bits.
// Also avoid creating IVs of non-native types. For example, we don't want a
// 64-bit IV in 32-bit code just because the loop has one 64-bit cast.
uint64_t Width = SE->getTypeSizeInBits(I->getType());
-if (Width > 64 || (TD && !TD->isLegalInteger(Width)))
+if (Width > 64 || (DL && !DL->isLegalInteger(Width)))
return false;

// Get the symbolic expression for this instruction.
@@ -234,7 +234,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
SE = &getAnalysis<ScalarEvolution>();
-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();

// Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for
@@ -1013,7 +1013,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl)
getCache(PImpl).clear();

-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();

// Fully lazy.
@@ -1073,7 +1073,7 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
// If we know the value is a constant, evaluate the conditional.
Constant *Res = 0;
if (Result.isConstant()) {
-Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, TD,
+Res = ConstantFoldCompareInstOperands(Pred, Result.getConstant(), C, DL,
TLI);
if (ConstantInt *ResCI = dyn_cast<ConstantInt>(Res))
return ResCI->isZero() ? False : True;
@@ -1115,14 +1115,14 @@ LazyValueInfo::getPredicateOnEdge(unsigned Pred, Value *V, Constant *C,
if (Pred == ICmpInst::ICMP_EQ) {
// !C1 == C -> false iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-Result.getNotConstant(), C, TD,
+Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return False;
} else if (Pred == ICmpInst::ICMP_NE) {
// !C1 != C -> true iff C1 == C.
Res = ConstantFoldCompareInstOperands(ICmpInst::ICMP_NE,
-Result.getNotConstant(), C, TD,
+Result.getNotConstant(), C, DL,
TLI);
if (Res->isNullValue())
return True;
@@ -87,7 +87,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {

bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>();
-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : 0;
@@ -258,17 +258,17 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase,
int64_t &MemLocOffs,
const LoadInst *LI,
-const DataLayout *TD) {
+const DataLayout *DL) {
// If we have no target data, we can't do this.
-if (TD == 0) return false;
+if (DL == 0) return false;

// If we haven't already computed the base/offset of MemLoc, do so now.
if (MemLocBase == 0)
-MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, TD);
+MemLocBase = GetPointerBaseWithConstantOffset(MemLoc.Ptr, MemLocOffs, DL);

unsigned Size = MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(MemLocBase, MemLocOffs, MemLoc.Size,
-LI, *TD);
+LI, *DL);
return Size != 0;
}

@@ -282,7 +282,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI,
-const DataLayout &TD) {
+const DataLayout &DL) {
// We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;

@@ -295,7 +295,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// Get the base of this load.
int64_t LIOffs = 0;
const Value *LIBase =
-GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &TD);
+GetPointerBaseWithConstantOffset(LI->getPointerOperand(), LIOffs, &DL);

// If the two pointers are not based on the same pointer, we can't tell that
// they are related.
@@ -331,7 +331,7 @@ getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
// If this load size is bigger than our known alignment or would not fit
// into a native integer register, then we fail.
if (NewLoadByteSize > LoadAlign ||
-!TD.fitsInLegalInteger(NewLoadByteSize*8))
+!DL.fitsInLegalInteger(NewLoadByteSize*8))
return 0;

if (LIOffs+NewLoadByteSize > MemLocEnd &&
@@ -424,7 +424,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
if (IntegerType *ITy = dyn_cast<IntegerType>(LI->getType()))
if (LI->getAlignment()*8 > ITy->getPrimitiveSizeInBits() &&
isLoadLoadClobberIfExtendedToFullWidth(MemLoc, MemLocBase,
-MemLocOffset, LI, TD))
+MemLocOffset, LI, DL))
return MemDepResult::getClobber(Inst);

continue;
@@ -500,7 +500,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// need to continue scanning until the malloc call.
const TargetLibraryInfo *TLI = AA->getTargetLibraryInfo();
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, TLI)) {
-const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
+const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, DL);

if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))
return MemDepResult::getDef(Inst);
@@ -773,7 +773,7 @@ getNonLocalPointerDependency(const AliasAnalysis::Location &Loc, bool isLoad,
"Can't get pointer deps of a non-pointer!");
Result.clear();

-PHITransAddr Address(const_cast<Value *>(Loc.Ptr), TD);
+PHITransAddr Address(const_cast<Value *>(Loc.Ptr), DL);

// This is the set of blocks we've inspected, and the pointer we consider in
// each block. Because of critical edges, we currently bail out if querying
@@ -36,7 +36,7 @@ namespace {
virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining.
-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();
}

virtual AliasResult alias(const Location &LocA, const Location &LocB) {
@@ -229,7 +229,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
return GEP;

// Simplify the GEP to handle 'gep x, 0' -> x etc.
-if (Value *V = SimplifyGEPInst(GEPOps, TD, TLI, DT)) {
+if (Value *V = SimplifyGEPInst(GEPOps, DL, TLI, DT)) {
for (unsigned i = 0, e = GEPOps.size(); i != e; ++i)
RemoveInstInputs(GEPOps[i], InstInputs);

@@ -285,7 +285,7 @@ Value *PHITransAddr::PHITranslateSubExpr(Value *V, BasicBlock *CurBB,
}

// See if the add simplifies away.
-if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, TD, TLI, DT)) {
+if (Value *Res = SimplifyAddInst(LHS, RHS, isNSW, isNUW, DL, TLI, DT)) {
// If we simplified the operands, the LHS is no longer an input, but Res
// is.
RemoveInstInputs(LHS, InstInputs);
@@ -372,7 +372,7 @@ InsertPHITranslatedSubExpr(Value *InVal, BasicBlock *CurBB,
SmallVectorImpl<Instruction*> &NewInsts) {
// See if we have a version of this value already available and dominating
// PredBB. If so, there is no need to insert a new instance of it.
-PHITransAddr Tmp(InVal, TD);
+PHITransAddr Tmp(InVal, DL);
if (!Tmp.PHITranslateValue(CurBB, PredBB, &DT))
return Tmp.getAddr();

@@ -2663,12 +2663,12 @@ const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
-if (TD)
-return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
+if (DL)
+return getConstant(IntTy, DL->getTypeAllocSize(AllocTy));

Constant *C = ConstantExpr::getSizeOf(AllocTy);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;
Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
assert(Ty == IntTy && "Effective SCEV type doesn't match");
@@ -2681,14 +2681,14 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
// If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization.
-if (TD) {
+if (DL) {
return getConstant(IntTy,
-TD->getStructLayout(STy)->getElementOffset(FieldNo));
+DL->getStructLayout(STy)->getElementOffset(FieldNo));
}

Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
+if (Constant *Folded = ConstantFoldConstantExpression(CE, DL, TLI))
C = Folded;

Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
@@ -2736,8 +2736,8 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");

// If we have a DataLayout, use it!
-if (TD)
-return TD->getTypeSizeInBits(Ty);
+if (DL)
+return DL->getTypeSizeInBits(Ty);

// Integer types have fixed sizes.
if (Ty->isIntegerTy())
@@ -2763,8 +2763,8 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
// The only other support type is pointer.
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");

-if (TD)
-return TD->getIntPtrType(Ty);
+if (DL)
+return DL->getIntPtrType(Ty);

// Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext());
@@ -3232,7 +3232,7 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
-if (Value *V = SimplifyInstruction(PN, TD, TLI, DT))
+if (Value *V = SimplifyInstruction(PN, DL, TLI, DT))
if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);

@@ -3503,7 +3503,7 @@ ScalarEvolution::getUnsignedRange(const SCEV *S) {
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
-ComputeMaskedBits(U->getValue(), Zeros, Ones, TD);
+ComputeMaskedBits(U->getValue(), Zeros, Ones, DL);
if (Ones == ~Zeros + 1)
return setUnsignedRange(U, ConservativeResult);
return setUnsignedRange(U,
@@ -3653,9 +3653,9 @@ ScalarEvolution::getSignedRange(const SCEV *S) {

if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
-if (!U->getValue()->getType()->isIntegerTy() && !TD)
+if (!U->getValue()->getType()->isIntegerTy() && !DL)
return setSignedRange(U, ConservativeResult);
-unsigned NS = ComputeNumSignBits(U->getValue(), TD);
+unsigned NS = ComputeNumSignBits(U->getValue(), DL);
if (NS <= 1)
return setSignedRange(U, ConservativeResult);
return setSignedRange(U, ConservativeResult.intersectWith(
@@ -3762,7 +3762,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
unsigned TZ = A.countTrailingZeros();
unsigned BitWidth = A.getBitWidth();
APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
-ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, TD);
+ComputeMaskedBits(U->getOperand(0), KnownZero, KnownOne, DL);

APInt EffectiveMask =
APInt::getLowBitsSet(BitWidth, BitWidth - LZ - TZ).shl(TZ);
@@ -4956,7 +4956,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals,
-const DataLayout *TD,
+const DataLayout *DL,
const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C;
@@ -4983,7 +4983,7 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
if (!Operands[i]) return 0;
continue;
}
-Constant *C = EvaluateExpression(Operand, L, Vals, TD, TLI);
+Constant *C = EvaluateExpression(Operand, L, Vals, DL, TLI);
Vals[Operand] = C;
if (!C) return 0;
Operands[i] = C;
@@ -4991,12 +4991,12 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,

if (CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
-Operands[1], TD, TLI);
+Operands[1], DL, TLI);
if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
-return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+return ConstantFoldLoadFromConstPtr(Operands[0], DL);
}
-return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD,
+return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, DL,
TLI);
}

@@ -5052,7 +5052,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
// Compute the value of the PHIs for the next iteration.
// EvaluateExpression adds non-phi values to the CurrentIterVals map.
DenseMap<Instruction *, Constant *> NextIterVals;
-Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD,
+Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL,
TLI);
if (NextPHI == 0)
return 0; // Couldn't evaluate!
@@ -5078,7 +5078,7 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
Constant *&NextPHI = NextIterVals[PHI];
if (!NextPHI) { // Not already computed.
Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
if (NextPHI != I->second)
StoppedEvolving = false;
@@ -5134,7 +5134,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
for (unsigned IterationNum = 0; IterationNum != MaxIterations;++IterationNum){
ConstantInt *CondVal =
dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, CurrentIterVals,
-TD, TLI));
+DL, TLI));

// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
@@ -5164,7 +5164,7 @@ const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
if (NextPHI) continue; // Already computed!

Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
-NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD, TLI);
+NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, DL, TLI);
}
CurrentIterVals.swap(NextIterVals);
}
@@ -5369,14 +5369,14 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C = 0;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
-Operands[0], Operands[1], TD,
+Operands[0], Operands[1], DL,
TLI);
else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
if (!LI->isVolatile())
-C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+C = ConstantFoldLoadFromConstPtr(Operands[0], DL);
} else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
-Operands, TD, TLI);
+Operands, DL, TLI);
if (!C) return V;
return getSCEV(C);
}
@@ -7385,7 +7385,7 @@ ScalarEvolution::ScalarEvolution()
bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F;
LI = &getAnalysis<LoopInfo>();
-TD = getAnalysisIfAvailable<DataLayout>();
+DL = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
return false;
@@ -210,7 +210,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder,
const SCEV *Factor,
ScalarEvolution &SE,
-const DataLayout *TD) {
+const DataLayout *DL) {
// Everything is divisible by one.
if (Factor->isOne())
return true;
@@ -250,7 +250,7 @@ static bool FactorOutConstant(const SCEV *&S,
// In a Mul, check if there is a constant operand which is a multiple
// of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
-if (TD) {
+if (DL) {
// With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can
// factor it.
@@ -270,7 +270,7 @@ static bool FactorOutConstant(const SCEV *&S,
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i);
const SCEV *Remainder = SE.getConstant(SOp->getType(), 0);
-if (FactorOutConstant(SOp, Remainder, Factor, SE, TD) &&
+if (FactorOutConstant(SOp, Remainder, Factor, SE, DL) &&
Remainder->isZero()) {
SmallVector<const SCEV *, 4> NewMulOps(M->op_begin(), M->op_end());
NewMulOps[i] = SOp;
@@ -285,12 +285,12 @@ static bool FactorOutConstant(const SCEV *&S,
if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEV *Step = A->getStepRecurrence(SE);
const SCEV *StepRem = SE.getConstant(Step->getType(), 0);
-if (!FactorOutConstant(Step, StepRem, Factor, SE, TD))
+if (!FactorOutConstant(Step, StepRem, Factor, SE, DL))
return false;
if (!StepRem->isZero())
return false;
const SCEV *Start = A->getStart();
-if (!FactorOutConstant(Start, Remainder, Factor, SE, TD))
+if (!FactorOutConstant(Start, Remainder, Factor, SE, DL))
return false;
S = SE.getAddRecExpr(Start, Step, A->getLoop(),
A->getNoWrapFlags(SCEV::FlagNW));
@@ -404,8 +404,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// without the other.
SplitAddRecs(Ops, Ty, SE);

-Type *IntPtrTy = SE.TD
-? SE.TD->getIntPtrType(PTy)
+Type *IntPtrTy = SE.DL
+? SE.DL->getIntPtrType(PTy)
: Type::getInt64Ty(PTy->getContext());

// Descend down the pointer's type and attempt to convert the other
@@ -424,7 +424,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
const SCEV *Op = Ops[i];
const SCEV *Remainder = SE.getConstant(Ty, 0);
-if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.TD)) {
+if (FactorOutConstant(Op, Remainder, ElSize, SE, SE.DL)) {
// Op now has ElSize factored out.
ScaledOps.push_back(Op);
if (!Remainder->isZero())
@@ -458,13 +458,13 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
bool FoundFieldNo = false;
// An empty struct has no fields.
if (STy->getNumElements() == 0) break;
-if (SE.TD) {
+if (SE.DL) {
// With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields.
if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
if (SE.getTypeSizeInBits(C->getType()) <= 64) {
-const StructLayout &SL = *SE.TD->getStructLayout(STy);
+const StructLayout &SL = *SE.DL->getStructLayout(STy);
uint64_t FullOffset = C->getValue()->getZExtValue();
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
@@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
-TD.getIntPtrType(Context), (Type *)0);
+DL.getIntPtrType(Context), (Type *)0);
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
-TD.getIntPtrType(Context), (Type *)0);
+DL.getIntPtrType(Context), (Type *)0);
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
-TD.getIntPtrType(Context), (Type *)0);
+DL.getIntPtrType(Context), (Type *)0);
break;
case Intrinsic::sqrt:
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");
@@ -463,7 +463,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break; // Strip out annotate intrinsic

case Intrinsic::memcpy: {
-Type *IntPtr = TD.getIntPtrType(Context);
+Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -474,7 +474,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::memmove: {
-Type *IntPtr = TD.getIntPtrType(Context);
+Type *IntPtr = DL.getIntPtrType(Context);
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -486,7 +486,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
}
case Intrinsic::memset: {
Value *Op0 = CI->getArgOperand(0);
-Type *IntPtr = TD.getIntPtrType(Op0->getType());
+Type *IntPtr = DL.getIntPtrType(Op0->getType());
Value *Size = Builder.CreateIntCast(CI->getArgOperand(2), IntPtr,
/* isSigned */ false);
Value *Ops[3];
@@ -16,7 +16,7 @@
using namespace llvm;

TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
-: TD(TM.getDataLayout()) {
+: DL(TM.getDataLayout()) {
}

TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {
@@ -661,11 +661,11 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF.
TargetLoweringBase::TargetLoweringBase(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof)
-: TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
+: TM(tm), DL(TM.getDataLayout()), TLOF(*tlof) {
initActions();

// Perform these initializations only once.
-IsLittleEndian = TD->isLittleEndian();
+IsLittleEndian = DL->isLittleEndian();
MaxStoresPerMemset = MaxStoresPerMemcpy = MaxStoresPerMemmove = 8;
MaxStoresPerMemsetOptSize = MaxStoresPerMemcpyOptSize
= MaxStoresPerMemmoveOptSize = 4;
@@ -802,7 +802,7 @@ MVT TargetLoweringBase::getPointerTy(uint32_t AS) const {
}

unsigned TargetLoweringBase::getPointerSizeInBits(uint32_t AS) const {
-return TD->getPointerSizeInBits(AS);
+return DL->getPointerSizeInBits(AS);
}

unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
@@ -811,7 +811,7 @@ unsigned TargetLoweringBase::getPointerTypeSizeInBits(Type *Ty) const {
}

MVT TargetLoweringBase::getScalarShiftAmountTy(EVT LHSTy) const {
-return MVT::getIntegerVT(8*TD->getPointerSize(0));
+return MVT::getIntegerVT(8*DL->getPointerSize(0));
}

EVT TargetLoweringBase::getShiftAmountTy(EVT LHSTy) const {
@@ -1286,7 +1286,7 @@ void llvm::GetReturnInfo(Type* ReturnType, AttributeSet attr,
/// function arguments in the caller parameter area. This is the actual
/// alignment, not its logarithm.
unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty) const {
-return TD->getABITypeAlignment(Ty);
+return DL->getABITypeAlignment(Ty);
}

//===----------------------------------------------------------------------===//
@@ -590,8 +590,8 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
case Instruction::GetElementPtr: {
// Compute the index
GenericValue Result = getConstantValue(Op0);
-APInt Offset(TD->getPointerSizeInBits(), 0);
-cast<GEPOperator>(CE)->accumulateConstantOffset(*TD, Offset);
+APInt Offset(DL->getPointerSizeInBits(), 0);
+cast<GEPOperator>(CE)->accumulateConstantOffset(*DL, Offset);

char* tmp = (char*) Result.PointerVal;
Result = PTOGV(tmp + Offset.getSExtValue());
@@ -678,16 +678,16 @@ GenericValue ExecutionEngine::getConstantValue(const Constant *C) {
}
case Instruction::PtrToInt: {
GenericValue GV = getConstantValue(Op0);
-uint32_t PtrWidth = TD->getTypeSizeInBits(Op0->getType());
+uint32_t PtrWidth = DL->getTypeSizeInBits(Op0->getType());
assert(PtrWidth <= 64 && "Bad pointer width");
GV.IntVal = APInt(PtrWidth, uintptr_t(GV.PointerVal));
-uint32_t IntWidth = TD->getTypeSizeInBits(CE->getType());
+uint32_t IntWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(IntWidth);
return GV;
}
case Instruction::IntToPtr: {
GenericValue GV = getConstantValue(Op0);
-uint32_t PtrWidth = TD->getTypeSizeInBits(CE->getType());
+uint32_t PtrWidth = DL->getTypeSizeInBits(CE->getType());
GV.IntVal = GV.IntVal.zextOrTrunc(PtrWidth);
assert(GV.IntVal.getBitWidth() <= 64 && "Bad pointer width");
GV.PointerVal = PointerTy(uintptr_t(GV.IntVal.getZExtValue()));
@@ -343,7 +343,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
// If the pointer is already known to be sufficiently aligned, or if we can
// round it up to a larger alignment, then we don't need a temporary.
if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
-IFI.TD) >= ByValAlignment)
+IFI.DL) >= ByValAlignment)
return Arg;

// Otherwise, we have to make a memcpy to get a safe alignment. This is bad
@@ -356,8 +356,8 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,

// Create the alloca. If we have DataLayout, use nice alignment.
unsigned Align = 1;
-if (IFI.TD)
-Align = IFI.TD->getPrefTypeAlignment(AggTy);
+if (IFI.DL)
+Align = IFI.DL->getPrefTypeAlignment(AggTy);

// If the byval had an alignment specified, we *must* use at least that
// alignment, as it is required by the byval argument (and uses of the
@@ -377,11 +377,11 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

Value *Size;
-if (IFI.TD == 0)
+if (IFI.DL == 0)
Size = ConstantExpr::getSizeOf(AggTy);
else
Size = ConstantInt::get(Type::getInt64Ty(Context),
-IFI.TD->getTypeStoreSize(AggTy));
+IFI.DL->getTypeStoreSize(AggTy));

// Always generate a memcpy of alignment 1 here because we don't know
// the alignment of the src pointer. Other optimizations can infer
@@ -599,7 +599,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// happy with whatever the cloner can do.
CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
/*ModuleLevelChanges=*/false, Returns, ".i",
-&InlinedFunctionInfo, IFI.TD, TheCall);
+&InlinedFunctionInfo, IFI.DL, TheCall);

// Remember the first block that is newly cloned over.
FirstNewBlock = LastBlock; ++FirstNewBlock;
@@ -669,9 +669,9 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
ConstantInt *AllocaSize = 0;
if (ConstantInt *AIArraySize =
dyn_cast<ConstantInt>(AI->getArraySize())) {
-if (IFI.TD) {
+if (IFI.DL) {
Type *AllocaType = AI->getAllocatedType();
-uint64_t AllocaTypeSize = IFI.TD->getTypeAllocSize(AllocaType);
+uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
// Check that array size doesn't saturate uint64_t and doesn't
@@ -908,7 +908,7 @@ bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
// the entries are the same or undef). If so, remove the PHI so it doesn't
// block other optimizations.
if (PHI) {
-if (Value *V = SimplifyInstruction(PHI, IFI.TD)) {
+if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
PHI->replaceAllUsesWith(V);
PHI->eraseFromParent();
}