Move TargetData to DataLayout.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@165402 91177308-0d34-0410-b5e6-96231b3b80d8
Micah Villmow 2012-10-08 16:38:25 +00:00
parent 2b4b44e0d2
commit 3574eca1b0
229 changed files with 947 additions and 1000 deletions
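
The rename is mechanical: every use of llvm::TargetData becomes llvm::DataLayout, and the query interface carries over unchanged. As an illustrative sketch (not part of this commit), out-of-tree code migrates roughly as below; the header paths and the getTypeAllocSize() query are taken from the files touched in this diff, while the helper function name is hypothetical.

// Illustrative migration sketch (hypothetical helper, not from the commit diff):
//
// Before this commit:
//   #include "llvm/Target/TargetData.h"
//   uint64_t allocSizeOf(const llvm::TargetData *TD, llvm::Type *Ty) {
//     return TD->getTypeAllocSize(Ty);  // ABI size in bytes
//   }
//
// After this commit:
#include "llvm/DataLayout.h"
#include "llvm/Type.h"

uint64_t allocSizeOf(const llvm::DataLayout *DL, llvm::Type *Ty) {
  return DL->getTypeAllocSize(Ty);      // same query, new class name
}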

View File

@@ -145,7 +145,7 @@ static inline LLVMBool LLVMInitializeNativeTarget(void) {
 /*===-- Target Data -------------------------------------------------------===*/
 /** Creates target data from a target layout string.
-    See the constructor llvm::TargetData::TargetData. */
+    See the constructor llvm::DataLayout::DataLayout. */
 LLVMTargetDataRef LLVMCreateTargetData(const char *StringRep);
 /** Adds target data information to a pass manager. This does not take ownership
@@ -160,48 +160,48 @@ void LLVMAddTargetLibraryInfo(LLVMTargetLibraryInfoRef, LLVMPassManagerRef);
 /** Converts target data to a target layout string. The string must be disposed
     with LLVMDisposeMessage.
-    See the constructor llvm::TargetData::TargetData. */
+    See the constructor llvm::DataLayout::DataLayout. */
 char *LLVMCopyStringRepOfTargetData(LLVMTargetDataRef);
 /** Returns the byte order of a target, either LLVMBigEndian or
     LLVMLittleEndian.
-    See the method llvm::TargetData::isLittleEndian. */
+    See the method llvm::DataLayout::isLittleEndian. */
 enum LLVMByteOrdering LLVMByteOrder(LLVMTargetDataRef);
 /** Returns the pointer size in bytes for a target.
-    See the method llvm::TargetData::getPointerSize. */
+    See the method llvm::DataLayout::getPointerSize. */
 unsigned LLVMPointerSize(LLVMTargetDataRef);
 /** Returns the integer type that is the same size as a pointer on a target.
-    See the method llvm::TargetData::getIntPtrType. */
+    See the method llvm::DataLayout::getIntPtrType. */
 LLVMTypeRef LLVMIntPtrType(LLVMTargetDataRef);
 /** Computes the size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeSizeInBits. */
+    See the method llvm::DataLayout::getTypeSizeInBits. */
 unsigned long long LLVMSizeOfTypeInBits(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the storage size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeStoreSize. */
+    See the method llvm::DataLayout::getTypeStoreSize. */
 unsigned long long LLVMStoreSizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the ABI size of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeAllocSize. */
+    See the method llvm::DataLayout::getTypeAllocSize. */
 unsigned long long LLVMABISizeOfType(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the ABI alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMABIAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the call frame alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMCallFrameAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the preferred alignment of a type in bytes for a target.
-    See the method llvm::TargetData::getTypeABISize. */
+    See the method llvm::DataLayout::getTypeABISize. */
 unsigned LLVMPreferredAlignmentOfType(LLVMTargetDataRef, LLVMTypeRef);
 /** Computes the preferred alignment of a global variable in bytes for a target.
-    See the method llvm::TargetData::getPreferredAlignment. */
+    See the method llvm::DataLayout::getPreferredAlignment. */
 unsigned LLVMPreferredAlignmentOfGlobal(LLVMTargetDataRef,
 LLVMValueRef GlobalVar);
@@ -216,7 +216,7 @@ unsigned long long LLVMOffsetOfElement(LLVMTargetDataRef, LLVMTypeRef StructTy,
 unsigned Element);
 /** Deallocates a TargetData.
-    See the destructor llvm::TargetData::~TargetData. */
+    See the destructor llvm::DataLayout::~DataLayout. */
 void LLVMDisposeTargetData(LLVMTargetDataRef);
 /**
@@ -227,15 +227,15 @@ void LLVMDisposeTargetData(LLVMTargetDataRef);
 }
 namespace llvm {
-  class TargetData;
+  class DataLayout;
   class TargetLibraryInfo;
-  inline TargetData *unwrap(LLVMTargetDataRef P) {
-    return reinterpret_cast<TargetData*>(P);
+  inline DataLayout *unwrap(LLVMTargetDataRef P) {
+    return reinterpret_cast<DataLayout*>(P);
   }
-  inline LLVMTargetDataRef wrap(const TargetData *P) {
-    return reinterpret_cast<LLVMTargetDataRef>(const_cast<TargetData*>(P));
+  inline LLVMTargetDataRef wrap(const DataLayout *P) {
+    return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout*>(P));
   }
   inline TargetLibraryInfo *unwrap(LLVMTargetLibraryInfoRef P) {

View File

@@ -104,7 +104,7 @@ char *LLVMGetTargetMachineCPU(LLVMTargetMachineRef T);
 LLVMDisposeMessage. */
 char *LLVMGetTargetMachineFeatureString(LLVMTargetMachineRef T);
-/** Returns the llvm::TargetData used for this llvm:TargetMachine. */
+/** Returns the llvm::DataLayout used for this llvm:TargetMachine. */
 LLVMTargetDataRef LLVMGetTargetMachineData(LLVMTargetMachineRef T);
 /** Emits an asm or object file for the given module to the filename. This

View File

@@ -45,7 +45,7 @@ namespace llvm {
 class LoadInst;
 class StoreInst;
 class VAArgInst;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Pass;
 class AnalysisUsage;
@@ -55,7 +55,7 @@ class DominatorTree;
 class AliasAnalysis {
 protected:
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
 private:
@@ -83,17 +83,17 @@ public:
   /// know the sizes of the potential memory references.
   static uint64_t const UnknownSize = ~UINT64_C(0);
-  /// getTargetData - Return a pointer to the current TargetData object, or
-  /// null if no TargetData object is available.
+  /// getDataLayout - Return a pointer to the current DataLayout object, or
+  /// null if no DataLayout object is available.
   ///
-  const TargetData *getTargetData() const { return TD; }
+  const DataLayout *getDataLayout() const { return TD; }
   /// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
   /// object, or null if no TargetLibraryInfo object is available.
   ///
   const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
-  /// getTypeStoreSize - Return the TargetData store size for the given type,
+  /// getTypeStoreSize - Return the DataLayout store size for the given type,
   /// if known, or a conservative value otherwise.
   ///
   uint64_t getTypeStoreSize(Type *Ty);

View File

@@ -22,11 +22,11 @@ namespace llvm {
 class BasicBlock;
 class Function;
 class Instruction;
-class TargetData;
+class DataLayout;
 class Value;
 /// \brief Check whether an instruction is likely to be "free" when lowered.
-bool isInstructionFree(const Instruction *I, const TargetData *TD = 0);
+bool isInstructionFree(const Instruction *I, const DataLayout *TD = 0);
 /// \brief Check whether a call will lower to something small.
 ///
@@ -85,10 +85,10 @@ namespace llvm {
 NumRets(0) {}
 /// \brief Add information about a block to the current state.
-void analyzeBasicBlock(const BasicBlock *BB, const TargetData *TD = 0);
+void analyzeBasicBlock(const BasicBlock *BB, const DataLayout *TD = 0);
 /// \brief Add information about a function to the current state.
-void analyzeFunction(Function *F, const TargetData *TD = 0);
+void analyzeFunction(Function *F, const DataLayout *TD = 0);
 };
 }

View File

@@ -12,7 +12,7 @@
 //
 // Also, to supplement the basic VMCore ConstantExpr simplifications,
 // this file declares some additional folding routines that can make use of
-// TargetData information. These functions cannot go in VMCore due to library
+// DataLayout information. These functions cannot go in VMCore due to library
 // dependency issues.
 //
 //===----------------------------------------------------------------------===//
@@ -24,7 +24,7 @@ namespace llvm {
 class Constant;
 class ConstantExpr;
 class Instruction;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Function;
 class Type;
@@ -36,14 +36,14 @@ namespace llvm {
 /// Note that this fails if not all of the operands are constant. Otherwise,
 /// this function can only fail when attempting to fold instructions like loads
 /// and stores, which have no constant expression form.
-Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0,
+Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0);
 /// ConstantFoldConstantExpression - Attempt to fold the constant expression
-/// using the specified TargetData. If successful, the constant result is
+/// using the specified DataLayout. If successful, the constant result is
 /// result is returned, if not, null is returned.
 Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0);
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -54,7 +54,7 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
 ///
 Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
 ArrayRef<Constant *> Ops,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0);
 /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -63,7 +63,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
 ///
 Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
 Constant *LHS, Constant *RHS,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0);
 /// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
@@ -75,7 +75,7 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
 /// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
 /// produce if it is constant and determinable. If this is not determinable,
 /// return null.
-Constant *ConstantFoldLoadFromConstPtr(Constant *C, const TargetData *TD = 0);
+Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0);
 /// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
 /// getelementptr constantexpr, return the constant value being addressed by the

View File

@@ -28,7 +28,7 @@ class IVUsers;
 class ScalarEvolution;
 class SCEV;
 class IVUsers;
-class TargetData;
+class DataLayout;
 /// IVStrideUse - Keep track of one use of a strided induction variable.
 /// The Expr member keeps track of the expression, User is the actual user
@@ -123,7 +123,7 @@ class IVUsers : public LoopPass {
 LoopInfo *LI;
 DominatorTree *DT;
 ScalarEvolution *SE;
-TargetData *TD;
+DataLayout *TD;
 SmallPtrSet<Instruction*,16> Processed;
 /// IVUses - A list of all tracked IV uses of induction variable expressions

View File

@@ -26,7 +26,7 @@
 namespace llvm {
 class CallSite;
-class TargetData;
+class DataLayout;
 namespace InlineConstants {
 // Various magic constants used to adjust heuristics.
@@ -104,13 +104,13 @@ namespace llvm {
 /// InlineCostAnalyzer - Cost analyzer used by inliner.
 class InlineCostAnalyzer {
-  // TargetData if available, or null.
-  const TargetData *TD;
+  // DataLayout if available, or null.
+  const DataLayout *TD;
 public:
 InlineCostAnalyzer(): TD(0) {}
-void setTargetData(const TargetData *TData) { TD = TData; }
+void setDataLayout(const DataLayout *TData) { TD = TData; }
 /// \brief Get an InlineCost object representing the cost of inlining this
 /// callsite.

View File

@@ -24,7 +24,7 @@ namespace llvm {
 class ArrayRef;
 class DominatorTree;
 class Instruction;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Type;
 class Value;
@@ -32,122 +32,122 @@ namespace llvm {
 /// SimplifyAddInst - Given operands for an Add, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifySubInst - Given operands for a Sub, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyMulInst - Given operands for a Mul, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyMulInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifySDivInst - Given operands for an SDiv, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifySDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyUDivInst - Given operands for a UDiv, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyUDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyFDivInst - Given operands for an FDiv, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyFDivInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifySRemInst - Given operands for an SRem, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifySRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyURemInst - Given operands for a URem, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyURemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyFRemInst - Given operands for an FRem, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyFRemInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyShlInst - Given operands for a Shl, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyLShrInst - Given operands for a LShr, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyAShrInst - Given operands for a AShr, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyAndInst - Given operands for an And, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyAndInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyOrInst - Given operands for an Or, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyOrInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyXorInst - Given operands for a Xor, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyXorInst(Value *LHS, Value *RHS, const TargetData *TD = 0,
+Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
 /// the result. If not, this returns null.
 Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyGEPInst - Given operands for an GetElementPtrInst, see if we can
 /// fold the result. If not, this returns null.
-Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD = 0,
+Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
@@ -155,13 +155,13 @@ namespace llvm {
 /// can fold the result. If not, this returns null.
 Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
 ArrayRef<unsigned> Idxs,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyTruncInst - Given operands for an TruncInst, see if we can fold
 /// the result. If not, this returns null.
-Value *SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD = 0,
+Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
@@ -171,20 +171,20 @@ namespace llvm {
 /// SimplifyCmpInst - Given operands for a CmpInst, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
 /// fold the result. If not, this returns null.
 Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 /// SimplifyInstruction - See if we can compute a simplified version of this
 /// instruction. If not, this returns null.
-Value *SimplifyInstruction(Instruction *I, const TargetData *TD = 0,
+Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
@@ -198,7 +198,7 @@ namespace llvm {
 ///
 /// The function returns true if any simplifications were performed.
 bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
@@ -209,7 +209,7 @@ namespace llvm {
 /// of the users impacted. It returns true if any simplifications were
 /// performed.
 bool recursivelySimplifyInstruction(Instruction *I,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 const TargetLibraryInfo *TLI = 0,
 const DominatorTree *DT = 0);
 } // end namespace llvm

View File

@@ -19,14 +19,14 @@
 namespace llvm {
 class Constant;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Value;
 /// LazyValueInfo - This pass computes, caches, and vends lazy value constraint
 /// information.
 class LazyValueInfo : public FunctionPass {
-  class TargetData *TD;
+  class DataLayout *TD;
   class TargetLibraryInfo *TLI;
   void *PImpl;
   LazyValueInfo(const LazyValueInfo&) LLVM_DELETED_FUNCTION;

View File

@@ -19,7 +19,7 @@
 namespace llvm {
 class AliasAnalysis;
-class TargetData;
+class DataLayout;
 class MDNode;
 /// isSafeToLoadUnconditionally - Return true if we know that executing a load
@@ -27,7 +27,7 @@ class MDNode;
 /// specified pointer, we do a quick local scan of the basic block containing
 /// ScanFrom, to determine if the address is already accessed.
 bool isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
-unsigned Align, const TargetData *TD = 0);
+unsigned Align, const DataLayout *TD = 0);
 /// FindAvailableLoadedValue - Scan the ScanBB block backwards (starting at
 /// the instruction before ScanFrom) checking to see if we have the value at

View File

@@ -27,7 +27,7 @@
 namespace llvm {
 class CallInst;
 class PointerType;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class Type;
 class Value;
@@ -81,7 +81,7 @@ static inline CallInst *extractMallocCall(Value *I,
 /// isArrayMalloc - Returns the corresponding CallInst if the instruction
 /// is a call to malloc whose array size can be determined and the array size
 /// is not constant 1. Otherwise, return NULL.
-const CallInst *isArrayMalloc(const Value *I, const TargetData *TD,
+const CallInst *isArrayMalloc(const Value *I, const DataLayout *TD,
 const TargetLibraryInfo *TLI);
 /// getMallocType - Returns the PointerType resulting from the malloc call.
@@ -103,7 +103,7 @@ Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
 /// then return that multiple. For non-array mallocs, the multiple is
 /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
 /// determined.
-Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
+Value *getMallocArraySize(CallInst *CI, const DataLayout *TD,
 const TargetLibraryInfo *TLI,
 bool LookThroughSExt = false);
@@ -141,7 +141,7 @@ static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
 /// object size in Size if successful, and false otherwise.
 /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
 /// byval arguments, and global variables.
-bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
+bool getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
 const TargetLibraryInfo *TLI, bool RoundToAlign = false);
@@ -153,7 +153,7 @@ typedef std::pair<APInt, APInt> SizeOffsetType;
 class ObjectSizeOffsetVisitor
 : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
-  const TargetData *TD;
+  const DataLayout *TD;
   const TargetLibraryInfo *TLI;
   bool RoundToAlign;
   unsigned IntTyBits;
@@ -167,7 +167,7 @@ class ObjectSizeOffsetVisitor
 }
 public:
-ObjectSizeOffsetVisitor(const TargetData *TD, const TargetLibraryInfo *TLI,
+ObjectSizeOffsetVisitor(const DataLayout *TD, const TargetLibraryInfo *TLI,
 LLVMContext &Context, bool RoundToAlign = false);
 SizeOffsetType compute(Value *V);
@@ -213,7 +213,7 @@ class ObjectSizeOffsetEvaluator
 typedef DenseMap<const Value*, WeakEvalType> CacheMapTy;
 typedef SmallPtrSet<const Value*, 8> PtrSetTy;
-const TargetData *TD;
+const DataLayout *TD;
 const TargetLibraryInfo *TLI;
 LLVMContext &Context;
 BuilderTy Builder;
@@ -228,7 +228,7 @@ class ObjectSizeOffsetEvaluator
 SizeOffsetEvalType compute_(Value *V);
 public:
-ObjectSizeOffsetEvaluator(const TargetData *TD, const TargetLibraryInfo *TLI,
+ObjectSizeOffsetEvaluator(const DataLayout *TD, const TargetLibraryInfo *TLI,
 LLVMContext &Context);
 SizeOffsetEvalType compute(Value *V);

View File

@@ -29,7 +29,7 @@ namespace llvm {
 class Instruction;
 class CallSite;
 class AliasAnalysis;
-class TargetData;
+class DataLayout;
 class MemoryDependenceAnalysis;
 class PredIteratorCache;
 class DominatorTree;
@@ -323,7 +323,7 @@ namespace llvm {
 /// Current AA implementation, just a cache.
 AliasAnalysis *AA;
-TargetData *TD;
+DataLayout *TD;
 DominatorTree *DT;
 OwningPtr<PredIteratorCache> PredCache;
 public:
@@ -412,7 +412,7 @@ namespace llvm {
 int64_t MemLocOffs,
 unsigned MemLocSize,
 const LoadInst *LI,
-const TargetData &TD);
+const DataLayout &TD);
 private:
 MemDepResult getCallSiteDependencyFrom(CallSite C, bool isReadOnlyCall,

View File

@@ -19,7 +19,7 @@
 namespace llvm {
 class DominatorTree;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 /// PHITransAddr - An address value which tracks and handles phi translation.
@@ -37,7 +37,7 @@ class PHITransAddr {
 Value *Addr;
 /// TD - The target data we are playing with if known, otherwise null.
-const TargetData *TD;
+const DataLayout *TD;
 /// TLI - The target library info if known, otherwise null.
 const TargetLibraryInfo *TLI;
@@ -45,7 +45,7 @@ class PHITransAddr {
 /// InstInputs - The inputs for our symbolic address.
 SmallVector<Instruction*, 4> InstInputs;
 public:
-PHITransAddr(Value *addr, const TargetData *td) : Addr(addr), TD(td), TLI(0) {
+PHITransAddr(Value *addr, const DataLayout *td) : Addr(addr), TD(td), TLI(0) {
 // If the address is an instruction, the whole thing is considered an input.
 if (Instruction *I = dyn_cast<Instruction>(Addr))
 InstInputs.push_back(I);

View File

@@ -40,7 +40,7 @@ namespace llvm {
 class DominatorTree;
 class Type;
 class ScalarEvolution;
-class TargetData;
+class DataLayout;
 class TargetLibraryInfo;
 class LLVMContext;
 class Loop;
@@ -227,7 +227,7 @@ namespace llvm {
 /// TD - The target data information for the target we are targeting.
 ///
-TargetData *TD;
+DataLayout *TD;
 /// TLI - The target library information for the target we are targeting.
 ///

View File

@@ -22,7 +22,7 @@ namespace llvm {
 class Value;
 class Instruction;
 class APInt;
-class TargetData;
+class DataLayout;
 class StringRef;
 class MDNode;
@@ -37,27 +37,27 @@ namespace llvm {
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 void ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
-const TargetData *TD = 0, unsigned Depth = 0);
+const DataLayout *TD = 0, unsigned Depth = 0);
 void computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero);
 /// ComputeSignBit - Determine whether the sign bit is known to be zero or
 /// one. Convenience wrapper around ComputeMaskedBits.
 void ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
-const TargetData *TD = 0, unsigned Depth = 0);
+const DataLayout *TD = 0, unsigned Depth = 0);
 /// isPowerOfTwo - Return true if the given value is known to have exactly one
 /// bit set when defined. For vectors return true if every element is known to
 /// be a power of two when defined. Supports values with integer or pointer
 /// type and vectors of integers. If 'OrZero' is set then returns true if the
 /// given value is either a power of two or zero.
-bool isPowerOfTwo(Value *V, const TargetData *TD = 0, bool OrZero = false,
+bool isPowerOfTwo(Value *V, const DataLayout *TD = 0, bool OrZero = false,
 unsigned Depth = 0);
 /// isKnownNonZero - Return true if the given value is known to be non-zero
 /// when defined. For vectors return true if every element is known to be
 /// non-zero when defined. Supports values with integer or pointer type and
 /// vectors of integers.
-bool isKnownNonZero(Value *V, const TargetData *TD = 0, unsigned Depth = 0);
+bool isKnownNonZero(Value *V, const DataLayout *TD = 0, unsigned Depth = 0);
 /// MaskedValueIsZero - Return true if 'V & Mask' is known to be zero. We use
 /// this predicate to simplify operations downstream. Mask is known to be
@@ -69,7 +69,7 @@ namespace llvm {
 /// same width as the vector element, and the bit is set only if it is true
 /// for all of the elements in the vector.
 bool MaskedValueIsZero(Value *V, const APInt &Mask,
-const TargetData *TD = 0, unsigned Depth = 0);
+const DataLayout *TD = 0, unsigned Depth = 0);
 /// ComputeNumSignBits - Return the number of times the sign bit of the
@@ -80,7 +80,7 @@ namespace llvm {
 ///
 /// 'Op' must have a scalar integer type.
 ///
-unsigned ComputeNumSignBits(Value *Op, const TargetData *TD = 0,
+unsigned ComputeNumSignBits(Value *Op, const DataLayout *TD = 0,
 unsigned Depth = 0);
 /// ComputeMultiple - This function computes the integer multiple of Base that
@@ -118,10 +118,10 @@ namespace llvm {
 /// it can be expressed as a base pointer plus a constant offset. Return the
 /// base and offset to the caller.
 Value *GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
-const TargetData &TD);
+const DataLayout &TD);
 static inline const Value *
 GetPointerBaseWithConstantOffset(const Value *Ptr, int64_t &Offset,
-const TargetData &TD) {
+const DataLayout &TD) {
 return GetPointerBaseWithConstantOffset(const_cast<Value*>(Ptr), Offset,TD);
 }
@@ -143,10 +143,10 @@ namespace llvm {
 /// being addressed. Note that the returned value has pointer type if the
 /// specified value does. If the MaxLookup value is non-zero, it limits the
 /// number of instructions to be stripped off.
-Value *GetUnderlyingObject(Value *V, const TargetData *TD = 0,
+Value *GetUnderlyingObject(Value *V, const DataLayout *TD = 0,
 unsigned MaxLookup = 6);
 static inline const Value *
-GetUnderlyingObject(const Value *V, const TargetData *TD = 0,
+GetUnderlyingObject(const Value *V, const DataLayout *TD = 0,
 unsigned MaxLookup = 6) {
 return GetUnderlyingObject(const_cast<Value *>(V), TD, MaxLookup);
 }
@@ -156,7 +156,7 @@ namespace llvm {
 /// multiple objects.
 void GetUnderlyingObjects(Value *V,
 SmallVectorImpl<Value *> &Objects,
-const TargetData *TD = 0,
+const DataLayout *TD = 0,
 unsigned MaxLookup = 6);
 /// onlyUsedByLifetimeMarkers - Return true if the only users of this pointer
@@ -182,7 +182,7 @@ namespace llvm {
 /// However, this method can return true for instructions that read memory;
 /// for such instructions, moving them may change the resulting value.
 bool isSafeToSpeculativelyExecute(const Value *V,
-const TargetData *TD = 0);
+const DataLayout *TD = 0);
 } // end namespace llvm

View File

@@ -48,7 +48,7 @@ namespace llvm {
 class DwarfException;
 class Mangler;
 class TargetLoweringObjectFile;
-class TargetData;
+class DataLayout;
 class TargetMachine;
 /// AsmPrinter - This class is intended to be used as a driving class for all
@@ -131,8 +131,8 @@ namespace llvm {
 /// getObjFileLowering - Return information about object file lowering.
 const TargetLoweringObjectFile &getObjFileLowering() const;
-/// getTargetData - Return information about data layout.
-const TargetData &getTargetData() const;
+/// getDataLayout - Return information about data layout.
+const DataLayout &getDataLayout() const;
 /// getCurrentSection() - Return the current section we are emitting to.
 const MCSection *getCurrentSection() const;

View File

@@ -32,7 +32,7 @@ class MachineFunction;
 class MachineInstr;
 class MachineFrameInfo;
 class MachineRegisterInfo;
-class TargetData;
+class DataLayout;
 class TargetInstrInfo;
 class TargetLibraryInfo;
 class TargetLowering;
@@ -54,7 +54,7 @@ protected:
 MachineConstantPool &MCP;
 DebugLoc DL;
 const TargetMachine &TM;
-const TargetData &TD;
+const DataLayout &TD;
 const TargetInstrInfo &TII;
 const TargetLowering &TLI;
 const TargetRegisterInfo &TRI;

View File

@@ -21,15 +21,15 @@
 namespace llvm {
 class CallInst;
 class Module;
-class TargetData;
+class DataLayout;
 class IntrinsicLowering {
-  const TargetData& TD;
+  const DataLayout& TD;
   bool Warned;
 public:
-explicit IntrinsicLowering(const TargetData &td) :
+explicit IntrinsicLowering(const DataLayout &td) :
 TD(td), Warned(false) {}
 /// AddPrototypes - This method, if called, causes all of the prototypes

View File

@@ -25,7 +25,7 @@ namespace llvm {
 class Constant;
 class FoldingSetNodeID;
-class TargetData;
+class DataLayout;
 class TargetMachine;
 class Type;
 class MachineConstantPool;
@@ -132,14 +132,14 @@ public:
 /// address of the function constant pool values.
 /// @brief The machine constant pool.
 class MachineConstantPool {
-  const TargetData *TD; ///< The machine's TargetData.
+  const DataLayout *TD; ///< The machine's DataLayout.
   unsigned PoolAlignment; ///< The alignment for the pool.
   std::vector<MachineConstantPoolEntry> Constants; ///< The pool of constants.
   /// MachineConstantPoolValues that use an existing MachineConstantPoolEntry.
   DenseSet<MachineConstantPoolValue*> MachineCPVsSharingEntries;
 public:
 /// @brief The only constructor.
-explicit MachineConstantPool(const TargetData *td)
+explicit MachineConstantPool(const DataLayout *td)
 : TD(td), PoolAlignment(1) {}
 ~MachineConstantPool();

View File

@@ -21,7 +21,7 @@
 namespace llvm {
 class raw_ostream;
-class TargetData;
+class DataLayout;
 class TargetRegisterClass;
 class Type;
 class MachineFunction;

View File

@@ -26,7 +26,7 @@
 namespace llvm {
 class MachineBasicBlock;
-class TargetData;
+class DataLayout;
 class raw_ostream;
 /// MachineJumpTableEntry - One jump table in the jump table info.
@@ -84,9 +84,9 @@ public:
 JTEntryKind getEntryKind() const { return EntryKind; }
 /// getEntrySize - Return the size of each entry in the jump table.
-unsigned getEntrySize(const TargetData &TD) const;
+unsigned getEntrySize(const DataLayout &TD) const;
 /// getEntryAlignment - Return the alignment of each entry in the jump table.
-unsigned getEntryAlignment(const TargetData &TD) const;
+unsigned getEntryAlignment(const DataLayout &TD) const;
 /// createJumpTableIndex - Create a new jump table.
 ///

View File

@@ -19,8 +19,8 @@
 #define LLVM_DERIVED_TYPES_H
 #include "llvm/Type.h"
-#include "llvm/Support/Compiler.h"
 #include "llvm/Support/DataTypes.h"
+#include "llvm/Support/Compiler.h"
 namespace llvm {
@@ -184,7 +184,7 @@ public:
 /// Independent of what kind of struct you have, the body of a struct type are
 /// laid out in memory consequtively with the elements directly one after the
 /// other (if the struct is packed) or (if not packed) with padding between the
-/// elements as defined by TargetData (which is required to match what the code
+/// elements as defined by DataLayout (which is required to match what the code
 /// generator for a target expects).
 ///
 class StructType : public CompositeType {

View File

@@ -42,7 +42,7 @@ class JITMemoryManager;
 class MachineCodeInfo;
 class Module;
 class MutexGuard;
-class TargetData;
+class DataLayout;
 class Triple;
 class Type;
@@ -104,7 +104,7 @@ class ExecutionEngine {
 ExecutionEngineState EEState;
 /// The target data for the platform for which execution is being performed.
-const TargetData *TD;
+const DataLayout *TD;
 /// Whether lazy JIT compilation is enabled.
 bool CompilingLazily;
@@ -123,7 +123,7 @@ protected:
 /// optimize for the case where there is only one module.
 SmallVector<Module*, 1> Modules;
-void setTargetData(const TargetData *td) { TD = td; }
+void setDataLayout(const DataLayout *td) { TD = td; }
 /// getMemoryforGV - Allocate memory for a global variable.
 virtual char *getMemoryForGV(const GlobalVariable *GV);
@@ -213,7 +213,7 @@ public:
 //===--------------------------------------------------------------------===//
-const TargetData *getTargetData() const { return TD; }
+const DataLayout *getDataLayout() const { return TD; }
 /// removeModule - Remove a Module from the list of modules. Returns true if
 /// M is found.

View File

@@ -563,7 +563,7 @@ public:
 /// IntPtrTy argument is used to make accurate determinations for casts
 /// involving Integer and Pointer types. They are no-op casts if the integer
 /// is the same size as the pointer. However, pointer size varies with
-/// platform. Generally, the result of TargetData::getIntPtrType() should be
+/// platform. Generally, the result of DataLayout::getIntPtrType() should be
 /// passed in. If that's not available, use Type::Int64Ty, which will make
 /// the isNoopCast call conservative.
 /// @brief Determine if the described cast is a no-op cast.

View File

@ -26,11 +26,11 @@
namespace llvm { namespace llvm {
class TargetData; class DataLayout;
/// TargetFolder - Create constants with target dependent folding. /// TargetFolder - Create constants with target dependent folding.
class TargetFolder { class TargetFolder {
const TargetData *TD; const DataLayout *TD;
/// Fold - Fold the constant using target specific information. /// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const { Constant *Fold(Constant *C) const {
@ -41,7 +41,7 @@ class TargetFolder {
} }
public: public:
explicit TargetFolder(const TargetData *TheTD) : TD(TheTD) {} explicit TargetFolder(const DataLayout *TheTD) : TD(TheTD) {}
//===--------------------------------------------------------------------===// //===--------------------------------------------------------------------===//
// Binary Operators // Binary Operators
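For context on how this folder is typically consumed, a hedged sketch: TargetFolder is usually supplied to IRBuilder as its constant-folder template argument (the header path and surrounding setup are assumptions, not part of this commit):

  #include "llvm/DataLayout.h"
  #include "llvm/IRBuilder.h"
  #include "llvm/Support/TargetFolder.h"   // location at the time; may differ by tree
  using namespace llvm;

  void buildWithTargetFolding(Module &M) {
    DataLayout TD(&M);  // illustrative; a pass would usually query it instead
    // Constants created through this builder are folded with target info.
    IRBuilder<true, TargetFolder> Builder(M.getContext(), TargetFolder(&TD));
    (void)Builder;
  }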

View File

@ -22,7 +22,7 @@ class GlobalValue;
template <typename T> class SmallVectorImpl; template <typename T> class SmallVectorImpl;
class MCContext; class MCContext;
class MCSymbol; class MCSymbol;
class TargetData; class DataLayout;
class Mangler { class Mangler {
public: public:
@ -34,7 +34,7 @@ public:
private: private:
MCContext &Context; MCContext &Context;
const TargetData &TD; const DataLayout &TD;
/// AnonGlobalIDs - We need to give global values the same name every time /// AnonGlobalIDs - We need to give global values the same name every time
/// they are mangled. This keeps track of the number we give to anonymous /// they are mangled. This keeps track of the number we give to anonymous
@ -47,7 +47,7 @@ private:
unsigned NextAnonGlobalID; unsigned NextAnonGlobalID;
public: public:
Mangler(MCContext &context, const TargetData &td) Mangler(MCContext &context, const DataLayout &td)
: Context(context), TD(td), NextAnonGlobalID(1) {} : Context(context), TD(td), NextAnonGlobalID(1) {}
/// getSymbol - Return the MCSymbol for the specified global value. This /// getSymbol - Return the MCSymbol for the specified global value. This

View File

@ -1,53 +0,0 @@
//===-- llvm/Target/TargetData.h - Data size & alignment info ---*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the wrapper for DataLayout to provide compatibility
// with the old TargetData class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_TARGET_TARGETDATA_H
#define LLVM_TARGET_TARGETDATA_H
#include "llvm/DataLayout.h"
#include "llvm/Pass.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
/// TargetData - This class is just a wrapper to help with the transition to the
/// new DataLayout class.
class TargetData : public DataLayout {
public:
/// Default ctor.
///
/// @note This has to exist, because this is a pass, but it should never be
/// used.
TargetData() : DataLayout() {}
/// Constructs a TargetData from a specification string.
/// See DataLayout::init().
explicit TargetData(StringRef TargetDescription)
: DataLayout(TargetDescription) {}
/// Initialize target data from properties stored in the module.
explicit TargetData(const Module *M) : DataLayout(M) {}
TargetData(const TargetData &TD) : DataLayout(TD) {}
template <typename UIntTy>
static UIntTy RoundUpAlignment(UIntTy Val, unsigned Alignment) {
return DataLayout::RoundUpAlignment(Val, Alignment);
}
};
} // End llvm namespace
#endif
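Since the wrapper above is deleted rather than kept as a shim, client code migrates by the mechanical substitution that the remaining hunks in this commit show; a small sketch of the new spelling (the helper is hypothetical, and the queries keep their old names):

  // Before this commit:
  //   #include "llvm/Target/TargetData.h"   and   const TargetData &TD = ...;
  // After this commit:
  #include "llvm/DataLayout.h"
  #include "llvm/Type.h"
  using namespace llvm;

  static uint64_t allocSizeOf(const DataLayout &TD, Type *Ty) {
    return TD.getTypeAllocSize(Ty);   // same query spelling; only the class changed
  }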

View File

@ -50,7 +50,7 @@ namespace llvm {
class MCContext; class MCContext;
class MCExpr; class MCExpr;
template<typename T> class SmallVectorImpl; template<typename T> class SmallVectorImpl;
class TargetData; class DataLayout;
class TargetRegisterClass; class TargetRegisterClass;
class TargetLibraryInfo; class TargetLibraryInfo;
class TargetLoweringObjectFile; class TargetLoweringObjectFile;
@ -137,7 +137,7 @@ public:
virtual ~TargetLowering(); virtual ~TargetLowering();
const TargetMachine &getTargetMachine() const { return TM; } const TargetMachine &getTargetMachine() const { return TM; }
const TargetData *getTargetData() const { return TD; } const DataLayout *getDataLayout() const { return TD; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; } const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; } bool isBigEndian() const { return !IsLittleEndian; }
@ -1789,7 +1789,7 @@ public:
private: private:
const TargetMachine &TM; const TargetMachine &TM;
const TargetData *TD; const DataLayout *TD;
const TargetLoweringObjectFile &TLOF; const TargetLoweringObjectFile &TLOF;
/// PointerTy - The type to use for pointers, usually i32 or i64. /// PointerTy - The type to use for pointers, usually i32 or i64.

View File

@ -31,7 +31,7 @@ class MCCodeGenInfo;
class MCContext; class MCContext;
class PassManagerBase; class PassManagerBase;
class Target; class Target;
class TargetData; class DataLayout;
class TargetELFWriterInfo; class TargetELFWriterInfo;
class TargetFrameLowering; class TargetFrameLowering;
class TargetInstrInfo; class TargetInstrInfo;
@ -106,7 +106,7 @@ public:
virtual const TargetFrameLowering *getFrameLowering() const { return 0; } virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; } virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; } virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
virtual const TargetData *getTargetData() const { return 0; } virtual const DataLayout *getDataLayout() const { return 0; }
/// getMCAsmInfo - Return target specific asm information. /// getMCAsmInfo - Return target specific asm information.
/// ///
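A brief usage sketch for the renamed accessor, assuming a constructed TargetMachine &TM; as the default implementation above shows, it may return null, so callers must check:

  if (const DataLayout *DL = TM.getDataLayout()) {
    // Same queries as before the rename, e.g. the pointer size in bytes.
    unsigned PtrBytes = DL->getPointerSize();
    (void)PtrBytes;
  }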

View File

@ -20,7 +20,7 @@
namespace llvm { namespace llvm {
class TargetData; class DataLayout;
class TargetMachine; class TargetMachine;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -31,10 +31,10 @@ class TargetSelectionDAGInfo {
TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION; TargetSelectionDAGInfo(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION; void operator=(const TargetSelectionDAGInfo &) LLVM_DELETED_FUNCTION;
const TargetData *TD; const DataLayout *TD;
protected: protected:
const TargetData *getTargetData() const { return TD; } const DataLayout *getDataLayout() const { return TD; }
public: public:
explicit TargetSelectionDAGInfo(const TargetMachine &TM); explicit TargetSelectionDAGInfo(const TargetMachine &TM);

View File

@ -21,7 +21,7 @@
namespace llvm { namespace llvm {
class CallSite; class CallSite;
class TargetData; class DataLayout;
class InlineCost; class InlineCost;
template<class PtrType, unsigned SmallSize> template<class PtrType, unsigned SmallSize>
class SmallPtrSet; class SmallPtrSet;

View File

@ -19,7 +19,7 @@
namespace llvm { namespace llvm {
class Value; class Value;
class TargetData; class DataLayout;
class TargetLibraryInfo; class TargetLibraryInfo;
/// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*. /// CastToCStr - Return V if it is an i8*, otherwise cast it to i8*.
@ -28,52 +28,52 @@ namespace llvm {
/// EmitStrLen - Emit a call to the strlen function to the builder, for the /// EmitStrLen - Emit a call to the strlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, and the /// specified pointer. Ptr is required to be some pointer type, and the
/// return value has 'intptr_t' type. /// return value has 'intptr_t' type.
Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const TargetData *TD, Value *EmitStrLen(Value *Ptr, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitStrNLen - Emit a call to the strnlen function to the builder, for the /// EmitStrNLen - Emit a call to the strnlen function to the builder, for the
/// specified pointer. Ptr is required to be some pointer type, MaxLen must /// specified pointer. Ptr is required to be some pointer type, MaxLen must
/// be of size_t type, and the return value has 'intptr_t' type. /// be of size_t type, and the return value has 'intptr_t' type.
Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B, Value *EmitStrNLen(Value *Ptr, Value *MaxLen, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrChr - Emit a call to the strchr function to the builder, for the /// EmitStrChr - Emit a call to the strchr function to the builder, for the
/// specified pointer and character. Ptr is required to be some pointer type, /// specified pointer and character. Ptr is required to be some pointer type,
/// and the return value has 'i8*' type. /// and the return value has 'i8*' type.
Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const TargetData *TD, Value *EmitStrChr(Value *Ptr, char C, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitStrNCmp - Emit a call to the strncmp function to the builder. /// EmitStrNCmp - Emit a call to the strncmp function to the builder.
Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B, Value *EmitStrNCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitStrCpy - Emit a call to the strcpy function to the builder, for the /// EmitStrCpy - Emit a call to the strcpy function to the builder, for the
/// specified pointer arguments. /// specified pointer arguments.
Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B, Value *EmitStrCpy(Value *Dst, Value *Src, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strcpy"); StringRef Name = "strcpy");
/// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the /// EmitStrNCpy - Emit a call to the strncpy function to the builder, for the
/// specified pointer arguments and length. /// specified pointer arguments and length.
Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B, Value *EmitStrNCpy(Value *Dst, Value *Src, Value *Len, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
StringRef Name = "strncpy"); StringRef Name = "strncpy");
/// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder. /// EmitMemCpyChk - Emit a call to the __memcpy_chk function to the builder.
/// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src /// This expects that the Len and ObjSize have type 'intptr_t' and Dst/Src
/// are pointers. /// are pointers.
Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize, Value *EmitMemCpyChk(Value *Dst, Value *Src, Value *Len, Value *ObjSize,
IRBuilder<> &B, const TargetData *TD, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is /// EmitMemChr - Emit a call to the memchr function. This assumes that Ptr is
/// a pointer, Val is an i32 value, and Len is an 'intptr_t' value. /// a pointer, Val is an i32 value, and Len is an 'intptr_t' value.
Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B, Value *EmitMemChr(Value *Ptr, Value *Val, Value *Len, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitMemCmp - Emit a call to the memcmp function. /// EmitMemCmp - Emit a call to the memcmp function.
Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B, Value *EmitMemCmp(Value *Ptr1, Value *Ptr2, Value *Len, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name' /// EmitUnaryFloatFnCall - Emit a call to the unary function named 'Name'
/// (e.g. 'floor'). This function is known to take a single argument of type matching /// (e.g. 'floor'). This function is known to take a single argument of type matching
@ -85,28 +85,28 @@ namespace llvm {
/// EmitPutChar - Emit a call to the putchar function. This assumes that Char /// EmitPutChar - Emit a call to the putchar function. This assumes that Char
/// is an integer. /// is an integer.
Value *EmitPutChar(Value *Char, IRBuilder<> &B, const TargetData *TD, Value *EmitPutChar(Value *Char, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitPutS - Emit a call to the puts function. This assumes that Str is /// EmitPutS - Emit a call to the puts function. This assumes that Str is
/// some pointer. /// some pointer.
Value *EmitPutS(Value *Str, IRBuilder<> &B, const TargetData *TD, Value *EmitPutS(Value *Str, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitFPutC - Emit a call to the fputc function. This assumes that Char is /// EmitFPutC - Emit a call to the fputc function. This assumes that Char is
/// an i32, and File is a pointer to FILE. /// an i32, and File is a pointer to FILE.
Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B, Value *EmitFPutC(Value *Char, Value *File, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// EmitFPutS - Emit a call to the fputs function. Str is required to be a /// EmitFPutS - Emit a call to the fputs function. Str is required to be a
/// pointer and File is a pointer to FILE. /// pointer and File is a pointer to FILE.
Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const TargetData *TD, Value *EmitFPutS(Value *Str, Value *File, IRBuilder<> &B, const DataLayout *TD,
const TargetLibraryInfo *TLI); const TargetLibraryInfo *TLI);
/// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is /// EmitFWrite - Emit a call to the fwrite function. This assumes that Ptr is
/// a pointer, Size is an 'intptr_t', and File is a pointer to FILE. /// a pointer, Size is an 'intptr_t', and File is a pointer to FILE.
Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B, Value *EmitFWrite(Value *Ptr, Value *Size, Value *File, IRBuilder<> &B,
const TargetData *TD, const TargetLibraryInfo *TLI); const DataLayout *TD, const TargetLibraryInfo *TLI);
/// SimplifyFortifiedLibCalls - Helper class for folding checked library /// SimplifyFortifiedLibCalls - Helper class for folding checked library
/// calls (e.g. __strcpy_chk) into their unchecked counterparts. /// calls (e.g. __strcpy_chk) into their unchecked counterparts.
@ -118,7 +118,7 @@ namespace llvm {
bool isString) const = 0; bool isString) const = 0;
public: public:
virtual ~SimplifyFortifiedLibCalls(); virtual ~SimplifyFortifiedLibCalls();
bool fold(CallInst *CI, const TargetData *TD, const TargetLibraryInfo *TLI); bool fold(CallInst *CI, const DataLayout *TD, const TargetLibraryInfo *TLI);
}; };
} }
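Apart from the parameter type changing from TargetData to DataLayout, these emitters are used as before; a minimal sketch of one call, assuming Ptr, B (an IRBuilder<>), TD and TLI are already set up by the caller:

  // Emit a call to strlen(Ptr); the result has the target's 'intptr_t' type.
  Value *Len = EmitStrLen(Ptr, B, TD, TLI);
  if (!Len) {
    // The emitter can decline, e.g. when TargetLibraryInfo reports strlen as
    // unavailable; callers are expected to handle a null result.
  }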

View File

@ -39,7 +39,7 @@ class ReturnInst;
class CallSite; class CallSite;
class Trace; class Trace;
class CallGraph; class CallGraph;
class TargetData; class DataLayout;
class Loop; class Loop;
class LoopInfo; class LoopInfo;
class AllocaInst; class AllocaInst;
@ -150,7 +150,7 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
SmallVectorImpl<ReturnInst*> &Returns, SmallVectorImpl<ReturnInst*> &Returns,
const char *NameSuffix = "", const char *NameSuffix = "",
ClonedCodeInfo *CodeInfo = 0, ClonedCodeInfo *CodeInfo = 0,
const TargetData *TD = 0, const DataLayout *TD = 0,
Instruction *TheCall = 0); Instruction *TheCall = 0);
@ -158,13 +158,13 @@ void CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
/// InlineFunction call, and records the auxiliary results produced by it. /// InlineFunction call, and records the auxiliary results produced by it.
class InlineFunctionInfo { class InlineFunctionInfo {
public: public:
explicit InlineFunctionInfo(CallGraph *cg = 0, const TargetData *td = 0) explicit InlineFunctionInfo(CallGraph *cg = 0, const DataLayout *td = 0)
: CG(cg), TD(td) {} : CG(cg), TD(td) {}
/// CG - If non-null, InlineFunction will update the callgraph to reflect the /// CG - If non-null, InlineFunction will update the callgraph to reflect the
/// changes it makes. /// changes it makes.
CallGraph *CG; CallGraph *CG;
const TargetData *TD; const DataLayout *TD;
/// StaticAllocas - InlineFunction fills this in with all static allocas that /// StaticAllocas - InlineFunction fills this in with all static allocas that
/// get copied into the caller. /// get copied into the caller.
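A sketch of the updated constructor in use; CS is an assumed CallSite, and TD may be null, matching the defaults above:

  InlineFunctionInfo IFI(/*cg=*/0, /*td=*/TD);
  if (InlineFunction(CS, IFI)) {
    // On success, IFI.StaticAllocas holds the static allocas that were
    // copied into the caller, as documented above.
  }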

View File

@ -18,7 +18,7 @@
#include "llvm/IRBuilder.h" #include "llvm/IRBuilder.h"
#include "llvm/Operator.h" #include "llvm/Operator.h"
#include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
namespace llvm { namespace llvm {
@ -35,7 +35,7 @@ class Pass;
class PHINode; class PHINode;
class AllocaInst; class AllocaInst;
class ConstantExpr; class ConstantExpr;
class TargetData; class DataLayout;
class TargetLibraryInfo; class TargetLibraryInfo;
class DIBuilder; class DIBuilder;
@ -84,7 +84,7 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
/// ///
/// This returns true if it changed the code, note that it can delete /// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well as in this block. /// instructions in other blocks as well as in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0, bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0); const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -103,7 +103,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0,
/// .. and delete the predecessor corresponding to the '1', this will attempt to /// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0. /// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred, void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
TargetData *TD = 0); DataLayout *TD = 0);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its /// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
@ -134,7 +134,7 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// of the CFG. It returns true if a modification was made, possibly deleting /// of the CFG. It returns true if a modification was made, possibly deleting
/// the basic block that was pointed to. /// the basic block that was pointed to.
/// ///
bool SimplifyCFG(BasicBlock *BB, const TargetData *TD = 0); bool SimplifyCFG(BasicBlock *BB, const DataLayout *TD = 0);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch, /// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the /// and if a predecessor branches to us and one of our successors, fold the
@ -162,10 +162,10 @@ AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
/// and it is more than the alignment of the ultimate object, see if we can /// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed. /// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign, unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const TargetData *TD = 0); const DataLayout *TD = 0);
/// getKnownAlignment - Try to infer an alignment for the specified pointer. /// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) { static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
return getOrEnforceKnownAlignment(V, 0, TD); return getOrEnforceKnownAlignment(V, 0, TD);
} }
@ -175,7 +175,7 @@ static inline unsigned getKnownAlignment(Value *V, const TargetData *TD = 0) {
/// When NoAssumptions is true, no assumptions about index computation not /// When NoAssumptions is true, no assumptions about index computation not
/// overflowing are made. /// overflowing are made.
template<typename IRBuilderTy> template<typename IRBuilderTy>
Value *EmitGEPOffset(IRBuilderTy *Builder, const TargetData &TD, User *GEP, Value *EmitGEPOffset(IRBuilderTy *Builder, const DataLayout &TD, User *GEP,
bool NoAssumptions = false) { bool NoAssumptions = false) {
gep_type_iterator GTI = gep_type_begin(GEP); gep_type_iterator GTI = gep_type_begin(GEP);
Type *IntPtrTy = TD.getIntPtrType(GEP->getContext()); Type *IntPtrTy = TD.getIntPtrType(GEP->getContext());
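A short usage sketch for the helpers above, assuming a Value *V, a GEP instruction, an insertion point InsertPt, and an optional DataLayout *TD (alignment inference simply degrades gracefully when TD is null):

  // Infer the known alignment of V; TD may be null.
  unsigned Align = getKnownAlignment(V, TD);
  (void)Align;

  // Materialize the byte offset a GEP computes, when target info is present.
  if (TD) {
    IRBuilder<> Builder(InsertPt);
    Value *Offset = EmitGEPOffset(&Builder, *TD, GEP);
    (void)Offset;
  }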

View File

@ -252,7 +252,7 @@ public:
/// isSized - Return true if it makes sense to take the size of this type. To /// isSized - Return true if it makes sense to take the size of this type. To
/// get the actual size for a particular target, it is reasonable to use the /// get the actual size for a particular target, it is reasonable to use the
/// TargetData subsystem to do this. /// DataLayout subsystem to do this.
/// ///
bool isSized() const { bool isSized() const {
// If it's a primitive, it is always sized. // If it's a primitive, it is always sized.
@ -276,7 +276,7 @@ public:
/// ///
/// Note that this may not reflect the size of memory allocated for an /// Note that this may not reflect the size of memory allocated for an
/// instance of the type or the number of bytes that are written when an /// instance of the type or the number of bytes that are written when an
/// instance of the type is stored to memory. The TargetData class provides /// instance of the type is stored to memory. The DataLayout class provides
/// additional query functions to provide this information. /// additional query functions to provide this information.
/// ///
unsigned getPrimitiveSizeInBits() const; unsigned getPrimitiveSizeInBits() const;

View File

@ -35,7 +35,7 @@
#include "llvm/Instructions.h" #include "llvm/Instructions.h"
#include "llvm/LLVMContext.h" #include "llvm/LLVMContext.h"
#include "llvm/Type.h" #include "llvm/Type.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm; using namespace llvm;
@ -452,7 +452,7 @@ AliasAnalysis::~AliasAnalysis() {}
/// AliasAnalysis interface before any other methods are called. /// AliasAnalysis interface before any other methods are called.
/// ///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) { void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
TD = P->getAnalysisIfAvailable<TargetData>(); TD = P->getAnalysisIfAvailable<DataLayout>();
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>(); TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>(); AA = &P->getAnalysis<AliasAnalysis>();
} }
@ -463,7 +463,7 @@ void AliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<AliasAnalysis>(); // All AA's chain AU.addRequired<AliasAnalysis>(); // All AA's chain
} }
/// getTypeStoreSize - Return the TargetData store size for the given type, /// getTypeStoreSize - Return the DataLayout store size for the given type,
/// if known, or a conservative value otherwise. /// if known, or a conservative value otherwise.
/// ///
uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) { uint64_t AliasAnalysis::getTypeStoreSize(Type *Ty) {
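The initialization above illustrates the idiom this commit relies on throughout: DataLayout is an optional analysis, so passes fetch it with getAnalysisIfAvailable and must tolerate a null result. A hedged sketch of the same idiom in a standalone pass (the pass name is hypothetical):

  bool MyFunctionPass::runOnFunction(Function &F) {   // hypothetical pass
    const DataLayout *TD = getAnalysisIfAvailable<DataLayout>();
    if (!TD)
      return false;          // stay conservative when no target info exists
    // ... use TD, e.g. TD->getTypeStoreSize(SomeType), as above ...
    return false;
  }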

View File

@ -18,7 +18,7 @@
#include "llvm/LLVMContext.h" #include "llvm/LLVMContext.h"
#include "llvm/Pass.h" #include "llvm/Pass.h"
#include "llvm/Type.h" #include "llvm/Type.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h" #include "llvm/Assembly/Writer.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"

View File

@ -29,7 +29,7 @@
#include "llvm/Analysis/MemoryBuiltins.h" #include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/InstructionSimplify.h" #include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
@ -84,7 +84,7 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or /// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown. /// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD, static uint64_t getObjectSize(const Value *V, const DataLayout &TD,
const TargetLibraryInfo &TLI, const TargetLibraryInfo &TLI,
bool RoundToAlign = false) { bool RoundToAlign = false) {
uint64_t Size; uint64_t Size;
@ -96,7 +96,7 @@ static uint64_t getObjectSize(const Value *V, const TargetData &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified /// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size. /// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size, static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const TargetData &TD, const DataLayout &TD,
const TargetLibraryInfo &TLI) { const TargetLibraryInfo &TLI) {
// This function needs to use the aligned object size because we allow // This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment. // reads a bit past the end given sufficient alignment.
@ -108,7 +108,7 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified /// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size. /// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, static bool isObjectSize(const Value *V, uint64_t Size,
const TargetData &TD, const TargetLibraryInfo &TLI) { const DataLayout &TD, const TargetLibraryInfo &TLI) {
uint64_t ObjectSize = getObjectSize(V, TD, TLI); uint64_t ObjectSize = getObjectSize(V, TD, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size; return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
} }
@ -151,7 +151,7 @@ namespace {
/// represented in the result. /// represented in the result.
static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset, static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
ExtensionKind &Extension, ExtensionKind &Extension,
const TargetData &TD, unsigned Depth) { const DataLayout &TD, unsigned Depth) {
assert(V->getType()->isIntegerTy() && "Not an integer value"); assert(V->getType()->isIntegerTy() && "Not an integer value");
// Limit our recursion depth. // Limit our recursion depth.
@ -226,14 +226,14 @@ static Value *GetLinearExpression(Value *V, APInt &Scale, APInt &Offset,
/// specified amount, but which may have other unrepresented high bits. As such, /// specified amount, but which may have other unrepresented high bits. As such,
/// the gep cannot necessarily be reconstructed from its decomposed form. /// the gep cannot necessarily be reconstructed from its decomposed form.
/// ///
/// When TargetData is around, this function is capable of analyzing everything /// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. When not, it just looks /// that GetUnderlyingObject can look through. When not, it just looks
/// through pointer casts. /// through pointer casts.
/// ///
static const Value * static const Value *
DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
SmallVectorImpl<VariableGEPIndex> &VarIndices, SmallVectorImpl<VariableGEPIndex> &VarIndices,
const TargetData *TD) { const DataLayout *TD) {
// Limit recursion depth to limit compile time in crazy cases. // Limit recursion depth to limit compile time in crazy cases.
unsigned MaxLookup = 6; unsigned MaxLookup = 6;
@ -277,7 +277,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs,
->getElementType()->isSized()) ->getElementType()->isSized())
return V; return V;
// If we are lacking TargetData information, we can't compute the offsets of // If we are lacking DataLayout information, we can't compute the offsets of
// elements computed by GEPs. However, we can handle bitcast equivalent // elements computed by GEPs. However, we can handle bitcast equivalent
// GEPs. // GEPs.
if (TD == 0) { if (TD == 0) {
@ -868,7 +868,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
const Value *GEP1BasePtr = const Value *GEP1BasePtr =
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD); DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
// DecomposeGEPExpression and GetUnderlyingObject should return the // DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no TargetData. // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 && assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!"); "DecomposeGEPExpression and GetUnderlyingObject disagree!");
@ -902,7 +902,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD); DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices, TD);
// DecomposeGEPExpression and GetUnderlyingObject should return the // DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no TargetData. // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) { if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
assert(TD == 0 && assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!"); "DecomposeGEPExpression and GetUnderlyingObject disagree!");
@ -937,7 +937,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD); DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices, TD);
// DecomposeGEPExpression and GetUnderlyingObject should return the // DecomposeGEPExpression and GetUnderlyingObject should return the
// same result except when DecomposeGEPExpression has no TargetData. // same result except when DecomposeGEPExpression has no DataLayout.
if (GEP1BasePtr != UnderlyingV1) { if (GEP1BasePtr != UnderlyingV1) {
assert(TD == 0 && assert(TD == 0 &&
"DecomposeGEPExpression and GetUnderlyingObject disagree!"); "DecomposeGEPExpression and GetUnderlyingObject disagree!");

View File

@ -15,7 +15,7 @@
#include "llvm/Function.h" #include "llvm/Function.h"
#include "llvm/Support/CallSite.h" #include "llvm/Support/CallSite.h"
#include "llvm/IntrinsicInst.h" #include "llvm/IntrinsicInst.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
@ -54,7 +54,7 @@ bool llvm::callIsSmall(ImmutableCallSite CS) {
return false; return false;
} }
bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) { bool llvm::isInstructionFree(const Instruction *I, const DataLayout *TD) {
if (isa<PHINode>(I)) if (isa<PHINode>(I))
return true; return true;
@ -119,7 +119,7 @@ bool llvm::isInstructionFree(const Instruction *I, const TargetData *TD) {
/// analyzeBasicBlock - Fill in the current structure with information gleaned /// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block. /// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB, void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
const TargetData *TD) { const DataLayout *TD) {
++NumBlocks; ++NumBlocks;
unsigned NumInstsBeforeThisBB = NumInsts; unsigned NumInstsBeforeThisBB = NumInsts;
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end(); for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
@ -189,7 +189,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB; NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
} }
void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) { void CodeMetrics::analyzeFunction(Function *F, const DataLayout *TD) {
// If this function contains a call that "returns twice" (e.g., setjmp or // If this function contains a call that "returns twice" (e.g., setjmp or
// _setjmp) and it isn't marked with "returns twice" itself, never inline it. // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
// This is a hack because we depend on the user marking their local variables // This is a hack because we depend on the user marking their local variables

View File

@ -11,7 +11,7 @@
// //
// Also, to supplement the basic VMCore ConstantExpr simplifications, // Also, to supplement the basic VMCore ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of // this file defines some additional folding routines that can make use of
// TargetData information. These functions cannot go in VMCore due to library // DataLayout information. These functions cannot go in VMCore due to library
// dependency issues. // dependency issues.
// //
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -25,7 +25,7 @@
#include "llvm/Intrinsics.h" #include "llvm/Intrinsics.h"
#include "llvm/Operator.h" #include "llvm/Operator.h"
#include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h" #include "llvm/ADT/StringMap.h"
@ -42,10 +42,10 @@ using namespace llvm;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// TargetData. This always returns a non-null constant, but it may be a /// DataLayout. This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable. /// ConstantExpr if unfoldable.
static Constant *FoldBitCast(Constant *C, Type *DestTy, static Constant *FoldBitCast(Constant *C, Type *DestTy,
const TargetData &TD) { const DataLayout &TD) {
// Catch the obvious splat cases. // Catch the obvious splat cases.
if (C->isNullValue() && !DestTy->isX86_MMXTy()) if (C->isNullValue() && !DestTy->isX86_MMXTy())
return Constant::getNullValue(DestTy); return Constant::getNullValue(DestTy);
@ -218,7 +218,7 @@ static Constant *FoldBitCast(Constant *C, Type *DestTy,
/// from a global, return the global and the constant. Because of /// from a global, return the global and the constant. Because of
/// constantexprs, this function is recursive. /// constantexprs, this function is recursive.
static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV, static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
int64_t &Offset, const TargetData &TD) { int64_t &Offset, const DataLayout &TD) {
// Trivial case, constant is the global. // Trivial case, constant is the global.
if ((GV = dyn_cast<GlobalValue>(C))) { if ((GV = dyn_cast<GlobalValue>(C))) {
Offset = 0; Offset = 0;
@ -274,7 +274,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// the CurPtr buffer. TD is the target data. /// the CurPtr buffer. TD is the target data.
static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
unsigned char *CurPtr, unsigned BytesLeft, unsigned char *CurPtr, unsigned BytesLeft,
const TargetData &TD) { const DataLayout &TD) {
assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) && assert(ByteOffset <= TD.getTypeAllocSize(C->getType()) &&
"Out of range access"); "Out of range access");
@ -388,7 +388,7 @@ static bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset,
} }
static Constant *FoldReinterpretLoadFromConstPtr(Constant *C, static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
const TargetData &TD) { const DataLayout &TD) {
Type *LoadTy = cast<PointerType>(C->getType())->getElementType(); Type *LoadTy = cast<PointerType>(C->getType())->getElementType();
IntegerType *IntType = dyn_cast<IntegerType>(LoadTy); IntegerType *IntType = dyn_cast<IntegerType>(LoadTy);
@ -455,7 +455,7 @@ static Constant *FoldReinterpretLoadFromConstPtr(Constant *C,
/// produce if it is constant and determinable. If this is not determinable, /// produce if it is constant and determinable. If this is not determinable,
/// return null. /// return null.
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
const TargetData *TD) { const DataLayout *TD) {
// First, try the easy cases: // First, try the easy cases:
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
if (GV->isConstant() && GV->hasDefinitiveInitializer()) if (GV->isConstant() && GV->hasDefinitiveInitializer())
@ -529,7 +529,7 @@ Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C,
return 0; return 0;
} }
static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout *TD){
if (LI->isVolatile()) return 0; if (LI->isVolatile()) return 0;
if (Constant *C = dyn_cast<Constant>(LI->getOperand(0))) if (Constant *C = dyn_cast<Constant>(LI->getOperand(0)))
@ -543,7 +543,7 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
/// these together. If target data info is available, it is provided as TD, /// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null. /// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const TargetData *TD){ Constant *Op1, const DataLayout *TD){
// SROA // SROA
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl. // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@ -572,7 +572,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// explicitly cast them so that they aren't implicitly casted by the /// explicitly cast them so that they aren't implicitly casted by the
/// getelementptr. /// getelementptr.
static Constant *CastGEPIndices(ArrayRef<Constant *> Ops, static Constant *CastGEPIndices(ArrayRef<Constant *> Ops,
Type *ResultTy, const TargetData *TD, Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
if (!TD) return 0; if (!TD) return 0;
Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext()); Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
@ -622,7 +622,7 @@ static Constant* StripPtrCastKeepAS(Constant* Ptr) {
/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so. /// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops, static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
Type *ResultTy, const TargetData *TD, Type *ResultTy, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
Constant *Ptr = Ops[0]; Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() || if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized() ||
@ -786,7 +786,7 @@ static Constant *SymbolicallyEvaluateGEP(ArrayRef<Constant *> Ops,
/// this function can only fail when attempting to fold instructions like loads /// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form. /// and stores, which have no constant expression form.
Constant *llvm::ConstantFoldInstruction(Instruction *I, Constant *llvm::ConstantFoldInstruction(Instruction *I,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
// Handle PHI nodes quickly here... // Handle PHI nodes quickly here...
if (PHINode *PN = dyn_cast<PHINode>(I)) { if (PHINode *PN = dyn_cast<PHINode>(I)) {
@ -856,10 +856,10 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I,
} }
/// ConstantFoldConstantExpression - Attempt to fold the constant expression /// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is /// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned. /// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE, Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
SmallVector<Constant*, 8> Ops; SmallVector<Constant*, 8> Ops;
for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end(); for (User::const_op_iterator i = CE->op_begin(), e = CE->op_end();
@ -889,7 +889,7 @@ Constant *llvm::ConstantFoldConstantExpression(const ConstantExpr *CE,
/// ///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy, Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops, ArrayRef<Constant *> Ops,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
// Handle easy binops first. // Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) { if (Instruction::isBinaryOp(Opcode)) {
@ -976,7 +976,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
/// ///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate, Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *Ops0, Constant *Ops1, Constant *Ops0, Constant *Ops1,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
// fold: icmp (inttoptr x), null -> icmp x, 0 // fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null // fold: icmp (ptrtoint x), 0 -> icmp x, null
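A usage sketch for the folding entry points above, assuming an Instruction *I plus the optional TD/TLI pointers that each hunk in this file now threads through:

  // Try to fold I; a null result means it did not fold to a constant (loads
  // and stores, which have no constant-expression form, always return null).
  if (Constant *C = ConstantFoldInstruction(I, TD, TLI)) {
    I->replaceAllUsesWith(C);
    I->eraseFromParent();
  }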

View File

@ -22,7 +22,7 @@
#include "llvm/Analysis/LoopPass.h" #include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h" #include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Assembly/Writer.h" #include "llvm/Assembly/Writer.h"
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
@ -235,7 +235,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
LI = &getAnalysis<LoopInfo>(); LI = &getAnalysis<LoopInfo>();
DT = &getAnalysis<DominatorTree>(); DT = &getAnalysis<DominatorTree>();
SE = &getAnalysis<ScalarEvolution>(); SE = &getAnalysis<ScalarEvolution>();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
// Find all uses of induction variables in this loop, and categorize // Find all uses of induction variables in this loop, and categorize
// them by stride. Start by finding all of the PHI nodes in the header for // them by stride. Start by finding all of the PHI nodes in the header for

View File

@ -24,7 +24,7 @@
#include "llvm/IntrinsicInst.h" #include "llvm/IntrinsicInst.h"
#include "llvm/Operator.h" #include "llvm/Operator.h"
#include "llvm/GlobalAlias.h" #include "llvm/GlobalAlias.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
@ -41,8 +41,8 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
typedef InstVisitor<CallAnalyzer, bool> Base; typedef InstVisitor<CallAnalyzer, bool> Base;
friend class InstVisitor<CallAnalyzer, bool>; friend class InstVisitor<CallAnalyzer, bool>;
// TargetData if available, or null. // DataLayout if available, or null.
const TargetData *const TD; const DataLayout *const TD;
// The called function. // The called function.
Function &F; Function &F;
@ -126,7 +126,7 @@ class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
bool visitCallSite(CallSite CS); bool visitCallSite(CallSite CS);
public: public:
CallAnalyzer(const TargetData *TD, Function &Callee, int Threshold) CallAnalyzer(const DataLayout *TD, Function &Callee, int Threshold)
: TD(TD), F(Callee), Threshold(Threshold), Cost(0), : TD(TD), F(Callee), Threshold(Threshold), Cost(0),
AlwaysInline(F.getFnAttributes().hasAlwaysInlineAttr()), AlwaysInline(F.getFnAttributes().hasAlwaysInlineAttr()),
IsCallerRecursive(false), IsRecursiveCall(false), IsCallerRecursive(false), IsRecursiveCall(false),
@ -833,7 +833,7 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
// one load and one store per word copied. // one load and one store per word copied.
// FIXME: The maxStoresPerMemcpy setting from the target should be used // FIXME: The maxStoresPerMemcpy setting from the target should be used
// here instead of a magic number of 8, but it's not available via // here instead of a magic number of 8, but it's not available via
// TargetData. // DataLayout.
NumStores = std::min(NumStores, 8U); NumStores = std::min(NumStores, 8U);
Cost -= 2 * NumStores * InlineConstants::InstrCost; Cost -= 2 * NumStores * InlineConstants::InstrCost;

View File

@ -31,7 +31,7 @@
#include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/PatternMatch.h" #include "llvm/Support/PatternMatch.h"
#include "llvm/Support/ValueHandle.h" #include "llvm/Support/ValueHandle.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
using namespace llvm::PatternMatch; using namespace llvm::PatternMatch;
@ -42,11 +42,11 @@ STATISTIC(NumFactor , "Number of factorizations");
STATISTIC(NumReassoc, "Number of reassociations"); STATISTIC(NumReassoc, "Number of reassociations");
struct Query { struct Query {
const TargetData *TD; const DataLayout *TD;
const TargetLibraryInfo *TLI; const TargetLibraryInfo *TLI;
const DominatorTree *DT; const DominatorTree *DT;
Query(const TargetData *td, const TargetLibraryInfo *tli, Query(const DataLayout *td, const TargetLibraryInfo *tli,
const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {} const DominatorTree *dt) : TD(td), TLI(tli), DT(dt) {}
}; };
@ -651,7 +651,7 @@ static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
} }
Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), return ::SimplifyAddInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit); RecursionLimit);
@ -664,7 +664,7 @@ Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
/// if the GEP has all-constant indices. Returns false if any non-constant /// if the GEP has all-constant indices. Returns false if any non-constant
/// index is encountered leaving the 'Offset' in an undefined state. The /// index is encountered leaving the 'Offset' in an undefined state. The
/// 'Offset' APInt must be the bitwidth of the target's pointer size. /// 'Offset' APInt must be the bitwidth of the target's pointer size.
static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP, static bool accumulateGEPOffset(const DataLayout &TD, GEPOperator *GEP,
APInt &Offset) { APInt &Offset) {
unsigned IntPtrWidth = TD.getPointerSizeInBits(); unsigned IntPtrWidth = TD.getPointerSizeInBits();
assert(IntPtrWidth == Offset.getBitWidth()); assert(IntPtrWidth == Offset.getBitWidth());
@ -696,7 +696,7 @@ static bool accumulateGEPOffset(const TargetData &TD, GEPOperator *GEP,
/// accumulates the total constant offset applied in the returned constant. It /// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are /// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied. /// no constant offsets applied.
static Constant *stripAndComputeConstantOffsets(const TargetData &TD, static Constant *stripAndComputeConstantOffsets(const DataLayout &TD,
Value *&V) { Value *&V) {
if (!V->getType()->isPointerTy()) if (!V->getType()->isPointerTy())
return 0; return 0;
@ -731,7 +731,7 @@ static Constant *stripAndComputeConstantOffsets(const TargetData &TD,
/// \brief Compute the constant difference between two pointer values. /// \brief Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero. /// If the difference is not a constant, returns zero.
static Constant *computePointerDifference(const TargetData &TD, static Constant *computePointerDifference(const DataLayout &TD,
Value *LHS, Value *RHS) { Value *LHS, Value *RHS) {
Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS); Constant *LHSOffset = stripAndComputeConstantOffsets(TD, LHS);
if (!LHSOffset) if (!LHSOffset)
@ -880,7 +880,7 @@ static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
} }
Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit); RecursionLimit);
@ -951,7 +951,7 @@ static Value *SimplifyMulInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyMulInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1039,7 +1039,7 @@ static Value *SimplifySDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifySDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1055,7 +1055,7 @@ static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyUDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1074,7 +1074,7 @@ static Value *SimplifyFDivInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyFDivInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1144,7 +1144,7 @@ static Value *SimplifySRemInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifySRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1160,7 +1160,7 @@ static Value *SimplifyURemInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyURemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1179,7 +1179,7 @@ static Value *SimplifyFRemInst(Value *Op0, Value *Op1, const Query &,
return 0; return 0;
} }
Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyFRemInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1248,7 +1248,7 @@ static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
} }
Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW, Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT), return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Query (TD, TLI, DT),
RecursionLimit); RecursionLimit);
@ -1275,7 +1275,7 @@ static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
} }
Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact, Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT), return ::SimplifyLShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@ -1307,7 +1307,7 @@ static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
} }
Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact, Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT), return ::SimplifyAShrInst(Op0, Op1, isExact, Query (TD, TLI, DT),
@ -1407,7 +1407,7 @@ static Value *SimplifyAndInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyAndInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1501,7 +1501,7 @@ static Value *SimplifyOrInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyOrInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1561,7 +1561,7 @@ static Value *SimplifyXorInst(Value *Op0, Value *Op1, const Query &Q,
return 0; return 0;
} }
Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const TargetData *TD, Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyXorInst(Op0, Op1, Query (TD, TLI, DT), RecursionLimit);
@ -1591,7 +1591,7 @@ static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
return 0; return 0;
} }
static Constant *computePointerICmp(const TargetData &TD, static Constant *computePointerICmp(const DataLayout &TD,
CmpInst::Predicate Pred, CmpInst::Predicate Pred,
Value *LHS, Value *RHS) { Value *LHS, Value *RHS) {
// We can only fold certain predicates on pointer comparisons. // We can only fold certain predicates on pointer comparisons.
@ -2399,7 +2399,7 @@ static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
} }
Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS, Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), return ::SimplifyICmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@ -2496,7 +2496,7 @@ static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
} }
Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS, Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), return ::SimplifyFCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
@ -2531,7 +2531,7 @@ static Value *SimplifySelectInst(Value *CondVal, Value *TrueVal,
} }
Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal, Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT), return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Query (TD, TLI, DT),
@ -2579,7 +2579,7 @@ static Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const Query &Q, unsigned) {
return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1)); return ConstantExpr::getGetElementPtr(cast<Constant>(Ops[0]), Ops.slice(1));
} }
Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const TargetData *TD, Value *llvm::SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyGEPInst(Ops, Query (TD, TLI, DT), RecursionLimit);
@ -2616,7 +2616,7 @@ static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val, Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs, ArrayRef<unsigned> Idxs,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT), return ::SimplifyInsertValueInst(Agg, Val, Idxs, Query (TD, TLI, DT),
@ -2664,7 +2664,7 @@ static Value *SimplifyTruncInst(Value *Op, Type *Ty, const Query &Q, unsigned) {
return 0; return 0;
} }
Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const TargetData *TD, Value *llvm::SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyTruncInst(Op, Ty, Query (TD, TLI, DT), RecursionLimit);
@ -2730,7 +2730,7 @@ static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
} }
Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS, Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit); return ::SimplifyBinOp(Opcode, LHS, RHS, Query (TD, TLI, DT), RecursionLimit);
} }
@ -2745,7 +2745,7 @@ static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
} }
Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS, Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const TargetData *TD, const TargetLibraryInfo *TLI, const DataLayout *TD, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT), return ::SimplifyCmpInst(Predicate, LHS, RHS, Query (TD, TLI, DT),
RecursionLimit); RecursionLimit);
@ -2761,7 +2761,7 @@ static Value *SimplifyCallInst(CallInst *CI, const Query &) {
/// SimplifyInstruction - See if we can compute a simplified version of this /// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null. /// instruction. If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD, Value *llvm::SimplifyInstruction(Instruction *I, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
Value *Result; Value *Result;
@ -2881,7 +2881,7 @@ Value *llvm::SimplifyInstruction(Instruction *I, const TargetData *TD,
/// This routine returns 'true' only when *it* simplifies something. The passed /// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this. /// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV, static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
bool Simplified = false; bool Simplified = false;
@ -2936,14 +2936,14 @@ static bool replaceAndRecursivelySimplifyImpl(Instruction *I, Value *SimpleV,
} }
bool llvm::recursivelySimplifyInstruction(Instruction *I, bool llvm::recursivelySimplifyInstruction(Instruction *I,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT); return replaceAndRecursivelySimplifyImpl(I, 0, TD, TLI, DT);
} }
bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV, bool llvm::replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
const DominatorTree *DT) { const DominatorTree *DT) {
assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!"); assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
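Note (not part of the patch): for callers, the only visible change in this file is the type of the layout argument; every Simplify* entry point now takes const DataLayout* where it previously took const TargetData*. A minimal usage sketch, with an invented helper name and the headers assumed from the hunks above:

#include "llvm/DataLayout.h"
#include "llvm/Instruction.h"
#include "llvm/Analysis/InstructionSimplify.h"
using namespace llvm;

// Hypothetical caller: fold I if possible, otherwise get back null.
static Value *trySimplify(Instruction *I, const DataLayout *TD,
                          const TargetLibraryInfo *TLI,
                          const DominatorTree *DT) {
  // Same call shape as before the rename; only TD's type changed.
  return SimplifyInstruction(I, TD, TLI, DT);
}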


@ -19,7 +19,7 @@
#include "llvm/Instructions.h" #include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h" #include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CFG.h" #include "llvm/Support/CFG.h"
#include "llvm/Support/ConstantRange.h" #include "llvm/Support/ConstantRange.h"
@ -212,7 +212,7 @@ public:
// Unless we can prove that the two Constants are different, we must // Unless we can prove that the two Constants are different, we must
// move to overdefined. // move to overdefined.
// FIXME: use TargetData/TargetLibraryInfo for smarter constant folding. // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>( if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE, ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getConstant(), getConstant(),
@ -238,7 +238,7 @@ public:
// Unless we can prove that the two Constants are different, we must // Unless we can prove that the two Constants are different, we must
// move to overdefined. // move to overdefined.
// FIXME: use TargetData/TargetLibraryInfo for smarter constant folding. // FIXME: use DataLayout/TargetLibraryInfo for smarter constant folding.
if (ConstantInt *Res = dyn_cast<ConstantInt>( if (ConstantInt *Res = dyn_cast<ConstantInt>(
ConstantFoldCompareInstOperands(CmpInst::ICMP_NE, ConstantFoldCompareInstOperands(CmpInst::ICMP_NE,
getNotConstant(), getNotConstant(),
@ -1009,7 +1009,7 @@ bool LazyValueInfo::runOnFunction(Function &F) {
if (PImpl) if (PImpl)
getCache(PImpl).clear(); getCache(PImpl).clear();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>(); TLI = &getAnalysis<TargetLibraryInfo>();
// Fully lazy. // Fully lazy.
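Note (sketch, not from the patch): passes that consumed TargetData as an optional analysis keep the exact same lookup pattern with DataLayout, and must still tolerate a null result when the module carries no layout. The pass below is hypothetical and its registration boilerplate is omitted:

#include "llvm/Pass.h"
#include "llvm/DataLayout.h"
using namespace llvm;

namespace {
// Illustrative pass only; shows the getAnalysisIfAvailable pattern above.
struct LayoutUserPass : public FunctionPass {
  static char ID;
  DataLayout *TD;
  LayoutUserPass() : FunctionPass(ID), TD(0) {}

  virtual bool runOnFunction(Function &F) {
    TD = getAnalysisIfAvailable<DataLayout>(); // may legitimately be null
    return false;                              // nothing is transformed here
  }
};
char LayoutUserPass::ID = 0;
}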


@ -43,7 +43,7 @@
#include "llvm/Analysis/Loads.h" #include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h" #include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Pass.h" #include "llvm/Pass.h"
#include "llvm/PassManager.h" #include "llvm/PassManager.h"
@ -103,7 +103,7 @@ namespace {
Module *Mod; Module *Mod;
AliasAnalysis *AA; AliasAnalysis *AA;
DominatorTree *DT; DominatorTree *DT;
TargetData *TD; DataLayout *TD;
TargetLibraryInfo *TLI; TargetLibraryInfo *TLI;
std::string Messages; std::string Messages;
@ -177,7 +177,7 @@ bool Lint::runOnFunction(Function &F) {
Mod = F.getParent(); Mod = F.getParent();
AA = &getAnalysis<AliasAnalysis>(); AA = &getAnalysis<AliasAnalysis>();
DT = &getAnalysis<DominatorTree>(); DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>(); TLI = &getAnalysis<TargetLibraryInfo>();
visit(F); visit(F);
dbgs() << MessagesStr.str(); dbgs() << MessagesStr.str();
@ -506,7 +506,7 @@ void Lint::visitShl(BinaryOperator &I) {
"Undefined result: Shift count out of range", &I); "Undefined result: Shift count out of range", &I);
} }
static bool isZero(Value *V, TargetData *TD) { static bool isZero(Value *V, DataLayout *TD) {
// Assume undef could be zero. // Assume undef could be zero.
if (isa<UndefValue>(V)) return true; if (isa<UndefValue>(V)) return true;


@ -13,7 +13,7 @@
#include "llvm/Analysis/Loads.h" #include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/GlobalAlias.h" #include "llvm/GlobalAlias.h"
#include "llvm/GlobalVariable.h" #include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h" #include "llvm/IntrinsicInst.h"
@ -52,8 +52,8 @@ static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
/// bitcasts to get back to the underlying object being addressed, keeping /// bitcasts to get back to the underlying object being addressed, keeping
/// track of the offset in bytes from the GEPs relative to the result. /// track of the offset in bytes from the GEPs relative to the result.
/// This is closely related to GetUnderlyingObject but is located /// This is closely related to GetUnderlyingObject but is located
/// here to avoid making VMCore depend on TargetData. /// here to avoid making VMCore depend on DataLayout.
static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD, static Value *getUnderlyingObjectWithOffset(Value *V, const DataLayout *TD,
uint64_t &ByteOffset, uint64_t &ByteOffset,
unsigned MaxLookup = 6) { unsigned MaxLookup = 6) {
if (!V->getType()->isPointerTy()) if (!V->getType()->isPointerTy())
@ -85,7 +85,7 @@ static Value *getUnderlyingObjectWithOffset(Value *V, const TargetData *TD,
/// specified pointer, we do a quick local scan of the basic block containing /// specified pointer, we do a quick local scan of the basic block containing
/// ScanFrom, to determine if the address is already accessed. /// ScanFrom, to determine if the address is already accessed.
bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom, bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
unsigned Align, const TargetData *TD) { unsigned Align, const DataLayout *TD) {
uint64_t ByteOffset = 0; uint64_t ByteOffset = 0;
Value *Base = V; Value *Base = V;
if (TD) if (TD)
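Note (sketch, not from the patch): the query itself is unchanged for callers; only the optional layout argument's type differs. A speculation check before hoisting a load might look like the following, with invented names:

#include "llvm/Analysis/Loads.h"
#include "llvm/DataLayout.h"
using namespace llvm;

// Hypothetical helper: can a load from Ptr be emitted at InsertPt safely?
static bool canHoistLoad(Value *Ptr, Instruction *InsertPt, unsigned Align,
                         const DataLayout *TD) {
  // TD may be null; the analysis then answers conservatively.
  return isSafeToLoadUnconditionally(Ptr, InsertPt, Align, TD);
}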


@ -35,7 +35,7 @@
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
STATISTIC(NumAnswered, "Number of dependence queries answered"); STATISTIC(NumAnswered, "Number of dependence queries answered");


@ -25,7 +25,7 @@
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h" #include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/Local.h"
using namespace llvm; using namespace llvm;
@ -190,7 +190,7 @@ const CallInst *llvm::extractMallocCall(const Value *I,
return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0; return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
} }
static Value *computeArraySize(const CallInst *CI, const TargetData *TD, static Value *computeArraySize(const CallInst *CI, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) { bool LookThroughSExt = false) {
if (!CI) if (!CI)
@ -220,7 +220,7 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
/// is a call to malloc whose array size can be determined and the array size /// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL. /// is not constant 1. Otherwise, return NULL.
const CallInst *llvm::isArrayMalloc(const Value *I, const CallInst *llvm::isArrayMalloc(const Value *I,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
const CallInst *CI = extractMallocCall(I, TLI); const CallInst *CI = extractMallocCall(I, TLI);
Value *ArraySize = computeArraySize(CI, TD, TLI); Value *ArraySize = computeArraySize(CI, TD, TLI);
@ -281,7 +281,7 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI,
/// then return that multiple. For non-array mallocs, the multiple is /// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined. /// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD, Value *llvm::getMallocArraySize(CallInst *CI, const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
bool LookThroughSExt) { bool LookThroughSExt) {
assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call"); assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
@ -341,7 +341,7 @@ const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
/// object size in Size if successful, and false otherwise. /// object size in Size if successful, and false otherwise.
/// If RoundToAlign is true, then Size is rounded up to the alignment of allocas, /// If RoundToAlign is true, then Size is rounded up to the alignment of allocas,
/// byval arguments, and global variables. /// byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD, bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const DataLayout *TD,
const TargetLibraryInfo *TLI, bool RoundToAlign) { const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!TD) if (!TD)
return false; return false;
@ -373,7 +373,7 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
return Size; return Size;
} }
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD, ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
LLVMContext &Context, LLVMContext &Context,
bool RoundToAlign) bool RoundToAlign)
@ -559,7 +559,7 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
} }
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD, ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const DataLayout *TD,
const TargetLibraryInfo *TLI, const TargetLibraryInfo *TLI,
LLVMContext &Context) LLVMContext &Context)
: TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) { : TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
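Note (sketch, not from the patch): getObjectSize keeps its shape but, as the hunk above shows, bails out immediately when no DataLayout is supplied, so callers must treat the query as best-effort:

#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm;

// Hypothetical helper: report the statically known size behind Ptr, if any.
static bool knownObjectSize(const Value *Ptr, const DataLayout *TD,
                            const TargetLibraryInfo *TLI, uint64_t &Size) {
  return getObjectSize(Ptr, Size, TD, TLI, /*RoundToAlign=*/false);
}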


@ -30,7 +30,7 @@
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
#include "llvm/Support/PredIteratorCache.h" #include "llvm/Support/PredIteratorCache.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses"); STATISTIC(NumCacheNonLocal, "Number of fully cached non-local responses");
@ -89,7 +89,7 @@ void MemoryDependenceAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
bool MemoryDependenceAnalysis::runOnFunction(Function &) { bool MemoryDependenceAnalysis::runOnFunction(Function &) {
AA = &getAnalysis<AliasAnalysis>(); AA = &getAnalysis<AliasAnalysis>();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
DT = getAnalysisIfAvailable<DominatorTree>(); DT = getAnalysisIfAvailable<DominatorTree>();
if (PredCache == 0) if (PredCache == 0)
PredCache.reset(new PredIteratorCache()); PredCache.reset(new PredIteratorCache());
@ -256,7 +256,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
const Value *&MemLocBase, const Value *&MemLocBase,
int64_t &MemLocOffs, int64_t &MemLocOffs,
const LoadInst *LI, const LoadInst *LI,
const TargetData *TD) { const DataLayout *TD) {
// If we have no target data, we can't do this. // If we have no target data, we can't do this.
if (TD == 0) return false; if (TD == 0) return false;
@ -280,7 +280,7 @@ isLoadLoadClobberIfExtendedToFullWidth(const AliasAnalysis::Location &MemLoc,
unsigned MemoryDependenceAnalysis:: unsigned MemoryDependenceAnalysis::
getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs, getLoadLoadClobberFullWidthSize(const Value *MemLocBase, int64_t MemLocOffs,
unsigned MemLocSize, const LoadInst *LI, unsigned MemLocSize, const LoadInst *LI,
const TargetData &TD) { const DataLayout &TD) {
// We can only extend simple integer loads. // We can only extend simple integer loads.
if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0; if (!isa<IntegerType>(LI->getType()) || !LI->isSimple()) return 0;


@ -15,7 +15,7 @@
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h" #include "llvm/Analysis/Passes.h"
#include "llvm/Pass.h" #include "llvm/Pass.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
namespace { namespace {
@ -36,7 +36,7 @@ namespace {
virtual void initializePass() { virtual void initializePass() {
// Note: NoAA does not call InitializeAliasAnalysis because it's // Note: NoAA does not call InitializeAliasAnalysis because it's
// special and does not support chaining. // special and does not support chaining.
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
} }
virtual AliasResult alias(const Location &LocA, const Location &LocB) { virtual AliasResult alias(const Location &LocA, const Location &LocB) {


@ -73,7 +73,7 @@
#include "llvm/Analysis/LoopInfo.h" #include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h" #include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h" #include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h" #include "llvm/Support/ConstantRange.h"
@ -2582,7 +2582,7 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
} }
const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) { const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
// If we have TargetData, we can bypass creating a target-independent // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt. // constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization. // This is just a compile-time optimization.
if (TD) if (TD)
@ -2608,7 +2608,7 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy, const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
unsigned FieldNo) { unsigned FieldNo) {
// If we have TargetData, we can bypass creating a target-independent // If we have DataLayout, we can bypass creating a target-independent
// constant expression and then folding it back into a ConstantInt. // constant expression and then folding it back into a ConstantInt.
// This is just a compile-time optimization. // This is just a compile-time optimization.
if (TD) if (TD)
@ -2673,7 +2673,7 @@ bool ScalarEvolution::isSCEVable(Type *Ty) const {
uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const { uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!"); assert(isSCEVable(Ty) && "Type is not SCEVable!");
// If we have a TargetData, use it! // If we have a DataLayout, use it!
if (TD) if (TD)
return TD->getTypeSizeInBits(Ty); return TD->getTypeSizeInBits(Ty);
@ -2681,7 +2681,7 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
if (Ty->isIntegerTy()) if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits(); return Ty->getPrimitiveSizeInBits();
// The only other supported type is pointer. Without TargetData, conservatively // The only other supported type is pointer. Without DataLayout, conservatively
// assume pointers are 64-bit. // assume pointers are 64-bit.
assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!"); assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64; return 64;
@ -2701,7 +2701,7 @@ Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!"); assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (TD) return TD->getIntPtrType(getContext()); if (TD) return TD->getIntPtrType(getContext());
// Without TargetData, conservatively assume pointers are 64-bit. // Without DataLayout, conservatively assume pointers are 64-bit.
return Type::getInt64Ty(getContext()); return Type::getInt64Ty(getContext());
} }
@ -4751,7 +4751,7 @@ static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
/// reason, return null. /// reason, return null.
static Constant *EvaluateExpression(Value *V, const Loop *L, static Constant *EvaluateExpression(Value *V, const Loop *L,
DenseMap<Instruction *, Constant *> &Vals, DenseMap<Instruction *, Constant *> &Vals,
const TargetData *TD, const DataLayout *TD,
const TargetLibraryInfo *TLI) { const TargetLibraryInfo *TLI) {
// Convenient constant check, but redundant for recursive calls. // Convenient constant check, but redundant for recursive calls.
if (Constant *C = dyn_cast<Constant>(V)) return C; if (Constant *C = dyn_cast<Constant>(V)) return C;
@ -6590,7 +6590,7 @@ ScalarEvolution::ScalarEvolution()
bool ScalarEvolution::runOnFunction(Function &F) { bool ScalarEvolution::runOnFunction(Function &F) {
this->F = &F; this->F = &F;
LI = &getAnalysis<LoopInfo>(); LI = &getAnalysis<LoopInfo>();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<DataLayout>();
TLI = &getAnalysis<TargetLibraryInfo>(); TLI = &getAnalysis<TargetLibraryInfo>();
DT = &getAnalysis<DominatorTree>(); DT = &getAnalysis<DominatorTree>();
return false; return false;
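Note (sketch, not from the patch): the fallback the comments above describe is: with a layout, ask it; without one, integers report their own width and pointers are conservatively assumed to be 64-bit. Spelled out on its own:

#include "llvm/DataLayout.h"
#include "llvm/Type.h"
using namespace llvm;

// Illustration of the size-query pattern above (not the SCEV code itself).
static uint64_t typeSizeInBits(Type *Ty, const DataLayout *TD) {
  if (TD)
    return TD->getTypeSizeInBits(Ty);    // exact, layout-aware answer
  if (Ty->isIntegerTy())
    return Ty->getPrimitiveSizeInBits(); // integers carry their own width
  return 64;                             // pointers: conservative default
}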


@ -18,7 +18,7 @@
#include "llvm/IntrinsicInst.h" #include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h" #include "llvm/LLVMContext.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/STLExtras.h" #include "llvm/ADT/STLExtras.h"
@ -212,7 +212,7 @@ static bool FactorOutConstant(const SCEV *&S,
const SCEV *&Remainder, const SCEV *&Remainder,
const SCEV *Factor, const SCEV *Factor,
ScalarEvolution &SE, ScalarEvolution &SE,
const TargetData *TD) { const DataLayout *TD) {
// Everything is divisible by one. // Everything is divisible by one.
if (Factor->isOne()) if (Factor->isOne())
return true; return true;
@ -253,7 +253,7 @@ static bool FactorOutConstant(const SCEV *&S,
// of the given factor. // of the given factor.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) { if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
if (TD) { if (TD) {
// With TargetData, the size is known. Check if there is a constant // With DataLayout, the size is known. Check if there is a constant
// operand which is a multiple of the given factor. If so, we can // operand which is a multiple of the given factor. If so, we can
// factor it. // factor it.
const SCEVConstant *FC = cast<SCEVConstant>(Factor); const SCEVConstant *FC = cast<SCEVConstant>(Factor);
@ -267,7 +267,7 @@ static bool FactorOutConstant(const SCEV *&S,
return true; return true;
} }
} else { } else {
// Without TargetData, check if Factor can be factored out of any of the // Without DataLayout, check if Factor can be factored out of any of the
// Mul's operands. If so, we can just remove it. // Mul's operands. If so, we can just remove it.
for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) { for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
const SCEV *SOp = M->getOperand(i); const SCEV *SOp = M->getOperand(i);
@ -458,7 +458,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// An empty struct has no fields. // An empty struct has no fields.
if (STy->getNumElements() == 0) break; if (STy->getNumElements() == 0) break;
if (SE.TD) { if (SE.TD) {
// With TargetData, field offsets are known. See if a constant offset // With DataLayout, field offsets are known. See if a constant offset
// falls within any of the struct fields. // falls within any of the struct fields.
if (Ops.empty()) break; if (Ops.empty()) break;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0])) if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[0]))
@ -477,7 +477,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
} }
} }
} else { } else {
// Without TargetData, just check for an offsetof expression of the // Without DataLayout, just check for an offsetof expression of the
// appropriate struct type. // appropriate struct type.
for (unsigned i = 0, e = Ops.size(); i != e; ++i) for (unsigned i = 0, e = Ops.size(); i != e; ++i)
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) { if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {


@ -22,7 +22,7 @@
#include "llvm/LLVMContext.h" #include "llvm/LLVMContext.h"
#include "llvm/Metadata.h" #include "llvm/Metadata.h"
#include "llvm/Operator.h" #include "llvm/Operator.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Support/ConstantRange.h" #include "llvm/Support/ConstantRange.h"
#include "llvm/Support/GetElementPtrTypeIterator.h" #include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h" #include "llvm/Support/MathExtras.h"
@ -36,7 +36,7 @@ const unsigned MaxDepth = 6;
/// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if /// getBitWidth - Returns the bitwidth of the given scalar or pointer type (if
/// unknown returns 0). For vector types, returns the element type's bitwidth. /// unknown returns 0). For vector types, returns the element type's bitwidth.
static unsigned getBitWidth(Type *Ty, const TargetData *TD) { static unsigned getBitWidth(Type *Ty, const DataLayout *TD) {
if (unsigned BitWidth = Ty->getScalarSizeInBits()) if (unsigned BitWidth = Ty->getScalarSizeInBits())
return BitWidth; return BitWidth;
assert(isa<PointerType>(Ty) && "Expected a pointer type!"); assert(isa<PointerType>(Ty) && "Expected a pointer type!");
@ -46,7 +46,7 @@ static unsigned getBitWidth(Type *Ty, const TargetData *TD) {
static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW, static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne, APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2, APInt &KnownZero2, APInt &KnownOne2,
const TargetData *TD, unsigned Depth) { const DataLayout *TD, unsigned Depth) {
if (!Add) { if (!Add) {
if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) { if (ConstantInt *CLHS = dyn_cast<ConstantInt>(Op0)) {
// We know that the top bits of C-X are clear if X contains less bits // We know that the top bits of C-X are clear if X contains less bits
@ -132,7 +132,7 @@ static void ComputeMaskedBitsAddSub(bool Add, Value *Op0, Value *Op1, bool NSW,
static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW, static void ComputeMaskedBitsMul(Value *Op0, Value *Op1, bool NSW,
APInt &KnownZero, APInt &KnownOne, APInt &KnownZero, APInt &KnownOne,
APInt &KnownZero2, APInt &KnownOne2, APInt &KnownZero2, APInt &KnownOne2,
const TargetData *TD, unsigned Depth) { const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = KnownZero.getBitWidth(); unsigned BitWidth = KnownZero.getBitWidth();
ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1); ComputeMaskedBits(Op1, KnownZero, KnownOne, TD, Depth+1);
ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1); ComputeMaskedBits(Op0, KnownZero2, KnownOne2, TD, Depth+1);
@ -226,7 +226,7 @@ void llvm::computeMaskedBitsLoad(const MDNode &Ranges, APInt &KnownZero) {
/// same width as the vector element, and the bit is set only if it is true /// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector. /// for all of the elements in the vector.
void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne, void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
const TargetData *TD, unsigned Depth) { const DataLayout *TD, unsigned Depth) {
assert(V && "No Value?"); assert(V && "No Value?");
assert(Depth <= MaxDepth && "Limit Search Depth"); assert(Depth <= MaxDepth && "Limit Search Depth");
unsigned BitWidth = KnownZero.getBitWidth(); unsigned BitWidth = KnownZero.getBitWidth();
@ -787,7 +787,7 @@ void llvm::ComputeMaskedBits(Value *V, APInt &KnownZero, APInt &KnownOne,
/// ComputeSignBit - Determine whether the sign bit is known to be zero or /// ComputeSignBit - Determine whether the sign bit is known to be zero or
/// one. Convenience wrapper around ComputeMaskedBits. /// one. Convenience wrapper around ComputeMaskedBits.
void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne, void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
const TargetData *TD, unsigned Depth) { const DataLayout *TD, unsigned Depth) {
unsigned BitWidth = getBitWidth(V->getType(), TD); unsigned BitWidth = getBitWidth(V->getType(), TD);
if (!BitWidth) { if (!BitWidth) {
KnownZero = false; KnownZero = false;
@ -805,7 +805,7 @@ void llvm::ComputeSignBit(Value *V, bool &KnownZero, bool &KnownOne,
/// bit set when defined. For vectors return true if every element is known to /// bit set when defined. For vectors return true if every element is known to
/// be a power of two when defined. Supports values with integer or pointer /// be a power of two when defined. Supports values with integer or pointer
/// types and vectors of integers. /// types and vectors of integers.
bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero, bool llvm::isPowerOfTwo(Value *V, const DataLayout *TD, bool OrZero,
unsigned Depth) { unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) { if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue()) if (C->isNullValue())
@ -868,7 +868,7 @@ bool llvm::isPowerOfTwo(Value *V, const TargetData *TD, bool OrZero,
/// when defined. For vectors return true if every element is known to be /// when defined. For vectors return true if every element is known to be
/// non-zero when defined. Supports values with integer or pointer type and /// non-zero when defined. Supports values with integer or pointer type and
/// vectors of integers. /// vectors of integers.
bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) { bool llvm::isKnownNonZero(Value *V, const DataLayout *TD, unsigned Depth) {
if (Constant *C = dyn_cast<Constant>(V)) { if (Constant *C = dyn_cast<Constant>(V)) {
if (C->isNullValue()) if (C->isNullValue())
return false; return false;
@ -995,7 +995,7 @@ bool llvm::isKnownNonZero(Value *V, const TargetData *TD, unsigned Depth) {
/// same width as the vector element, and the bit is set only if it is true /// same width as the vector element, and the bit is set only if it is true
/// for all of the elements in the vector. /// for all of the elements in the vector.
bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask, bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
const TargetData *TD, unsigned Depth) { const DataLayout *TD, unsigned Depth) {
APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0); APInt KnownZero(Mask.getBitWidth(), 0), KnownOne(Mask.getBitWidth(), 0);
ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth); ComputeMaskedBits(V, KnownZero, KnownOne, TD, Depth);
assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?"); assert((KnownZero & KnownOne) == 0 && "Bits known to be one AND zero?");
@ -1012,10 +1012,10 @@ bool llvm::MaskedValueIsZero(Value *V, const APInt &Mask,
/// ///
/// 'Op' must have a scalar integer type. /// 'Op' must have a scalar integer type.
/// ///
unsigned llvm::ComputeNumSignBits(Value *V, const TargetData *TD, unsigned llvm::ComputeNumSignBits(Value *V, const DataLayout *TD,
unsigned Depth) { unsigned Depth) {
assert((TD || V->getType()->isIntOrIntVectorTy()) && assert((TD || V->getType()->isIntOrIntVectorTy()) &&
"ComputeNumSignBits requires a TargetData object to operate " "ComputeNumSignBits requires a DataLayout object to operate "
"on non-integer values!"); "on non-integer values!");
Type *Ty = V->getType(); Type *Ty = V->getType();
unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) : unsigned TyBits = TD ? TD->getTypeSizeInBits(V->getType()->getScalarType()) :
@ -1591,7 +1591,7 @@ Value *llvm::FindInsertedValue(Value *V, ArrayRef<unsigned> idx_range,
/// it can be expressed as a base pointer plus a constant offset. Return the /// it can be expressed as a base pointer plus a constant offset. Return the
/// base and offset to the caller. /// base and offset to the caller.
Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset, Value *llvm::GetPointerBaseWithConstantOffset(Value *Ptr, int64_t &Offset,
const TargetData &TD) { const DataLayout &TD) {
Operator *PtrOp = dyn_cast<Operator>(Ptr); Operator *PtrOp = dyn_cast<Operator>(Ptr);
if (PtrOp == 0 || Ptr->getType()->isVectorTy()) if (PtrOp == 0 || Ptr->getType()->isVectorTy())
return Ptr; return Ptr;
@ -1777,7 +1777,7 @@ uint64_t llvm::GetStringLength(Value *V) {
} }
Value * Value *
llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) { llvm::GetUnderlyingObject(Value *V, const DataLayout *TD, unsigned MaxLookup) {
if (!V->getType()->isPointerTy()) if (!V->getType()->isPointerTy())
return V; return V;
for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) { for (unsigned Count = 0; MaxLookup == 0 || Count < MaxLookup; ++Count) {
@ -1808,7 +1808,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) {
void void
llvm::GetUnderlyingObjects(Value *V, llvm::GetUnderlyingObjects(Value *V,
SmallVectorImpl<Value *> &Objects, SmallVectorImpl<Value *> &Objects,
const TargetData *TD, const DataLayout *TD,
unsigned MaxLookup) { unsigned MaxLookup) {
SmallPtrSet<Value *, 4> Visited; SmallPtrSet<Value *, 4> Visited;
SmallVector<Value *, 4> Worklist; SmallVector<Value *, 4> Worklist;
@ -1853,7 +1853,7 @@ bool llvm::onlyUsedByLifetimeMarkers(const Value *V) {
} }
bool llvm::isSafeToSpeculativelyExecute(const Value *V, bool llvm::isSafeToSpeculativelyExecute(const Value *V,
const TargetData *TD) { const DataLayout *TD) {
const Operator *Inst = dyn_cast<Operator>(V); const Operator *Inst = dyn_cast<Operator>(V);
if (!Inst) if (!Inst)
return false; return false;
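Note (sketch, not from the patch): known-bits clients pass the layout the same way; the caveat asserted above is that ComputeNumSignBits needs a DataLayout when handed pointer-typed values. A small caller, with an invented name:

#include "llvm/Analysis/ValueTracking.h"
#include "llvm/DataLayout.h"
#include "llvm/ADT/APInt.h"
using namespace llvm;

// Hypothetical helper: is the low bit of V known to be zero?
// BitWidth must match the scalar bit width of V's type.
static bool lowBitKnownZero(Value *V, unsigned BitWidth,
                            const DataLayout *TD) {
  APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
  ComputeMaskedBits(V, KnownZero, KnownOne, TD, 0);
  return KnownZero[0];
}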


@ -21,7 +21,7 @@
#include "llvm/Module.h" #include "llvm/Module.h"
#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
@ -79,7 +79,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
uint64_t StartingOffset) { uint64_t StartingOffset) {
// Given a struct type, recursively traverse the elements. // Given a struct type, recursively traverse the elements.
if (StructType *STy = dyn_cast<StructType>(Ty)) { if (StructType *STy = dyn_cast<StructType>(Ty)) {
const StructLayout *SL = TLI.getTargetData()->getStructLayout(STy); const StructLayout *SL = TLI.getDataLayout()->getStructLayout(STy);
for (StructType::element_iterator EB = STy->element_begin(), for (StructType::element_iterator EB = STy->element_begin(),
EI = EB, EI = EB,
EE = STy->element_end(); EE = STy->element_end();
@ -91,7 +91,7 @@ void llvm::ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
// Given an array type, recursively traverse the elements. // Given an array type, recursively traverse the elements.
if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) { if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
Type *EltTy = ATy->getElementType(); Type *EltTy = ATy->getElementType();
uint64_t EltSize = TLI.getTargetData()->getTypeAllocSize(EltTy); uint64_t EltSize = TLI.getDataLayout()->getTypeAllocSize(EltTy);
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets, ComputeValueVTs(TLI, EltTy, ValueVTs, Offsets,
StartingOffset + i * EltSize); StartingOffset + i * EltSize);
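Note (sketch, not from the patch): two DataLayout queries drive this traversal, getStructLayout for field offsets and getTypeAllocSize for array strides. The same pair computes a concrete byte offset like so:

#include "llvm/DataLayout.h"
#include "llvm/DerivedTypes.h"
using namespace llvm;

// Illustration: byte offset of field Field in element ArrayIdx of STy[].
static uint64_t elementByteOffset(const DataLayout &TD, StructType *STy,
                                  unsigned Field, uint64_t ArrayIdx) {
  const StructLayout *SL = TD.getStructLayout(STy);
  return ArrayIdx * TD.getTypeAllocSize(STy) + SL->getElementOffset(Field);
}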


@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"


@ -33,7 +33,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
@ -67,7 +67,7 @@ static gcp_map_type &getGCMap(void *&P) {
/// getGVAlignmentLog2 - Return the alignment to use for the specified global /// getGVAlignmentLog2 - Return the alignment to use for the specified global
/// value in log2 form. This rounds up to the preferred alignment if possible /// value in log2 form. This rounds up to the preferred alignment if possible
/// and legal. /// and legal.
static unsigned getGVAlignmentLog2(const GlobalValue *GV, const TargetData &TD, static unsigned getGVAlignmentLog2(const GlobalValue *GV, const DataLayout &TD,
unsigned InBits = 0) { unsigned InBits = 0) {
unsigned NumBits = 0; unsigned NumBits = 0;
if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV)) if (const GlobalVariable *GVar = dyn_cast<GlobalVariable>(GV))
@ -131,9 +131,9 @@ const TargetLoweringObjectFile &AsmPrinter::getObjFileLowering() const {
} }
/// getTargetData - Return information about data layout. /// getDataLayout - Return information about data layout.
const TargetData &AsmPrinter::getTargetData() const { const DataLayout &AsmPrinter::getDataLayout() const {
return *TM.getTargetData(); return *TM.getDataLayout();
} }
/// getCurrentSection() - Return the current section we are emitting to. /// getCurrentSection() - Return the current section we are emitting to.
@ -160,7 +160,7 @@ bool AsmPrinter::doInitialization(Module &M) {
const_cast<TargetLoweringObjectFile&>(getObjFileLowering()) const_cast<TargetLoweringObjectFile&>(getObjFileLowering())
.Initialize(OutContext, TM); .Initialize(OutContext, TM);
Mang = new Mangler(OutContext, *TM.getTargetData()); Mang = new Mangler(OutContext, *TM.getDataLayout());
// Allow the target to emit any magic that it wants at the start of the file. // Allow the target to emit any magic that it wants at the start of the file.
EmitStartOfAsmFile(M); EmitStartOfAsmFile(M);
@ -280,7 +280,7 @@ void AsmPrinter::EmitGlobalVariable(const GlobalVariable *GV) {
SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM); SectionKind GVKind = TargetLoweringObjectFile::getKindForGlobal(GV, TM);
const TargetData *TD = TM.getTargetData(); const DataLayout *TD = TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType()); uint64_t Size = TD->getTypeAllocSize(GV->getType()->getElementType());
// If the alignment is specified, we *must* obey it. Overaligning a global // If the alignment is specified, we *must* obey it. Overaligning a global
@ -991,7 +991,7 @@ void AsmPrinter::EmitConstantPool() {
Kind = SectionKind::getReadOnlyWithRelLocal(); Kind = SectionKind::getReadOnlyWithRelLocal();
break; break;
case 0: case 0:
switch (TM.getTargetData()->getTypeAllocSize(CPE.getType())) { switch (TM.getDataLayout()->getTypeAllocSize(CPE.getType())) {
case 4: Kind = SectionKind::getMergeableConst4(); break; case 4: Kind = SectionKind::getMergeableConst4(); break;
case 8: Kind = SectionKind::getMergeableConst8(); break; case 8: Kind = SectionKind::getMergeableConst8(); break;
case 16: Kind = SectionKind::getMergeableConst16();break; case 16: Kind = SectionKind::getMergeableConst16();break;
@ -1037,7 +1037,7 @@ void AsmPrinter::EmitConstantPool() {
OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/); OutStreamer.EmitFill(NewOffset - Offset, 0/*fillval*/, 0/*addrspace*/);
Type *Ty = CPE.getType(); Type *Ty = CPE.getType();
Offset = NewOffset + TM.getTargetData()->getTypeAllocSize(Ty); Offset = NewOffset + TM.getDataLayout()->getTypeAllocSize(Ty);
OutStreamer.EmitLabel(GetCPISymbol(CPI)); OutStreamer.EmitLabel(GetCPISymbol(CPI));
if (CPE.isMachineConstantPoolEntry()) if (CPE.isMachineConstantPoolEntry())
@ -1080,7 +1080,7 @@ void AsmPrinter::EmitJumpTableInfo() {
JTInDiffSection = true; JTInDiffSection = true;
} }
EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getTargetData()))); EmitAlignment(Log2_32(MJTI->getEntryAlignment(*TM.getDataLayout())));
// Jump tables in code sections are marked with a data_region directive // Jump tables in code sections are marked with a data_region directive
// where that's supported. // where that's supported.
@ -1196,7 +1196,7 @@ void AsmPrinter::EmitJumpTableEntry(const MachineJumpTableInfo *MJTI,
assert(Value && "Unknown entry kind!"); assert(Value && "Unknown entry kind!");
unsigned EntrySize = MJTI->getEntrySize(*TM.getTargetData()); unsigned EntrySize = MJTI->getEntrySize(*TM.getDataLayout());
OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0); OutStreamer.EmitValue(Value, EntrySize, /*addrspace*/0);
} }
@ -1298,7 +1298,7 @@ void AsmPrinter::EmitXXStructorList(const Constant *List, bool isCtor) {
} }
// Emit the function pointers in the target-specific order // Emit the function pointers in the target-specific order
const TargetData *TD = TM.getTargetData(); const DataLayout *TD = TM.getDataLayout();
unsigned Align = Log2_32(TD->getPointerPrefAlignment()); unsigned Align = Log2_32(TD->getPointerPrefAlignment());
std::stable_sort(Structors.begin(), Structors.end(), priority_order); std::stable_sort(Structors.begin(), Structors.end(), priority_order);
for (unsigned i = 0, e = Structors.size(); i != e; ++i) { for (unsigned i = 0, e = Structors.size(); i != e; ++i) {
@ -1414,7 +1414,7 @@ void AsmPrinter::EmitLabelPlusOffset(const MCSymbol *Label, uint64_t Offset,
// if required for correctness. // if required for correctness.
// //
void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const { void AsmPrinter::EmitAlignment(unsigned NumBits, const GlobalValue *GV) const {
if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getTargetData(), NumBits); if (GV) NumBits = getGVAlignmentLog2(GV, *TM.getDataLayout(), NumBits);
if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment. if (NumBits == 0) return; // 1-byte aligned: no need to emit alignment.
@ -1453,10 +1453,10 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
switch (CE->getOpcode()) { switch (CE->getOpcode()) {
default: default:
// If the code isn't optimized, there may be outstanding folding // If the code isn't optimized, there may be outstanding folding
// opportunities. Attempt to fold the expression using TargetData as a // opportunities. Attempt to fold the expression using DataLayout as a
// last resort before giving up. // last resort before giving up.
if (Constant *C = if (Constant *C =
ConstantFoldConstantExpression(CE, AP.TM.getTargetData())) ConstantFoldConstantExpression(CE, AP.TM.getDataLayout()))
if (C != CE) if (C != CE)
return lowerConstant(C, AP); return lowerConstant(C, AP);
@ -1470,7 +1470,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
report_fatal_error(OS.str()); report_fatal_error(OS.str());
} }
case Instruction::GetElementPtr: { case Instruction::GetElementPtr: {
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
// Generate a symbolic expression for the byte address // Generate a symbolic expression for the byte address
const Constant *PtrVal = CE->getOperand(0); const Constant *PtrVal = CE->getOperand(0);
SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end()); SmallVector<Value*, 8> IdxVec(CE->op_begin()+1, CE->op_end());
@ -1499,7 +1499,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
return lowerConstant(CE->getOperand(0), AP); return lowerConstant(CE->getOperand(0), AP);
case Instruction::IntToPtr: { case Instruction::IntToPtr: {
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
// Handle casts to pointers by changing them into casts to the appropriate // Handle casts to pointers by changing them into casts to the appropriate
// integer type. This promotes constant folding and simplifies this code. // integer type. This promotes constant folding and simplifies this code.
Constant *Op = CE->getOperand(0); Constant *Op = CE->getOperand(0);
@ -1509,7 +1509,7 @@ static const MCExpr *lowerConstant(const Constant *CV, AsmPrinter &AP) {
} }
case Instruction::PtrToInt: { case Instruction::PtrToInt: {
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
// Support only foldable casts to/from pointers that can be eliminated by // Support only foldable casts to/from pointers that can be eliminated by
// changing the pointer to the appropriately sized integer type. // changing the pointer to the appropriately sized integer type.
Constant *Op = CE->getOperand(0); Constant *Op = CE->getOperand(0);
@ -1583,7 +1583,7 @@ static int isRepeatedByteSequence(const Value *V, TargetMachine &TM) {
if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) { if (const ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
if (CI->getBitWidth() > 64) return -1; if (CI->getBitWidth() > 64) return -1;
uint64_t Size = TM.getTargetData()->getTypeAllocSize(V->getType()); uint64_t Size = TM.getDataLayout()->getTypeAllocSize(V->getType());
uint64_t Value = CI->getZExtValue(); uint64_t Value = CI->getZExtValue();
// Make sure the constant is at least 8 bits long and has a power // Make sure the constant is at least 8 bits long and has a power
@ -1627,7 +1627,7 @@ static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
// See if we can aggregate this into a .fill, if so, emit it as such. // See if we can aggregate this into a .fill, if so, emit it as such.
int Value = isRepeatedByteSequence(CDS, AP.TM); int Value = isRepeatedByteSequence(CDS, AP.TM);
if (Value != -1) { if (Value != -1) {
uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CDS->getType()); uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CDS->getType());
// Don't emit a 1-byte object as a .fill. // Don't emit a 1-byte object as a .fill.
if (Bytes > 1) if (Bytes > 1)
return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace); return AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
@ -1677,7 +1677,7 @@ static void emitGlobalConstantDataSequential(const ConstantDataSequential *CDS,
} }
} }
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CDS->getType()); unsigned Size = TD.getTypeAllocSize(CDS->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) * unsigned EmittedSize = TD.getTypeAllocSize(CDS->getType()->getElementType()) *
CDS->getNumElements(); CDS->getNumElements();
@ -1693,7 +1693,7 @@ static void emitGlobalConstantArray(const ConstantArray *CA, unsigned AddrSpace,
int Value = isRepeatedByteSequence(CA, AP.TM); int Value = isRepeatedByteSequence(CA, AP.TM);
if (Value != -1) { if (Value != -1) {
uint64_t Bytes = AP.TM.getTargetData()->getTypeAllocSize(CA->getType()); uint64_t Bytes = AP.TM.getDataLayout()->getTypeAllocSize(CA->getType());
AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace); AP.OutStreamer.EmitFill(Bytes, Value, AddrSpace);
} }
else { else {
@ -1707,7 +1707,7 @@ static void emitGlobalConstantVector(const ConstantVector *CV,
for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i) for (unsigned i = 0, e = CV->getType()->getNumElements(); i != e; ++i)
emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP); emitGlobalConstantImpl(CV->getOperand(i), AddrSpace, AP);
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
unsigned Size = TD.getTypeAllocSize(CV->getType()); unsigned Size = TD.getTypeAllocSize(CV->getType());
unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) * unsigned EmittedSize = TD.getTypeAllocSize(CV->getType()->getElementType()) *
CV->getType()->getNumElements(); CV->getType()->getNumElements();
@ -1718,7 +1718,7 @@ static void emitGlobalConstantVector(const ConstantVector *CV,
static void emitGlobalConstantStruct(const ConstantStruct *CS, static void emitGlobalConstantStruct(const ConstantStruct *CS,
unsigned AddrSpace, AsmPrinter &AP) { unsigned AddrSpace, AsmPrinter &AP) {
// Print the fields in successive locations. Pad to align if needed! // Print the fields in successive locations. Pad to align if needed!
const TargetData *TD = AP.TM.getTargetData(); const DataLayout *TD = AP.TM.getDataLayout();
unsigned Size = TD->getTypeAllocSize(CS->getType()); unsigned Size = TD->getTypeAllocSize(CS->getType());
const StructLayout *Layout = TD->getStructLayout(CS->getType()); const StructLayout *Layout = TD->getStructLayout(CS->getType());
uint64_t SizeSoFar = 0; uint64_t SizeSoFar = 0;
@ -1798,7 +1798,7 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
<< DoubleVal.convertToDouble() << '\n'; << DoubleVal.convertToDouble() << '\n';
} }
if (AP.TM.getTargetData()->isBigEndian()) { if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace); AP.OutStreamer.EmitIntValue(p[1], 2, AddrSpace);
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace); AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
} else { } else {
@ -1807,7 +1807,7 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
} }
// Emit the tail padding for the long double. // Emit the tail padding for the long double.
const TargetData &TD = *AP.TM.getTargetData(); const DataLayout &TD = *AP.TM.getDataLayout();
AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) - AP.OutStreamer.EmitZeros(TD.getTypeAllocSize(CFP->getType()) -
TD.getTypeStoreSize(CFP->getType()), AddrSpace); TD.getTypeStoreSize(CFP->getType()), AddrSpace);
return; return;
@ -1819,7 +1819,7 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
// API needed to prevent premature destruction. // API needed to prevent premature destruction.
APInt API = CFP->getValueAPF().bitcastToAPInt(); APInt API = CFP->getValueAPF().bitcastToAPInt();
const uint64_t *p = API.getRawData(); const uint64_t *p = API.getRawData();
if (AP.TM.getTargetData()->isBigEndian()) { if (AP.TM.getDataLayout()->isBigEndian()) {
AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace); AP.OutStreamer.EmitIntValue(p[0], 8, AddrSpace);
AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace); AP.OutStreamer.EmitIntValue(p[1], 8, AddrSpace);
} else { } else {
@ -1830,7 +1830,7 @@ static void emitGlobalConstantFP(const ConstantFP *CFP, unsigned AddrSpace,
static void emitGlobalConstantLargeInt(const ConstantInt *CI, static void emitGlobalConstantLargeInt(const ConstantInt *CI,
unsigned AddrSpace, AsmPrinter &AP) { unsigned AddrSpace, AsmPrinter &AP) {
const TargetData *TD = AP.TM.getTargetData(); const DataLayout *TD = AP.TM.getDataLayout();
unsigned BitWidth = CI->getBitWidth(); unsigned BitWidth = CI->getBitWidth();
assert((BitWidth & 63) == 0 && "only support multiples of 64-bits"); assert((BitWidth & 63) == 0 && "only support multiples of 64-bits");
@ -1846,7 +1846,7 @@ static void emitGlobalConstantLargeInt(const ConstantInt *CI,
static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace, static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
AsmPrinter &AP) { AsmPrinter &AP) {
const TargetData *TD = AP.TM.getTargetData(); const DataLayout *TD = AP.TM.getDataLayout();
uint64_t Size = TD->getTypeAllocSize(CV->getType()); uint64_t Size = TD->getTypeAllocSize(CV->getType());
if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV)) if (isa<ConstantAggregateZero>(CV) || isa<UndefValue>(CV))
return AP.OutStreamer.EmitZeros(Size, AddrSpace); return AP.OutStreamer.EmitZeros(Size, AddrSpace);
@ -1911,7 +1911,7 @@ static void emitGlobalConstantImpl(const Constant *CV, unsigned AddrSpace,
/// EmitGlobalConstant - Print a general LLVM constant to the .s file. /// EmitGlobalConstant - Print a general LLVM constant to the .s file.
void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) { void AsmPrinter::EmitGlobalConstant(const Constant *CV, unsigned AddrSpace) {
uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType()); uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
if (Size) if (Size)
emitGlobalConstantImpl(CV, AddrSpace, *this); emitGlobalConstantImpl(CV, AddrSpace, *this);
else if (MAI->hasSubsectionsViaSymbols()) { else if (MAI->hasSubsectionsViaSymbols()) {
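Not part of the diff: a minimal sketch of the DataLayout queries the global-constant emitters above rely on after the rename. The helper name and the explicit TargetMachine parameter are assumptions for illustration; inside the AsmPrinter the machine comes from AP.TM.

  #include "llvm/Constant.h"
  #include "llvm/DataLayout.h"
  #include "llvm/Target/TargetMachine.h"

  // Zero padding emitted after a constant: allocation size minus the bytes
  // actually stored, both taken from the renamed DataLayout interface.
  static uint64_t tailPaddingBytes(const llvm::TargetMachine &TM,
                                   const llvm::Constant *CV) {
    const llvm::DataLayout *TD = TM.getDataLayout(); // was getTargetData()
    llvm::Type *Ty = CV->getType();
    return TD->getTypeAllocSize(Ty) - TD->getTypeStoreSize(Ty);
  }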

@ -18,7 +18,7 @@
#include "llvm/MC/MCSection.h" #include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
@ -112,7 +112,7 @@ unsigned AsmPrinter::GetSizeOfEncodedValue(unsigned Encoding) const {
switch (Encoding & 0x07) { switch (Encoding & 0x07) {
default: llvm_unreachable("Invalid encoded value."); default: llvm_unreachable("Invalid encoded value.");
case dwarf::DW_EH_PE_absptr: return TM.getTargetData()->getPointerSize(); case dwarf::DW_EH_PE_absptr: return TM.getDataLayout()->getPointerSize();
case dwarf::DW_EH_PE_udata2: return 2; case dwarf::DW_EH_PE_udata2: return 2;
case dwarf::DW_EH_PE_udata4: return 4; case dwarf::DW_EH_PE_udata4: return 4;
case dwarf::DW_EH_PE_udata8: return 8; case dwarf::DW_EH_PE_udata8: return 8;

@ -17,7 +17,7 @@
#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Support/Allocator.h" #include "llvm/Support/Allocator.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
@ -200,7 +200,7 @@ void DIEInteger::EmitValue(AsmPrinter *Asm, unsigned Form) const {
case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return; case dwarf::DW_FORM_udata: Asm->EmitULEB128(Integer); return;
case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return; case dwarf::DW_FORM_sdata: Asm->EmitSLEB128(Integer); return;
case dwarf::DW_FORM_addr: case dwarf::DW_FORM_addr:
Size = Asm->getTargetData().getPointerSize(); break; Size = Asm->getDataLayout().getPointerSize(); break;
default: llvm_unreachable("DIE Value form not supported yet"); default: llvm_unreachable("DIE Value form not supported yet");
} }
Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/); Asm->OutStreamer.EmitIntValue(Integer, Size, 0/*addrspace*/);
@ -222,7 +222,7 @@ unsigned DIEInteger::SizeOf(AsmPrinter *AP, unsigned Form) const {
case dwarf::DW_FORM_data8: return sizeof(int64_t); case dwarf::DW_FORM_data8: return sizeof(int64_t);
case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer); case dwarf::DW_FORM_udata: return MCAsmInfo::getULEB128Size(Integer);
case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer); case dwarf::DW_FORM_sdata: return MCAsmInfo::getSLEB128Size(Integer);
case dwarf::DW_FORM_addr: return AP->getTargetData().getPointerSize(); case dwarf::DW_FORM_addr: return AP->getDataLayout().getPointerSize();
default: llvm_unreachable("DIE Value form not supported yet"); default: llvm_unreachable("DIE Value form not supported yet");
} }
} }
@ -249,7 +249,7 @@ void DIELabel::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const { unsigned DIELabel::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4; if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getTargetData().getPointerSize(); return AP->getDataLayout().getPointerSize();
} }
#ifndef NDEBUG #ifndef NDEBUG
@ -273,7 +273,7 @@ void DIEDelta::EmitValue(AsmPrinter *AP, unsigned Form) const {
unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const { unsigned DIEDelta::SizeOf(AsmPrinter *AP, unsigned Form) const {
if (Form == dwarf::DW_FORM_data4) return 4; if (Form == dwarf::DW_FORM_data4) return 4;
if (Form == dwarf::DW_FORM_strp) return 4; if (Form == dwarf::DW_FORM_strp) return 4;
return AP->getTargetData().getPointerSize(); return AP->getDataLayout().getPointerSize();
} }
#ifndef NDEBUG #ifndef NDEBUG

@ -25,7 +25,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"

@ -22,7 +22,7 @@
#include "llvm/Instructions.h" #include "llvm/Instructions.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
@ -510,7 +510,7 @@ bool CompileUnit::addConstantFPValue(DIE *Die, const MachineOperand &MO) {
const char *FltPtr = (const char*)FltVal.getRawData(); const char *FltPtr = (const char*)FltVal.getRawData();
int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte. int NumBytes = FltVal.getBitWidth() / 8; // 8 bits per byte.
bool LittleEndian = Asm->getTargetData().isLittleEndian(); bool LittleEndian = Asm->getDataLayout().isLittleEndian();
int Incr = (LittleEndian ? 1 : -1); int Incr = (LittleEndian ? 1 : -1);
int Start = (LittleEndian ? 0 : NumBytes - 1); int Start = (LittleEndian ? 0 : NumBytes - 1);
int Stop = (LittleEndian ? NumBytes : -1); int Stop = (LittleEndian ? NumBytes : -1);
@ -552,7 +552,7 @@ bool CompileUnit::addConstantValue(DIE *Die, const ConstantInt *CI,
const uint64_t *Ptr64 = Val.getRawData(); const uint64_t *Ptr64 = Val.getRawData();
int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte. int NumBytes = Val.getBitWidth() / 8; // 8 bits per byte.
bool LittleEndian = Asm->getTargetData().isLittleEndian(); bool LittleEndian = Asm->getDataLayout().isLittleEndian();
// Output the constant to DWARF one byte at a time. // Output the constant to DWARF one byte at a time.
for (int i = 0; i < NumBytes; i++) { for (int i = 0; i < NumBytes; i++) {
@ -1227,7 +1227,7 @@ void CompileUnit::createGlobalVariableDIE(const MDNode *N) {
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu); addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_constu);
SmallVector<Value*, 3> Idx(CE->op_begin()+1, CE->op_end()); SmallVector<Value*, 3> Idx(CE->op_begin()+1, CE->op_end());
addUInt(Block, 0, dwarf::DW_FORM_udata, addUInt(Block, 0, dwarf::DW_FORM_udata,
Asm->getTargetData().getIndexedOffset(Ptr->getType(), Idx)); Asm->getDataLayout().getIndexedOffset(Ptr->getType(), Idx));
addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus); addUInt(Block, 0, dwarf::DW_FORM_data1, dwarf::DW_OP_plus);
addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block); addBlock(VariableDIE, dwarf::DW_AT_location, 0, Block);
} }
@ -1459,7 +1459,7 @@ DIE *CompileUnit::createMemberDIE(DIDerivedType DT) {
Offset -= FieldOffset; Offset -= FieldOffset;
// Maybe we need to work from the other end. // Maybe we need to work from the other end.
if (Asm->getTargetData().isLittleEndian()) if (Asm->getDataLayout().isLittleEndian())
Offset = FieldSize - (Offset + Size); Offset = FieldSize - (Offset + Size);
addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset); addUInt(MemberDie, dwarf::DW_AT_bit_offset, 0, Offset);
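Not part of the diff: a sketch of the byte-order-dependent walk used by addConstantValue/addConstantFPValue above, with the DataLayout passed in explicitly. The helper name and the SmallVector output are assumptions for illustration.

  #include "llvm/ADT/APInt.h"
  #include "llvm/ADT/SmallVector.h"
  #include "llvm/DataLayout.h"

  // Walk the raw APInt bytes forwards on little-endian targets and backwards
  // otherwise, mirroring the loops in the hunks above.
  static void appendTargetOrderBytes(const llvm::DataLayout &TD,
                                     const llvm::APInt &Val,
                                     llvm::SmallVectorImpl<char> &Out) {
    const char *Ptr = (const char *)Val.getRawData();
    int NumBytes = Val.getBitWidth() / 8;
    bool LittleEndian = TD.isLittleEndian();   // was TargetData::isLittleEndian
    int Start = LittleEndian ? 0 : NumBytes - 1;
    int Stop  = LittleEndian ? NumBytes : -1;
    int Incr  = LittleEndian ? 1 : -1;
    for (int i = Start; i != Stop; i += Incr)
      Out.push_back(Ptr[i]);
  }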

@ -27,7 +27,7 @@
#include "llvm/MC/MCSection.h" #include "llvm/MC/MCSection.h"
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
@ -384,7 +384,7 @@ DIE *DwarfDebug::constructLexicalScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately. // DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size() DebugRangeSymbols.size()
* Asm->getTargetData().getPointerSize()); * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(), for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) { RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@ -450,7 +450,7 @@ DIE *DwarfDebug::constructInlinedScopeDIE(CompileUnit *TheCU,
// DW_AT_ranges appropriately. // DW_AT_ranges appropriately.
TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4, TheCU->addUInt(ScopeDIE, dwarf::DW_AT_ranges, dwarf::DW_FORM_data4,
DebugRangeSymbols.size() DebugRangeSymbols.size()
* Asm->getTargetData().getPointerSize()); * Asm->getDataLayout().getPointerSize());
for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(), for (SmallVector<InsnRange, 4>::const_iterator RI = Ranges.begin(),
RE = Ranges.end(); RI != RE; ++RI) { RE = Ranges.end(); RI != RE; ++RI) {
DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first)); DebugRangeSymbols.push_back(getLabelBeforeInsn(RI->first));
@ -1765,7 +1765,7 @@ void DwarfDebug::emitDebugInfo() {
Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"), Asm->EmitSectionOffset(Asm->GetTempSymbol("abbrev_begin"),
DwarfAbbrevSectionSym); DwarfAbbrevSectionSym);
Asm->OutStreamer.AddComment("Address Size (in bytes)"); Asm->OutStreamer.AddComment("Address Size (in bytes)");
Asm->EmitInt8(Asm->getTargetData().getPointerSize()); Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
emitDIE(Die); emitDIE(Die);
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID())); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("info_end", TheCU->getID()));
@ -1811,14 +1811,14 @@ void DwarfDebug::emitEndOfLineMatrix(unsigned SectionEnd) {
Asm->EmitInt8(0); Asm->EmitInt8(0);
Asm->OutStreamer.AddComment("Op size"); Asm->OutStreamer.AddComment("Op size");
Asm->EmitInt8(Asm->getTargetData().getPointerSize() + 1); Asm->EmitInt8(Asm->getDataLayout().getPointerSize() + 1);
Asm->OutStreamer.AddComment("DW_LNE_set_address"); Asm->OutStreamer.AddComment("DW_LNE_set_address");
Asm->EmitInt8(dwarf::DW_LNE_set_address); Asm->EmitInt8(dwarf::DW_LNE_set_address);
Asm->OutStreamer.AddComment("Section end label"); Asm->OutStreamer.AddComment("Section end label");
Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd), Asm->OutStreamer.EmitSymbolValue(Asm->GetTempSymbol("section_end",SectionEnd),
Asm->getTargetData().getPointerSize(), Asm->getDataLayout().getPointerSize(),
0/*AddrSpace*/); 0/*AddrSpace*/);
// Mark end of matrix. // Mark end of matrix.
@ -2047,7 +2047,7 @@ void DwarfDebug::emitDebugLoc() {
// Start the dwarf loc section. // Start the dwarf loc section.
Asm->OutStreamer.SwitchSection( Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfLocSection()); Asm->getObjFileLowering().getDwarfLocSection());
unsigned char Size = Asm->getTargetData().getPointerSize(); unsigned char Size = Asm->getDataLayout().getPointerSize();
Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0)); Asm->OutStreamer.EmitLabel(Asm->GetTempSymbol("debug_loc", 0));
unsigned index = 1; unsigned index = 1;
for (SmallVector<DotDebugLocEntry, 4>::iterator for (SmallVector<DotDebugLocEntry, 4>::iterator
@ -2144,7 +2144,7 @@ void DwarfDebug::emitDebugRanges() {
// Start the dwarf ranges section. // Start the dwarf ranges section.
Asm->OutStreamer.SwitchSection( Asm->OutStreamer.SwitchSection(
Asm->getObjFileLowering().getDwarfRangesSection()); Asm->getObjFileLowering().getDwarfRangesSection());
unsigned char Size = Asm->getTargetData().getPointerSize(); unsigned char Size = Asm->getDataLayout().getPointerSize();
for (SmallVector<const MCSymbol *, 8>::iterator for (SmallVector<const MCSymbol *, 8>::iterator
I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end(); I = DebugRangeSymbols.begin(), E = DebugRangeSymbols.end();
I != E; ++I) { I != E; ++I) {
@ -2202,7 +2202,7 @@ void DwarfDebug::emitDebugInlineInfo() {
Asm->OutStreamer.AddComment("Dwarf Version"); Asm->OutStreamer.AddComment("Dwarf Version");
Asm->EmitInt16(dwarf::DWARF_VERSION); Asm->EmitInt16(dwarf::DWARF_VERSION);
Asm->OutStreamer.AddComment("Address Size (in bytes)"); Asm->OutStreamer.AddComment("Address Size (in bytes)");
Asm->EmitInt8(Asm->getTargetData().getPointerSize()); Asm->EmitInt8(Asm->getDataLayout().getPointerSize());
for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(), for (SmallVector<const MDNode *, 4>::iterator I = InlinedSPNodes.begin(),
E = InlinedSPNodes.end(); I != E; ++I) { E = InlinedSPNodes.end(); I != E; ++I) {
@ -2233,7 +2233,7 @@ void DwarfDebug::emitDebugInlineInfo() {
if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc"); if (Asm->isVerbose()) Asm->OutStreamer.AddComment("low_pc");
Asm->OutStreamer.EmitSymbolValue(LI->first, Asm->OutStreamer.EmitSymbolValue(LI->first,
Asm->getTargetData().getPointerSize(),0); Asm->getDataLayout().getPointerSize(),0);
} }
} }
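Not part of the diff: the "Address Size (in bytes)" field written throughout the DWARF emission above is simply the target pointer size reported by DataLayout. The helper name is an assumption.

  #include "llvm/DataLayout.h"

  // DWARF address-size byte for the current target.
  static unsigned dwarfAddressSize(const llvm::DataLayout &TD) {
    return TD.getPointerSize();
  }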

@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
@ -417,7 +417,7 @@ void DwarfException::EmitExceptionTable() {
// that we're omitting that bit. // that we're omitting that bit.
TTypeEncoding = dwarf::DW_EH_PE_omit; TTypeEncoding = dwarf::DW_EH_PE_omit;
// dwarf::DW_EH_PE_absptr // dwarf::DW_EH_PE_absptr
TypeFormatSize = Asm->getTargetData().getPointerSize(); TypeFormatSize = Asm->getDataLayout().getPointerSize();
} else { } else {
// Okay, we have actual filters or typeinfos to emit. As such, we need to // Okay, we have actual filters or typeinfos to emit. As such, we need to
// pick a type encoding for them. We're about to emit a list of pointers to // pick a type encoding for them. We're about to emit a list of pointers to

@ -20,7 +20,7 @@
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/ADT/SmallString.h" #include "llvm/ADT/SmallString.h"
@ -91,7 +91,7 @@ void OcamlGCMetadataPrinter::beginAssembly(AsmPrinter &AP) {
/// either condition is detected in a function which uses the GC. /// either condition is detected in a function which uses the GC.
/// ///
void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) { void OcamlGCMetadataPrinter::finishAssembly(AsmPrinter &AP) {
unsigned IntPtrSize = AP.TM.getTargetData()->getPointerSize(); unsigned IntPtrSize = AP.TM.getDataLayout()->getPointerSize();
AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection()); AP.OutStreamer.SwitchSection(AP.getObjFileLowering().getTextSection());
EmitCamlGlobal(getModule(), AP, "code_end"); EmitCamlGlobal(getModule(), AP, "code_end");

@ -24,7 +24,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"

@ -18,7 +18,7 @@
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
using namespace llvm; using namespace llvm;

@ -21,7 +21,7 @@
#include "llvm/Support/CallSite.h" #include "llvm/Support/CallSite.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
template <class ArgIt> template <class ArgIt>

@ -21,7 +21,7 @@
#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h" #include "llvm/MC/MCContext.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Assembly/Writer.h" #include "llvm/Assembly/Writer.h"

@ -28,7 +28,7 @@
#include "llvm/MC/MCContext.h" #include "llvm/MC/MCContext.h"
#include "llvm/Analysis/ConstantFolding.h" #include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
@ -62,7 +62,7 @@ MachineFunction::MachineFunction(const Function *F, const TargetMachine &TM,
if (Fn->getFnAttributes().hasStackAlignmentAttr()) if (Fn->getFnAttributes().hasStackAlignmentAttr())
FrameInfo->ensureMaxAlignment(Fn->getAttributes(). FrameInfo->ensureMaxAlignment(Fn->getAttributes().
getFnAttributes().getStackAlignment()); getFnAttributes().getStackAlignment());
ConstantPool = new (Allocator) MachineConstantPool(TM.getTargetData()); ConstantPool = new (Allocator) MachineConstantPool(TM.getDataLayout());
Alignment = TM.getTargetLowering()->getMinFunctionAlignment(); Alignment = TM.getTargetLowering()->getMinFunctionAlignment();
// FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn. // FIXME: Shouldn't use pref alignment if explicit alignment is set on Fn.
if (!Fn->getFnAttributes().hasOptimizeForSizeAttr()) if (!Fn->getFnAttributes().hasOptimizeForSizeAttr())
@ -545,7 +545,7 @@ void MachineFrameInfo::dump(const MachineFunction &MF) const {
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
/// getEntrySize - Return the size of each entry in the jump table. /// getEntrySize - Return the size of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const { unsigned MachineJumpTableInfo::getEntrySize(const DataLayout &TD) const {
// The size of a jump table entry is 4 bytes unless the entry is just the // The size of a jump table entry is 4 bytes unless the entry is just the
// address of a block, in which case it is the pointer size. // address of a block, in which case it is the pointer size.
switch (getEntryKind()) { switch (getEntryKind()) {
@ -564,7 +564,7 @@ unsigned MachineJumpTableInfo::getEntrySize(const TargetData &TD) const {
} }
/// getEntryAlignment - Return the alignment of each entry in the jump table. /// getEntryAlignment - Return the alignment of each entry in the jump table.
unsigned MachineJumpTableInfo::getEntryAlignment(const TargetData &TD) const { unsigned MachineJumpTableInfo::getEntryAlignment(const DataLayout &TD) const {
// The alignment of a jump table entry is the alignment of int32 unless the // The alignment of a jump table entry is the alignment of int32 unless the
// entry is just the address of a block, in which case it is the pointer // entry is just the address of a block, in which case it is the pointer
// alignment. // alignment.
@ -670,7 +670,7 @@ MachineConstantPool::~MachineConstantPool() {
/// CanShareConstantPoolEntry - Test whether the given two constants /// CanShareConstantPoolEntry - Test whether the given two constants
/// can be allocated the same constant pool entry. /// can be allocated the same constant pool entry.
static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B, static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
const TargetData *TD) { const DataLayout *TD) {
// Handle the trivial case quickly. // Handle the trivial case quickly.
if (A == B) return true; if (A == B) return true;
@ -694,7 +694,7 @@ static bool CanShareConstantPoolEntry(const Constant *A, const Constant *B,
// Try constant folding a bitcast of both instructions to an integer. If we // Try constant folding a bitcast of both instructions to an integer. If we
// get two identical ConstantInt's, then we are good to share them. We use // get two identical ConstantInt's, then we are good to share them. We use
// the constant folding APIs to do this so that we get the benefit of // the constant folding APIs to do this so that we get the benefit of
// TargetData. // DataLayout.
if (isa<PointerType>(A->getType())) if (isa<PointerType>(A->getType()))
A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy, A = ConstantFoldInstOperands(Instruction::PtrToInt, IntTy,
const_cast<Constant*>(A), TD); const_cast<Constant*>(A), TD);
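Not part of the diff: typical caller-side code for the MachineJumpTableInfo signatures changed above, which now take const DataLayout & instead of const TargetData &. The helper name is an assumption.

  #include "llvm/CodeGen/MachineJumpTableInfo.h"
  #include "llvm/DataLayout.h"
  #include "llvm/Target/TargetMachine.h"

  // Entry size and alignment of a jump table, queried through DataLayout.
  static void jumpTableLayout(const llvm::TargetMachine &TM,
                              const llvm::MachineJumpTableInfo &JTI,
                              unsigned &EntrySize, unsigned &EntryAlign) {
    const llvm::DataLayout &TD = *TM.getDataLayout();
    EntrySize  = JTI.getEntrySize(TD);
    EntryAlign = JTI.getEntryAlignment(TD);
  }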

@ -25,7 +25,7 @@
using namespace llvm; using namespace llvm;
using namespace llvm::dwarf; using namespace llvm::dwarf;
// Handle the Pass registration stuff necessary to use TargetData's. // Handle the Pass registration stuff necessary to use DataLayout's.
INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo", INITIALIZE_PASS(MachineModuleInfo, "machinemoduleinfo",
"Machine Module Information", false, false) "Machine Module Information", false, false)
char MachineModuleInfo::ID = 0; char MachineModuleInfo::ID = 0;

@ -23,7 +23,7 @@
#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/Analysis/AliasAnalysis.h" #include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
@ -5345,7 +5345,7 @@ SDValue DAGCombiner::CombineConsecutiveLoads(SDNode *N, EVT VT) {
!LD2->isVolatile() && !LD2->isVolatile() &&
DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) { DAG.isConsecutiveLoad(LD2, LD1, LD1VT.getSizeInBits()/8, 1)) {
unsigned Align = LD1->getAlignment(); unsigned Align = LD1->getAlignment();
unsigned NewAlign = TLI.getTargetData()-> unsigned NewAlign = TLI.getDataLayout()->
getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
if (NewAlign <= Align && if (NewAlign <= Align &&
@ -5414,7 +5414,7 @@ SDValue DAGCombiner::visitBITCAST(SDNode *N) {
!cast<LoadSDNode>(N0)->isVolatile() && !cast<LoadSDNode>(N0)->isVolatile() &&
(!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) { (!LegalOperations || TLI.isOperationLegal(ISD::LOAD, VT))) {
LoadSDNode *LN0 = cast<LoadSDNode>(N0); LoadSDNode *LN0 = cast<LoadSDNode>(N0);
unsigned Align = TLI.getTargetData()-> unsigned Align = TLI.getDataLayout()->
getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext())); getABITypeAlignment(VT.getTypeForEVT(*DAG.getContext()));
unsigned OrigAlign = LN0->getAlignment(); unsigned OrigAlign = LN0->getAlignment();
@ -7341,7 +7341,7 @@ SDValue DAGCombiner::ReduceLoadOpStoreWidth(SDNode *N) {
unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff); unsigned NewAlign = MinAlign(LD->getAlignment(), PtrOff);
Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext()); Type *NewVTTy = NewVT.getTypeForEVT(*DAG.getContext());
if (NewAlign < TLI.getTargetData()->getABITypeAlignment(NewVTTy)) if (NewAlign < TLI.getDataLayout()->getABITypeAlignment(NewVTTy))
return SDValue(); return SDValue();
SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(), SDValue NewPtr = DAG.getNode(ISD::ADD, LD->getDebugLoc(),
@ -7403,7 +7403,7 @@ SDValue DAGCombiner::TransformFPLoadStorePair(SDNode *N) {
unsigned LDAlign = LD->getAlignment(); unsigned LDAlign = LD->getAlignment();
unsigned STAlign = ST->getAlignment(); unsigned STAlign = ST->getAlignment();
Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext()); Type *IntVTTy = IntVT.getTypeForEVT(*DAG.getContext());
unsigned ABIAlign = TLI.getTargetData()->getABITypeAlignment(IntVTTy); unsigned ABIAlign = TLI.getDataLayout()->getABITypeAlignment(IntVTTy);
if (LDAlign < ABIAlign || STAlign < ABIAlign) if (LDAlign < ABIAlign || STAlign < ABIAlign)
return SDValue(); return SDValue();
@ -7856,7 +7856,7 @@ SDValue DAGCombiner::visitSTORE(SDNode *N) {
ST->isUnindexed()) { ST->isUnindexed()) {
unsigned OrigAlign = ST->getAlignment(); unsigned OrigAlign = ST->getAlignment();
EVT SVT = Value.getOperand(0).getValueType(); EVT SVT = Value.getOperand(0).getValueType();
unsigned Align = TLI.getTargetData()-> unsigned Align = TLI.getDataLayout()->
getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext())); getABITypeAlignment(SVT.getTypeForEVT(*DAG.getContext()));
if (Align <= OrigAlign && if (Align <= OrigAlign &&
((!LegalOperations && !ST->isVolatile()) || ((!LegalOperations && !ST->isVolatile()) ||
@ -8249,7 +8249,7 @@ SDValue DAGCombiner::visitEXTRACT_VECTOR_ELT(SDNode *N) {
// Check the resultant load doesn't need a higher alignment than the // Check the resultant load doesn't need a higher alignment than the
// original load. // original load.
unsigned NewAlign = unsigned NewAlign =
TLI.getTargetData() TLI.getDataLayout()
->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext())); ->getABITypeAlignment(LVT.getTypeForEVT(*DAG.getContext()));
if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT)) if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, LVT))
@ -9137,7 +9137,7 @@ SDValue DAGCombiner::SimplifySelectCC(DebugLoc DL, SDValue N0, SDValue N1,
const_cast<ConstantFP*>(TV->getConstantFPValue()) const_cast<ConstantFP*>(TV->getConstantFPValue())
}; };
Type *FPTy = Elts[0]->getType(); Type *FPTy = Elts[0]->getType();
const TargetData &TD = *TLI.getTargetData(); const DataLayout &TD = *TLI.getDataLayout();
// Create a ConstantArray of the two constants. // Create a ConstantArray of the two constants.
Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts); Constant *CA = ConstantArray::get(ArrayType::get(FPTy, 2), Elts);
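Not part of the diff: the alignment test the combines above now route through TLI.getDataLayout(), sketched with an explicit DataLayout and LLVMContext. The helper name and parameter set are assumptions; the individual combines phrase the comparison in slightly different directions.

  #include "llvm/CodeGen/ValueTypes.h"
  #include "llvm/DataLayout.h"
  #include "llvm/LLVMContext.h"

  // A memory access of type VT with alignment Align satisfies the ABI
  // requirement when Align is at least DataLayout's ABI alignment for VT.
  static bool meetsABIAlignment(const llvm::DataLayout &TD, llvm::EVT VT,
                                llvm::LLVMContext &Ctx, unsigned Align) {
    llvm::Type *Ty = VT.getTypeForEVT(Ctx);
    return Align >= TD.getABITypeAlignment(Ty);
  }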

@ -53,7 +53,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Analysis/Loads.h" #include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLibraryInfo.h" #include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
@ -1059,7 +1059,7 @@ FastISel::FastISel(FunctionLoweringInfo &funcInfo,
MFI(*FuncInfo.MF->getFrameInfo()), MFI(*FuncInfo.MF->getFrameInfo()),
MCP(*FuncInfo.MF->getConstantPool()), MCP(*FuncInfo.MF->getConstantPool()),
TM(FuncInfo.MF->getTarget()), TM(FuncInfo.MF->getTarget()),
TD(*TM.getTargetData()), TD(*TM.getDataLayout()),
TII(*TM.getInstrInfo()), TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()), TLI(*TM.getTargetLowering()),
TRI(*TM.getRegisterInfo()), TRI(*TM.getRegisterInfo()),

@ -29,7 +29,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
@ -80,9 +80,9 @@ void FunctionLoweringInfo::set(const Function &fn, MachineFunction &mf) {
if (const AllocaInst *AI = dyn_cast<AllocaInst>(I)) if (const AllocaInst *AI = dyn_cast<AllocaInst>(I))
if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) { if (const ConstantInt *CUI = dyn_cast<ConstantInt>(AI->getArraySize())) {
Type *Ty = AI->getAllocatedType(); Type *Ty = AI->getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
unsigned Align = unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
AI->getAlignment()); AI->getAlignment());
TySize *= CUI->getZExtValue(); // Get total allocated size. TySize *= CUI->getZExtValue(); // Get total allocated size.

@ -20,7 +20,7 @@
#include "llvm/CodeGen/MachineFunction.h" #include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
@ -390,10 +390,10 @@ void InstrEmitter::AddOperand(MachineInstr *MI, SDValue Op,
Type *Type = CP->getType(); Type *Type = CP->getType();
// MachineConstantPool wants an explicit alignment. // MachineConstantPool wants an explicit alignment.
if (Align == 0) { if (Align == 0) {
Align = TM->getTargetData()->getPrefTypeAlignment(Type); Align = TM->getDataLayout()->getPrefTypeAlignment(Type);
if (Align == 0) { if (Align == 0) {
// Alignment of vector types. FIXME! // Alignment of vector types. FIXME!
Align = TM->getTargetData()->getTypeAllocSize(Type); Align = TM->getDataLayout()->getTypeAllocSize(Type);
} }
} }

@ -22,7 +22,7 @@
#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
@ -718,7 +718,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// expand it. // expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment) if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), ExpandUnalignedStore(cast<StoreSDNode>(Node),
DAG, TLI, this); DAG, TLI, this);
@ -824,7 +824,7 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
// expand it. // expand it.
if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) { if (!TLI.allowsUnalignedMemoryAccesses(ST->getMemoryVT())) {
Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext()); Type *Ty = ST->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment= TLI.getTargetData()->getABITypeAlignment(Ty); unsigned ABIAlignment= TLI.getDataLayout()->getABITypeAlignment(Ty);
if (ST->getAlignment() < ABIAlignment) if (ST->getAlignment() < ABIAlignment)
ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this); ExpandUnalignedStore(cast<StoreSDNode>(Node), DAG, TLI, this);
} }
@ -874,7 +874,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) { if (!TLI.allowsUnalignedMemoryAccesses(LD->getMemoryVT())) {
Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); Type *Ty = LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty); TLI.getDataLayout()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){ if (LD->getAlignment() < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain); ExpandUnalignedLoad(cast<LoadSDNode>(Node), DAG, TLI, RVal, RChain);
} }
@ -1059,7 +1059,7 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
Type *Ty = Type *Ty =
LD->getMemoryVT().getTypeForEVT(*DAG.getContext()); LD->getMemoryVT().getTypeForEVT(*DAG.getContext());
unsigned ABIAlignment = unsigned ABIAlignment =
TLI.getTargetData()->getABITypeAlignment(Ty); TLI.getDataLayout()->getABITypeAlignment(Ty);
if (LD->getAlignment() < ABIAlignment){ if (LD->getAlignment() < ABIAlignment){
ExpandUnalignedLoad(cast<LoadSDNode>(Node), ExpandUnalignedLoad(cast<LoadSDNode>(Node),
DAG, TLI, Value, Chain); DAG, TLI, Value, Chain);
@ -1625,7 +1625,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
DebugLoc dl) { DebugLoc dl) {
// Create the stack frame object. // Create the stack frame object.
unsigned SrcAlign = unsigned SrcAlign =
TLI.getTargetData()->getPrefTypeAlignment(SrcOp.getValueType(). TLI.getDataLayout()->getPrefTypeAlignment(SrcOp.getValueType().
getTypeForEVT(*DAG.getContext())); getTypeForEVT(*DAG.getContext()));
SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign); SDValue FIPtr = DAG.CreateStackTemporary(SlotVT, SrcAlign);
@ -1637,7 +1637,7 @@ SDValue SelectionDAGLegalize::EmitStackConvert(SDValue SrcOp,
unsigned SlotSize = SlotVT.getSizeInBits(); unsigned SlotSize = SlotVT.getSizeInBits();
unsigned DestSize = DestVT.getSizeInBits(); unsigned DestSize = DestVT.getSizeInBits();
Type *DestType = DestVT.getTypeForEVT(*DAG.getContext()); Type *DestType = DestVT.getTypeForEVT(*DAG.getContext());
unsigned DestAlign = TLI.getTargetData()->getPrefTypeAlignment(DestType); unsigned DestAlign = TLI.getDataLayout()->getPrefTypeAlignment(DestType);
// Emit a store to the stack slot. Use a truncstore if the input value is // Emit a store to the stack slot. Use a truncstore if the input value is
// later than DestVT. // later than DestVT.
@ -2786,7 +2786,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
// Increment the pointer, VAList, to the next vaarg // Increment the pointer, VAList, to the next vaarg
Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList, Tmp3 = DAG.getNode(ISD::ADD, dl, TLI.getPointerTy(), VAList,
DAG.getConstant(TLI.getTargetData()-> DAG.getConstant(TLI.getDataLayout()->
getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())), getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext())),
TLI.getPointerTy())); TLI.getPointerTy()));
// Store the incremented VAList to the legalized pointer // Store the incremented VAList to the legalized pointer
@ -3365,7 +3365,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node) {
EVT PTy = TLI.getPointerTy(); EVT PTy = TLI.getPointerTy();
const TargetData &TD = *TLI.getTargetData(); const DataLayout &TD = *TLI.getDataLayout();
unsigned EntrySize = unsigned EntrySize =
DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD); DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);

@ -15,7 +15,7 @@
#include "LegalizeTypes.h" #include "LegalizeTypes.h"
#include "llvm/CallingConv.h" #include "llvm/CallingConv.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/ADT/SetVector.h" #include "llvm/ADT/SetVector.h"
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"

@ -20,7 +20,7 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "LegalizeTypes.h" #include "LegalizeTypes.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -146,7 +146,7 @@ void DAGTypeLegalizer::ExpandRes_BITCAST(SDNode *N, SDValue &Lo, SDValue &Hi) {
// Create the stack frame object. Make sure it is aligned for both // Create the stack frame object. Make sure it is aligned for both
// the source and expanded destination types. // the source and expanded destination types.
unsigned Alignment = unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(NOutVT. TLI.getDataLayout()->getPrefTypeAlignment(NOutVT.
getTypeForEVT(*DAG.getContext())); getTypeForEVT(*DAG.getContext()));
SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment); SDValue StackPtr = DAG.CreateStackTemporary(InVT, Alignment);
int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex(); int SPFI = cast<FrameIndexSDNode>(StackPtr.getNode())->getIndex();

@ -21,7 +21,7 @@
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
#include "LegalizeTypes.h" #include "LegalizeTypes.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
using namespace llvm; using namespace llvm;
@ -749,7 +749,7 @@ void DAGTypeLegalizer::SplitVecRes_INSERT_VECTOR_ELT(SDNode *N, SDValue &Lo,
SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx); SDValue EltPtr = GetVectorElementPointer(StackPtr, EltVT, Idx);
Type *VecType = VecVT.getTypeForEVT(*DAG.getContext()); Type *VecType = VecVT.getTypeForEVT(*DAG.getContext());
unsigned Alignment = unsigned Alignment =
TLI.getTargetData()->getPrefTypeAlignment(VecType); TLI.getDataLayout()->getPrefTypeAlignment(VecType);
Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT, Store = DAG.getTruncStore(Store, dl, Elt, EltPtr, MachinePointerInfo(), EltVT,
false, false, 0); false, false, 0);

@ -17,7 +17,7 @@
#include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallSet.h"

@ -22,7 +22,7 @@
#include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h" #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"

@ -25,7 +25,7 @@
#include "llvm/CodeGen/SchedulerRegistry.h" #include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h" #include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"

@ -29,7 +29,7 @@
#include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSelectionDAGInfo.h" #include "llvm/Target/TargetSelectionDAGInfo.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
@ -883,7 +883,7 @@ unsigned SelectionDAG::getEVTAlignment(EVT VT) const {
PointerType::get(Type::getInt8Ty(*getContext()), 0) : PointerType::get(Type::getInt8Ty(*getContext()), 0) :
VT.getTypeForEVT(*getContext()); VT.getTypeForEVT(*getContext());
return TLI.getTargetData()->getABITypeAlignment(Ty); return TLI.getDataLayout()->getABITypeAlignment(Ty);
} }
// EntryNode could meaningfully have debug info if we can find it... // EntryNode could meaningfully have debug info if we can find it...
@ -1173,7 +1173,7 @@ SDValue SelectionDAG::getConstantPool(const Constant *C, EVT VT,
assert((TargetFlags == 0 || isTarget) && assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals"); "Cannot set target flags on target-independent globals");
if (Alignment == 0) if (Alignment == 0)
Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType()); Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID; FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@ -1200,7 +1200,7 @@ SDValue SelectionDAG::getConstantPool(MachineConstantPoolValue *C, EVT VT,
assert((TargetFlags == 0 || isTarget) && assert((TargetFlags == 0 || isTarget) &&
"Cannot set target flags on target-independent globals"); "Cannot set target flags on target-independent globals");
if (Alignment == 0) if (Alignment == 0)
Alignment = TLI.getTargetData()->getPrefTypeAlignment(C->getType()); Alignment = TLI.getDataLayout()->getPrefTypeAlignment(C->getType());
unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool; unsigned Opc = isTarget ? ISD::TargetConstantPool : ISD::ConstantPool;
FoldingSetNodeID ID; FoldingSetNodeID ID;
AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0); AddNodeIDNode(ID, Opc, getVTList(VT), 0, 0);
@ -1544,7 +1544,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT, unsigned minAlign) {
unsigned ByteSize = VT.getStoreSize(); unsigned ByteSize = VT.getStoreSize();
Type *Ty = VT.getTypeForEVT(*getContext()); Type *Ty = VT.getTypeForEVT(*getContext());
unsigned StackAlign = unsigned StackAlign =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), minAlign); std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty), minAlign);
int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false); int FrameIdx = FrameInfo->CreateStackObject(ByteSize, StackAlign, false);
return getFrameIndex(FrameIdx, TLI.getPointerTy()); return getFrameIndex(FrameIdx, TLI.getPointerTy());
@ -1557,7 +1557,7 @@ SDValue SelectionDAG::CreateStackTemporary(EVT VT1, EVT VT2) {
VT2.getStoreSizeInBits())/8; VT2.getStoreSizeInBits())/8;
Type *Ty1 = VT1.getTypeForEVT(*getContext()); Type *Ty1 = VT1.getTypeForEVT(*getContext());
Type *Ty2 = VT2.getTypeForEVT(*getContext()); Type *Ty2 = VT2.getTypeForEVT(*getContext());
const TargetData *TD = TLI.getTargetData(); const DataLayout *TD = TLI.getDataLayout();
unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1), unsigned Align = std::max(TD->getPrefTypeAlignment(Ty1),
TD->getPrefTypeAlignment(Ty2)); TD->getPrefTypeAlignment(Ty2));
@ -3451,7 +3451,7 @@ static bool FindOptimalMemOpLowering(std::vector<EVT> &MemOps,
DAG.getMachineFunction()); DAG.getMachineFunction());
if (VT == MVT::Other) { if (VT == MVT::Other) {
if (DstAlign >= TLI.getTargetData()->getPointerPrefAlignment() || if (DstAlign >= TLI.getDataLayout()->getPointerPrefAlignment() ||
TLI.allowsUnalignedMemoryAccesses(VT)) { TLI.allowsUnalignedMemoryAccesses(VT)) {
VT = TLI.getPointerTy(); VT = TLI.getPointerTy();
} else { } else {
@ -3539,7 +3539,7 @@ static SDValue getMemcpyLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) { if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) { if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed. // Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@ -3628,7 +3628,7 @@ static SDValue getMemmoveLoadsAndStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) { if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) { if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed. // Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@ -3703,7 +3703,7 @@ static SDValue getMemsetStores(SelectionDAG &DAG, DebugLoc dl,
if (DstAlignCanChange) { if (DstAlignCanChange) {
Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext()); Type *Ty = MemOps[0].getTypeForEVT(*DAG.getContext());
unsigned NewAlign = (unsigned) TLI.getTargetData()->getABITypeAlignment(Ty); unsigned NewAlign = (unsigned) TLI.getDataLayout()->getABITypeAlignment(Ty);
if (NewAlign > Align) { if (NewAlign > Align) {
// Give the stack frame object a larger alignment if needed. // Give the stack frame object a larger alignment if needed.
if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign) if (MFI->getObjectAlignment(FI->getIndex()) < NewAlign)
@ -3797,7 +3797,7 @@ SDValue SelectionDAG::getMemcpy(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call. // Emit a library call.
TargetLowering::ArgListTy Args; TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry; TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext()); Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry);
@ -3852,7 +3852,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, DebugLoc dl, SDValue Dst,
// Emit a library call. // Emit a library call.
TargetLowering::ArgListTy Args; TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry; TargetLowering::ArgListEntry Entry;
Entry.Ty = TLI.getTargetData()->getIntPtrType(*getContext()); Entry.Ty = TLI.getDataLayout()->getIntPtrType(*getContext());
Entry.Node = Dst; Args.push_back(Entry); Entry.Node = Dst; Args.push_back(Entry);
Entry.Node = Src; Args.push_back(Entry); Entry.Node = Src; Args.push_back(Entry);
Entry.Node = Size; Args.push_back(Entry); Entry.Node = Size; Args.push_back(Entry);
@ -3901,7 +3901,7 @@ SDValue SelectionDAG::getMemset(SDValue Chain, DebugLoc dl, SDValue Dst,
return Result; return Result;
// Emit a library call. // Emit a library call.
Type *IntPtrTy = TLI.getTargetData()->getIntPtrType(*getContext()); Type *IntPtrTy = TLI.getDataLayout()->getIntPtrType(*getContext());
TargetLowering::ArgListTy Args; TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry; TargetLowering::ArgListEntry Entry;
Entry.Node = Dst; Entry.Ty = IntPtrTy; Entry.Node = Dst; Entry.Ty = IntPtrTy;
@ -6097,7 +6097,7 @@ unsigned SelectionDAG::InferPtrAlignment(SDValue Ptr) const {
unsigned PtrWidth = TLI.getPointerTy().getSizeInBits(); unsigned PtrWidth = TLI.getPointerTy().getSizeInBits();
APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0); APInt KnownZero(PtrWidth, 0), KnownOne(PtrWidth, 0);
llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne, llvm::ComputeMaskedBits(const_cast<GlobalValue*>(GV), KnownZero, KnownOne,
TLI.getTargetData()); TLI.getDataLayout());
unsigned AlignBits = KnownZero.countTrailingOnes(); unsigned AlignBits = KnownZero.countTrailingOnes();
unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0; unsigned Align = AlignBits ? 1 << std::min(31U, AlignBits) : 0;
if (Align) if (Align)
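Not part of the diff: the memcpy/memmove/memset library-call fallbacks above type their size argument with DataLayout's integer-pointer type. A minimal sketch; the helper name is an assumption.

  #include "llvm/DataLayout.h"
  #include "llvm/LLVMContext.h"
  #include "llvm/Type.h"

  // The integer type wide enough to hold a pointer (e.g. i64 when pointers
  // are 64 bits), used as the type of the length argument in the libcalls.
  static llvm::Type *memLibcallSizeType(const llvm::DataLayout &TD,
                                        llvm::LLVMContext &Ctx) {
    return TD.getIntPtrType(Ctx);
  }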

@ -44,7 +44,7 @@
#include "llvm/CodeGen/MachineModuleInfo.h" #include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAG.h" #include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetIntrinsicInfo.h" #include "llvm/Target/TargetIntrinsicInfo.h"
@ -847,7 +847,7 @@ void SelectionDAGBuilder::init(GCFunctionInfo *gfi, AliasAnalysis &aa,
AA = &aa; AA = &aa;
GFI = gfi; GFI = gfi;
LibInfo = li; LibInfo = li;
TD = DAG.getTarget().getTargetData(); TD = DAG.getTarget().getDataLayout();
Context = DAG.getContext(); Context = DAG.getContext();
LPadToCallSiteMap.clear(); LPadToCallSiteMap.clear();
} }
@ -3208,9 +3208,9 @@ void SelectionDAGBuilder::visitAlloca(const AllocaInst &I) {
return; // getValue will auto-populate this. return; // getValue will auto-populate this.
Type *Ty = I.getAllocatedType(); Type *Ty = I.getAllocatedType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
unsigned Align = unsigned Align =
std::max((unsigned)TLI.getTargetData()->getPrefTypeAlignment(Ty), std::max((unsigned)TLI.getDataLayout()->getPrefTypeAlignment(Ty),
I.getAlignment()); I.getAlignment());
SDValue AllocSize = getValue(I.getArraySize()); SDValue AllocSize = getValue(I.getArraySize());
@ -5308,9 +5308,9 @@ void SelectionDAGBuilder::LowerCallTo(ImmutableCallSite CS, SDValue Callee,
int DemoteStackIdx = -100; int DemoteStackIdx = -100;
if (!CanLowerReturn) { if (!CanLowerReturn) {
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize( uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(
FTy->getReturnType()); FTy->getReturnType());
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment( unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(
FTy->getReturnType()); FTy->getReturnType());
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); DemoteStackIdx = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
@ -5775,7 +5775,7 @@ public:
/// MVT::Other. /// MVT::Other.
EVT getCallOperandValEVT(LLVMContext &Context, EVT getCallOperandValEVT(LLVMContext &Context,
const TargetLowering &TLI, const TargetLowering &TLI,
const TargetData *TD) const { const DataLayout *TD) const {
if (CallOperandVal == 0) return MVT::Other; if (CallOperandVal == 0) return MVT::Other;
if (isa<BasicBlock>(CallOperandVal)) if (isa<BasicBlock>(CallOperandVal))
@ -6079,8 +6079,8 @@ void SelectionDAGBuilder::visitInlineAsm(ImmutableCallSite CS) {
// Otherwise, create a stack slot and emit a store to it before the // Otherwise, create a stack slot and emit a store to it before the
// asm. // asm.
Type *Ty = OpVal->getType(); Type *Ty = OpVal->getType();
uint64_t TySize = TLI.getTargetData()->getTypeAllocSize(Ty); uint64_t TySize = TLI.getDataLayout()->getTypeAllocSize(Ty);
unsigned Align = TLI.getTargetData()->getPrefTypeAlignment(Ty); unsigned Align = TLI.getDataLayout()->getPrefTypeAlignment(Ty);
MachineFunction &MF = DAG.getMachineFunction(); MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false); int SSFI = MF.getFrameInfo()->CreateStackObject(TySize, Align, false);
SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy()); SDValue StackSlot = DAG.getFrameIndex(SSFI, TLI.getPointerTy());
@ -6428,7 +6428,7 @@ void SelectionDAGBuilder::visitVAStart(const CallInst &I) {
} }
void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) { void SelectionDAGBuilder::visitVAArg(const VAArgInst &I) {
const TargetData &TD = *TLI.getTargetData(); const DataLayout &TD = *TLI.getDataLayout();
SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(), SDValue V = DAG.getVAArg(TLI.getValueType(I.getType()), getCurDebugLoc(),
getRoot(), getValue(I.getOperand(0)), getRoot(), getValue(I.getOperand(0)),
DAG.getSrcValue(I.getOperand(0)), DAG.getSrcValue(I.getOperand(0)),
@ -6474,7 +6474,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Args[i].Node.getResNo() + Value); Args[i].Node.getResNo() + Value);
ISD::ArgFlagsTy Flags; ISD::ArgFlagsTy Flags;
unsigned OriginalAlignment = unsigned OriginalAlignment =
getTargetData()->getABITypeAlignment(ArgTy); getDataLayout()->getABITypeAlignment(ArgTy);
if (Args[i].isZExt) if (Args[i].isZExt)
Flags.setZExt(); Flags.setZExt();
@ -6488,7 +6488,7 @@ TargetLowering::LowerCallTo(TargetLowering::CallLoweringInfo &CLI) const {
Flags.setByVal(); Flags.setByVal();
PointerType *Ty = cast<PointerType>(Args[i].Ty); PointerType *Ty = cast<PointerType>(Args[i].Ty);
Type *ElementTy = Ty->getElementType(); Type *ElementTy = Ty->getElementType();
Flags.setByValSize(getTargetData()->getTypeAllocSize(ElementTy)); Flags.setByValSize(getDataLayout()->getTypeAllocSize(ElementTy));
// For ByVal, alignment should come from FE. BE will guess if this // For ByVal, alignment should come from FE. BE will guess if this
// info is not there but there are cases it cannot get right. // info is not there but there are cases it cannot get right.
unsigned FrameAlign; unsigned FrameAlign;
@ -6663,7 +6663,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) {
const Function &F = *LLVMBB->getParent(); const Function &F = *LLVMBB->getParent();
SelectionDAG &DAG = SDB->DAG; SelectionDAG &DAG = SDB->DAG;
DebugLoc dl = SDB->getCurDebugLoc(); DebugLoc dl = SDB->getCurDebugLoc();
const TargetData *TD = TLI.getTargetData(); const DataLayout *TD = TLI.getDataLayout();
SmallVector<ISD::InputArg, 16> Ins; SmallVector<ISD::InputArg, 16> Ins;
// Check whether the function can return without sret-demotion. // Check whether the function can return without sret-demotion.
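For orientation, the SelectionDAGBuilder hunks above all reduce to the same pattern: fetch the renamed DataLayout through TargetLowering and query sizes and alignments from it instead of TargetData. A minimal sketch of that pattern follows; the helper name getAllocaLayout is illustrative and not part of this commit.

    #include <algorithm>
    #include <utility>
    #include "llvm/DataLayout.h"
    #include "llvm/Instructions.h"
    #include "llvm/Target/TargetLowering.h"

    // Sketch: allocation size and preferred alignment of an alloca, read
    // through the renamed accessor (previously TLI.getTargetData()).
    static std::pair<uint64_t, unsigned>
    getAllocaLayout(const llvm::TargetLowering &TLI, const llvm::AllocaInst &I) {
      llvm::Type *Ty = I.getAllocatedType();
      const llvm::DataLayout *TD = TLI.getDataLayout();
      uint64_t Size = TD->getTypeAllocSize(Ty);
      unsigned Align =
          std::max((unsigned)TD->getPrefTypeAlignment(Ty), I.getAlignment());
      return std::make_pair(Size, Align);
    }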

View File

@ -66,7 +66,7 @@ class ShuffleVectorInst;
class SIToFPInst; class SIToFPInst;
class StoreInst; class StoreInst;
class SwitchInst; class SwitchInst;
class TargetData; class DataLayout;
class TargetLibraryInfo; class TargetLibraryInfo;
class TargetLowering; class TargetLowering;
class TruncInst; class TruncInst;
@ -285,7 +285,7 @@ public:
const TargetMachine &TM; const TargetMachine &TM;
const TargetLowering &TLI; const TargetLowering &TLI;
SelectionDAG &DAG; SelectionDAG &DAG;
const TargetData *TD; const DataLayout *TD;
AliasAnalysis *AA; AliasAnalysis *AA;
const TargetLibraryInfo *LibInfo; const TargetLibraryInfo *LibInfo;

View File

@ -14,7 +14,7 @@
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCExpr.h" #include "llvm/MC/MCExpr.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLoweringObjectFile.h" #include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetRegisterInfo.h"
@ -515,7 +515,7 @@ static void InitCmpLibcallCCs(ISD::CondCode *CCs) {
/// NOTE: The constructor takes ownership of TLOF. /// NOTE: The constructor takes ownership of TLOF.
TargetLowering::TargetLowering(const TargetMachine &tm, TargetLowering::TargetLowering(const TargetMachine &tm,
const TargetLoweringObjectFile *tlof) const TargetLoweringObjectFile *tlof)
: TM(tm), TD(TM.getTargetData()), TLOF(*tlof) { : TM(tm), TD(TM.getDataLayout()), TLOF(*tlof) {
// All operations default to being supported. // All operations default to being supported.
memset(OpActions, 0, sizeof(OpActions)); memset(OpActions, 0, sizeof(OpActions));
memset(LoadExtActions, 0, sizeof(LoadExtActions)); memset(LoadExtActions, 0, sizeof(LoadExtActions));

View File

@ -16,7 +16,7 @@
using namespace llvm; using namespace llvm;
TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM) TargetSelectionDAGInfo::TargetSelectionDAGInfo(const TargetMachine &TM)
: TD(TM.getTargetData()) { : TD(TM.getDataLayout()) {
} }
TargetSelectionDAGInfo::~TargetSelectionDAGInfo() { TargetSelectionDAGInfo::~TargetSelectionDAGInfo() {

View File

@ -30,7 +30,7 @@
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Scalar.h" #include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h"
@ -191,7 +191,7 @@ setupFunctionContext(Function &F, ArrayRef<LandingPadInst*> LPads) {
// that needs to be restored on all exits from the function. This is an alloca // that needs to be restored on all exits from the function. This is an alloca
// because the value needs to be added to the global context list. // because the value needs to be added to the global context list.
unsigned Align = unsigned Align =
TLI->getTargetData()->getPrefTypeAlignment(FunctionContextTy); TLI->getDataLayout()->getPrefTypeAlignment(FunctionContextTy);
FuncCtx = FuncCtx =
new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin()); new AllocaInst(FunctionContextTy, 0, Align, "fn_context", EntryBB->begin());

View File

@ -26,7 +26,7 @@
#include "llvm/Module.h" #include "llvm/Module.h"
#include "llvm/Pass.h" #include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/Triple.h" #include "llvm/ADT/Triple.h"
@ -117,7 +117,7 @@ bool StackProtector::ContainsProtectableArray(Type *Ty, bool InStruct) const {
// If an array has more than SSPBufferSize bytes of allocated space, then we // If an array has more than SSPBufferSize bytes of allocated space, then we
// emit stack protectors. // emit stack protectors.
if (TM.Options.SSPBufferSize <= TLI->getTargetData()->getTypeAllocSize(AT)) if (TM.Options.SSPBufferSize <= TLI->getDataLayout()->getTypeAllocSize(AT))
return true; return true;
} }
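The stack-protector heuristic itself does not change; it only reads the array's allocated size from the renamed interface before comparing it with SSPBufferSize. A minimal sketch (needsProtector is an illustrative name):

    #include "llvm/DataLayout.h"
    #include "llvm/DerivedTypes.h"

    // Sketch: protect arrays whose allocated size reaches the SSP threshold,
    // measured with DataLayout::getTypeAllocSize.
    static bool needsProtector(const llvm::DataLayout &TD, llvm::ArrayType *AT,
                               unsigned SSPBufferSize) {
      return SSPBufferSize <= TD.getTypeAllocSize(AT);
    }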

View File

@ -27,7 +27,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h" #include "llvm/Target/TargetOptions.h"
#include "llvm/Support/Dwarf.h" #include "llvm/Support/Dwarf.h"
@ -77,9 +77,9 @@ void TargetLoweringObjectFileELF::emitPersonalityValue(MCStreamer &Streamer,
Flags, Flags,
SectionKind::getDataRel(), SectionKind::getDataRel(),
0, Label->getName()); 0, Label->getName());
unsigned Size = TM.getTargetData()->getPointerSize(); unsigned Size = TM.getDataLayout()->getPointerSize();
Streamer.SwitchSection(Sec); Streamer.SwitchSection(Sec);
Streamer.EmitValueToAlignment(TM.getTargetData()->getPointerABIAlignment()); Streamer.EmitValueToAlignment(TM.getDataLayout()->getPointerABIAlignment());
Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject); Streamer.EmitSymbolAttribute(Label, MCSA_ELF_TypeObject);
const MCExpr *E = MCConstantExpr::Create(Size, getContext()); const MCExpr *E = MCConstantExpr::Create(Size, getContext());
Streamer.EmitELFSize(Label, E); Streamer.EmitELFSize(Label, E);
@ -247,7 +247,7 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// FIXME: this is getting the alignment of the character, not the // FIXME: this is getting the alignment of the character, not the
// alignment of the global! // alignment of the global!
unsigned Align = unsigned Align =
TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)); TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV));
const char *SizeSpec = ".rodata.str1."; const char *SizeSpec = ".rodata.str1.";
if (Kind.isMergeable2ByteCString()) if (Kind.isMergeable2ByteCString())
@ -522,14 +522,14 @@ SelectSectionForGlobal(const GlobalValue *GV, SectionKind Kind,
// FIXME: Alignment check should be handled by section classifier. // FIXME: Alignment check should be handled by section classifier.
if (Kind.isMergeable1ByteCString() && if (Kind.isMergeable1ByteCString() &&
TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32) TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return CStringSection; return CStringSection;
// Do not put 16-bit arrays in the UString section if they have an // Do not put 16-bit arrays in the UString section if they have an
// externally visible label, this runs into issues with certain linker // externally visible label, this runs into issues with certain linker
// versions. // versions.
if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() && if (Kind.isMergeable2ByteCString() && !GV->hasExternalLinkage() &&
TM.getTargetData()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32) TM.getDataLayout()->getPreferredAlignment(cast<GlobalVariable>(GV)) < 32)
return UStringSection; return UStringSection;
if (Kind.isMergeableConst()) { if (Kind.isMergeableConst()) {
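The object-file lowering hunks swap the same accessor on TargetMachine; the queries themselves are unchanged. A minimal sketch of the three DataLayout calls used above (the helper queryLayout is hypothetical):

    #include "llvm/DataLayout.h"
    #include "llvm/GlobalVariable.h"
    #include "llvm/Target/TargetMachine.h"

    // Sketch: pointer size/ABI alignment for the personality value, plus the
    // preferred alignment of a global for the mergeable-string section checks.
    static void queryLayout(const llvm::TargetMachine &TM,
                            const llvm::GlobalVariable *GV,
                            unsigned &PtrSize, unsigned &PtrAlign,
                            unsigned &GVAlign) {
      const llvm::DataLayout *TD = TM.getDataLayout();
      PtrSize = TD->getPointerSize();
      PtrAlign = TD->getPointerABIAlignment();
      GVAlign = TD->getPreferredAlignment(GV);
    }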

View File

@ -29,7 +29,7 @@
#include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/Host.h" #include "llvm/Support/Host.h"
#include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TargetRegistry.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include <cmath> #include <cmath>
#include <cstring> #include <cstring>
@ -91,11 +91,11 @@ class GVMemoryBlock : public CallbackVH {
public: public:
/// \brief Returns the address the GlobalVariable should be written into. The /// \brief Returns the address the GlobalVariable should be written into. The
/// GVMemoryBlock object prefixes that. /// GVMemoryBlock object prefixes that.
static char *Create(const GlobalVariable *GV, const TargetData& TD) { static char *Create(const GlobalVariable *GV, const DataLayout& TD) {
Type *ElTy = GV->getType()->getElementType(); Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy); size_t GVSize = (size_t)TD.getTypeAllocSize(ElTy);
void *RawMemory = ::operator new( void *RawMemory = ::operator new(
TargetData::RoundUpAlignment(sizeof(GVMemoryBlock), DataLayout::RoundUpAlignment(sizeof(GVMemoryBlock),
TD.getPreferredAlignment(GV)) TD.getPreferredAlignment(GV))
+ GVSize); + GVSize);
new(RawMemory) GVMemoryBlock(GV); new(RawMemory) GVMemoryBlock(GV);
@ -113,7 +113,7 @@ public:
} // anonymous namespace } // anonymous namespace
char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) { char *ExecutionEngine::getMemoryForGV(const GlobalVariable *GV) {
return GVMemoryBlock::Create(GV, *getTargetData()); return GVMemoryBlock::Create(GV, *getDataLayout());
} }
bool ExecutionEngine::removeModule(Module *M) { bool ExecutionEngine::removeModule(Module *M) {
@ -267,7 +267,7 @@ public:
void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE, void *ArgvArray::reset(LLVMContext &C, ExecutionEngine *EE,
const std::vector<std::string> &InputArgv) { const std::vector<std::string> &InputArgv) {
clear(); // Free the old contents. clear(); // Free the old contents.
unsigned PtrSize = EE->getTargetData()->getPointerSize(); unsigned PtrSize = EE->getDataLayout()->getPointerSize();
Array = new char[(InputArgv.size()+1)*PtrSize]; Array = new char[(InputArgv.size()+1)*PtrSize];
DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n"); DEBUG(dbgs() << "JIT: ARGV = " << (void*)Array << "\n");
@ -342,7 +342,7 @@ void ExecutionEngine::runStaticConstructorsDestructors(bool isDtors) {
#ifndef NDEBUG #ifndef NDEBUG
/// isTargetNullPtr - Return whether the target pointer stored at Loc is null. /// isTargetNullPtr - Return whether the target pointer stored at Loc is null.
static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) { static bool isTargetNullPtr(ExecutionEngine *EE, void *Loc) {
unsigned PtrSize = EE->getTargetData()->getPointerSize(); unsigned PtrSize = EE->getDataLayout()->getPointerSize();
for (unsigned i = 0; i < PtrSize; ++i) for (unsigned i = 0; i < PtrSize; ++i)
if (*(i + (uint8_t*)Loc)) if (*(i + (uint8_t*)Loc))
return false; return false;
@ -856,7 +856,7 @@ static void StoreIntToMemory(const APInt &IntVal, uint8_t *Dst,
void ExecutionEngine::StoreValueToMemory(const GenericValue &Val, void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
GenericValue *Ptr, Type *Ty) { GenericValue *Ptr, Type *Ty) {
const unsigned StoreBytes = getTargetData()->getTypeStoreSize(Ty); const unsigned StoreBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) { switch (Ty->getTypeID()) {
case Type::IntegerTyID: case Type::IntegerTyID:
@ -882,7 +882,7 @@ void ExecutionEngine::StoreValueToMemory(const GenericValue &Val,
dbgs() << "Cannot store value of type " << *Ty << "!\n"; dbgs() << "Cannot store value of type " << *Ty << "!\n";
} }
if (sys::isLittleEndianHost() != getTargetData()->isLittleEndian()) if (sys::isLittleEndianHost() != getDataLayout()->isLittleEndian())
// Host and target are different endian - reverse the stored bytes. // Host and target are different endian - reverse the stored bytes.
std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr); std::reverse((uint8_t*)Ptr, StoreBytes + (uint8_t*)Ptr);
} }
@ -918,7 +918,7 @@ static void LoadIntFromMemory(APInt &IntVal, uint8_t *Src, unsigned LoadBytes) {
void ExecutionEngine::LoadValueFromMemory(GenericValue &Result, void ExecutionEngine::LoadValueFromMemory(GenericValue &Result,
GenericValue *Ptr, GenericValue *Ptr,
Type *Ty) { Type *Ty) {
const unsigned LoadBytes = getTargetData()->getTypeStoreSize(Ty); const unsigned LoadBytes = getDataLayout()->getTypeStoreSize(Ty);
switch (Ty->getTypeID()) { switch (Ty->getTypeID()) {
case Type::IntegerTyID: case Type::IntegerTyID:
@ -959,20 +959,20 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) { if (const ConstantVector *CP = dyn_cast<ConstantVector>(Init)) {
unsigned ElementSize = unsigned ElementSize =
getTargetData()->getTypeAllocSize(CP->getType()->getElementType()); getDataLayout()->getTypeAllocSize(CP->getType()->getElementType());
for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i) for (unsigned i = 0, e = CP->getNumOperands(); i != e; ++i)
InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize); InitializeMemory(CP->getOperand(i), (char*)Addr+i*ElementSize);
return; return;
} }
if (isa<ConstantAggregateZero>(Init)) { if (isa<ConstantAggregateZero>(Init)) {
memset(Addr, 0, (size_t)getTargetData()->getTypeAllocSize(Init->getType())); memset(Addr, 0, (size_t)getDataLayout()->getTypeAllocSize(Init->getType()));
return; return;
} }
if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) { if (const ConstantArray *CPA = dyn_cast<ConstantArray>(Init)) {
unsigned ElementSize = unsigned ElementSize =
getTargetData()->getTypeAllocSize(CPA->getType()->getElementType()); getDataLayout()->getTypeAllocSize(CPA->getType()->getElementType());
for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i) for (unsigned i = 0, e = CPA->getNumOperands(); i != e; ++i)
InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize); InitializeMemory(CPA->getOperand(i), (char*)Addr+i*ElementSize);
return; return;
@ -980,7 +980,7 @@ void ExecutionEngine::InitializeMemory(const Constant *Init, void *Addr) {
if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) { if (const ConstantStruct *CPS = dyn_cast<ConstantStruct>(Init)) {
const StructLayout *SL = const StructLayout *SL =
getTargetData()->getStructLayout(cast<StructType>(CPS->getType())); getDataLayout()->getStructLayout(cast<StructType>(CPS->getType()));
for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i) for (unsigned i = 0, e = CPS->getNumOperands(); i != e; ++i)
InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i)); InitializeMemory(CPS->getOperand(i), (char*)Addr+SL->getElementOffset(i));
return; return;
@ -1127,7 +1127,7 @@ void ExecutionEngine::EmitGlobalVariable(const GlobalVariable *GV) {
InitializeMemory(GV->getInitializer(), GA); InitializeMemory(GV->getInitializer(), GA);
Type *ElTy = GV->getType()->getElementType(); Type *ElTy = GV->getType()->getElementType();
size_t GVSize = (size_t)getTargetData()->getTypeAllocSize(ElTy); size_t GVSize = (size_t)getDataLayout()->getTypeAllocSize(ElTy);
NumInitBytes += (unsigned)GVSize; NumInitBytes += (unsigned)GVSize;
++NumGlobals; ++NumGlobals;
} }
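ExecutionEngine's load/store helpers keep the host-versus-target endianness handling; only the layout accessor is renamed. A minimal sketch of the byte-swap step from StoreValueToMemory above (fixStoreEndianness is an illustrative name):

    #include <algorithm>
    #include "llvm/DataLayout.h"
    #include "llvm/Support/DataTypes.h"
    #include "llvm/Support/Host.h"

    // Sketch: reverse the stored bytes when host and target endianness differ,
    // sized with DataLayout::getTypeStoreSize.
    static void fixStoreEndianness(const llvm::DataLayout &TD, llvm::Type *Ty,
                                   uint8_t *Ptr) {
      const unsigned StoreBytes = TD.getTypeStoreSize(Ty);
      if (llvm::sys::isLittleEndianHost() != TD.isLittleEndian())
        std::reverse(Ptr, Ptr + StoreBytes);
    }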

View File

@ -239,7 +239,7 @@ void *LLVMRecompileAndRelinkFunction(LLVMExecutionEngineRef EE, LLVMValueRef Fn)
} }
LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) { LLVMTargetDataRef LLVMGetExecutionEngineTargetData(LLVMExecutionEngineRef EE) {
return wrap(unwrap(EE)->getTargetData()); return wrap(unwrap(EE)->getDataLayout());
} }
void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global, void LLVMAddGlobalMapping(LLVMExecutionEngineRef EE, LLVMValueRef Global,

View File

@ -25,7 +25,7 @@
#include "llvm/Config/config.h" // Detect libffi #include "llvm/Config/config.h" // Detect libffi
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/DynamicLibrary.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Support/ManagedStatic.h" #include "llvm/Support/ManagedStatic.h"
#include "llvm/Support/Mutex.h" #include "llvm/Support/Mutex.h"
#include <csignal> #include <csignal>
@ -180,7 +180,7 @@ static void *ffiValueFor(Type *Ty, const GenericValue &AV,
static bool ffiInvoke(RawFunc Fn, Function *F, static bool ffiInvoke(RawFunc Fn, Function *F,
const std::vector<GenericValue> &ArgVals, const std::vector<GenericValue> &ArgVals,
const TargetData *TD, GenericValue &Result) { const DataLayout *TD, GenericValue &Result) {
ffi_cif cif; ffi_cif cif;
FunctionType *FTy = F->getFunctionType(); FunctionType *FTy = F->getFunctionType();
const unsigned NumArgs = F->arg_size(); const unsigned NumArgs = F->arg_size();
@ -276,7 +276,7 @@ GenericValue Interpreter::callExternalFunction(Function *F,
FunctionsLock->release(); FunctionsLock->release();
GenericValue Result; GenericValue Result;
if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getTargetData(), Result)) if (RawFn != 0 && ffiInvoke(RawFn, F, ArgVals, getDataLayout(), Result))
return Result; return Result;
#endif // USE_LIBFFI #endif // USE_LIBFFI
@ -376,7 +376,7 @@ GenericValue lle_X_sprintf(FunctionType *FT,
case 'x': case 'X': case 'x': case 'X':
if (HowLong >= 1) { if (HowLong >= 1) {
if (HowLong == 1 && if (HowLong == 1 &&
TheInterpreter->getTargetData()->getPointerSizeInBits() == 64 && TheInterpreter->getDataLayout()->getPointerSizeInBits() == 64 &&
sizeof(long) < sizeof(int64_t)) { sizeof(long) < sizeof(int64_t)) {
// Make sure we use %lld with a 64 bit argument because we might be // Make sure we use %lld with a 64 bit argument because we might be
// compiling LLI on a 32 bit compiler. // compiling LLI on a 32 bit compiler.

View File

@ -48,7 +48,7 @@ Interpreter::Interpreter(Module *M)
: ExecutionEngine(M), TD(M) { : ExecutionEngine(M), TD(M) {
memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped)); memset(&ExitValue.Untyped, 0, sizeof(ExitValue.Untyped));
setTargetData(&TD); setDataLayout(&TD);
// Initialize the "backend" // Initialize the "backend"
initializeExecutionEngine(); initializeExecutionEngine();
initializeExternalFunctions(); initializeExternalFunctions();
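The interpreter still owns its layout object and registers it with the base class; only the member type and the setter are renamed. A rough shape of that arrangement, assuming an ExecutionEngine subclass (the remaining pure-virtual methods are omitted, so this example class stays abstract):

    #include "llvm/DataLayout.h"
    #include "llvm/Module.h"
    #include "llvm/ExecutionEngine/ExecutionEngine.h"

    // Sketch: an engine that builds a DataLayout from the module and hands it
    // to the base class (formerly setTargetData).
    class ToyEngine : public llvm::ExecutionEngine {
      llvm::DataLayout TD;
    public:
      explicit ToyEngine(llvm::Module *M) : llvm::ExecutionEngine(M), TD(M) {
        setDataLayout(&TD);
      }
      // runFunction, getPointerToFunction, etc. omitted for brevity.
    };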

View File

@ -17,7 +17,7 @@
#include "llvm/Function.h" #include "llvm/Function.h"
#include "llvm/ExecutionEngine/ExecutionEngine.h" #include "llvm/ExecutionEngine/ExecutionEngine.h"
#include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Support/CallSite.h" #include "llvm/Support/CallSite.h"
#include "llvm/Support/DataTypes.h" #include "llvm/Support/DataTypes.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
@ -82,7 +82,7 @@ struct ExecutionContext {
// //
class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> { class Interpreter : public ExecutionEngine, public InstVisitor<Interpreter> {
GenericValue ExitValue; // The return value of the called function GenericValue ExitValue; // The return value of the called function
TargetData TD; DataLayout TD;
IntrinsicLowering *IL; IntrinsicLowering *IL;
// The runtime stack of executing code. The top of the stack is the current // The runtime stack of executing code. The top of the stack is the current

View File

@ -24,7 +24,7 @@
#include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h" #include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h" #include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetJITInfo.h" #include "llvm/Target/TargetJITInfo.h"
#include "llvm/Support/Dwarf.h" #include "llvm/Support/Dwarf.h"
@ -272,7 +272,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
: ExecutionEngine(M), TM(tm), TJI(tji), : ExecutionEngine(M), TM(tm), TJI(tji),
JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()), JMM(jmm ? jmm : JITMemoryManager::CreateDefaultMemManager()),
AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) { AllocateGVsWithCode(GVsWithCode), isAlreadyCodeGenerating(false) {
setTargetData(TM.getTargetData()); setDataLayout(TM.getDataLayout());
jitstate = new JITState(M); jitstate = new JITState(M);
@ -285,7 +285,7 @@ JIT::JIT(Module *M, TargetMachine &tm, TargetJITInfo &tji,
// Add target data // Add target data
MutexGuard locked(lock); MutexGuard locked(lock);
FunctionPassManager &PM = jitstate->getPM(locked); FunctionPassManager &PM = jitstate->getPM(locked);
PM.add(new TargetData(*TM.getTargetData())); PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory that // Turn the machine code intermediate representation into bytes in memory that
// may be executed. // may be executed.
@ -339,7 +339,7 @@ void JIT::addModule(Module *M) {
jitstate = new JITState(M); jitstate = new JITState(M);
FunctionPassManager &PM = jitstate->getPM(locked); FunctionPassManager &PM = jitstate->getPM(locked);
PM.add(new TargetData(*TM.getTargetData())); PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory // Turn the machine code intermediate representation into bytes in memory
// that may be executed. // that may be executed.
@ -370,7 +370,7 @@ bool JIT::removeModule(Module *M) {
jitstate = new JITState(Modules[0]); jitstate = new JITState(Modules[0]);
FunctionPassManager &PM = jitstate->getPM(locked); FunctionPassManager &PM = jitstate->getPM(locked);
PM.add(new TargetData(*TM.getTargetData())); PM.add(new DataLayout(*TM.getDataLayout()));
// Turn the machine code intermediate representation into bytes in memory // Turn the machine code intermediate representation into bytes in memory
// that may be executed. // that may be executed.
@ -815,8 +815,8 @@ char* JIT::getMemoryForGV(const GlobalVariable* GV) {
// through the memory manager which puts them near the code but not in the // through the memory manager which puts them near the code but not in the
// same buffer. // same buffer.
Type *GlobalType = GV->getType()->getElementType(); Type *GlobalType = GV->getType()->getElementType();
size_t S = getTargetData()->getTypeAllocSize(GlobalType); size_t S = getDataLayout()->getTypeAllocSize(GlobalType);
size_t A = getTargetData()->getPreferredAlignment(GV); size_t A = getDataLayout()->getPreferredAlignment(GV);
if (GV->isThreadLocal()) { if (GV->isThreadLocal()) {
MutexGuard locked(lock); MutexGuard locked(lock);
Ptr = TJI.allocateThreadLocalMemory(S); Ptr = TJI.allocateThreadLocalMemory(S);
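The JIT keeps seeding each FunctionPassManager with a copy of the target's layout; only the pass class changes name. A minimal sketch (addLayoutPass is illustrative; the pass manager takes ownership of the new DataLayout, as in the hunks above):

    #include "llvm/DataLayout.h"
    #include "llvm/PassManager.h"
    #include "llvm/Target/TargetMachine.h"

    // Sketch: register a copy of the target's DataLayout with a
    // FunctionPassManager (formerly new TargetData(*TM.getTargetData())).
    static void addLayoutPass(llvm::FunctionPassManager &PM,
                              const llvm::TargetMachine &TM) {
      PM.add(new llvm::DataLayout(*TM.getDataLayout()));
    }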

View File

@ -24,7 +24,7 @@
#include "llvm/MC/MCAsmInfo.h" #include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Support/ErrorHandling.h" #include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetFrameLowering.h" #include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
@ -42,7 +42,7 @@ unsigned char* JITDwarfEmitter::EmitDwarfTable(MachineFunction& F,
assert(MMI && "MachineModuleInfo not registered!"); assert(MMI && "MachineModuleInfo not registered!");
const TargetMachine& TM = F.getTarget(); const TargetMachine& TM = F.getTarget();
TD = TM.getTargetData(); TD = TM.getDataLayout();
stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection(); stackGrowthDirection = TM.getFrameLowering()->getStackGrowthDirection();
RI = TM.getRegisterInfo(); RI = TM.getRegisterInfo();
MAI = TM.getMCAsmInfo(); MAI = TM.getMCAsmInfo();

View File

@ -23,12 +23,12 @@ class MachineFunction;
class MachineModuleInfo; class MachineModuleInfo;
class MachineMove; class MachineMove;
class MCAsmInfo; class MCAsmInfo;
class TargetData; class DataLayout;
class TargetMachine; class TargetMachine;
class TargetRegisterInfo; class TargetRegisterInfo;
class JITDwarfEmitter { class JITDwarfEmitter {
const TargetData* TD; const DataLayout* TD;
JITCodeEmitter* JCE; JITCodeEmitter* JCE;
const TargetRegisterInfo* RI; const TargetRegisterInfo* RI;
const MCAsmInfo *MAI; const MCAsmInfo *MAI;

View File

@ -30,7 +30,7 @@
#include "llvm/ExecutionEngine/GenericValue.h" #include "llvm/ExecutionEngine/GenericValue.h"
#include "llvm/ExecutionEngine/JITEventListener.h" #include "llvm/ExecutionEngine/JITEventListener.h"
#include "llvm/ExecutionEngine/JITMemoryManager.h" #include "llvm/ExecutionEngine/JITMemoryManager.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetInstrInfo.h" #include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetJITInfo.h" #include "llvm/Target/TargetJITInfo.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
@ -763,7 +763,7 @@ void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
} }
static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP, static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
const TargetData *TD) { const DataLayout *TD) {
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return 0; if (Constants.empty()) return 0;
@ -1058,7 +1058,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants(); const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
if (Constants.empty()) return; if (Constants.empty()) return;
unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData()); unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getDataLayout());
unsigned Align = MCP->getConstantPoolAlignment(); unsigned Align = MCP->getConstantPoolAlignment();
ConstantPoolBase = allocateSpace(Size, Align); ConstantPoolBase = allocateSpace(Size, Align);
ConstantPool = MCP; ConstantPool = MCP;
@ -1087,7 +1087,7 @@ void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
dbgs().write_hex(CAddr) << "]\n"); dbgs().write_hex(CAddr) << "]\n");
Type *Ty = CPE.Val.ConstVal->getType(); Type *Ty = CPE.Val.ConstVal->getType();
Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty); Offset += TheJIT->getDataLayout()->getTypeAllocSize(Ty);
} }
} }
@ -1104,14 +1104,14 @@ void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
for (unsigned i = 0, e = JT.size(); i != e; ++i) for (unsigned i = 0, e = JT.size(); i != e; ++i)
NumEntries += JT[i].MBBs.size(); NumEntries += JT[i].MBBs.size();
unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData()); unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getDataLayout());
// Just allocate space for all the jump tables now. We will fix up the actual // Just allocate space for all the jump tables now. We will fix up the actual
// MBB entries in the tables after we emit the code for each block, since then // MBB entries in the tables after we emit the code for each block, since then
// we will know the final locations of the MBBs in memory. // we will know the final locations of the MBBs in memory.
JumpTable = MJTI; JumpTable = MJTI;
JumpTableBase = allocateSpace(NumEntries * EntrySize, JumpTableBase = allocateSpace(NumEntries * EntrySize,
MJTI->getEntryAlignment(*TheJIT->getTargetData())); MJTI->getEntryAlignment(*TheJIT->getDataLayout()));
} }
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) { void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
@ -1128,7 +1128,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
case MachineJumpTableInfo::EK_BlockAddress: { case MachineJumpTableInfo::EK_BlockAddress: {
// EK_BlockAddress - Each entry is a plain address of block, e.g.: // EK_BlockAddress - Each entry is a plain address of block, e.g.:
// .word LBB123 // .word LBB123
assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) && assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == sizeof(void*) &&
"Cross JIT'ing?"); "Cross JIT'ing?");
// For each jump table, map each target in the jump table to the address of // For each jump table, map each target in the jump table to the address of
@ -1148,7 +1148,7 @@ void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
case MachineJumpTableInfo::EK_Custom32: case MachineJumpTableInfo::EK_Custom32:
case MachineJumpTableInfo::EK_GPRel32BlockAddress: case MachineJumpTableInfo::EK_GPRel32BlockAddress:
case MachineJumpTableInfo::EK_LabelDifference32: { case MachineJumpTableInfo::EK_LabelDifference32: {
assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4&&"Cross JIT'ing?"); assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == 4&&"Cross JIT'ing?");
// For each jump table, place the offset from the beginning of the table // For each jump table, place the offset from the beginning of the table
// to the target address. // to the target address.
int *SlotPtr = (int*)JumpTableBase; int *SlotPtr = (int*)JumpTableBase;
@ -1224,7 +1224,7 @@ uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables(); const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
assert(Index < JT.size() && "Invalid jump table index!"); assert(Index < JT.size() && "Invalid jump table index!");
unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData()); unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getDataLayout());
unsigned Offset = 0; unsigned Offset = 0;
for (unsigned i = 0; i < Index; ++i) for (unsigned i = 0; i < Index; ++i)
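Jump-table emission in the JIT is still sized through MachineJumpTableInfo, which now takes the DataLayout by reference. A minimal sketch of the space computation from initJumpTableInfo above (jumpTableSpace is an illustrative name):

    #include <vector>
    #include "llvm/CodeGen/MachineJumpTableInfo.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Support/DataTypes.h"

    // Sketch: total bytes and alignment needed for all jump tables, using the
    // DataLayout-based entry queries.
    static void jumpTableSpace(const llvm::MachineJumpTableInfo *MJTI,
                               const llvm::DataLayout &TD,
                               uintptr_t &Bytes, unsigned &Align) {
      const std::vector<llvm::MachineJumpTableEntry> &JT = MJTI->getJumpTables();
      unsigned NumEntries = 0;
      for (unsigned i = 0, e = JT.size(); i != e; ++i)
        NumEntries += JT[i].MBBs.size();
      Bytes = (uintptr_t)NumEntries * MJTI->getEntrySize(TD);
      Align = MJTI->getEntryAlignment(TD);
    }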

View File

@ -21,7 +21,7 @@
#include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/DynamicLibrary.h"
#include "llvm/Support/MemoryBuffer.h" #include "llvm/Support/MemoryBuffer.h"
#include "llvm/Support/MutexGuard.h" #include "llvm/Support/MutexGuard.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
using namespace llvm; using namespace llvm;
@ -54,7 +54,7 @@ MCJIT::MCJIT(Module *m, TargetMachine *tm, RTDyldMemoryManager *MM,
: ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM), : ExecutionEngine(m), TM(tm), Ctx(0), MemMgr(MM), Dyld(MM),
isCompiled(false), M(m) { isCompiled(false), M(m) {
setTargetData(TM->getTargetData()); setDataLayout(TM->getDataLayout());
} }
MCJIT::~MCJIT() { MCJIT::~MCJIT() {
@ -80,7 +80,7 @@ void MCJIT::emitObject(Module *m) {
PassManager PM; PassManager PM;
PM.add(new TargetData(*TM->getTargetData())); PM.add(new DataLayout(*TM->getDataLayout()));
// The RuntimeDyld will take ownership of this shortly // The RuntimeDyld will take ownership of this shortly
OwningPtr<ObjectBufferStream> Buffer(new ObjectBufferStream()); OwningPtr<ObjectBufferStream> Buffer(new ObjectBufferStream());

View File

@ -42,7 +42,7 @@
#include "llvm/MC/MCStreamer.h" #include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSymbol.h" #include "llvm/MC/MCSymbol.h"
#include "llvm/Target/Mangler.h" #include "llvm/Target/Mangler.h"
#include "llvm/Target/TargetData.h" #include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetMachine.h"
#include "llvm/Support/CommandLine.h" #include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h" #include "llvm/Support/Debug.h"
@ -303,7 +303,7 @@ void ARMAsmPrinter::EmitFunctionEntryLabel() {
} }
void ARMAsmPrinter::EmitXXStructor(const Constant *CV) { void ARMAsmPrinter::EmitXXStructor(const Constant *CV) {
uint64_t Size = TM.getTargetData()->getTypeAllocSize(CV->getType()); uint64_t Size = TM.getDataLayout()->getTypeAllocSize(CV->getType());
assert(Size && "C++ constructor pointer had zero size!"); assert(Size && "C++ constructor pointer had zero size!");
const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts()); const GlobalValue *GV = dyn_cast<GlobalValue>(CV->stripPointerCasts());
@ -909,7 +909,7 @@ MCSymbol *ARMAsmPrinter::GetARMGVSymbol(const GlobalValue *GV) {
void ARMAsmPrinter:: void ARMAsmPrinter::
EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) { EmitMachineConstantPoolValue(MachineConstantPoolValue *MCPV) {
int Size = TM.getTargetData()->getTypeAllocSize(MCPV->getType()); int Size = TM.getDataLayout()->getTypeAllocSize(MCPV->getType());
ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV); ARMConstantPoolValue *ACPV = static_cast<ARMConstantPoolValue*>(MCPV);
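Target AsmPrinters see the same accessor rename on TargetMachine. The size computation for an emitted constant, as in EmitXXStructor and EmitMachineConstantPoolValue above, reduces to one DataLayout query (constantAllocSize is an illustrative helper name):

    #include "llvm/Constant.h"
    #include "llvm/DataLayout.h"
    #include "llvm/Target/TargetMachine.h"

    // Sketch: byte size of an emitted constant (formerly via getTargetData()).
    static uint64_t constantAllocSize(const llvm::TargetMachine &TM,
                                      const llvm::Constant *CV) {
      return TM.getDataLayout()->getTypeAllocSize(CV->getType());
    }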

View File

@ -47,7 +47,7 @@ namespace {
class ARMCodeEmitter : public MachineFunctionPass { class ARMCodeEmitter : public MachineFunctionPass {
ARMJITInfo *JTI; ARMJITInfo *JTI;
const ARMBaseInstrInfo *II; const ARMBaseInstrInfo *II;
const TargetData *TD; const DataLayout *TD;
const ARMSubtarget *Subtarget; const ARMSubtarget *Subtarget;
TargetMachine &TM; TargetMachine &TM;
JITCodeEmitter &MCE; JITCodeEmitter &MCE;
@ -67,7 +67,7 @@ namespace {
ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce) ARMCodeEmitter(TargetMachine &tm, JITCodeEmitter &mce)
: MachineFunctionPass(ID), JTI(0), : MachineFunctionPass(ID), JTI(0),
II((const ARMBaseInstrInfo *)tm.getInstrInfo()), II((const ARMBaseInstrInfo *)tm.getInstrInfo()),
TD(tm.getTargetData()), TM(tm), TD(tm.getDataLayout()), TM(tm),
MCE(mce), MCPEs(0), MJTEs(0), MCE(mce), MCPEs(0), MJTEs(0),
IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {} IsPIC(TM.getRelocationModel() == Reloc::PIC_), IsThumb(false) {}
@ -376,7 +376,7 @@ bool ARMCodeEmitter::runOnMachineFunction(MachineFunction &MF) {
"JIT relocation model must be set to static or default!"); "JIT relocation model must be set to static or default!");
JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo(); JTI = ((ARMBaseTargetMachine &)MF.getTarget()).getJITInfo();
II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo(); II = (const ARMBaseInstrInfo *)MF.getTarget().getInstrInfo();
TD = MF.getTarget().getTargetData(); TD = MF.getTarget().getDataLayout();
Subtarget = &TM.getSubtarget<ARMSubtarget>(); Subtarget = &TM.getSubtarget<ARMSubtarget>();
MCPEs = &MF.getConstantPool()->getConstants(); MCPEs = &MF.getConstantPool()->getConstants();
MJTEs = 0; MJTEs = 0;

Some files were not shown because too many files have changed in this diff.