Remove getDataLayout() from TargetLowering
Summary:
This change is part of a series of commits dedicated to having a single
DataLayout during compilation, by always using the one owned by the module.

Reviewers: echristo

Subscribers: yaron.keren, rafael, llvm-commits, jholewinski

Differential Revision: http://reviews.llvm.org/D11042

From: Mehdi Amini <mehdi.amini@apple.com>

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@241779 91177308-0d34-0410-b5e6-96231b3b80d8
commit 691b2ff11e
parent 0e496c884c
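Before the diff itself, a minimal sketch of the access-pattern change this series makes. The standalone helper below is illustrative only (the name typeAllocSizeInBytes is not part of the commit), assuming LLVM 3.7-era headers:

    #include "llvm/CodeGen/SelectionDAG.h"
    #include "llvm/IR/DataLayout.h"

    // Old idiom, removed by this commit:
    //   const DataLayout *DL = TLI.getDataLayout();  // TargetLowering's copy
    // New idiom: reach the module-owned DataLayout through a handle the call
    // site already has -- here, the SelectionDAG.
    static uint64_t typeAllocSizeInBytes(const llvm::SelectionDAG &DAG,
                                         llvm::Type *Ty) {
      return DAG.getDataLayout().getTypeAllocSize(Ty);
    }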
@@ -161,7 +161,6 @@ protected:
 
 public:
   const TargetMachine &getTargetMachine() const { return TM; }
-  const DataLayout *getDataLayout() const { return TM.getDataLayout(); }
 
   virtual bool useSoftFloat() const { return false; }
 
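The hunks below apply that substitution target by target. As a rough map, inferred from this diff rather than stated by it: SelectionDAG lowering code uses DAG.getDataLayout(), IR-level hooks walk from an Instruction to its Module, and MachineInstr-level code uses MF->getDataLayout(). A minimal sketch of the IR-level variant, with a hypothetical helper name (allocSizeInWords is not from the commit):

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Module.h"

    // Every Instruction can reach its parent Module, and the Module owns the
    // single DataLayout, so IR-level hooks no longer need TargetLowering.
    static uint64_t allocSizeInWords(const llvm::LoadInst &LI) {
      const llvm::DataLayout &DL = LI.getModule()->getDataLayout();
      return DL.getTypeAllocSize(LI.getType()) / 8; // 64-bit words
    }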
@@ -4546,7 +4546,7 @@ SDValue SelectionDAG::getMemmove(SDValue Chain, SDLoc dl, SDValue Dst,
   // Emit a library call.
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*getContext());
+  Entry.Ty = getDataLayout().getIntPtrType(*getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);
@@ -6434,6 +6434,7 @@ SDValue AArch64TargetLowering::LowerVSETCC(SDValue Op,
 bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                                const CallInst &I,
                                                unsigned Intrinsic) const {
+  auto &DL = I.getModule()->getDataLayout();
   switch (Intrinsic) {
   case Intrinsic::aarch64_neon_ld2:
   case Intrinsic::aarch64_neon_ld3:
@@ -6449,7 +6450,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::aarch64_neon_ld4r: {
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     // Conservatively set memVT to the entire set of vectors loaded.
-    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
+    uint64_t NumElts = DL.getTypeAllocSize(I.getType()) / 8;
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
     Info.offset = 0;
@@ -6475,7 +6476,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
       Type *ArgTy = I.getArgOperand(ArgI)->getType();
       if (!ArgTy->isVectorTy())
         break;
-      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
+      NumElts += DL.getTypeAllocSize(ArgTy) / 8;
     }
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(I.getNumArgOperands() - 1);
@@ -6493,7 +6494,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
-    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
     Info.vol = true;
     Info.readMem = true;
     Info.writeMem = false;
@@ -6506,7 +6507,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(1);
     Info.offset = 0;
-    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
     Info.vol = true;
     Info.readMem = false;
     Info.writeMem = true;
@@ -6643,6 +6644,7 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
     break;
   case Instruction::GetElementPtr: {
     gep_type_iterator GTI = gep_type_begin(Instr);
+    auto &DL = Ext->getModule()->getDataLayout();
     std::advance(GTI, U.getOperandNo());
     Type *IdxTy = *GTI;
     // This extension will end up with a shift because of the scaling factor.
@@ -6650,7 +6652,7 @@ bool AArch64TargetLowering::isExtFreeImpl(const Instruction *Ext) const {
     // Get the shift amount based on the scaling factor:
     // log2(sizeof(IdxTy)) - log2(8).
     uint64_t ShiftAmt =
-        countTrailingZeros(getDataLayout()->getTypeStoreSizeInBits(IdxTy)) - 3;
+        countTrailingZeros(DL.getTypeStoreSizeInBits(IdxTy)) - 3;
     // Is the constant foldable in the shift of the addressing mode?
     // I.e., shift amount is between 1 and 4 inclusive.
     if (ShiftAmt == 0 || ShiftAmt > 4)
@@ -6714,10 +6716,10 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
   assert(Shuffles.size() == Indices.size() &&
          "Unmatched number of shufflevectors and indices");
 
-  const DataLayout *DL = getDataLayout();
+  const DataLayout &DL = LI->getModule()->getDataLayout();
 
   VectorType *VecTy = Shuffles[0]->getType();
-  unsigned VecSize = DL->getTypeAllocSizeInBits(VecTy);
+  unsigned VecSize = DL.getTypeAllocSizeInBits(VecTy);
 
   // Skip illegal vector types.
   if (VecSize != 64 && VecSize != 128)
@@ -6727,8 +6729,8 @@ bool AArch64TargetLowering::lowerInterleavedLoad(
   // load integer vectors first and then convert to pointer vectors.
   Type *EltTy = VecTy->getVectorElementType();
   if (EltTy->isPointerTy())
-    VecTy = VectorType::get(DL->getIntPtrType(EltTy),
-                            VecTy->getVectorNumElements());
+    VecTy =
+        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
 
   Type *PtrTy = VecTy->getPointerTo(LI->getPointerAddressSpace());
   Type *Tys[2] = {VecTy, PtrTy};
@@ -6802,8 +6804,8 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
   Type *EltTy = VecTy->getVectorElementType();
   VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);
 
-  const DataLayout *DL = getDataLayout();
-  unsigned SubVecSize = DL->getTypeAllocSizeInBits(SubVecTy);
+  const DataLayout &DL = SI->getModule()->getDataLayout();
+  unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);
 
   // Skip illegal vector types.
   if (SubVecSize != 64 && SubVecSize != 128)
@@ -6816,7 +6818,7 @@ bool AArch64TargetLowering::lowerInterleavedStore(StoreInst *SI,
   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
   // vectors to integer vectors.
   if (EltTy->isPointerTy()) {
-    Type *IntTy = DL->getIntPtrType(EltTy);
+    Type *IntTy = DL.getIntPtrType(EltTy);
     unsigned NumOpElts =
         dyn_cast<VectorType>(Op0->getType())->getVectorNumElements();
 
@@ -416,7 +416,7 @@ unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
   if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
     unsigned NumElts = VecTy->getVectorNumElements();
     Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
-    unsigned SubVecSize = TLI->getDataLayout()->getTypeAllocSize(SubVecTy);
+    unsigned SubVecSize = DL.getTypeAllocSize(SubVecTy);
 
     // ldN/stN only support legal vector types of size 64 or 128 in bits.
     if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
@@ -545,9 +545,8 @@ bool AMDGPUTargetLowering::isTruncateFree(Type *Source, Type *Dest) const {
 }
 
 bool AMDGPUTargetLowering::isZExtFree(Type *Src, Type *Dest) const {
-  const DataLayout *DL = getDataLayout();
-  unsigned SrcSize = DL->getTypeSizeInBits(Src->getScalarType());
-  unsigned DestSize = DL->getTypeSizeInBits(Dest->getScalarType());
+  unsigned SrcSize = Src->getScalarSizeInBits();
+  unsigned DestSize = Dest->getScalarSizeInBits();
 
   return SrcSize == 32 && DestSize == 64;
 }
@@ -697,7 +696,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
                                                        const SDValue &InitPtr,
                                                        SDValue Chain,
                                                        SelectionDAG &DAG) const {
-  const DataLayout *TD = getDataLayout();
+  const DataLayout &TD = DAG.getDataLayout();
   SDLoc DL(InitPtr);
   Type *InitTy = Init->getType();
 
@@ -705,20 +704,20 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
     EVT VT = EVT::getEVT(InitTy);
     PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
     return DAG.getStore(Chain, DL, DAG.getConstant(*CI, DL, VT), InitPtr,
-                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                        TD->getPrefTypeAlignment(InitTy));
+                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
+                        false, TD.getPrefTypeAlignment(InitTy));
   }
 
   if (const ConstantFP *CFP = dyn_cast<ConstantFP>(Init)) {
     EVT VT = EVT::getEVT(CFP->getType());
     PointerType *PtrTy = PointerType::get(CFP->getType(), 0);
     return DAG.getStore(Chain, DL, DAG.getConstantFP(*CFP, DL, VT), InitPtr,
-                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                        TD->getPrefTypeAlignment(CFP->getType()));
+                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
+                        false, TD.getPrefTypeAlignment(CFP->getType()));
   }
 
   if (StructType *ST = dyn_cast<StructType>(InitTy)) {
-    const StructLayout *SL = TD->getStructLayout(ST);
+    const StructLayout *SL = TD.getStructLayout(ST);
 
     EVT PtrVT = InitPtr.getValueType();
     SmallVector<SDValue, 8> Chains;
@@ -745,7 +744,7 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
     else
       llvm_unreachable("Unexpected type");
 
-    unsigned EltSize = TD->getTypeAllocSize(SeqTy->getElementType());
+    unsigned EltSize = TD.getTypeAllocSize(SeqTy->getElementType());
     SmallVector<SDValue, 8> Chains;
     for (unsigned i = 0; i < NumElements; ++i) {
       SDValue Offset = DAG.getConstant(i * EltSize, DL, PtrVT);
@@ -762,8 +761,8 @@ SDValue AMDGPUTargetLowering::LowerConstantInitializer(const Constant* Init,
     EVT VT = EVT::getEVT(InitTy);
     PointerType *PtrTy = PointerType::get(InitTy, AMDGPUAS::PRIVATE_ADDRESS);
     return DAG.getStore(Chain, DL, DAG.getUNDEF(VT), InitPtr,
-                        MachinePointerInfo(UndefValue::get(PtrTy)), false, false,
-                        TD->getPrefTypeAlignment(InitTy));
+                        MachinePointerInfo(UndefValue::get(PtrTy)), false,
+                        false, TD.getPrefTypeAlignment(InitTy));
   }
 
   Init->dump();
@@ -417,7 +417,7 @@ static EVT toIntegerVT(EVT VT) {
 SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
                                          SDLoc SL, SDValue Chain,
                                          unsigned Offset, bool Signed) const {
-  const DataLayout *DL = getDataLayout();
+  const DataLayout &DL = DAG.getDataLayout();
   MachineFunction &MF = DAG.getMachineFunction();
   const SIRegisterInfo *TRI =
       static_cast<const SIRegisterInfo*>(Subtarget->getRegisterInfo());
@@ -426,7 +426,7 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
   Type *Ty = VT.getTypeForEVT(*DAG.getContext());
 
   MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo();
-  MVT PtrVT = getPointerTy(DAG.getDataLayout(), AMDGPUAS::CONSTANT_ADDRESS);
+  MVT PtrVT = getPointerTy(DL, AMDGPUAS::CONSTANT_ADDRESS);
   PointerType *PtrTy = PointerType::get(Ty, AMDGPUAS::CONSTANT_ADDRESS);
   SDValue BasePtr = DAG.getCopyFromReg(Chain, SL,
                                        MRI.getLiveInVirtReg(InputPtrReg), PtrVT);
@@ -435,7 +435,7 @@ SDValue SITargetLowering::LowerParameter(SelectionDAG &DAG, EVT VT, EVT MemVT,
   SDValue PtrOffset = DAG.getUNDEF(PtrVT);
   MachinePointerInfo PtrInfo(UndefValue::get(PtrTy));
 
-  unsigned Align = DL->getABITypeAlignment(Ty);
+  unsigned Align = DL.getABITypeAlignment(Ty);
 
   if (VT != MemVT && VT.isFloatingPoint()) {
     // Do an integer load and convert.
@@ -1414,7 +1414,7 @@ SDValue SITargetLowering::performUCharToFloatCombine(SDNode *N,
     unsigned AS = Load->getAddressSpace();
     unsigned Align = Load->getAlignment();
     Type *Ty = LoadVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
+    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
 
     // Don't try to replace the load if we have to expand it due to alignment
     // problems. Otherwise we will end up scalarizing the load, and trying to
@@ -6845,9 +6845,9 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
     const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
 
     // MachineConstantPool wants an explicit alignment.
-    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
+    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
     if (Align == 0)
-      Align = getDataLayout()->getTypeAllocSize(C->getType());
+      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
     unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -6935,9 +6935,9 @@ void ARMTargetLowering::EmitSjLjDispatchBlock(MachineInstr *MI,
     const Constant *C = ConstantInt::get(Int32Ty, NumLPads);
 
     // MachineConstantPool wants an explicit alignment.
-    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
+    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
     if (Align == 0)
-      Align = getDataLayout()->getTypeAllocSize(C->getType());
+      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
     unsigned VReg1 = MRI->createVirtualRegister(TRC);
@@ -7313,9 +7313,9 @@ ARMTargetLowering::EmitStructByval(MachineInstr *MI,
     const Constant *C = ConstantInt::get(Int32Ty, LoopSize);
 
     // MachineConstantPool wants an explicit alignment.
-    unsigned Align = getDataLayout()->getPrefTypeAlignment(Int32Ty);
+    unsigned Align = MF->getDataLayout().getPrefTypeAlignment(Int32Ty);
     if (Align == 0)
-      Align = getDataLayout()->getTypeAllocSize(C->getType());
+      Align = MF->getDataLayout().getTypeAllocSize(C->getType());
     unsigned Idx = ConstantPool->getConstantPoolIndex(C, Align);
 
     if (IsThumb1)
@@ -11083,7 +11083,8 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::arm_neon_vld4lane: {
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     // Conservatively set memVT to the entire set of vectors loaded.
-    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
+    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
+    uint64_t NumElts = DL.getTypeAllocSize(I.getType()) / 8;
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
@@ -11103,12 +11104,13 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   case Intrinsic::arm_neon_vst4lane: {
     Info.opc = ISD::INTRINSIC_VOID;
     // Conservatively set memVT to the entire set of vectors stored.
+    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
     unsigned NumElts = 0;
     for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
       Type *ArgTy = I.getArgOperand(ArgI)->getType();
       if (!ArgTy->isVectorTy())
         break;
-      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
+      NumElts += DL.getTypeAllocSize(ArgTy) / 8;
     }
     Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
     Info.ptrVal = I.getArgOperand(0);
@@ -11122,12 +11124,13 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   }
   case Intrinsic::arm_ldaex:
   case Intrinsic::arm_ldrex: {
+    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(0);
     Info.offset = 0;
-    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
     Info.vol = true;
     Info.readMem = true;
     Info.writeMem = false;
@@ -11135,12 +11138,13 @@ bool ARMTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
   }
   case Intrinsic::arm_stlex:
   case Intrinsic::arm_strex: {
+    auto &DL = I.getCalledFunction()->getParent()->getDataLayout();
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
     Info.memVT = MVT::getVT(PtrTy->getElementType());
     Info.ptrVal = I.getArgOperand(1);
     Info.offset = 0;
-    Info.align = getDataLayout()->getABITypeAlignment(PtrTy->getElementType());
+    Info.align = DL.getABITypeAlignment(PtrTy->getElementType());
     Info.vol = true;
     Info.readMem = false;
     Info.writeMem = true;
@@ -11427,9 +11431,9 @@ bool ARMTargetLowering::lowerInterleavedLoad(
   VectorType *VecTy = Shuffles[0]->getType();
   Type *EltTy = VecTy->getVectorElementType();
 
-  const DataLayout *DL = getDataLayout();
-  unsigned VecSize = DL->getTypeAllocSizeInBits(VecTy);
-  bool EltIs64Bits = DL->getTypeAllocSizeInBits(EltTy) == 64;
+  const DataLayout &DL = LI->getModule()->getDataLayout();
+  unsigned VecSize = DL.getTypeAllocSizeInBits(VecTy);
+  bool EltIs64Bits = DL.getTypeAllocSizeInBits(EltTy) == 64;
 
   // Skip illegal vector types and vector types of i64/f64 element (vldN doesn't
   // support i64/f64 element).
@@ -11439,8 +11443,8 @@ bool ARMTargetLowering::lowerInterleavedLoad(
   // A pointer vector can not be the return type of the ldN intrinsics. Need to
   // load integer vectors first and then convert to pointer vectors.
   if (EltTy->isPointerTy())
-    VecTy = VectorType::get(DL->getIntPtrType(EltTy),
-                            VecTy->getVectorNumElements());
+    VecTy =
+        VectorType::get(DL.getIntPtrType(EltTy), VecTy->getVectorNumElements());
 
   static const Intrinsic::ID LoadInts[3] = {Intrinsic::arm_neon_vld2,
                                             Intrinsic::arm_neon_vld3,
@@ -11517,9 +11521,9 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
   Type *EltTy = VecTy->getVectorElementType();
   VectorType *SubVecTy = VectorType::get(EltTy, NumSubElts);
 
-  const DataLayout *DL = getDataLayout();
-  unsigned SubVecSize = DL->getTypeAllocSizeInBits(SubVecTy);
-  bool EltIs64Bits = DL->getTypeAllocSizeInBits(EltTy) == 64;
+  const DataLayout &DL = SI->getModule()->getDataLayout();
+  unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);
+  bool EltIs64Bits = DL.getTypeAllocSizeInBits(EltTy) == 64;
 
   // Skip illegal sub vector types and vector types of i64/f64 element (vstN
   // doesn't support i64/f64 element).
@@ -11533,7 +11537,7 @@ bool ARMTargetLowering::lowerInterleavedStore(StoreInst *SI,
   // StN intrinsics don't support pointer vectors as arguments. Convert pointer
   // vectors to integer vectors.
   if (EltTy->isPointerTy()) {
-    Type *IntTy = DL->getIntPtrType(EltTy);
+    Type *IntTy = DL.getIntPtrType(EltTy);
 
     // Convert to the corresponding integer vector.
     Type *IntVecTy =
@@ -83,7 +83,7 @@ EmitSpecializedLibcall(SelectionDAG &DAG, SDLoc dl,
 
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI->getDataLayout()->getIntPtrType(*DAG.getContext());
+  Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
   Entry.Node = Dst;
   Args.push_back(Entry);
   if (AEABILibcall == AEABI_MEMCLR) {
@@ -1583,7 +1583,7 @@ SDValue MipsTargetLowering::lowerBR_JT(SDValue Op, SelectionDAG &DAG) const {
   auto &TD = DAG.getDataLayout();
   EVT PTy = getPointerTy(TD);
   unsigned EntrySize =
-      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(*getDataLayout());
+      DAG.getMachineFunction().getJumpTableInfo()->getEntrySize(TD);
 
   Index = DAG.getNode(ISD::MUL, DL, PTy, Index,
                       DAG.getConstant(EntrySize, DL, PTy));
@@ -1876,7 +1876,9 @@ SDValue MipsTargetLowering::lowerVAARG(SDValue Op, SelectionDAG &DAG) const {
   }
 
   // Increment the pointer, VAList, to the next vaarg.
-  unsigned ArgSizeInBytes = getDataLayout()->getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
+  auto &TD = DAG.getDataLayout();
+  unsigned ArgSizeInBytes =
+      TD.getTypeAllocSize(VT.getTypeForEVT(*DAG.getContext()));
   SDValue Tmp3 = DAG.getNode(ISD::ADD, DL, VAList.getValueType(), VAList,
                              DAG.getConstant(RoundUpToAlignment(ArgSizeInBytes,
                                                                 ArgSlotSizeInBytes),
@@ -890,12 +890,12 @@ NVPTXTargetLowering::LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const {
   return DAG.getNode(NVPTXISD::Wrapper, dl, PtrVT, Op);
 }
 
-std::string
-NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
-                                  const SmallVectorImpl<ISD::OutputArg> &Outs,
-                                  unsigned retAlignment,
-                                  const ImmutableCallSite *CS) const {
-  auto PtrVT = getPointerTy(*getDataLayout());
+std::string NVPTXTargetLowering::getPrototype(
+    const DataLayout &DL, Type *retTy, const ArgListTy &Args,
+    const SmallVectorImpl<ISD::OutputArg> &Outs, unsigned retAlignment,
+    const ImmutableCallSite *CS) const {
+  auto PtrVT = getPointerTy(DL);
 
   bool isABI = (STI.getSmVersion() >= 20);
   assert(isABI && "Non-ABI compilation is not supported");
   if (!isABI)
@@ -925,10 +925,9 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
     O << ".param .b" << PtrVT.getSizeInBits() << " _";
   } else if ((retTy->getTypeID() == Type::StructTyID) ||
              isa<VectorType>(retTy)) {
-    O << ".param .align "
-      << retAlignment
-      << " .b8 _["
-      << getDataLayout()->getTypeAllocSize(retTy) << "]";
+    auto &DL = CS->getCalledFunction()->getParent()->getDataLayout();
+    O << ".param .align " << retAlignment << " .b8 _["
+      << DL.getTypeAllocSize(retTy) << "]";
   } else {
     llvm_unreachable("Unknown return type");
   }
@@ -947,29 +946,27 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
     first = false;
 
     if (!Outs[OIdx].Flags.isByVal()) {
-      const DataLayout *TD = getDataLayout();
       if (Ty->isAggregateType() || Ty->isVectorTy()) {
         unsigned align = 0;
         const CallInst *CallI = cast<CallInst>(CS->getInstruction());
         // +1 because index 0 is reserved for return type alignment
         if (!llvm::getAlign(*CallI, i + 1, align))
-          align = TD->getABITypeAlignment(Ty);
-        unsigned sz = TD->getTypeAllocSize(Ty);
+          align = DL.getABITypeAlignment(Ty);
+        unsigned sz = DL.getTypeAllocSize(Ty);
         O << ".param .align " << align << " .b8 ";
         O << "_";
         O << "[" << sz << "]";
         // update the index for Outs
         SmallVector<EVT, 16> vtparts;
-        ComputeValueVTs(*this, *TD, Ty, vtparts);
+        ComputeValueVTs(*this, DL, Ty, vtparts);
         if (unsigned len = vtparts.size())
           OIdx += len - 1;
         continue;
       }
       // i8 types in IR will be i16 types in SDAG
-      assert(
-          (getValueType(*TD, Ty) == Outs[OIdx].VT ||
-           (getValueType(*TD, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
-          "type mismatch between callee prototype and arguments");
+      assert((getValueType(DL, Ty) == Outs[OIdx].VT ||
+              (getValueType(DL, Ty) == MVT::i8 && Outs[OIdx].VT == MVT::i16)) &&
+             "type mismatch between callee prototype and arguments");
       // scalar type
       unsigned sz = 0;
       if (isa<IntegerType>(Ty)) {
@@ -989,7 +986,7 @@ NVPTXTargetLowering::getPrototype(Type *retTy, const ArgListTy &Args,
     Type *ETy = PTy->getElementType();
 
     unsigned align = Outs[OIdx].Flags.getByValAlign();
-    unsigned sz = getDataLayout()->getTypeAllocSize(ETy);
+    unsigned sz = DL.getTypeAllocSize(ETy);
     O << ".param .align " << align << " .b8 ";
     O << "_";
     O << "[" << sz << "]";
@@ -1003,7 +1000,6 @@ NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
                                           const ImmutableCallSite *CS,
                                           Type *Ty,
                                           unsigned Idx) const {
-  const DataLayout *TD = getDataLayout();
   unsigned Align = 0;
   const Value *DirectCallee = CS->getCalledFunction();
 
@@ -1044,7 +1040,8 @@ NVPTXTargetLowering::getArgumentAlignment(SDValue Callee,
 
   // Call is indirect or alignment information is not available, fall back to
   // the ABI type alignment
-  return TD->getABITypeAlignment(Ty);
+  auto &DL = CS->getCaller()->getParent()->getDataLayout();
+  return DL.getABITypeAlignment(Ty);
 }
 
 SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
@@ -1097,7 +1094,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       // aggregate
       SmallVector<EVT, 16> vtparts;
       SmallVector<uint64_t, 16> Offsets;
-      ComputePTXValueVTs(*this, DL, Ty, vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts, &Offsets,
+                         0);
 
       unsigned align = getArgumentAlignment(Callee, CS, Ty, paramCount + 1);
       // declare .param .align <align> .b8 .param<n>[<size>];
@@ -1322,7 +1320,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
       SmallVector<uint64_t, 16> Offsets;
       const PointerType *PTy = dyn_cast<PointerType>(Args[i].Ty);
       assert(PTy && "Type of a byval parameter should be pointer");
-      ComputePTXValueVTs(*this, DL, PTy->getElementType(), vtparts, &Offsets, 0);
+      ComputePTXValueVTs(*this, DAG.getDataLayout(), PTy->getElementType(),
+                         vtparts, &Offsets, 0);
 
       // declare .param .align <align> .b8 .param<n>[<size>];
       unsigned sz = Outs[OIdx].Flags.getByValSize();
@@ -1416,7 +1415,8 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   // The prototype is embedded in a string and put as the operand for a
   // CallPrototype SDNode which will print out to the value of the string.
   SDVTList ProtoVTs = DAG.getVTList(MVT::Other, MVT::Glue);
-  std::string Proto = getPrototype(retTy, Args, Outs, retAlignment, CS);
+  std::string Proto =
+      getPrototype(DAG.getDataLayout(), retTy, Args, Outs, retAlignment, CS);
   const char *ProtoStr =
       nvTM->getManagedStrPool()->getManagedString(Proto.c_str())->c_str();
   SDValue ProtoOps[] = {
@@ -1597,7 +1597,7 @@ SDValue NVPTXTargetLowering::LowerCall(TargetLowering::CallLoweringInfo &CLI,
   } else {
     SmallVector<EVT, 16> VTs;
     SmallVector<uint64_t, 16> Offsets;
-    ComputePTXValueVTs(*this, DL, retTy, VTs, &Offsets, 0);
+    ComputePTXValueVTs(*this, DAG.getDataLayout(), retTy, VTs, &Offsets, 0);
     assert(VTs.size() == Ins.size() && "Bad value decomposition");
     unsigned RetAlign = getArgumentAlignment(Callee, CS, retTy, 0);
     for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
@@ -1920,11 +1920,11 @@ NVPTXTargetLowering::LowerSTOREVector(SDValue Op, SelectionDAG &DAG) const {
   }
 
   MemSDNode *MemSD = cast<MemSDNode>(N);
-  const DataLayout *TD = getDataLayout();
+  const DataLayout &TD = DAG.getDataLayout();
 
   unsigned Align = MemSD->getAlignment();
   unsigned PrefAlign =
-      TD->getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
+      TD.getPrefTypeAlignment(ValVT.getTypeForEVT(*DAG.getContext()));
   if (Align < PrefAlign) {
     // This store is not sufficiently aligned, so bail out and let this vector
     // store be scalarized. Note that we may still be able to emit smaller
@@ -2064,8 +2064,8 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
     const SmallVectorImpl<ISD::InputArg> &Ins, SDLoc dl, SelectionDAG &DAG,
     SmallVectorImpl<SDValue> &InVals) const {
   MachineFunction &MF = DAG.getMachineFunction();
-  const DataLayout &DL = MF.getDataLayout();
-  auto PtrVT = getPointerTy(DL);
+  const DataLayout &DL = DAG.getDataLayout();
+  auto PtrVT = getPointerTy(DAG.getDataLayout());
 
   const Function *F = MF.getFunction();
   const AttributeSet &PAL = F->getAttributes();
@@ -2119,7 +2119,7 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
     if (Ty->isAggregateType()) {
       SmallVector<EVT, 16> vtparts;
 
-      ComputePTXValueVTs(*this, DL, Ty, vtparts);
+      ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts);
       assert(vtparts.size() > 0 && "empty aggregate type not expected");
       for (unsigned parti = 0, parte = vtparts.size(); parti != parte;
            ++parti) {
@@ -2157,7 +2157,8 @@ SDValue NVPTXTargetLowering::LowerFormalArguments(
         // NOTE: Here, we lose the ability to issue vector loads for vectors
         // that are a part of a struct. This should be investigated in the
         // future.
-        ComputePTXValueVTs(*this, DL, Ty, vtparts, &offsets, 0);
+        ComputePTXValueVTs(*this, DAG.getDataLayout(), Ty, vtparts, &offsets,
+                           0);
         assert(vtparts.size() > 0 && "empty aggregate type not expected");
         bool aggregateIsPacked = false;
         if (StructType *STy = llvm::dyn_cast<StructType>(Ty))
@@ -4252,7 +4253,6 @@ SDValue NVPTXTargetLowering::PerformDAGCombine(SDNode *N,
 
 /// ReplaceVectorLoad - Convert vector loads into multi-output scalar loads.
 static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
-                              const DataLayout *TD,
                               SmallVectorImpl<SDValue> &Results) {
   EVT ResVT = N->getValueType(0);
   SDLoc DL(N);
@@ -4283,8 +4283,9 @@ static void ReplaceLoadVector(SDNode *N, SelectionDAG &DAG,
   LoadSDNode *LD = cast<LoadSDNode>(N);
 
   unsigned Align = LD->getAlignment();
+  auto &TD = DAG.getDataLayout();
   unsigned PrefAlign =
-      TD->getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
+      TD.getPrefTypeAlignment(ResVT.getTypeForEVT(*DAG.getContext()));
   if (Align < PrefAlign) {
     // This load is not sufficiently aligned, so bail out and let this vector
     // load be scalarized. Note that we may still be able to emit smaller
@@ -4496,7 +4497,7 @@ void NVPTXTargetLowering::ReplaceNodeResults(
   default:
     report_fatal_error("Unhandled custom legalization");
   case ISD::LOAD:
-    ReplaceLoadVector(N, DAG, getDataLayout(), Results);
+    ReplaceLoadVector(N, DAG, Results);
     return;
   case ISD::INTRINSIC_W_CHAIN:
     ReplaceINTRINSIC_W_CHAIN(N, DAG, Results);
@@ -482,7 +482,7 @@ public:
   SDValue LowerCall(CallLoweringInfo &CLI,
                     SmallVectorImpl<SDValue> &InVals) const override;
 
-  std::string getPrototype(Type *, const ArgListTy &,
+  std::string getPrototype(const DataLayout &DL, Type *, const ArgListTy &,
                            const SmallVectorImpl<ISD::OutputArg> &,
                            unsigned retAlignment,
                            const ImmutableCallSite *CS) const;
@@ -1103,7 +1103,7 @@ static bool isConstantOrUndef(int Op, int Val) {
 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                                SelectionDAG &DAG) {
-  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
+  bool IsLE = DAG.getDataLayout().isLittleEndian();
   if (ShuffleKind == 0) {
     if (IsLE)
       return false;
@@ -1134,7 +1134,7 @@ bool PPC::isVPKUHUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
 /// For the latter, the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVPKUWUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
                                SelectionDAG &DAG) {
-  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
+  bool IsLE = DAG.getDataLayout().isLittleEndian();
   if (ShuffleKind == 0) {
     if (IsLE)
       return false;
@@ -1176,7 +1176,7 @@ bool PPC::isVPKUDUMShuffleMask(ShuffleVectorSDNode *N, unsigned ShuffleKind,
   if (!Subtarget.hasP8Vector())
     return false;
 
-  bool IsLE = DAG.getTarget().getDataLayout()->isLittleEndian();
+  bool IsLE = DAG.getDataLayout().isLittleEndian();
   if (ShuffleKind == 0) {
     if (IsLE)
       return false;
@@ -1239,7 +1239,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned UnitSize,
 /// the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
-  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     if (ShuffleKind == 1) // unary
       return isVMerge(N, UnitSize, 0, 0);
     else if (ShuffleKind == 2) // swapped
@@ -1264,7 +1264,7 @@ bool PPC::isVMRGLShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
 /// the input operands are swapped (see PPCInstrAltivec.td).
 bool PPC::isVMRGHShuffleMask(ShuffleVectorSDNode *N, unsigned UnitSize,
                              unsigned ShuffleKind, SelectionDAG &DAG) {
-  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     if (ShuffleKind == 1) // unary
       return isVMerge(N, UnitSize, 8, 8);
     else if (ShuffleKind == 2) // swapped
@@ -1354,7 +1354,7 @@ static bool isVMerge(ShuffleVectorSDNode *N, unsigned IndexOffset,
 */
 bool PPC::isVMRGEOShuffleMask(ShuffleVectorSDNode *N, bool CheckEven,
                               unsigned ShuffleKind, SelectionDAG &DAG) {
-  if (DAG.getTarget().getDataLayout()->isLittleEndian()) {
+  if (DAG.getDataLayout().isLittleEndian()) {
     unsigned indexOffset = CheckEven ? 4 : 0;
     if (ShuffleKind == 1) // Unary
       return isVMerge(N, indexOffset, 0);
@@ -1401,7 +1401,7 @@ int PPC::isVSLDOIShuffleMask(SDNode *N, unsigned ShuffleKind,
     if (ShiftAmt < i) return -1;
 
   ShiftAmt -= i;
-  bool isLE = DAG.getTarget().getDataLayout()->isLittleEndian();
+  bool isLE = DAG.getDataLayout().isLittleEndian();
 
   if ((ShuffleKind == 0 && !isLE) || (ShuffleKind == 2 && isLE)) {
     // Check the rest of the elements to see if they are consecutive.
@@ -1458,7 +1458,7 @@ unsigned PPC::getVSPLTImmediate(SDNode *N, unsigned EltSize,
                                 SelectionDAG &DAG) {
   ShuffleVectorSDNode *SVOp = cast<ShuffleVectorSDNode>(N);
   assert(isSplatShuffleMask(SVOp, EltSize));
-  if (DAG.getTarget().getDataLayout()->isLittleEndian())
+  if (DAG.getDataLayout().isLittleEndian())
     return (16 / EltSize) - 1 - (SVOp->getMaskElt(0) / EltSize);
   else
     return SVOp->getMaskElt(0) / EltSize;
@@ -2403,9 +2403,7 @@ SDValue PPCTargetLowering::LowerINIT_TRAMPOLINE(SDValue Op,
 
   EVT PtrVT = DAG.getTargetLoweringInfo().getPointerTy(DAG.getDataLayout());
   bool isPPC64 = (PtrVT == MVT::i64);
-  Type *IntPtrTy =
-      DAG.getTargetLoweringInfo().getDataLayout()->getIntPtrType(
-          *DAG.getContext());
+  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
 
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
@@ -10150,9 +10148,9 @@ SDValue PPCTargetLowering::PerformDAGCombine(SDNode *N,
 
     EVT MemVT = LD->getMemoryVT();
     Type *Ty = MemVT.getTypeForEVT(*DAG.getContext());
-    unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(Ty);
+    unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(Ty);
     Type *STy = MemVT.getScalarType().getTypeForEVT(*DAG.getContext());
-    unsigned ScalarABIAlignment = getDataLayout()->getABITypeAlignment(STy);
+    unsigned ScalarABIAlignment = DAG.getDataLayout().getABITypeAlignment(STy);
     if (LD->isUnindexed() && VT.isVector() &&
         ((Subtarget.hasAltivec() && ISD::isNON_EXTLoad(N) &&
           // P8 and later hardware should just use LOAD.
@@ -995,7 +995,7 @@ SparcTargetLowering::getSRetArgSize(SelectionDAG &DAG, SDValue Callee) const
 
   PointerType *Ty = cast<PointerType>(CalleeFn->arg_begin()->getType());
   Type *ElementTy = Ty->getElementType();
-  return getDataLayout()->getTypeAllocSize(ElementTy);
+  return DAG.getDataLayout().getTypeAllocSize(ElementTy);
 }
 
 
@@ -14984,7 +14984,7 @@ SDValue X86TargetLowering::LowerVAARG(SDValue Op, SelectionDAG &DAG) const {
 
   EVT ArgVT = Op.getNode()->getValueType(0);
   Type *ArgTy = ArgVT.getTypeForEVT(*DAG.getContext());
-  uint32_t ArgSize = getDataLayout()->getTypeAllocSize(ArgTy);
+  uint32_t ArgSize = DAG.getDataLayout().getTypeAllocSize(ArgTy);
   uint8_t ArgMode;
 
   // Decide which area this value should be read from.
@@ -21715,7 +21715,7 @@ static SDValue XFormVExtractWithShuffleIntoLoad(SDNode *N, SelectionDAG &DAG,
     // alignment is valid.
     unsigned Align = LN0->getAlignment();
     const TargetLowering &TLI = DAG.getTargetLoweringInfo();
-    unsigned NewAlign = TLI.getDataLayout()->getABITypeAlignment(
+    unsigned NewAlign = DAG.getDataLayout().getABITypeAlignment(
         EltVT.getTypeForEVT(*DAG.getContext()));
 
     if (NewAlign > Align || !TLI.isOperationLegalOrCustom(ISD::LOAD, EltVT))
@@ -281,7 +281,8 @@ static bool IsSmallObject(const GlobalValue *GV, const XCoreTargetLowering &XTL)
   if (!ObjType->isSized())
     return false;
 
-  unsigned ObjSize = XTL.getDataLayout()->getTypeAllocSize(ObjType);
+  auto &DL = GV->getParent()->getDataLayout();
+  unsigned ObjSize = DL.getTypeAllocSize(ObjType);
   return ObjSize < CodeModelLargeSize && ObjSize != 0;
 }
 
@@ -435,8 +436,9 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
                                       LD->getAlignment()))
     return SDValue();
 
-  unsigned ABIAlignment = getDataLayout()->
-    getABITypeAlignment(LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
+  auto &TD = DAG.getDataLayout();
+  unsigned ABIAlignment = TD.getABITypeAlignment(
+      LD->getMemoryVT().getTypeForEVT(*DAG.getContext()));
   // Leave aligned load alone.
   if (LD->getAlignment() >= ABIAlignment)
     return SDValue();
@@ -486,7 +488,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   }
 
   // Lower to a call to __misaligned_load(BasePtr).
-  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+  Type *IntPtrTy = TD.getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
 
@@ -517,8 +519,8 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
                                       ST->getAlignment())) {
     return SDValue();
   }
-  unsigned ABIAlignment = getDataLayout()->
-    getABITypeAlignment(ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
+  unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
+      ST->getMemoryVT().getTypeForEVT(*DAG.getContext()));
   // Leave aligned store alone.
   if (ST->getAlignment() >= ABIAlignment) {
     return SDValue();
@@ -546,7 +548,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG) const
   }
 
   // Lower to a call to __misaligned_store(BasePtr, Value).
-  Type *IntPtrTy = getDataLayout()->getIntPtrType(*DAG.getContext());
+  Type *IntPtrTy = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
 
@@ -1829,7 +1831,7 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
       if (StoreBits % 8) {
         break;
       }
-      unsigned ABIAlignment = getDataLayout()->getABITypeAlignment(
+      unsigned ABIAlignment = DAG.getDataLayout().getABITypeAlignment(
           ST->getMemoryVT().getTypeForEVT(*DCI.DAG.getContext()));
       unsigned Alignment = ST->getAlignment();
       if (Alignment >= ABIAlignment) {
@@ -36,7 +36,7 @@ EmitTargetCodeForMemcpy(SelectionDAG &DAG, SDLoc dl, SDValue Chain,
   const TargetLowering &TLI = *DAG.getSubtarget().getTargetLowering();
   TargetLowering::ArgListTy Args;
   TargetLowering::ArgListEntry Entry;
-  Entry.Ty = TLI.getDataLayout()->getIntPtrType(*DAG.getContext());
+  Entry.Ty = DAG.getDataLayout().getIntPtrType(*DAG.getContext());
   Entry.Node = Dst; Args.push_back(Entry);
   Entry.Node = Src; Args.push_back(Entry);
   Entry.Node = Size; Args.push_back(Entry);