[TargetLowering] Make allowsMemoryAccess method virtual.

Rename the old function to make explicit that it only cares about alignment.
The new allowsMemoryAccess calls the alignment-related function by default
and can be overridden by a target to report whether the memory access is
legal or not.
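
As an illustration only (editor's sketch, not code from this change), a target could override the now-virtual hook along the following lines; MyTargetLowering and AS_RESTRICTED are made-up names, and the fallback simply reuses the default alignment-based check.

// Hypothetical override in a target's TargetLowering subclass (illustration
// only; MyTargetLowering and AS_RESTRICTED are not real LLVM names).
bool MyTargetLowering::allowsMemoryAccess(
    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
  // Reject accesses this target cannot perform at all, regardless of alignment.
  if (AddrSpace == AS_RESTRICTED)
    return false;
  // Otherwise fall back to the default, purely alignment-based check.
  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
                                        Flags, Fast);
}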

Differential Revision: https://reviews.llvm.org/D67121

llvm-svn: 372935
commit da9ea2e853
parent 9eba532d0e
Author: Thomas Raoux
Date:   2019-09-26 00:16:01 +00:00

9 changed files with 67 additions and 32 deletions

llvm/include/llvm/CodeGen/TargetLowering.h

@@ -1472,11 +1472,30 @@ public:
     return false;
   }
 
   /// This function returns true if the memory access is aligned or if the
   /// target allows this specific unaligned memory access. If the access is
   /// allowed, the optional final parameter returns if the access is also fast
   /// (as defined by the target).
+  bool allowsMemoryAccessForAlignment(
+      LLVMContext &Context, const DataLayout &DL, EVT VT,
+      unsigned AddrSpace = 0, unsigned Alignment = 1,
+      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
+      bool *Fast = nullptr) const;
+
+  /// Return true if the memory access of this type is aligned or if the target
+  /// allows this specific unaligned access for the given MachineMemOperand.
+  /// If the access is allowed, the optional final parameter returns if the
+  /// access is also fast (as defined by the target).
+  bool allowsMemoryAccessForAlignment(LLVMContext &Context,
+                                      const DataLayout &DL, EVT VT,
+                                      const MachineMemOperand &MMO,
+                                      bool *Fast = nullptr) const;
+
+  /// Return true if the target supports a memory access of this type for the
+  /// given address space and alignment. If the access is allowed, the optional
+  /// final parameter returns if the access is also fast (as defined by the
+  /// target).
-  bool
+  virtual bool
   allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                      unsigned AddrSpace = 0, unsigned Alignment = 1,
                      MachineMemOperand::Flags Flags = MachineMemOperand::MONone,
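
For orientation, a caller-side sketch (editor's illustration, not part of the patch; TLI, DAG, and LD are assumed to be in scope in some lowering code) of querying the MachineMemOperand overload and expanding the load when the target, possibly via an overridden hook, rejects the access:

// Illustrative only: ask the target whether this load's memory access is
// legal and fast; expand it when the hook says it is not supported.
bool Fast = false;
if (!TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
                            LD->getMemoryVT(), *LD->getMemOperand(), &Fast)) {
  SDValue Value, Chain;
  std::tie(Value, Chain) = TLI.expandUnalignedLoad(LD, DAG);
}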

llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -4952,7 +4952,7 @@ bool DAGCombiner::isLegalNarrowLdSt(LSBaseSDNode *LDST,
   if (LDST->getMemoryVT().getSizeInBits() < MemVT.getSizeInBits())
     return false;
 
-  // Ensure that this isn't going to produce an unsupported unaligned access.
+  // Ensure that this isn't going to produce an unsupported memory access.
   if (ShAmt &&
       !TLI.allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
                               LDST->getAddressSpace(), ShAmt / 8,

llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp

@@ -503,8 +503,8 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       // expand it.
       EVT MemVT = ST->getMemoryVT();
       const DataLayout &DL = DAG.getDataLayout();
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                  *ST->getMemOperand())) {
+      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                              *ST->getMemOperand())) {
         LLVM_DEBUG(dbgs() << "Expanding unsupported unaligned store\n");
         SDValue Result = TLI.expandUnalignedStore(ST, DAG);
         ReplaceNode(SDValue(ST, 0), Result);
@@ -618,8 +618,8 @@ void SelectionDAGLegalize::LegalizeStoreOps(SDNode *Node) {
       EVT MemVT = ST->getMemoryVT();
       // If this is an unaligned store and the target doesn't support it,
       // expand it.
-      if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                  *ST->getMemOperand())) {
+      if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                              *ST->getMemOperand())) {
         SDValue Result = TLI.expandUnalignedStore(ST, DAG);
         ReplaceNode(SDValue(ST, 0), Result);
       }
@@ -679,8 +679,8 @@ void SelectionDAGLegalize::LegalizeLoadOps(SDNode *Node) {
     const DataLayout &DL = DAG.getDataLayout();
     // If this is an unaligned load and the target doesn't support it,
     // expand it.
-    if (!TLI.allowsMemoryAccess(*DAG.getContext(), DL, MemVT,
-                                *LD->getMemOperand())) {
+    if (!TLI.allowsMemoryAccessForAlignment(*DAG.getContext(), DL, MemVT,
+                                            *LD->getMemOperand())) {
      std::tie(RVal, RChain) = TLI.expandUnalignedLoad(LD, DAG);
    }
    break;

llvm/lib/CodeGen/TargetLoweringBase.cpp

@@ -1505,12 +1505,9 @@ unsigned TargetLoweringBase::getByValTypeAlignment(Type *Ty,
   return DL.getABITypeAlignment(Ty);
 }
 
-bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
-                                            const DataLayout &DL, EVT VT,
-                                            unsigned AddrSpace,
-                                            unsigned Alignment,
-                                            MachineMemOperand::Flags Flags,
-                                            bool *Fast) const {
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
   // Check if the specified alignment is sufficient based on the data layout.
   // TODO: While using the data layout works in practice, a better solution
   // would be to implement this check directly (make this a virtual function).
@@ -1528,6 +1525,21 @@ bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
   return allowsMisalignedMemoryAccesses(VT, AddrSpace, Alignment, Flags, Fast);
 }
 
+bool TargetLoweringBase::allowsMemoryAccessForAlignment(
+    LLVMContext &Context, const DataLayout &DL, EVT VT,
+    const MachineMemOperand &MMO, bool *Fast) const {
+  return allowsMemoryAccessForAlignment(Context, DL, VT, MMO.getAddrSpace(),
+                                        MMO.getAlignment(), MMO.getFlags(),
+                                        Fast);
+}
+
+bool TargetLoweringBase::allowsMemoryAccess(
+    LLVMContext &Context, const DataLayout &DL, EVT VT, unsigned AddrSpace,
+    unsigned Alignment, MachineMemOperand::Flags Flags, bool *Fast) const {
+  return allowsMemoryAccessForAlignment(Context, DL, VT, AddrSpace, Alignment,
+                                        Flags, Fast);
+}
+
 bool TargetLoweringBase::allowsMemoryAccess(LLVMContext &Context,
                                             const DataLayout &DL, EVT VT,
                                             const MachineMemOperand &MMO,

llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp

@@ -681,8 +681,9 @@ bool AMDGPUTargetLowering::isLoadBitCastBeneficial(EVT LoadTy, EVT CastTy,
     return false;
 
   bool Fast = false;
-  return allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), CastTy,
-                            MMO, &Fast) && Fast;
+  return allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        CastTy, MMO, &Fast) &&
+         Fast;
 }
 
 // SI+ has instructions for cttz / ctlz for 32-bit values. This is probably also

llvm/lib/Target/AMDGPU/SIISelLowering.cpp

@@ -7316,8 +7316,8 @@ SDValue SITargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
     assert(Op.getValueType().getVectorElementType() == MVT::i32 &&
            "Custom lowering for non-i32 vectors hasn't been implemented.");
 
-    if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                            *Load->getMemOperand())) {
+    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        MemVT, *Load->getMemOperand())) {
       SDValue Ops[2];
       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
       return DAG.getMergeValues(Ops, DL);
@@ -7818,8 +7818,8 @@ SDValue SITargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
     assert(VT.isVector() &&
            Store->getValue().getValueType().getScalarType() == MVT::i32);
 
-    if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                            *Store->getMemOperand())) {
+    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        VT, *Store->getMemOperand())) {
       return expandUnalignedStore(Store, DAG);
     }

llvm/lib/Target/Hexagon/HexagonISelLowering.cpp

@@ -2673,7 +2673,8 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
     DoDefault = true;
 
   if (!AlignLoads) {
-    if (allowsMemoryAccess(Ctx, DL, LN->getMemoryVT(), *LN->getMemOperand()))
+    if (allowsMemoryAccessForAlignment(Ctx, DL, LN->getMemoryVT(),
+                                       *LN->getMemOperand()))
       return Op;
     DoDefault = true;
   }
@@ -2681,7 +2682,8 @@ HexagonTargetLowering::LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG)
     // The PartTy is the equivalent of "getLoadableTypeOfSize(HaveAlign)".
     MVT PartTy = HaveAlign <= 8 ? MVT::getIntegerVT(8 * HaveAlign)
                                 : MVT::getVectorVT(MVT::i8, HaveAlign);
-    DoDefault = allowsMemoryAccess(Ctx, DL, PartTy, *LN->getMemOperand());
+    DoDefault =
+        allowsMemoryAccessForAlignment(Ctx, DL, PartTy, *LN->getMemOperand());
   }
   if (DoDefault) {
     std::pair<SDValue, SDValue> P = expandUnalignedLoad(LN, DAG);

llvm/lib/Target/NVPTX/NVPTXISelLowering.cpp

@@ -2230,8 +2230,8 @@ SDValue NVPTXTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
   if (Op.getValueType() == MVT::v2f16) {
     LoadSDNode *Load = cast<LoadSDNode>(Op);
     EVT MemVT = Load->getMemoryVT();
-    if (!allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), MemVT,
-                            *Load->getMemOperand())) {
+    if (!allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                        MemVT, *Load->getMemOperand())) {
       SDValue Ops[2];
       std::tie(Ops[0], Ops[1]) = expandUnalignedLoad(Load, DAG);
       return DAG.getMergeValues(Ops, SDLoc(Op));
@@ -2273,8 +2273,8 @@ SDValue NVPTXTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   // v2f16 is legal, so we can't rely on legalizer to handle unaligned
   // stores and have to handle it here.
   if (VT == MVT::v2f16 &&
-      !allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(), VT,
-                          *Store->getMemOperand()))
+      !allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                      VT, *Store->getMemOperand()))
     return expandUnalignedStore(Store, DAG);
 
   if (VT.isVector())

llvm/lib/Target/XCore/XCoreISelLowering.cpp

@@ -414,8 +414,8 @@ SDValue XCoreTargetLowering::LowerLOAD(SDValue Op, SelectionDAG &DAG) const {
          "Unexpected extension type");
   assert(LD->getMemoryVT() == MVT::i32 && "Unexpected load EVT");
 
-  if (allowsMemoryAccess(Context, DAG.getDataLayout(), LD->getMemoryVT(),
-                         *LD->getMemOperand()))
+  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+                                     LD->getMemoryVT(), *LD->getMemOperand()))
     return SDValue();
 
   SDValue Chain = LD->getChain();
@@ -488,8 +488,8 @@ SDValue XCoreTargetLowering::LowerSTORE(SDValue Op, SelectionDAG &DAG) const {
   assert(!ST->isTruncatingStore() && "Unexpected store type");
   assert(ST->getMemoryVT() == MVT::i32 && "Unexpected store EVT");
 
-  if (allowsMemoryAccess(Context, DAG.getDataLayout(), ST->getMemoryVT(),
-                         *ST->getMemOperand()))
+  if (allowsMemoryAccessForAlignment(Context, DAG.getDataLayout(),
+                                     ST->getMemoryVT(), *ST->getMemOperand()))
     return SDValue();
 
   SDValue Chain = ST->getChain();
@@ -1780,8 +1780,9 @@ SDValue XCoreTargetLowering::PerformDAGCombine(SDNode *N,
     // Replace unaligned store of unaligned load with memmove.
     StoreSDNode *ST = cast<StoreSDNode>(N);
     if (!DCI.isBeforeLegalize() ||
-        allowsMemoryAccess(*DAG.getContext(), DAG.getDataLayout(),
-                           ST->getMemoryVT(), *ST->getMemOperand()) ||
+        allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
+                                       ST->getMemoryVT(),
+                                       *ST->getMemOperand()) ||
         ST->isVolatile() || ST->isIndexed()) {
       break;
     }