Mirror of https://github.com/RPCSX/llvm.git, synced 2025-01-16 09:30:09 +00:00
[AArch64] Move helper functions into TII, so they can be reused elsewhere. NFC.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@263032 91177308-0d34-0410-b5e6-96231b3b80d8
parent 86b13ad31e
commit f8b4bf82a5
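In short, the two `isUnscaledLdSt` helpers that previously lived as file-local statics in the AArch64 load/store optimizer become member functions of `AArch64InstrInfo`, so any AArch64 pass that already holds a TII pointer can query them. A minimal sketch of that reuse pattern follows; the function and variable names in it are illustrative and are not part of this commit:

// Sketch only: how another AArch64 pass could reuse the helper once it is
// exposed on AArch64InstrInfo. The free function below is hypothetical.
#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"

static bool functionHasUnscaledLdSt(const llvm::MachineFunction &MF) {
  // Fetch the target's instruction info from the subtarget, as target
  // passes normally do, and downcast to the AArch64 implementation.
  const auto *TII = static_cast<const llvm::AArch64InstrInfo *>(
      MF.getSubtarget().getInstrInfo());
  for (const llvm::MachineBasicBlock &MBB : MF)
    for (const llvm::MachineInstr &MI : MBB)
      if (TII->isUnscaledLdSt(MI.getOpcode()))
        return true;
  return false;
}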
lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1313,6 +1313,35 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
       ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
 }
 
+bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
+  switch (Opc) {
+  default:
+    return false;
+  case AArch64::STURSi:
+  case AArch64::STURDi:
+  case AArch64::STURQi:
+  case AArch64::STURBBi:
+  case AArch64::STURHHi:
+  case AArch64::STURWi:
+  case AArch64::STURXi:
+  case AArch64::LDURSi:
+  case AArch64::LDURDi:
+  case AArch64::LDURQi:
+  case AArch64::LDURWi:
+  case AArch64::LDURXi:
+  case AArch64::LDURSWi:
+  case AArch64::LDURHHi:
+  case AArch64::LDURBBi:
+  case AArch64::LDURSBWi:
+  case AArch64::LDURSHWi:
+    return true;
+  }
+}
+
+bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr *MI) const {
+  return isUnscaledLdSt(MI->getOpcode());
+}
+
 bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
     MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
     const TargetRegisterInfo *TRI) const {
lib/Target/AArch64/AArch64InstrInfo.h
@@ -87,6 +87,12 @@ public:
   /// unprofitable.
   bool isLdStPairSuppressed(const MachineInstr *MI) const;
 
+  /// Return true if this is an unscaled load/store.
+  bool isUnscaledLdSt(unsigned Opc) const;
+
+  /// Return true if this is an unscaled load/store.
+  bool isUnscaledLdSt(MachineInstr *MI) const;
+
   /// Hint that pairing the given load or store is unprofitable.
   void suppressLdStPair(MachineInstr *MI) const;
 
lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -177,35 +177,6 @@ char AArch64LoadStoreOpt::ID = 0;
 INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                 AARCH64_LOAD_STORE_OPT_NAME, false, false)
 
-static bool isUnscaledLdSt(unsigned Opc) {
-  switch (Opc) {
-  default:
-    return false;
-  case AArch64::STURSi:
-  case AArch64::STURDi:
-  case AArch64::STURQi:
-  case AArch64::STURBBi:
-  case AArch64::STURHHi:
-  case AArch64::STURWi:
-  case AArch64::STURXi:
-  case AArch64::LDURSi:
-  case AArch64::LDURDi:
-  case AArch64::LDURQi:
-  case AArch64::LDURWi:
-  case AArch64::LDURXi:
-  case AArch64::LDURSWi:
-  case AArch64::LDURHHi:
-  case AArch64::LDURBBi:
-  case AArch64::LDURSBWi:
-  case AArch64::LDURSHWi:
-    return true;
-  }
-}
-
-static bool isUnscaledLdSt(MachineInstr *MI) {
-  return isUnscaledLdSt(MI->getOpcode());
-}
-
 static unsigned getBitExtrOpcode(MachineInstr *MI) {
   switch (MI->getOpcode()) {
   default:
@@ -626,14 +597,15 @@ static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
 }
 
 static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
-                                  MachineInstr *StoreInst) {
+                                  MachineInstr *StoreInst,
+                                  const AArch64InstrInfo *TII) {
   assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
   int LoadSize = getMemScale(LoadInst);
   int StoreSize = getMemScale(StoreInst);
-  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
+  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                              ? getLdStOffsetOp(StoreInst).getImm()
                              : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
-  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
+  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
   return (UnscaledStOffset <= UnscaledLdOffset) &&
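A note on the offset arithmetic in the hunk above: scaled load/store forms encode their immediate in units of the access size, while the unscaled LDUR/STUR forms encode it directly in bytes, which is why the helper multiplies scaled immediates by getMemScale(...) before comparing. A small hedged illustration, with assumed values that are not taken from the diff:

// Sketch of the normalization the helper performs. Scaled forms count in
// units of the access size; unscaled LDUR/STUR forms count in bytes.
static int byteOffset(bool IsUnscaled, int Imm, int MemScale) {
  return IsUnscaled ? Imm : Imm * MemScale;
}
// byteOffset(false, 2, 4) == 8 and byteOffset(true, 8, 4) == 8: the same
// address expressed by a scaled and an unscaled 32-bit access.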
@@ -664,7 +636,7 @@ AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
   ++NextI;
 
   unsigned Opc = I->getOpcode();
-  bool IsScaled = !isUnscaledLdSt(Opc);
+  bool IsScaled = !TII->isUnscaledLdSt(Opc);
   int OffsetStride = IsScaled ? 1 : getMemScale(I);
 
   bool MergeForward = Flags.getMergeForward();
@@ -827,7 +799,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   int SExtIdx = Flags.getSExtIdx();
   unsigned Opc =
       SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
-  bool IsUnscaled = isUnscaledLdSt(Opc);
+  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
   int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
 
   bool MergeForward = Flags.getMergeForward();
@@ -841,7 +813,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
 
   int Offset = getLdStOffsetOp(I).getImm();
   int PairedOffset = getLdStOffsetOp(Paired).getImm();
-  bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());
+  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
   if (IsUnscaled != PairedIsUnscaled) {
     // We're trying to pair instructions that differ in how they are scaled. If
     // I is scaled then scale the offset of Paired accordingly. Otherwise, do
@@ -874,7 +846,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   }
   int OffsetImm = getLdStOffsetOp(RtMI).getImm();
   // Scale the immediate offset, if necessary.
-  if (isUnscaledLdSt(RtMI->getOpcode())) {
+  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
     assert(!(OffsetImm % getMemScale(RtMI)) &&
            "Unscaled offset cannot be scaled.");
     OffsetImm /= getMemScale(RtMI);
@@ -982,8 +954,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
   // performance and correctness are verified only in little-endian.
   if (!Subtarget->isLittleEndian())
     return NextI;
-  bool IsUnscaled = isUnscaledLdSt(LoadI);
-  assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
+  bool IsUnscaled = TII->isUnscaledLdSt(LoadI);
+  assert(IsUnscaled == TII->isUnscaledLdSt(StoreI) &&
+         "Unsupported ld/st match");
   assert(LoadSize <= StoreSize && "Invalid load size");
   int UnscaledLdOffset = IsUnscaled
                              ? getLdStOffsetOp(LoadI).getImm()
@@ -1145,7 +1118,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
     // it's unnecessary to check if BaseReg is modified by the store itself.
     if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
         BaseReg == getLdStBaseOp(MI).getReg() &&
-        isLdOffsetInRangeOfSt(LoadMI, MI) &&
+        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
         !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
       StoreI = MBBI;
       return true;
@@ -1171,7 +1144,8 @@ bool AArch64LoadStoreOpt::findMatchingStore(
 
 // Returns true if these two opcodes can be merged or paired. Otherwise,
 // returns false.
-static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags) {
+static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags,
+                        const AArch64InstrInfo *TII) {
   // Opcodes match: nothing more to check.
   if (OpcA == OpcB)
     return true;
@@ -1197,7 +1171,7 @@ static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags,
     return false;
 
   // Try to match an unscaled load/store with a scaled load/store.
-  return isUnscaledLdSt(OpcA) != isUnscaledLdSt(OpcB) &&
+  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
          getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
 
   // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
@@ -1215,7 +1189,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
 
   unsigned Opc = FirstMI->getOpcode();
   bool MayLoad = FirstMI->mayLoad();
-  bool IsUnscaled = isUnscaledLdSt(FirstMI);
+  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
   unsigned Reg = getLdStRegOp(FirstMI).getReg();
   unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
@@ -1241,7 +1215,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
     ++Count;
 
     Flags.setSExtIdx(-1);
-    if (canMergeOpc(Opc, MI->getOpcode(), Flags) &&
+    if (canMergeOpc(Opc, MI->getOpcode(), Flags, TII) &&
         getLdStOffsetOp(MI).isImm()) {
       assert(MI->mayLoadOrStore() && "Expected memory operation.");
       // If we've found another instruction with the same opcode, check to see
@@ -1256,7 +1230,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // final offset must be in range.
       unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
       int MIOffset = getLdStOffsetOp(MI).getImm();
-      bool MIIsUnscaled = isUnscaledLdSt(MI);
+      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
       if (IsUnscaled != MIIsUnscaled) {
         // We're trying to pair instructions that differ in how they are scaled.
         // If FirstMI is scaled then scale the offset of MI accordingly.
@@ -1685,7 +1659,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   // Early exit if the offset is not possible to match. (6 bits of positive
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
-  bool IsUnscaled = isUnscaledLdSt(MI);
+  bool IsUnscaled = TII->isUnscaledLdSt(MI);
   int Offset = getLdStOffsetOp(MI).getImm();
   int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
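For reference on the early exit in the hunk above: the paired LDP/STP forms take a signed 7-bit immediate scaled by the access size, roughly -64..63 units, which is where the "6 bits of positive range" in the comment comes from. A simplified sketch of that bound, not the pass's actual inBoundsForPair check:

// Rough sketch only: LDP/STP immediates are signed 7-bit values in units of
// the access size, so a candidate byte offset must land in [-64, 63] units.
static bool roughlyInPairRange(int Offset, int OffsetStride) {
  int Units = Offset / OffsetStride;   // convert a byte offset to units
  return Units >= -64 && Units <= 63;  // signed 7-bit immediate range
}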
@@ -1696,7 +1670,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
   if (Paired != E) {
     ++NumPairCreated;
-    if (isUnscaledLdSt(MI))
+    if (TII->isUnscaledLdSt(MI))
       ++NumUnscaledPairCreated;
     // Keeping the iterator straight is a pain, so we let the merge routine tell
     // us what the next instruction is after it's done mucking about.
@@ -1920,7 +1894,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
       }
       // Don't know how to handle pre/post-index versions, so move to the next
       // instruction.
-      if (isUnscaledLdSt(Opc)) {
+      if (TII->isUnscaledLdSt(Opc)) {
         ++MBBI;
         break;
       }