From f8b4bf82a5a5f509321c0421645d8dccfc8fc862 Mon Sep 17 00:00:00 2001
From: Chad Rosier
Date: Wed, 9 Mar 2016 17:29:48 +0000
Subject: [PATCH] [AArch64] Move helper functions into TII, so they can be
 reused elsewhere. NFC.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@263032 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AArch64/AArch64InstrInfo.cpp       | 29 ++++++++
 lib/Target/AArch64/AArch64InstrInfo.h         |  6 ++
 .../AArch64/AArch64LoadStoreOptimizer.cpp     | 68 ++++++-------
 3 files changed, 56 insertions(+), 47 deletions(-)

diff --git a/lib/Target/AArch64/AArch64InstrInfo.cpp b/lib/Target/AArch64/AArch64InstrInfo.cpp
index 5d6b6f8b711..c57c41196f5 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1313,6 +1313,35 @@ void AArch64InstrInfo::suppressLdStPair(MachineInstr *MI) const {
       ->setFlags(MOSuppressPair << MachineMemOperand::MOTargetStartBit);
 }
 
+bool AArch64InstrInfo::isUnscaledLdSt(unsigned Opc) const {
+  switch (Opc) {
+  default:
+    return false;
+  case AArch64::STURSi:
+  case AArch64::STURDi:
+  case AArch64::STURQi:
+  case AArch64::STURBBi:
+  case AArch64::STURHHi:
+  case AArch64::STURWi:
+  case AArch64::STURXi:
+  case AArch64::LDURSi:
+  case AArch64::LDURDi:
+  case AArch64::LDURQi:
+  case AArch64::LDURWi:
+  case AArch64::LDURXi:
+  case AArch64::LDURSWi:
+  case AArch64::LDURHHi:
+  case AArch64::LDURBBi:
+  case AArch64::LDURSBWi:
+  case AArch64::LDURSHWi:
+    return true;
+  }
+}
+
+bool AArch64InstrInfo::isUnscaledLdSt(MachineInstr *MI) const {
+  return isUnscaledLdSt(MI->getOpcode());
+}
+
 bool AArch64InstrInfo::getMemOpBaseRegImmOfs(
     MachineInstr *LdSt, unsigned &BaseReg, int64_t &Offset,
     const TargetRegisterInfo *TRI) const {
diff --git a/lib/Target/AArch64/AArch64InstrInfo.h b/lib/Target/AArch64/AArch64InstrInfo.h
index ea593d76cc5..fbf8f2a1c47 100644
--- a/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/lib/Target/AArch64/AArch64InstrInfo.h
@@ -87,6 +87,12 @@ public:
   /// unprofitable.
   bool isLdStPairSuppressed(const MachineInstr *MI) const;
 
+  /// Return true if this is an unscaled load/store.
+  bool isUnscaledLdSt(unsigned Opc) const;
+
+  /// Return true if this is an unscaled load/store.
+  bool isUnscaledLdSt(MachineInstr *MI) const;
+
   /// Hint that pairing the given load or store is unprofitable.
   void suppressLdStPair(MachineInstr *MI) const;
 
diff --git a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
index b744dc2f5e0..de401b17130 100644
--- a/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
+++ b/lib/Target/AArch64/AArch64LoadStoreOptimizer.cpp
@@ -177,35 +177,6 @@ char AArch64LoadStoreOpt::ID = 0;
 INITIALIZE_PASS(AArch64LoadStoreOpt, "aarch64-ldst-opt",
                 AARCH64_LOAD_STORE_OPT_NAME, false, false)
 
-static bool isUnscaledLdSt(unsigned Opc) {
-  switch (Opc) {
-  default:
-    return false;
-  case AArch64::STURSi:
-  case AArch64::STURDi:
-  case AArch64::STURQi:
-  case AArch64::STURBBi:
-  case AArch64::STURHHi:
-  case AArch64::STURWi:
-  case AArch64::STURXi:
-  case AArch64::LDURSi:
-  case AArch64::LDURDi:
-  case AArch64::LDURQi:
-  case AArch64::LDURWi:
-  case AArch64::LDURXi:
-  case AArch64::LDURSWi:
-  case AArch64::LDURHHi:
-  case AArch64::LDURBBi:
-  case AArch64::LDURSBWi:
-  case AArch64::LDURSHWi:
-    return true;
-  }
-}
-
-static bool isUnscaledLdSt(MachineInstr *MI) {
-  return isUnscaledLdSt(MI->getOpcode());
-}
-
 static unsigned getBitExtrOpcode(MachineInstr *MI) {
   switch (MI->getOpcode()) {
   default:
@@ -626,14 +597,15 @@ static const MachineOperand &getLdStOffsetOp(const MachineInstr *MI) {
 }
 
 static bool isLdOffsetInRangeOfSt(MachineInstr *LoadInst,
-                                  MachineInstr *StoreInst) {
+                                  MachineInstr *StoreInst,
+                                  const AArch64InstrInfo *TII) {
   assert(isMatchingStore(LoadInst, StoreInst) && "Expect only matched ld/st.");
   int LoadSize = getMemScale(LoadInst);
   int StoreSize = getMemScale(StoreInst);
-  int UnscaledStOffset = isUnscaledLdSt(StoreInst)
+  int UnscaledStOffset = TII->isUnscaledLdSt(StoreInst)
                              ? getLdStOffsetOp(StoreInst).getImm()
                              : getLdStOffsetOp(StoreInst).getImm() * StoreSize;
-  int UnscaledLdOffset = isUnscaledLdSt(LoadInst)
+  int UnscaledLdOffset = TII->isUnscaledLdSt(LoadInst)
                             ? getLdStOffsetOp(LoadInst).getImm()
                             : getLdStOffsetOp(LoadInst).getImm() * LoadSize;
   return (UnscaledStOffset <= UnscaledLdOffset) &&
@@ -664,7 +636,7 @@ AArch64LoadStoreOpt::mergeNarrowInsns(MachineBasicBlock::iterator I,
   ++NextI;
 
   unsigned Opc = I->getOpcode();
-  bool IsScaled = !isUnscaledLdSt(Opc);
+  bool IsScaled = !TII->isUnscaledLdSt(Opc);
   int OffsetStride = IsScaled ? 1 : getMemScale(I);
 
   bool MergeForward = Flags.getMergeForward();
@@ -827,7 +799,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   int SExtIdx = Flags.getSExtIdx();
   unsigned Opc =
       SExtIdx == -1 ? I->getOpcode() : getMatchingNonSExtOpcode(I->getOpcode());
-  bool IsUnscaled = isUnscaledLdSt(Opc);
+  bool IsUnscaled = TII->isUnscaledLdSt(Opc);
   int OffsetStride = IsUnscaled ? getMemScale(I) : 1;
 
   bool MergeForward = Flags.getMergeForward();
@@ -841,7 +813,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
 
   int Offset = getLdStOffsetOp(I).getImm();
   int PairedOffset = getLdStOffsetOp(Paired).getImm();
-  bool PairedIsUnscaled = isUnscaledLdSt(Paired->getOpcode());
+  bool PairedIsUnscaled = TII->isUnscaledLdSt(Paired->getOpcode());
   if (IsUnscaled != PairedIsUnscaled) {
     // We're trying to pair instructions that differ in how they are scaled. If
     // I is scaled then scale the offset of Paired accordingly. Otherwise, do
@@ -874,7 +846,7 @@ AArch64LoadStoreOpt::mergePairedInsns(MachineBasicBlock::iterator I,
   }
   int OffsetImm = getLdStOffsetOp(RtMI).getImm();
   // Scale the immediate offset, if necessary.
-  if (isUnscaledLdSt(RtMI->getOpcode())) {
+  if (TII->isUnscaledLdSt(RtMI->getOpcode())) {
     assert(!(OffsetImm % getMemScale(RtMI)) &&
            "Unscaled offset cannot be scaled.");
     OffsetImm /= getMemScale(RtMI);
@@ -982,8 +954,9 @@ AArch64LoadStoreOpt::promoteLoadFromStore(MachineBasicBlock::iterator LoadI,
   // performance and correctness are verified only in little-endian.
   if (!Subtarget->isLittleEndian())
     return NextI;
-  bool IsUnscaled = isUnscaledLdSt(LoadI);
-  assert(IsUnscaled == isUnscaledLdSt(StoreI) && "Unsupported ld/st match");
+  bool IsUnscaled = TII->isUnscaledLdSt(LoadI);
+  assert(IsUnscaled == TII->isUnscaledLdSt(StoreI) &&
+         "Unsupported ld/st match");
   assert(LoadSize <= StoreSize && "Invalid load size");
   int UnscaledLdOffset = IsUnscaled
                              ? getLdStOffsetOp(LoadI).getImm()
@@ -1145,7 +1118,7 @@ bool AArch64LoadStoreOpt::findMatchingStore(
     // it's unnecessary to check if BaseReg is modified by the store itself.
     if (MI->mayStore() && isMatchingStore(LoadMI, MI) &&
         BaseReg == getLdStBaseOp(MI).getReg() &&
-        isLdOffsetInRangeOfSt(LoadMI, MI) &&
+        isLdOffsetInRangeOfSt(LoadMI, MI, TII) &&
         !ModifiedRegs[getLdStRegOp(MI).getReg()]) {
       StoreI = MBBI;
       return true;
@@ -1171,7 +1144,8 @@ bool AArch64LoadStoreOpt::findMatchingStore(
 
 // Returns true if these two opcodes can be merged or paired. Otherwise,
 // returns false.
-static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags) {
+static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags,
+                        const AArch64InstrInfo *TII) {
   // Opcodes match: nothing more to check.
   if (OpcA == OpcB)
     return true;
@@ -1197,7 +1171,7 @@ static bool canMergeOpc(unsigned OpcA, unsigned OpcB, LdStPairFlags &Flags) {
     return false;
 
   // Try to match an unscaled load/store with a scaled load/store.
-  return isUnscaledLdSt(OpcA) != isUnscaledLdSt(OpcB) &&
+  return TII->isUnscaledLdSt(OpcA) != TII->isUnscaledLdSt(OpcB) &&
          getMatchingPairOpcode(OpcA) == getMatchingPairOpcode(OpcB);
 
   // FIXME: Can we also match a mixed sext/zext unscaled/scaled pair?
@@ -1215,7 +1189,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
 
   unsigned Opc = FirstMI->getOpcode();
   bool MayLoad = FirstMI->mayLoad();
-  bool IsUnscaled = isUnscaledLdSt(FirstMI);
+  bool IsUnscaled = TII->isUnscaledLdSt(FirstMI);
   unsigned Reg = getLdStRegOp(FirstMI).getReg();
   unsigned BaseReg = getLdStBaseOp(FirstMI).getReg();
   int Offset = getLdStOffsetOp(FirstMI).getImm();
@@ -1241,7 +1215,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
     ++Count;
 
     Flags.setSExtIdx(-1);
-    if (canMergeOpc(Opc, MI->getOpcode(), Flags) &&
+    if (canMergeOpc(Opc, MI->getOpcode(), Flags, TII) &&
         getLdStOffsetOp(MI).isImm()) {
       assert(MI->mayLoadOrStore() && "Expected memory operation.");
       // If we've found another instruction with the same opcode, check to see
@@ -1256,7 +1230,7 @@ AArch64LoadStoreOpt::findMatchingInsn(MachineBasicBlock::iterator I,
       // final offset must be in range.
       unsigned MIBaseReg = getLdStBaseOp(MI).getReg();
      int MIOffset = getLdStOffsetOp(MI).getImm();
-      bool MIIsUnscaled = isUnscaledLdSt(MI);
+      bool MIIsUnscaled = TII->isUnscaledLdSt(MI);
       if (IsUnscaled != MIIsUnscaled) {
         // We're trying to pair instructions that differ in how they are scaled.
         // If FirstMI is scaled then scale the offset of MI accordingly.
@@ -1685,7 +1659,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   // Early exit if the offset is not possible to match. (6 bits of positive
   // range, plus allow an extra one in case we find a later insn that matches
   // with Offset-1)
-  bool IsUnscaled = isUnscaledLdSt(MI);
+  bool IsUnscaled = TII->isUnscaledLdSt(MI);
   int Offset = getLdStOffsetOp(MI).getImm();
   int OffsetStride = IsUnscaled ? getMemScale(MI) : 1;
   if (!inBoundsForPair(IsUnscaled, Offset, OffsetStride))
@@ -1696,7 +1670,7 @@ bool AArch64LoadStoreOpt::tryToPairLdStInst(MachineBasicBlock::iterator &MBBI) {
   MachineBasicBlock::iterator Paired = findMatchingInsn(MBBI, Flags, LdStLimit);
   if (Paired != E) {
     ++NumPairCreated;
-    if (isUnscaledLdSt(MI))
+    if (TII->isUnscaledLdSt(MI))
       ++NumUnscaledPairCreated;
     // Keeping the iterator straight is a pain, so we let the merge routine tell
     // us what the next instruction is after it's done mucking about.
@@ -1920,7 +1894,7 @@ bool AArch64LoadStoreOpt::optimizeBlock(MachineBasicBlock &MBB,
       }
       // Don't know how to handle pre/post-index versions, so move to the next
      // instruction.
-      if (isUnscaledLdSt(Opc)) {
+      if (TII->isUnscaledLdSt(Opc)) {
        ++MBBI;
        break;
      }
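
To illustrate the "reused elsewhere" motivation in the subject line: a minimal sketch of how a later AArch64 machine-function pass could now reach the helper through TII rather than a file-local static. Everything below is hypothetical (the pass name, statistic, and structure are made up for illustration); only the TII accessor path and isUnscaledLdSt() itself come from this patch.

// Sketch only -- not part of this patch. Shows the standard route from a
// MachineFunction to the target's AArch64InstrInfo, where the helpers now live.
#include "AArch64InstrInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/Target/TargetSubtargetInfo.h"

using namespace llvm;

namespace {
struct UnscaledLdStCount : public MachineFunctionPass {
  static char ID;
  UnscaledLdStCount() : MachineFunctionPass(ID) {}

  bool runOnMachineFunction(MachineFunction &MF) override {
    // Fetch the target's instruction info once per function; any pass with a
    // MachineFunction in hand can now ask the same questions the load/store
    // optimizer does, without duplicating the opcode table.
    const AArch64InstrInfo *TII =
        static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
    unsigned NumUnscaled = 0;
    for (MachineBasicBlock &MBB : MF)
      for (MachineInstr &MI : MBB)
        if (TII->isUnscaledLdSt(&MI)) // e.g. LDURXi, STURWi, ...
          ++NumUnscaled;
    (void)NumUnscaled; // a real pass would feed a Statistic or cost model
    return false;      // analysis only; nothing is rewritten
  }
};
char UnscaledLdStCount::ID = 0;
} // end anonymous namespace

The unsigned-opcode overload exists alongside the MachineInstr one so that callers holding only an opcode (as several sites in the load/store optimizer above do) can ask the same question without a MachineInstr in hand.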