[InstrInfo] Refactor foldMemoryOperandImpl to thread through InsertPt. NFC

Summary:
This was a longstanding FIXME and is a necessary precursor to cases
where foldMemoryOperandImpl may have to create more than one instruction
(e.g. to constrain a register class). These are the split-out NFC changes from
D6262.

Reviewers: pete, ributzka, uweigand, mcrosier

Reviewed By: mcrosier

Subscribers: mcrosier, ted, llvm-commits

Differential Revision: http://reviews.llvm.org/D10174

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@239336 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Keno Fischer 2015-06-08 20:09:58 +00:00
parent 279ef837e8
commit 4332f869bf
13 changed files with 132 additions and 104 deletions

View File

@ -711,20 +711,22 @@ protected:
/// Target-dependent implementation for foldMemoryOperand. /// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will /// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction. /// take care of adding a MachineMemOperand to the newly created instruction.
virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, /// The instruction and any auxiliary instructions necessary will be inserted
MachineInstr *MI, /// at InsertPt.
ArrayRef<unsigned> Ops, virtual MachineInstr *foldMemoryOperandImpl(
int FrameIndex) const { MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
return nullptr; return nullptr;
} }
/// Target-dependent implementation for foldMemoryOperand. /// Target-dependent implementation for foldMemoryOperand.
/// Target-independent code in foldMemoryOperand will /// Target-independent code in foldMemoryOperand will
/// take care of adding a MachineMemOperand to the newly created instruction. /// take care of adding a MachineMemOperand to the newly created instruction.
virtual MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, /// The instruction and any auxiliary instructions necessary will be inserted
MachineInstr *MI, /// at InsertPt.
ArrayRef<unsigned> Ops, virtual MachineInstr *foldMemoryOperandImpl(
MachineInstr *LoadMI) const { MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
return nullptr; return nullptr;
} }

View File

@ -471,9 +471,11 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MI->getOpcode() == TargetOpcode::PATCHPOINT) { MI->getOpcode() == TargetOpcode::PATCHPOINT) {
// Fold stackmap/patchpoint. // Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FI, *this); NewMI = foldPatchpoint(MF, MI, Ops, FI, *this);
if (NewMI)
MBB->insert(MI, NewMI);
} else { } else {
// Ask the target to do the actual folding. // Ask the target to do the actual folding.
NewMI =foldMemoryOperandImpl(MF, MI, Ops, FI); NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, FI);
} }
if (NewMI) { if (NewMI) {
@ -493,8 +495,7 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
MFI.getObjectAlignment(FI)); MFI.getObjectAlignment(FI));
NewMI->addMemOperand(MF, MMO); NewMI->addMemOperand(MF, MMO);
// FIXME: change foldMemoryOperandImpl semantics to also insert NewMI. return NewMI;
return MBB->insert(MI, NewMI);
} }
// Straight COPY may fold as load/store. // Straight COPY may fold as load/store.
@ -539,15 +540,15 @@ MachineInstr *TargetInstrInfo::foldMemoryOperand(MachineBasicBlock::iterator MI,
isLoadFromStackSlot(LoadMI, FrameIndex)) { isLoadFromStackSlot(LoadMI, FrameIndex)) {
// Fold stackmap/patchpoint. // Fold stackmap/patchpoint.
NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this); NewMI = foldPatchpoint(MF, MI, Ops, FrameIndex, *this);
if (NewMI)
NewMI = MBB.insert(MI, NewMI);
} else { } else {
// Ask the target to do the actual folding. // Ask the target to do the actual folding.
NewMI = foldMemoryOperandImpl(MF, MI, Ops, LoadMI); NewMI = foldMemoryOperandImpl(MF, MI, Ops, MI, LoadMI);
} }
if (!NewMI) return nullptr; if (!NewMI) return nullptr;
NewMI = MBB.insert(MI, NewMI);
// Copy the memoperands from the load to the folded instruction. // Copy the memoperands from the load to the folded instruction.
if (MI->memoperands_empty()) { if (MI->memoperands_empty()) {
NewMI->setMemRefs(LoadMI->memoperands_begin(), NewMI->setMemRefs(LoadMI->memoperands_begin(),

View File

@ -2066,10 +2066,9 @@ void llvm::emitFrameOffset(MachineBasicBlock &MBB,
.setMIFlag(Flag); .setMIFlag(Flag);
} }
MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *AArch64InstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
int FrameIndex) const {
// This is a bit of a hack. Consider this instruction: // This is a bit of a hack. Consider this instruction:
// //
// %vreg0<def> = COPY %SP; GPR64all:%vreg0 // %vreg0<def> = COPY %SP; GPR64all:%vreg0

View File

@ -131,6 +131,7 @@ public:
using TargetInstrInfo::foldMemoryOperandImpl; using TargetInstrInfo::foldMemoryOperandImpl;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override; int FrameIndex) const override;
bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB, bool AnalyzeBranch(MachineBasicBlock &MBB, MachineBasicBlock *&TBB,

View File

@ -779,10 +779,9 @@ HexagonInstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return false; return false;
} }
MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *HexagonInstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FI) const {
int FI) const {
// Hexagon_TODO: Implement. // Hexagon_TODO: Implement.
return nullptr; return nullptr;
} }

View File

@ -114,10 +114,12 @@ public:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override; int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override { MachineInstr *LoadMI) const override {
return nullptr; return nullptr;
} }

View File

@ -152,17 +152,15 @@ bool AMDGPUInstrInfo::expandPostRAPseudo (MachineBasicBlock::iterator MI) const
return true; return true;
} }
MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
int FrameIndex) const {
// TODO: Implement this function // TODO: Implement this function
return nullptr; return nullptr;
} }
MachineInstr * MachineInstr *AMDGPUInstrInfo::foldMemoryOperandImpl(
AMDGPUInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
MachineInstr *LoadMI) const {
// TODO: Implement this function // TODO: Implement this function
return nullptr; return nullptr;
} }

View File

@ -87,9 +87,11 @@ public:
protected: protected:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override; int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override; MachineInstr *LoadMI) const override;
public: public:

View File

@ -752,10 +752,9 @@ SystemZInstrInfo::convertToThreeAddress(MachineFunction::iterator &MFI,
return nullptr; return nullptr;
} }
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
int FrameIndex) const {
const MachineFrameInfo *MFI = MF.getFrameInfo(); const MachineFrameInfo *MFI = MF.getFrameInfo();
unsigned Size = MFI->getObjectSize(FrameIndex); unsigned Size = MFI->getObjectSize(FrameIndex);
unsigned Opcode = MI->getOpcode(); unsigned Opcode = MI->getOpcode();
@ -765,8 +764,10 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
isInt<8>(MI->getOperand(2).getImm()) && isInt<8>(MI->getOperand(2).getImm()) &&
!MI->getOperand(3).getReg()) { !MI->getOperand(3).getReg()) {
// LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::AGSI)) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addFrameIndex(FrameIndex).addImm(0) get(SystemZ::AGSI))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI->getOperand(2).getImm()); .addImm(MI->getOperand(2).getImm());
} }
return nullptr; return nullptr;
@ -786,8 +787,10 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
isInt<8>(MI->getOperand(2).getImm())) { isInt<8>(MI->getOperand(2).getImm())) {
// A(G)HI %reg, CONST -> A(G)SI %mem, CONST // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI); Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
return BuildMI(MF, MI->getDebugLoc(), get(Opcode)) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addFrameIndex(FrameIndex).addImm(0) get(Opcode))
.addFrameIndex(FrameIndex)
.addImm(0)
.addImm(MI->getOperand(2).getImm()); .addImm(MI->getOperand(2).getImm());
} }
@ -798,17 +801,23 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// source register instead. // source register instead.
if (OpNum == 0) { if (OpNum == 0) {
unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD; unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
return BuildMI(MF, MI->getDebugLoc(), get(StoreOpcode)) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addOperand(MI->getOperand(1)).addFrameIndex(FrameIndex) get(StoreOpcode))
.addImm(0).addReg(0); .addOperand(MI->getOperand(1))
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
} }
// If we're spilling the source of an LDGR or LGDR, load the // If we're spilling the source of an LDGR or LGDR, load the
// destination register instead. // destination register instead.
if (OpNum == 1) { if (OpNum == 1) {
unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD; unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
unsigned Dest = MI->getOperand(0).getReg(); unsigned Dest = MI->getOperand(0).getReg();
return BuildMI(MF, MI->getDebugLoc(), get(LoadOpcode), Dest) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addFrameIndex(FrameIndex).addImm(0).addReg(0); get(LoadOpcode), Dest)
.addFrameIndex(FrameIndex)
.addImm(0)
.addReg(0);
} }
} }
@ -830,16 +839,24 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
if (MMO->getSize() == Size && !MMO->isVolatile()) { if (MMO->getSize() == Size && !MMO->isVolatile()) {
// Handle conversion of loads. // Handle conversion of loads.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) { if (isSimpleBD12Move(MI, SystemZII::SimpleBDXLoad)) {
return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC)) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addFrameIndex(FrameIndex).addImm(0).addImm(Size) get(SystemZ::MVC))
.addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm()) .addFrameIndex(FrameIndex)
.addImm(0)
.addImm(Size)
.addOperand(MI->getOperand(1))
.addImm(MI->getOperand(2).getImm())
.addMemOperand(MMO); .addMemOperand(MMO);
} }
// Handle conversion of stores. // Handle conversion of stores.
if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) { if (isSimpleBD12Move(MI, SystemZII::SimpleBDXStore)) {
return BuildMI(MF, MI->getDebugLoc(), get(SystemZ::MVC)) return BuildMI(*InsertPt->getParent(), InsertPt, MI->getDebugLoc(),
.addOperand(MI->getOperand(1)).addImm(MI->getOperand(2).getImm()) get(SystemZ::MVC))
.addImm(Size).addFrameIndex(FrameIndex).addImm(0) .addOperand(MI->getOperand(1))
.addImm(MI->getOperand(2).getImm())
.addImm(Size)
.addFrameIndex(FrameIndex)
.addImm(0)
.addMemOperand(MMO); .addMemOperand(MMO);
} }
} }
@ -856,7 +873,8 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
assert(AccessBytes != 0 && "Size of access should be known"); assert(AccessBytes != 0 && "Size of access should be known");
assert(AccessBytes <= Size && "Access outside the frame index"); assert(AccessBytes <= Size && "Access outside the frame index");
uint64_t Offset = Size - AccessBytes; uint64_t Offset = Size - AccessBytes;
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), get(MemOpcode)); MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MI->getDebugLoc(), get(MemOpcode));
for (unsigned I = 0; I < OpNum; ++I) for (unsigned I = 0; I < OpNum; ++I)
MIB.addOperand(MI->getOperand(I)); MIB.addOperand(MI->getOperand(I));
MIB.addFrameIndex(FrameIndex).addImm(Offset); MIB.addFrameIndex(FrameIndex).addImm(Offset);
@ -869,10 +887,9 @@ MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return nullptr; return nullptr;
} }
MachineInstr * MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
SystemZInstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
MachineInstr *LoadMI) const {
return nullptr; return nullptr;
} }

View File

@ -187,9 +187,11 @@ public:
LiveVariables *LV) const override; LiveVariables *LV) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override; int FrameIndex) const override;
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override; MachineInstr *LoadMI) const override;
bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override; bool expandPostRAPseudo(MachineBasicBlock::iterator MBBI) const override;
bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const bool ReverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const

View File

@ -3530,9 +3530,9 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
SmallVector<MachineOperand, 8> AddrOps; SmallVector<MachineOperand, 8> AddrOps;
AM.getFullAddress(AddrOps); AM.getFullAddress(AddrOps);
MachineInstr *Result = MachineInstr *Result = XII.foldMemoryOperandImpl(
XII.foldMemoryOperandImpl(*FuncInfo.MF, MI, OpNo, AddrOps, *FuncInfo.MF, MI, OpNo, AddrOps, FuncInfo.InsertPt, Size, Alignment,
Size, Alignment, /*AllowCommute=*/true); /*AllowCommute=*/true);
if (!Result) if (!Result)
return false; return false;
@ -3556,7 +3556,6 @@ bool X86FastISel::tryToFoldLoadIntoMI(MachineInstr *MI, unsigned OpNo,
} }
Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI)); Result->addMemOperand(*FuncInfo.MF, createMachineMemOperandFor(LI));
FuncInfo.MBB->insert(FuncInfo.InsertPt, Result);
MI->eraseFromParent(); MI->eraseFromParent();
return true; return true;
} }

View File

@ -4703,8 +4703,17 @@ bool X86InstrInfo::expandPostRAPseudo(MachineBasicBlock::iterator MI) const {
return false; return false;
} }
static void addOperands(MachineInstrBuilder &MIB, ArrayRef<MachineOperand> MOs) {
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
addOffset(MIB, 0);
}
static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode, static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
ArrayRef<MachineOperand> MOs, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr *MI, MachineInstr *MI,
const TargetInstrInfo &TII) { const TargetInstrInfo &TII) {
// Create the base instruction with the memory operand as the first part. // Create the base instruction with the memory operand as the first part.
@ -4712,11 +4721,7 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
MI->getDebugLoc(), true); MI->getDebugLoc(), true);
MachineInstrBuilder MIB(MF, NewMI); MachineInstrBuilder MIB(MF, NewMI);
unsigned NumAddrOps = MOs.size(); addOperands(MIB, MOs);
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
addOffset(MIB, 0);
// Loop over the rest of the ri operands, converting them over. // Loop over the rest of the ri operands, converting them over.
unsigned NumOps = MI->getDesc().getNumOperands()-2; unsigned NumOps = MI->getDesc().getNumOperands()-2;
@ -4728,11 +4733,16 @@ static MachineInstr *FuseTwoAddrInst(MachineFunction &MF, unsigned Opcode,
MachineOperand &MO = MI->getOperand(i); MachineOperand &MO = MI->getOperand(i);
MIB.addOperand(MO); MIB.addOperand(MO);
} }
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB; return MIB;
} }
static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode, static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
unsigned OpNo, ArrayRef<MachineOperand> MOs, unsigned OpNo, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr *MI, const TargetInstrInfo &TII) { MachineInstr *MI, const TargetInstrInfo &TII) {
// Omit the implicit operands, something BuildMI can't do. // Omit the implicit operands, something BuildMI can't do.
MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode), MachineInstr *NewMI = MF.CreateMachineInstr(TII.get(Opcode),
@ -4743,38 +4753,32 @@ static MachineInstr *FuseInst(MachineFunction &MF, unsigned Opcode,
MachineOperand &MO = MI->getOperand(i); MachineOperand &MO = MI->getOperand(i);
if (i == OpNo) { if (i == OpNo) {
assert(MO.isReg() && "Expected to fold into reg operand!"); assert(MO.isReg() && "Expected to fold into reg operand!");
unsigned NumAddrOps = MOs.size(); addOperands(MIB, MOs);
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
addOffset(MIB, 0);
} else { } else {
MIB.addOperand(MO); MIB.addOperand(MO);
} }
} }
MachineBasicBlock *MBB = InsertPt->getParent();
MBB->insert(InsertPt, NewMI);
return MIB; return MIB;
} }
static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode, static MachineInstr *MakeM0Inst(const TargetInstrInfo &TII, unsigned Opcode,
ArrayRef<MachineOperand> MOs, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
MachineInstr *MI) { MachineInstr *MI) {
MachineFunction &MF = *MI->getParent()->getParent(); MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
MachineInstrBuilder MIB = BuildMI(MF, MI->getDebugLoc(), TII.get(Opcode)); MI->getDebugLoc(), TII.get(Opcode));
addOperands(MIB, MOs);
unsigned NumAddrOps = MOs.size();
for (unsigned i = 0; i != NumAddrOps; ++i)
MIB.addOperand(MOs[i]);
if (NumAddrOps < 4) // FrameIndex only
addOffset(MIB, 0);
return MIB.addImm(0); return MIB.addImm(0);
} }
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, unsigned OpNum,
unsigned OpNum, ArrayRef<MachineOperand> MOs, MachineBasicBlock::iterator InsertPt,
ArrayRef<MachineOperand> MOs, unsigned Size, unsigned Align, bool AllowCommute) const {
unsigned Size, unsigned Align,
bool AllowCommute) const {
const DenseMap<unsigned, const DenseMap<unsigned,
std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr; std::pair<unsigned,unsigned> > *OpcodeTablePtr = nullptr;
bool isCallRegIndirect = Subtarget.callRegIndirect(); bool isCallRegIndirect = Subtarget.callRegIndirect();
@ -4808,7 +4812,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
isTwoAddrFold = true; isTwoAddrFold = true;
} else if (OpNum == 0) { } else if (OpNum == 0) {
if (MI->getOpcode() == X86::MOV32r0) { if (MI->getOpcode() == X86::MOV32r0) {
NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, MI); NewMI = MakeM0Inst(*this, X86::MOV32mi, MOs, InsertPt, MI);
if (NewMI) if (NewMI)
return NewMI; return NewMI;
} }
@ -4853,9 +4857,9 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
} }
if (isTwoAddrFold) if (isTwoAddrFold)
NewMI = FuseTwoAddrInst(MF, Opcode, MOs, MI, *this); NewMI = FuseTwoAddrInst(MF, Opcode, MOs, InsertPt, MI, *this);
else else
NewMI = FuseInst(MF, Opcode, OpNum, MOs, MI, *this); NewMI = FuseInst(MF, Opcode, OpNum, MOs, InsertPt, MI, *this);
if (NarrowToMOV32rm) { if (NarrowToMOV32rm) {
// If this is the special case where we use a MOV32rm to load a 32-bit // If this is the special case where we use a MOV32rm to load a 32-bit
@ -4907,7 +4911,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
// Attempt to fold with the commuted version of the instruction. // Attempt to fold with the commuted version of the instruction.
unsigned CommuteOp = unsigned CommuteOp =
(CommuteOpIdx1 == OriginalOpIdx ? CommuteOpIdx2 : CommuteOpIdx1); (CommuteOpIdx1 == OriginalOpIdx ? CommuteOpIdx2 : CommuteOpIdx1);
NewMI = foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, Size, Align, NewMI =
foldMemoryOperandImpl(MF, MI, CommuteOp, MOs, InsertPt, Size, Align,
/*AllowCommute=*/false); /*AllowCommute=*/false);
if (NewMI) if (NewMI)
return NewMI; return NewMI;
@ -5137,10 +5142,9 @@ breakPartialRegDependency(MachineBasicBlock::iterator MI, unsigned OpNum,
MI->addRegisterKilled(Reg, TRI, true); MI->addRegisterKilled(Reg, TRI, true);
} }
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, int FrameIndex) const {
int FrameIndex) const {
// Check switch flag // Check switch flag
if (NoFusing) return nullptr; if (NoFusing) return nullptr;
@ -5179,8 +5183,8 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
return nullptr; return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops[0], return foldMemoryOperandImpl(MF, MI, Ops[0],
MachineOperand::CreateFI(FrameIndex), Size, MachineOperand::CreateFI(FrameIndex), InsertPt,
Alignment, /*AllowCommute=*/true); Size, Alignment, /*AllowCommute=*/true);
} }
static bool isPartialRegisterLoad(const MachineInstr &LoadMI, static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
@ -5202,17 +5206,16 @@ static bool isPartialRegisterLoad(const MachineInstr &LoadMI,
return false; return false;
} }
MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *X86InstrInfo::foldMemoryOperandImpl(
MachineInstr *MI, MachineFunction &MF, MachineInstr *MI, ArrayRef<unsigned> Ops,
ArrayRef<unsigned> Ops, MachineBasicBlock::iterator InsertPt, MachineInstr *LoadMI) const {
MachineInstr *LoadMI) const {
// If loading from a FrameIndex, fold directly from the FrameIndex. // If loading from a FrameIndex, fold directly from the FrameIndex.
unsigned NumOps = LoadMI->getDesc().getNumOperands(); unsigned NumOps = LoadMI->getDesc().getNumOperands();
int FrameIndex; int FrameIndex;
if (isLoadFromStackSlot(LoadMI, FrameIndex)) { if (isLoadFromStackSlot(LoadMI, FrameIndex)) {
if (isPartialRegisterLoad(*LoadMI, MF)) if (isPartialRegisterLoad(*LoadMI, MF))
return nullptr; return nullptr;
return foldMemoryOperandImpl(MF, MI, Ops, FrameIndex); return foldMemoryOperandImpl(MF, MI, Ops, InsertPt, FrameIndex);
} }
// Check switch flag // Check switch flag
@ -5332,7 +5335,7 @@ MachineInstr *X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
break; break;
} }
} }
return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, return foldMemoryOperandImpl(MF, MI, Ops[0], MOs, InsertPt,
/*Size=*/0, Alignment, /*AllowCommute=*/true); /*Size=*/0, Alignment, /*AllowCommute=*/true);
} }

View File

@ -307,6 +307,7 @@ public:
/// references has been changed. /// references has been changed.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
int FrameIndex) const override; int FrameIndex) const override;
/// foldMemoryOperand - Same as the previous version except it allows folding /// foldMemoryOperand - Same as the previous version except it allows folding
@ -314,6 +315,7 @@ public:
/// stack slot. /// stack slot.
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
ArrayRef<unsigned> Ops, ArrayRef<unsigned> Ops,
MachineBasicBlock::iterator InsertPt,
MachineInstr *LoadMI) const override; MachineInstr *LoadMI) const override;
/// canFoldMemoryOperand - Returns true if the specified load / store is /// canFoldMemoryOperand - Returns true if the specified load / store is
@ -407,6 +409,7 @@ public:
MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI, MachineInstr *foldMemoryOperandImpl(MachineFunction &MF, MachineInstr *MI,
unsigned OpNum, unsigned OpNum,
ArrayRef<MachineOperand> MOs, ArrayRef<MachineOperand> MOs,
MachineBasicBlock::iterator InsertPt,
unsigned Size, unsigned Alignment, unsigned Size, unsigned Alignment,
bool AllowCommute) const; bool AllowCommute) const;