Remove redundant foldMemoryOperand variants and other code clean up.

llvm-svn: 44517
Evan Cheng, 2007-12-02 08:30:39 +00:00
commit 58b387dfb0 (parent a01c1e3dd5)
19 changed files with 171 additions and 257 deletions
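The cleanup collapses four foldMemoryOperand overloads (single operand index or index list, stack slot or arbitrary load) into one entry point per flavor that always takes the index list. A minimal toy model of the resulting call pattern, in plain standard C++ rather than LLVM's types (std::vector stands in for SmallVector, Instr for MachineInstr):

// Toy model only, not LLVM code. Targets that can fold just one operand
// bail out early, exactly as the updated target hooks below do with
// their new "Ops.size() != 1" guard.
#include <cstdio>
#include <vector>

struct Instr { int Opcode; };

Instr *foldMemoryOperand(Instr *MI, const std::vector<unsigned> &Ops,
                         int FrameIndex) {
  if (Ops.size() != 1) return nullptr;   // single-operand targets only
  std::printf("fold operand %u of opcode %d into slot %d\n",
              Ops[0], MI->Opcode, FrameIndex);
  return MI;
}

int main() {
  Instr I = {42};
  std::vector<unsigned> Ops;
  Ops.push_back(0);                      // former foldMemoryOperand(MI, 0, FI)
  foldMemoryOperand(&I, Ops, 3);
}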

LiveIntervalAnalysis.h

@@ -275,8 +275,7 @@ namespace llvm {
     /// returns true.
     bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
                               MachineInstr *DefMI, unsigned InstrIdx,
-                              unsigned OpIdx,
-                              SmallVector<unsigned, 2> &UseOps,
+                              SmallVector<unsigned, 2> &Ops,
                               bool isSS, int Slot, unsigned Reg);

     /// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified

MRegisterInfo.h

@@ -533,20 +533,13 @@ public:
                              const MachineInstr *Orig) const = 0;

   /// foldMemoryOperand - Attempt to fold a load or store of the specified stack
-  /// slot into the specified machine instruction for the specified operand. If
-  /// this is possible, a new instruction is returned with the specified operand
-  /// folded, otherwise NULL is returned. The client is responsible for removing
-  /// the old instruction and adding the new one in the instruction stream
+  /// slot into the specified machine instruction for the specified operand(s).
+  /// If this is possible, a new instruction is returned with the specified
+  /// operand folded, otherwise NULL is returned. The client is responsible for
+  /// removing the old instruction and adding the new one in the instruction
+  /// stream.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const {
     return 0;
   }
@@ -555,15 +548,7 @@ public:
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }
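The comment above spells out the ownership contract: the hook returns a freshly created instruction (or NULL), and the caller unlinks the old one and inserts the replacement. A hedged sketch of that contract with standard containers (names here are illustrative, not LLVM's):

// Illustrative sketch, not LLVM code: the fold hook never mutates the
// instruction stream itself; the client erases the old instruction and
// inserts the returned one at the same position.
#include <list>

struct Instr { int Opcode; };

Instr *tryFold(const Instr &MI) {        // returns a new Instr or nullptr
  return MI.Opcode == 1 ? new Instr{2} : nullptr;
}

void foldAt(std::list<Instr*> &Stream, std::list<Instr*>::iterator It) {
  if (Instr *NewMI = tryFold(**It)) {
    delete *It;
    It = Stream.erase(It);               // remove the old instruction
    Stream.insert(It, NewMI);            // add the new one in its place
  }
}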

LiveIntervalAnalysis.cpp

@@ -643,28 +643,32 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
 /// returns true.
 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
                                          VirtRegMap &vrm, MachineInstr *DefMI,
-                                         unsigned InstrIdx, unsigned OpIdx,
-                                         SmallVector<unsigned, 2> &UseOps,
+                                         unsigned InstrIdx,
+                                         SmallVector<unsigned, 2> &Ops,
                                          bool isSS, int Slot, unsigned Reg) {
-  // FIXME: fold subreg use
-  if (MI->getOperand(OpIdx).getSubReg())
-    return false;
-
-  MachineInstr *fmi = NULL;
-  if (UseOps.size() < 2)
-    fmi = isSS ? mri_->foldMemoryOperand(MI, OpIdx, Slot)
-               : mri_->foldMemoryOperand(MI, OpIdx, DefMI);
-  else {
-    if (OpIdx != UseOps[0])
-      // Must be two-address instruction + one more use. Not going to fold.
+  unsigned MRInfo = 0;
+  const TargetInstrDescriptor *TID = MI->getInstrDescriptor();
+  SmallVector<unsigned, 2> FoldOps;
+  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+    unsigned OpIdx = Ops[i];
+    // FIXME: fold subreg use.
+    if (MI->getOperand(OpIdx).getSubReg())
       return false;
-    // It may be possible to fold load when there are multiple uses.
-    // e.g. On x86, TEST32rr r, r -> CMP32rm [mem], 0
-    fmi = isSS ? mri_->foldMemoryOperand(MI, UseOps, Slot)
-               : mri_->foldMemoryOperand(MI, UseOps, DefMI);
+    if (MI->getOperand(OpIdx).isDef())
+      MRInfo |= (unsigned)VirtRegMap::isMod;
+    else {
+      // Filter out two-address use operand(s).
+      if (TID->getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
+        MRInfo = VirtRegMap::isModRef;
+        continue;
+      }
+      MRInfo |= (unsigned)VirtRegMap::isRef;
+    }
+    FoldOps.push_back(OpIdx);
   }
+
+  MachineInstr *fmi = isSS ? mri_->foldMemoryOperand(MI, FoldOps, Slot)
+                           : mri_->foldMemoryOperand(MI, FoldOps, DefMI);
   if (fmi) {
     // Attempt to fold the memory reference into the instruction. If
     // we can do this, we don't need to insert spill code.
@@ -674,7 +678,7 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
     LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
     MachineBasicBlock &MBB = *MI->getParent();
     if (isSS && !mf_->getFrameInfo()->isFixedObjectIndex(Slot))
-      vrm.virtFolded(Reg, MI, OpIdx, fmi);
+      vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
     vrm.transferSpillPts(MI, fmi);
     vrm.transferRestorePts(MI, fmi);
     mi2iMap_.erase(MI);
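The MRInfo value threaded into vrm.virtFolded above replaces the per-operand recomputation this commit deletes from VirtRegMap::virtFolded: each folded def contributes a mod bit, each folded plain use a ref bit, and a tied two-address use forces mod+ref (and is additionally kept out of the FoldOps list handed to the target, a filtering step this sketch omits). A standalone restatement with toy types but the same flag logic:

// Toy restatement of the MRInfo accumulation in tryFoldMemoryOperand.
#include <vector>

enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };

struct Operand { bool IsDef; bool IsTiedUse; };

unsigned computeMRInfo(const std::vector<Operand> &Ops) {
  unsigned MRInfo = 0;
  for (const Operand &Op : Ops) {
    if (Op.IsDef)
      MRInfo |= (unsigned)isMod;       // folded def: slot is written
    else if (Op.IsTiedUse)
      MRInfo = (unsigned)isModRef;     // two-address use: read and written
    else
      MRInfo |= (unsigned)isRef;       // folded use: slot is read
  }
  return MRInfo;
}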
@@ -775,28 +779,25 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       HasUse = mop.isUse();
       HasDef = mop.isDef();
-      SmallVector<unsigned, 2> UseOps;
-      if (HasUse)
-        UseOps.push_back(i);
-      std::vector<unsigned> UpdateOps;
+      SmallVector<unsigned, 2> Ops;
+      Ops.push_back(i);
       for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
-        if (!MI->getOperand(j).isRegister())
+        const MachineOperand &MOj = MI->getOperand(j);
+        if (!MOj.isRegister())
           continue;
-        unsigned RegJ = MI->getOperand(j).getReg();
+        unsigned RegJ = MOj.getReg();
         if (RegJ == 0 || MRegisterInfo::isPhysicalRegister(RegJ))
           continue;
         if (RegJ == RegI) {
-          UpdateOps.push_back(j);
-          if (MI->getOperand(j).isUse())
-            UseOps.push_back(j);
-          HasUse |= MI->getOperand(j).isUse();
-          HasDef |= MI->getOperand(j).isDef();
+          Ops.push_back(j);
+          HasUse |= MOj.isUse();
+          HasDef |= MOj.isDef();
         }
       }

       if (TryFold &&
-          tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, i,
-                               UseOps, FoldSS, FoldSlot, Reg)) {
+          tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                               Ops, FoldSS, FoldSlot, Reg)) {
         // Folding the load/store can completely change the instruction in
         // unpredictable ways, rescan it from the beginning.
         HasUse = false;
@@ -814,8 +815,8 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
       mop.setReg(NewVReg);

       // Reuse NewVReg for other reads.
-      for (unsigned j = 0, e = UpdateOps.size(); j != e; ++j)
-        MI->getOperand(UpdateOps[j]).setReg(NewVReg);
+      for (unsigned j = 0, e = Ops.size(); j != e; ++j)
+        MI->getOperand(Ops[j]).setReg(NewVReg);

       if (CreatedNewVReg) {
         if (DefIsReMat) {
@@ -1226,7 +1227,7 @@ addIntervalsForSpills(const LiveInterval &li,
   if (!TrySplit)
     return NewLIs;

-  SmallVector<unsigned, 2> UseOps;
+  SmallVector<unsigned, 2> Ops;
   if (NeedStackSlot) {
     int Id = SpillMBBs.find_first();
     while (Id != -1) {
@@ -1236,41 +1237,43 @@ addIntervalsForSpills(const LiveInterval &li,
         unsigned VReg = spills[i].vreg;
         bool isReMat = vrm.isReMaterialized(VReg);
         MachineInstr *MI = getInstructionFromIndex(index);
-        int OpIdx = -1;
-        UseOps.clear();
+        bool CanFold = false;
+        bool FoundUse = false;
+        Ops.clear();
         if (spills[i].canFold) {
+          CanFold = true;
           for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
             MachineOperand &MO = MI->getOperand(j);
             if (!MO.isRegister() || MO.getReg() != VReg)
               continue;
-            if (MO.isDef()) {
-              OpIdx = (int)j;
+            Ops.push_back(j);
+            if (MO.isDef())
               continue;
-            }
-            // Can't fold if it's two-address code and the use isn't the
-            // first and only use.
-            if (isReMat ||
-                (UseOps.empty() && !alsoFoldARestore(Id, index, VReg,
-                                                     RestoreMBBs, RestoreIdxes))) {
-              OpIdx = -1;
+            if (isReMat ||
+                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
+                                                RestoreMBBs, RestoreIdxes))) {
+              // MI has two-address uses of the same register. If the use
+              // isn't the first and only use in the BB, then we can't fold
+              // it. FIXME: Move this to rewriteInstructionsForSpills.
+              CanFold = false;
              break;
            }
-            UseOps.push_back(j);
+            FoundUse = true;
          }
        }
        // Fold the store into the def if possible.
        bool Folded = false;
-        if (OpIdx != -1) {
-          if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                   true, Slot, VReg)) {
-            if (!UseOps.empty())
-              // Folded a two-address instruction, do not issue a load.
-              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
+        if (CanFold && !Ops.empty()) {
+          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
             Folded = true;
+            if (FoundUse > 0)
+              // Also folded uses, do not issue a load.
+              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
          }
        }
-        // Else tell the spiller to issue a store for us.
+        // Else tell the spiller to issue a spill.
        if (!Folded)
          vrm.addSpillPoint(VReg, MI);
      }
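The spill side now keys everything off CanFold and FoundUse: the store is folded into the def, and if a use was folded along with it the instruction reads the value from memory itself, so the reload queued at this index can be erased. A compact sketch of just that decision, with toy booleans in place of the real bookkeeping:

// Toy model of the spill-side outcome: is the scheduled reload still
// needed after a fold attempt?
struct FoldOutcome { bool Folded; bool FoundUse; };

bool reloadStillNeeded(const FoldOutcome &O) {
  // A fold that also swallowed a use serves the read from memory,
  // so the separate reload (restore point) is dropped.
  return !(O.Folded && O.FoundUse);
}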
@@ -1287,41 +1290,40 @@ addIntervalsForSpills(const LiveInterval &li,
         continue;
       unsigned VReg = restores[i].vreg;
       MachineInstr *MI = getInstructionFromIndex(index);
-      int OpIdx = -1;
-      UseOps.clear();
+      bool CanFold = false;
+      Ops.clear();
       if (restores[i].canFold) {
+        CanFold = true;
         for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
           MachineOperand &MO = MI->getOperand(j);
           if (!MO.isRegister() || MO.getReg() != VReg)
             continue;
           if (MO.isDef()) {
-            // Can't fold if it's two-address code and it hasn't already
-            // been folded.
-            OpIdx = -1;
+            // If this restore were to be folded, it would have been folded
+            // already.
+            CanFold = false;
             break;
           }
-          if (UseOps.empty())
-            // Use the first use index.
-            OpIdx = (int)j;
-          UseOps.push_back(j);
+          Ops.push_back(j);
         }
       }

       // Fold the load into the use if possible.
       bool Folded = false;
-      if (OpIdx != -1) {
-        if (vrm.isReMaterialized(VReg)) {
+      if (CanFold && !Ops.empty()) {
+        if (!vrm.isReMaterialized(VReg))
+          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
+        else {
           MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
           int LdSlot = 0;
           bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
           // If the rematerializable def is a load, also try to fold it.
           if (isLoadSS ||
               (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
-            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
-                                          UseOps, isLoadSS, LdSlot, VReg);
-        } else
-          Folded = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, UseOps,
-                                        true, Slot, VReg);
+            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
+                                          Ops, isLoadSS, LdSlot, VReg);
+        }
       }
       // If folding is not possible / failed, then tell the spiller to issue a
       // load / rematerialization for us.
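On the restore side the order of the two cases is flipped but the logic is unchanged: a plain spilled value folds the reload straight from the stack slot, while a rematerialized value is only foldable when its defining instruction is itself a load (a stack-slot load, or anything carrying the M_LOAD_FLAG test above). A toy decision function for the same three cases:

// Toy restatement of the restore-folding decision above.
#include <cassert>

struct Value { bool IsReMat; bool RematDefIsLoad; };

bool canTryFoldRestore(const Value &V) {
  if (!V.IsReMat)
    return true;                 // fold the reload from the stack slot
  return V.RematDefIsLoad;       // remat def must itself be a load to fold
}

int main() {
  assert(canTryFoldRestore({false, false}));
  assert(!canTryFoldRestore({true, false}));
  assert(canTryFoldRestore({true, true}));
}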

RegAllocBigBlock.cpp

@@ -520,7 +520,9 @@ MachineInstr *RABigBlock::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {  // no free registers available.
     // try to fold the spill into the instruction
-    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)) {
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(OpNum);
+    if(MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
       ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.

RegAllocLocal.cpp

@@ -473,7 +473,9 @@ MachineInstr *RALocal::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI,
     assignVirtToPhysReg(VirtReg, PhysReg);
   } else {          // No registers available.
     // If we can fold this spill into this instruction, do so now.
-    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, OpNum, FrameIndex)){
+    SmallVector<unsigned, 2> Ops;
+    Ops.push_back(OpNum);
+    if (MachineInstr* FMI = RegInfo->foldMemoryOperand(MI, Ops, FrameIndex)) {
       ++NumFolded;
       // Since we changed the address of MI, make sure to update live variables
       // to know that the new instruction has the properties of the old one.

VirtRegMap.cpp

@@ -115,7 +115,7 @@ void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
 }

 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
-                            unsigned OpNo, MachineInstr *NewMI) {
+                            MachineInstr *NewMI, ModRef MRInfo) {
   // Move previous memory references folded to new instruction.
   MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
   for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
@@ -124,18 +124,6 @@ void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
     MI2VirtMap.erase(I++);
   }

-  ModRef MRInfo;
-  const TargetInstrDescriptor *TID = OldMI->getInstrDescriptor();
-  if (TID->getOperandConstraint(OpNo, TOI::TIED_TO) != -1 ||
-      TID->findTiedToSrcOperand(OpNo) != -1) {
-    // Folded a two-address operand.
-    MRInfo = isModRef;
-  } else if (OldMI->getOperand(OpNo).isDef()) {
-    MRInfo = isMod;
-  } else {
-    MRInfo = isRef;
-  }
-
   // add new memory reference
   MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
 }
@@ -830,7 +818,9 @@ bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
       NewMIs.clear();
       int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
       assert(Idx != -1);
-      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Idx, SS);
+      SmallVector<unsigned, 2> Ops;
+      Ops.push_back(Idx);
+      MachineInstr *FoldedMI = MRI->foldMemoryOperand(NewMI, Ops, SS);
       if (FoldedMI) {
         if (!VRM.hasPhys(UnfoldVR))
           VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
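With the ModRef classification moved to the caller, virtFolded shrinks to pure map maintenance: forward any references already attached to OldMI, then record (VirtReg, MRInfo) under the new instruction. A sketch of that reduced responsibility, with a std::multimap standing in for the real MI2VirtMapTy:

// Illustrative sketch, not LLVM code: no operand inspection remains,
// just re-keying the fold map onto the new instruction.
#include <map>
#include <utility>

enum ModRef { isRef = 1, isMod = 2, isModRef = 3 };
struct Instr { int Opcode; };

using MI2VirtMapTy = std::multimap<Instr*, std::pair<unsigned, ModRef>>;

void virtFolded(MI2VirtMapTy &Map, unsigned VirtReg,
                Instr *OldMI, Instr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded on OldMI over to NewMI.
  auto Range = Map.equal_range(OldMI);
  for (auto I = Range.first; I != Range.second; ) {
    Map.insert({NewMI, I->second});
    I = Map.erase(I);
  }
  // Record the new memory reference with the caller-computed ModRef.
  Map.insert({NewMI, {VirtReg, MRInfo}});
}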

VirtRegMap.h

@@ -280,10 +280,9 @@ namespace llvm {
     }

     /// @brief Updates information about the specified virtual register's value
-    /// folded into newMI machine instruction. The OpNum argument indicates the
-    /// operand number of OldMI that is folded.
-    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, unsigned OpNum,
-                    MachineInstr *NewMI);
+    /// folded into newMI machine instruction.
+    void virtFolded(unsigned VirtReg, MachineInstr *OldMI, MachineInstr *NewMI,
+                    ModRef MRInfo);

     /// @brief Updates information about the specified virtual register's value
     /// folded into the specified machine instruction.

ARMRegisterInfo.cpp

@@ -347,7 +347,11 @@ static bool isLowRegister(unsigned Reg) {
 }

 MachineInstr *ARMRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum, int FI) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   unsigned Opc = MI->getOpcode();
   MachineInstr *NewMI = NULL;
   switch (Opc) {

ARMRegisterInfo.h

@@ -74,22 +74,12 @@ public:
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;

-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

AlphaRegisterInfo.cpp

@@ -153,8 +153,10 @@ void AlphaRegisterInfo::loadRegFromAddr(MachineFunction &MF, unsigned DestReg,
 }

 MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                   unsigned OpNum,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                    int FrameIndex) const {
+  if (Ops.size() != 1) return NULL;
+
   // Make sure this is a reg-reg copy.
   unsigned Opc = MI->getOpcode();
@@ -166,7 +168,7 @@ MachineInstr *AlphaRegisterInfo::foldMemoryOperand(MachineInstr *MI,
   case Alpha::CPYSS:
   case Alpha::CPYST:
     if (MI->getOperand(1).getReg() == MI->getOperand(2).getReg()) {
-      if (OpNum == 0) {  // move -> store
+      if (Ops[0] == 0) {  // move -> store
         unsigned InReg = MI->getOperand(1).getReg();
         Opc = (Opc == Alpha::BISr) ? Alpha::STQ :
           ((Opc == Alpha::CPYSS) ? Alpha::STS : Alpha::STT);
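The RISC targets (Alpha here, and Mips, PowerPC, Sparc below) all implement the same narrow rule, which is why each now begins with the Ops.size() != 1 guard: only a register-to-register copy can be folded, and the direction depends on which single operand is being folded. A toy version of that rule:

// Toy model of the reg-reg copy folding done by the RISC targets.
enum FoldKind { FoldToStore, FoldToLoad };

FoldKind foldCopy(unsigned FoldedOpIdx) {
  // Operand 0 is the copy's destination: folding the def turns the copy
  // into a store of the source to the stack slot ("move -> store").
  // Folding a use turns it into a load of the slot into the destination.
  return FoldedOpIdx == 0 ? FoldToStore : FoldToLoad;
}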

AlphaRegisterInfo.h

@@ -48,22 +48,12 @@ struct AlphaRegisterInfo : public AlphaGenRegisterInfo {
                            const TargetRegisterClass *RC,
                            SmallVectorImpl<MachineInstr*> &NewMIs) const;

-  MachineInstr* foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

MipsRegisterInfo.cpp

@@ -176,8 +176,11 @@ void MipsRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
 }

 MachineInstr *MipsRegisterInfo::
-foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
+foldMemoryOperand(MachineInstr* MI,
+                  SmallVectorImpl<unsigned> &Ops, int FI) const
 {
+  if (Ops.size() != 1) return NULL;
+
   MachineInstr *NewMI = NULL;

   switch (MI->getOpcode())
@@ -188,10 +191,10 @@ foldMemoryOperand(MachineInstr* MI, unsigned OpNum, int FI) const
         (MI->getOperand(1).getReg() == Mips::ZERO) &&
         (MI->getOperand(2).isRegister()))
     {
-      if (OpNum == 0)    // COPY -> STORE
+      if (Ops[0] == 0)    // COPY -> STORE
         NewMI = BuildMI(TII.get(Mips::SW)).addFrameIndex(FI)
                   .addImm(0).addReg(MI->getOperand(2).getReg());
-      else               // COPY -> LOAD
+      else           // COPY -> LOAD
         NewMI = BuildMI(TII.get(Mips::LW), MI->getOperand(0)
                   .getReg()).addImm(0).addFrameIndex(FI);
     }

MipsRegisterInfo.h

@@ -55,22 +55,12 @@ struct MipsRegisterInfo : public MipsGenRegisterInfo {
   void reMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
                      unsigned DestReg, const MachineInstr *Orig) const;

-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
-                                  int FrameIndex) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                  MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const {
     return 0;
   }

PPCRegisterInfo.cpp

@@ -555,11 +555,14 @@ BitVector PPCRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
 /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
 /// copy instructions, turning them into load/store instructions.
 MachineInstr *PPCRegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 unsigned OpNum,
-                                                 int FrameIndex) const {
+                                                 SmallVectorImpl<unsigned> &Ops,
+                                                 int FrameIndex) const {
+  if (Ops.size() != 1) return NULL;
+
   // Make sure this is a reg-reg copy.  Note that we can't handle MCRF, because
   // it takes more than one instruction to store it.
   unsigned Opc = MI->getOpcode();
+  unsigned OpNum = Ops[0];

   MachineInstr *NewMI = NULL;
   if ((Opc == PPC::OR &&

PPCRegisterInfo.h

@@ -65,22 +65,12 @@
   /// foldMemoryOperand - PowerPC (like most RISC's) can only fold spills into
   /// copy instructions, turning them into load/store instructions.
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
+  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI, unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }

SparcRegisterInfo.cpp

@@ -148,8 +148,11 @@ void SparcRegisterInfo::reMaterialize(MachineBasicBlock &MBB,
 }

 MachineInstr *SparcRegisterInfo::foldMemoryOperand(MachineInstr* MI,
-                                                   unsigned OpNum,
-                                                   int FI) const {
+                                                   SmallVectorImpl<unsigned> &Ops,
+                                                   int FI) const {
+  if (Ops.size() != 1) return NULL;
+
+  unsigned OpNum = Ops[0];
   bool isFloat = false;
   MachineInstr *NewMI = NULL;
   switch (MI->getOpcode()) {

SparcRegisterInfo.h

@@ -59,23 +59,11 @@
                      unsigned DestReg, const MachineInstr *Orig) const;

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           int FrameIndex) const;

   virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
-                                          int FrameIndex) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          unsigned OpNum,
-                                          MachineInstr* LoadMI) const {
-    return 0;
-  }
-
-  virtual MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                          SmallVectorImpl<unsigned> &UseOps,
+                                          SmallVectorImpl<unsigned> &Ops,
                                           MachineInstr* LoadMI) const {
     return 0;
   }

X86RegisterInfo.cpp

@@ -1140,73 +1140,58 @@ X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned i,
 }

-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
-                                                 int FrameIndex) const {
-  // Check switch flag
-  if (NoFusing) return NULL;
-  SmallVector<MachineOperand,4> MOs;
-  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
-  return foldMemoryOperand(MI, OpNum, MOs);
-}
-
 MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 SmallVectorImpl<unsigned> &UseOps,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  int FrameIndex) const {
   // Check switch flag
   if (NoFusing) return NULL;
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], FrameIndex);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
+
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
     return NULL;
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, FrameIndex);
+
+  SmallVector<MachineOperand,4> MOs;
+  MOs.push_back(MachineOperand::CreateFrameIndex(FrameIndex));
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }

-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI, unsigned OpNum,
+MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
+                                                 SmallVectorImpl<unsigned> &Ops,
                                                  MachineInstr *LoadMI) const {
   // Check switch flag
   if (NoFusing) return NULL;
+
+  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
+    unsigned NewOpc = 0;
+    switch (MI->getOpcode()) {
+    default: return NULL;
+    case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
+    case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
+    case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
+    case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
+    }
+    // Change to CMPXXri r, 0 first.
+    MI->setInstrDescriptor(TII.get(NewOpc));
+    MI->getOperand(1).ChangeToImmediate(0);
+  } else if (Ops.size() != 1)
+    return NULL;
+
   SmallVector<MachineOperand,4> MOs;
   unsigned NumOps = TII.getNumOperands(LoadMI->getOpcode());
   for (unsigned i = NumOps - 4; i != NumOps; ++i)
     MOs.push_back(LoadMI->getOperand(i));
-  return foldMemoryOperand(MI, OpNum, MOs);
-}
-
-MachineInstr* X86RegisterInfo::foldMemoryOperand(MachineInstr *MI,
-                                                 SmallVectorImpl<unsigned> &UseOps,
-                                                 MachineInstr *LoadMI) const {
-  // Check switch flag
-  if (NoFusing) return NULL;
-  if (UseOps.size() == 1)
-    return foldMemoryOperand(MI, UseOps[0], LoadMI);
-  else if (UseOps.size() != 2 || UseOps[0] != 0 && UseOps[1] != 1)
-    return NULL;
-  unsigned NewOpc = 0;
-  switch (MI->getOpcode()) {
-  default: return NULL;
-  case X86::TEST8rr:  NewOpc = X86::CMP8ri; break;
-  case X86::TEST16rr: NewOpc = X86::CMP16ri; break;
-  case X86::TEST32rr: NewOpc = X86::CMP32ri; break;
-  case X86::TEST64rr: NewOpc = X86::CMP64ri32; break;
-  }
-  // Change to CMPXXri r, 0 first.
-  MI->setInstrDescriptor(TII.get(NewOpc));
-  MI->getOperand(1).ChangeToImmediate(0);
-  return foldMemoryOperand(MI, 0, LoadMI);
+  return foldMemoryOperand(MI, Ops[0], MOs);
 }
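x86 keeps the one interesting multi-operand case, the one the reworked LiveIntervals code cites (TEST32rr r, r -> CMP32rm [mem], 0): when the two folded indices are exactly {0, 1}, both operands of the TEST read the same register, so the instruction is first rewritten as CMPri r, 0 and the memory address is then fused into operand 0. A toy sketch of the gate:

// Toy model of the x86 two-operand special case.
#include <cstdio>
#include <vector>

bool isBothOperandFold(const std::vector<unsigned> &Ops) {
  return Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1;
}

int main() {
  std::vector<unsigned> Ops = {0, 1};
  if (isBothOperandFold(Ops))
    std::puts("TEST32rr r, r  ->  CMP32ri r, 0  ->  CMP32rm [mem], 0");
}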

X86RegisterInfo.h

@@ -133,32 +133,19 @@ public:
   /// foldMemoryOperand - If this target supports it, fold a load or store of
   /// the specified stack slot into the specified machine instruction for the
-  /// specified operand. If this is possible, the target should perform the
+  /// specified operand(s). If this is possible, the target should perform the
   /// folding and return true, otherwise it should return false. If it folds
   /// the instruction, it is likely that the MachineInstruction the iterator
   /// references has been changed.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  int FrameIndex) const;
-
-  /// foldMemoryOperand - Same as previous except it tries to fold instruction
-  /// with multiple uses of the same register.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   int FrameIndex) const;

   /// foldMemoryOperand - Same as the previous version except it allows folding
   /// of any load and store from / to any address, not just from a specific
   /// stack slot.
   MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  unsigned OpNum,
-                                  MachineInstr* LoadMI) const;
-
-  /// foldMemoryOperand - Same as the previous version except it allows folding
-  /// of any load and store from / to any address, not just from a specific
-  /// stack slot.
-  MachineInstr* foldMemoryOperand(MachineInstr* MI,
-                                  SmallVectorImpl<unsigned> &UseOps,
+                                  SmallVectorImpl<unsigned> &Ops,
                                   MachineInstr* LoadMI) const;

   /// getOpcodeAfterMemoryFold - Returns the opcode of the would be new
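The LoadMI flavor declared last differs only in where the address comes from: instead of a frame index, the memory operands are copied from the tail of an arbitrary load instruction (the x86 implementation above takes the last four operands, the size of an x86 address in this operand encoding). A toy sketch of that splice, with the four-operand assumption made explicit:

// Toy model: pull the address operands off the end of a load so they can
// be re-attached to the folded instruction. The constant 4 mirrors the
// "NumOps - 4" loop in the x86 implementation above, and the caller is
// assumed to pass a load with at least four operands.
#include <vector>

struct Operand { int Val; };

std::vector<Operand> addressOperands(const std::vector<Operand> &LoadOps) {
  const unsigned AddrSize = 4;               // x86 address = 4 operands here
  return std::vector<Operand>(LoadOps.end() - AddrSize, LoadOps.end());
}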