Fixed various live interval splitting bugs / compile time issues.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@44428 91177308-0d34-0410-b5e6-96231b3b80d8
Evan Cheng 2007-11-29 01:06:25 +00:00
parent c3868e04bf
commit 0cbb1164b3
4 changed files with 280 additions and 166 deletions

@@ -277,10 +277,13 @@ namespace llvm {
MachineInstr *DefMI, unsigned index, unsigned i,
bool isSS, int slot, unsigned reg);
bool anyKillInMBBAfterIdx(const LiveInterval &li,
MachineBasicBlock *MBB, unsigned Idx,
const VNInfo *VNI = NULL) const;
/// anyKillInMBBAfterIdx - Returns true if there is a kill of the specified
/// VNInfo that's after the specified index but is within the basic block.
bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
MachineBasicBlock *MBB, unsigned Idx) const;
/// intervalIsInOneMBB - Returns true if the specified interval is entirely
/// within a single basic block.
bool intervalIsInOneMBB(const LiveInterval &li) const;
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
@@ -301,7 +304,9 @@ namespace llvm {
VirtRegMap &vrm, SSARegMap *RegMap, const TargetRegisterClass* rc,
SmallVector<int, 4> &ReMatIds, const LoopInfo *loopInfo,
BitVector &SpillMBBs,
std::map<unsigned, std::pair<int, unsigned> > &SpillIdxes,
std::map<unsigned, std::pair<int, bool> > &SpillIdxes,
BitVector &RestoreMBBs,
std::map<unsigned, std::pair<int, bool> > &RestoreIdxes,
std::map<unsigned,unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs);
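The header hunks above change anyKillInMBBAfterIdx to take the VNInfo directly and narrow the per-block SpillIdxes entries to an (index, can-fold) pair. As a rough, self-contained restatement of the kill predicate — anyKillAfter is an illustrative name, and kills are assumed to be plain instruction indices as VNInfo::kills is used in this patch:

#include <vector>

// Returns true if any kill of the value falls strictly after Idx but still
// before the end index of the basic block, i.e. the value is read again
// later in the same MBB.
static bool anyKillAfter(const std::vector<unsigned> &Kills,
                         unsigned Idx, unsigned MBBEnd) {
  for (unsigned j = 0, e = Kills.size(); j != e; ++j)
    if (Kills[j] > Idx && Kills[j] < MBBEnd)
      return true;
  return false;
}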

@@ -42,12 +42,15 @@ namespace {
cl::init(false), cl::Hidden);
cl::opt<bool> SplitAtBB("split-intervals-at-bb",
cl::init(false), cl::Hidden);
cl::init(false), cl::Hidden);
cl::opt<int> SplitLimit("split-limit",
cl::init(-1), cl::Hidden);
}
STATISTIC(numIntervals, "Number of original intervals");
STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
STATISTIC(numFolded , "Number of loads/stores folded into instructions");
STATISTIC(numFolds , "Number of loads/stores folded into instructions");
STATISTIC(numSplits , "Number of intervals split");
char LiveIntervals::ID = 0;
namespace {
@@ -389,7 +392,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
LiveRange LR(defIndex, killIndex, ValNo);
interval.addRange(LR);
interval.addKill(ValNo, killIndex-1); // odd # means phi node
interval.addKill(ValNo, killIndex+1); // odd # means phi node
DOUT << " +" << LR;
}
}
@@ -652,13 +655,17 @@ bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
else
LiveVariables::transferKillDeadInfo(MI, fmi, mri_);
MachineBasicBlock &MBB = *MI->getParent();
vrm.virtFolded(reg, MI, i, fmi);
if (isSS) {
if (!mf_->getFrameInfo()->isFixedObjectIndex(slot))
vrm.virtFolded(reg, MI, i, fmi);
}
vrm.transferSpillPts(MI, fmi);
vrm.transferRestorePts(MI, fmi);
mi2iMap_.erase(MI);
i2miMap_[index/InstrSlots::NUM] = fmi;
mi2iMap_[fmi] = index;
MI = MBB.insert(MBB.erase(MI), fmi);
++numFolded;
++numFolds;
return true;
}
return false;
@@ -681,20 +688,6 @@ bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
return true;
}
static
bool hasALaterUse(MachineBasicBlock *MBB, MachineInstr *MI, unsigned Reg) {
MachineBasicBlock::iterator I = MI;
if (I == MBB->end())
return false;
++I;
while (I != MBB->end()) {
if (I->findRegisterUseOperandIdx(Reg) != -1)
return true;
++I;
}
return false;
}
/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
/// for addIntervalsForSpills to rewrite uses / defs for the given live range.
void LiveIntervals::
@@ -738,6 +731,7 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
}
// If def for this use can't be rematerialized, then try folding.
// If def is rematerializable and it's a load, also try folding.
TryFold = !ReMatOrigDefMI ||
(ReMatOrigDefMI && (MI == ReMatOrigDefMI || isLoad));
if (isLoad) {
@@ -747,15 +741,10 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
}
}
// If we are splitting live intervals, only fold if it's 1) the first
// use and it's a kill or 2) there isn't another use later in this MBB.
TryFold &= NewVReg == 0;
if (TryFold && TrySplit)
// Do not fold store into def here if we are splitting. We'll find an
// optimal point to insert a store later.
if (HasDef || mop.isDef() ||
(!mop.isKill() && hasALaterUse(MI->getParent(), MI, li.reg)))
TryFold = false;
// Do not fold load / store here if we are splitting. We'll find an
// optimal point to insert a load / store later.
if (TryFold)
TryFold = !TrySplit && NewVReg == 0;
// FIXME: fold subreg use
if (!isSubReg && TryFold &&
@@ -859,27 +848,13 @@ rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
}
bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
MachineBasicBlock *MBB, unsigned Idx,
const VNInfo *VNI) const {
const VNInfo *VNI,
MachineBasicBlock *MBB, unsigned Idx) const {
unsigned End = getMBBEndIdx(MBB);
if (VNI) {
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
unsigned KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
return false;
}
// Look at all the VNInfo's.
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
const VNInfo *VNI = *i;
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
unsigned KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
unsigned KillIdx = VNI->kills[j];
if (KillIdx > Idx && KillIdx < End)
return true;
}
return false;
}
@@ -895,7 +870,9 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
SmallVector<int, 4> &ReMatIds,
const LoopInfo *loopInfo,
BitVector &SpillMBBs,
std::map<unsigned, std::pair<int, unsigned> > &SpillIdxes,
std::map<unsigned, std::pair<int, bool> > &SpillIdxes,
BitVector &RestoreMBBs,
std::map<unsigned, std::pair<int, bool> > &RestoreIdxes,
std::map<unsigned,unsigned> &NewVRegs,
std::vector<LiveInterval*> &NewLIs) {
unsigned NewVReg = 0;
@@ -930,53 +907,79 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
// Update weight of spill interval.
LiveInterval &nI = getOrCreateInterval(NewVReg);
if (!TrySplitMI)
if (!TrySplitMI) {
// The spill weight is now infinity as it cannot be spilled again.
nI.weight = HUGE_VALF;
else {
// Keep track of the last def in each MBB.
if (HasDef) {
if (MI != ReMatOrigDefMI || !CanDelete) {
// If this is a two-address code, then this index probably starts a
// VNInfo so we should examine all the VNInfo's.
bool HasKill = HasUse
? anyKillInMBBAfterIdx(li, MBB, getDefIndex(index))
: anyKillInMBBAfterIdx(li, MBB, getDefIndex(index), I->valno);
if (!HasKill) {
unsigned MBBId = MBB->getNumber();
// High bit specify whether this spill ought to be folded if
// possible.
std::map<unsigned, std::pair<int,unsigned> >::iterator SII =
SpillIdxes.find(MBBId);
if (SII == SpillIdxes.end() || (int)index > SII->second.first)
SpillIdxes[MBBId] = std::make_pair(index, NewVReg | (1 << 31));
SpillMBBs.set(MBBId);
}
}
if (!IsNew) {
// If this interval hasn't been assigned a stack slot
// (because earlier def is remat), do it now.
int SS = vrm.getStackSlot(NewVReg);
if (SS != (int)Slot) {
assert(SS == VirtRegMap::NO_STACK_SLOT);
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
}
} else if (HasUse) {
// Use(s) following the last def, it's not safe to fold the spill.
unsigned MBBId = MBB->getNumber();
std::map<unsigned, std::pair<int,unsigned> >::iterator SII =
SpillIdxes.find(MBBId);
if (SII != SpillIdxes.end() &&
(SII->second.second & ((1<<31)-1)) == NewVReg &&
(int)getUseIndex(index) > SII->second.first)
SpillIdxes[MBBId].second &= (1<<31)-1;
}
// Update spill weight.
unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
continue;
}
// Keep track of the last def and first use in each MBB.
unsigned MBBId = MBB->getNumber();
if (HasDef) {
if (MI != ReMatOrigDefMI || !CanDelete) {
// If this is a two-address code, then this index probably starts a
// VNInfo so we should examine all the VNInfo's.
bool HasKill = false;
if (!HasUse)
HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
else {
const VNInfo *VNI = NULL;
for (LiveInterval::const_vni_iterator i = li.vni_begin(),
e = li.vni_end(); i != e; ++i)
if ((*i)->def == getDefIndex(index)) {
VNI = *i;
break;
}
if (VNI)
HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
}
if (!HasKill) {
std::map<unsigned, std::pair<int, bool> >::iterator SII =
SpillIdxes.find(MBBId);
if (SII == SpillIdxes.end())
SpillIdxes[MBBId] = std::make_pair(index, true);
else if ((int)index > SII->second.first) {
// If there is an earlier def and this is a two-address
// instruction, then it's not possible to fold the store (which
// would also fold the load).
SpillIdxes[MBBId] = std::make_pair(index, !HasUse);
}
SpillMBBs.set(MBBId);
}
}
if (!IsNew) {
// If this interval hasn't been assigned a stack slot
// (because earlier def is remat), do it now.
int SS = vrm.getStackSlot(NewVReg);
if (SS != (int)Slot) {
assert(SS == VirtRegMap::NO_STACK_SLOT);
vrm.assignVirt2StackSlot(NewVReg, Slot);
}
}
}
if (HasUse) {
std::map<unsigned, std::pair<int, bool> >::iterator SII =
SpillIdxes.find(MBBId);
if (SII != SpillIdxes.end() && (int)index > SII->second.first)
// Use(s) following the last def, it's not safe to fold the spill.
SII->second.second = false;
std::map<unsigned, std::pair<int, bool> >::iterator RII =
RestoreIdxes.find(MBBId);
if (RII != RestoreIdxes.end())
// If we are splitting live intervals, only fold if it's the first
// use and there isn't another use later in the MBB.
RII->second.second = false;
else if (IsNew) {
// Only need a reload if there isn't an earlier def / use.
RestoreIdxes[MBBId] = std::make_pair(index, true);
RestoreMBBs.set(MBBId);
}
}
// Update spill weight.
unsigned loopDepth = loopInfo->getLoopDepth(MBB->getBasicBlock());
nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
}
}
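The rewritten loop above tracks, per basic block, the last def (SpillIdxes) and the first use (RestoreIdxes) of the interval, each paired with a flag saying whether the eventual store / load may still be folded into that instruction. A minimal, self-contained model of that bookkeeping — IdxMap, recordDef and recordUse are illustrative names, not part of the patch, and the SpillMBBs / kill checks are omitted:

#include <map>
#include <utility>

typedef std::map<unsigned, std::pair<int, bool> > IdxMap; // MBB id -> (index, can fold)

// A def at `index`: remember the latest def per block; a two-address def
// (one that also reads the register) makes the eventual store unfoldable.
static void recordDef(IdxMap &SpillIdxes, unsigned MBBId, int index, bool HasUse) {
  IdxMap::iterator SII = SpillIdxes.find(MBBId);
  if (SII == SpillIdxes.end())
    SpillIdxes[MBBId] = std::make_pair(index, true);
  else if (index > SII->second.first)
    SpillIdxes[MBBId] = std::make_pair(index, !HasUse);
}

// A use at `index`: a use after the recorded last def means the spill store
// may no longer be folded; only the first reference in a block gets a
// restore point, and a later use makes that restore unfoldable as well.
static void recordUse(IdxMap &SpillIdxes, IdxMap &RestoreIdxes,
                      unsigned MBBId, int index, bool IsNew) {
  IdxMap::iterator SII = SpillIdxes.find(MBBId);
  if (SII != SpillIdxes.end() && index > SII->second.first)
    SII->second.second = false;
  IdxMap::iterator RII = RestoreIdxes.find(MBBId);
  if (RII != RestoreIdxes.end())
    RII->second.second = false;
  else if (IsNew)
    RestoreIdxes[MBBId] = std::make_pair(index, true);
}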
@@ -998,7 +1001,9 @@ addIntervalsForSpills(const LiveInterval &li,
// Each bit specifies whether a spill is required in the MBB.
BitVector SpillMBBs(mf_->getNumBlockIDs());
std::map<unsigned, std::pair<int, unsigned> > SpillIdxes;
std::map<unsigned, std::pair<int, bool> > SpillIdxes;
BitVector RestoreMBBs(mf_->getNumBlockIDs());
std::map<unsigned, std::pair<int, bool> > RestoreIdxes;
std::map<unsigned,unsigned> NewVRegs;
std::vector<LiveInterval*> NewLIs;
SSARegMap *RegMap = mf_->getSSARegMap();
@@ -1036,13 +1041,15 @@ addIntervalsForSpills(const LiveInterval &li,
if (IsFirstRange) {
rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
false, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
false, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
} else {
rewriteInstructionsForSpills(li, false, I, NULL, 0,
Slot, 0, false, false, false,
false, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
false, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
}
IsFirstRange = false;
}
@@ -1050,6 +1057,10 @@ addIntervalsForSpills(const LiveInterval &li,
}
bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
TrySplit = false;
if (TrySplit)
++numSplits;
bool NeedStackSlot = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
@@ -1110,39 +1121,118 @@ addIntervalsForSpills(const LiveInterval &li,
bool isLoad = isLoadSS ||
(DefIsReMat && (ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG));
rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegMap, rc, ReMatIds,
loopInfo, SpillMBBs, SpillIdxes, NewVRegs, NewLIs);
Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
CanDelete, vrm, RegMap, rc, ReMatIds, loopInfo,
SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
NewVRegs, NewLIs);
}
// Insert spills if we are splitting.
if (TrySplit && NeedStackSlot) {
int Id = SpillMBBs.find_first();
// Insert spills / restores if we are splitting.
if (TrySplit) {
if (NeedStackSlot) {
int Id = SpillMBBs.find_first();
while (Id != -1) {
unsigned VReg = NewVRegs[Id];
int index = SpillIdxes[Id].first;
bool DoFold = SpillIdxes[Id].second;
bool isReMat = vrm.isReMaterialized(VReg);
MachineInstr *MI = getInstructionFromIndex(index);
int OpIdx = -1;
bool FoldedLoad = false;
if (DoFold) {
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isUse()) {
// Can't fold if it's two-address code and the use isn't the
// first and only use.
// If there is more than one use, a load is still needed.
if (!isReMat && !FoldedLoad &&
RestoreMBBs[Id] && RestoreIdxes[Id].first == index &&
RestoreIdxes[Id].second) {
FoldedLoad = true;
continue;
} else {
OpIdx = -1;
break;
}
}
OpIdx = (int)j;
}
}
// Fold the store into the def if possible.
if (OpIdx == -1)
DoFold = false;
if (DoFold) {
if (tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, true, Slot,
VReg)) {
if (FoldedLoad) {
// Folded a two-address instruction, do not issue a load.
RestoreMBBs.reset(Id);
RestoreIdxes.erase(Id);
}
} else
DoFold = false;
}
// Else tell the spiller to issue a store for us.
if (!DoFold)
vrm.addSpillPoint(VReg, MI);
Id = SpillMBBs.find_next(Id);
}
}
int Id = RestoreMBBs.find_first();
while (Id != -1) {
unsigned index = SpillIdxes[Id].first;
unsigned VReg = SpillIdxes[Id].second & ((1 << 31)-1);
bool TryFold = SpillIdxes[Id].second & (1 << 31);
unsigned VReg = NewVRegs[Id];
int index = RestoreIdxes[Id].first;
bool DoFold = RestoreIdxes[Id].second;
MachineInstr *MI = getInstructionFromIndex(index);
int OpIdx = -1;
if (TryFold) {
if (DoFold) {
for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = MI->getOperand(j);
if (!MO.isRegister() || MO.getReg() != VReg)
continue;
if (MO.isUse()) {
if (MO.isDef()) {
// Can't fold if it's two-address code.
OpIdx = -1;
break;
}
if (OpIdx != -1) {
// Multiple uses, do not fold!
OpIdx = -1;
break;
}
OpIdx = (int)j;
}
}
// Fold the store into the def if possible.
if (OpIdx == -1 ||
!tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx, true, Slot, VReg))
// Else tell the spiller to issue a store for us.
vrm.addSpillPoint(VReg, MI);
Id = SpillMBBs.find_next(Id);
// Fold the load into the use if possible.
if (OpIdx == -1)
DoFold = false;
if (DoFold) {
if (vrm.isReMaterialized(VReg)) {
MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
int LdSlot = 0;
bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
// If the rematerializable def is a load, also try to fold it.
if (isLoadSS ||
(ReMatDefMI->getInstrDescriptor()->Flags & M_LOAD_FLAG))
DoFold = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, OpIdx,
isLoadSS, LdSlot, VReg);
else
DoFold = false;
} else
DoFold = tryFoldMemoryOperand(MI, vrm, NULL, index, OpIdx,
true, Slot, VReg);
}
// If folding is not possible / failed, then tell the spiller to issue a
// load / rematerialization for us.
if (!DoFold)
vrm.addRestorePoint(VReg, MI);
Id = RestoreMBBs.find_next(Id);
}
}
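The restore loop above folds a reload only when the instruction has exactly one operand naming the split register and that operand is a pure use; a two-address def or multiple uses force an explicit load instead. A toy version of that operand scan — Operand stands in for MachineOperand and findFoldableUse is an illustrative name, not LLVM API:

#include <vector>

struct Operand { unsigned Reg; bool IsDef; }; // minimal MachineOperand stand-in

// Returns the index of the single pure use of VReg, or -1 if the register is
// also defined by the instruction (two-address code) or is used more than
// once -- in both cases the reload cannot be folded and a load is issued.
static int findFoldableUse(const std::vector<Operand> &Ops, unsigned VReg) {
  int OpIdx = -1;
  for (unsigned j = 0, e = Ops.size(); j != e; ++j) {
    if (Ops[j].Reg != VReg)
      continue;
    if (Ops[j].IsDef)
      return -1;                 // two-address def: can't fold
    if (OpIdx != -1)
      return -1;                 // multiple uses: don't fold
    OpIdx = (int)j;
  }
  return OpIdx;
}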

@@ -940,10 +940,6 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
// ReMatDefs - These are rematerializable def MIs which are not deleted.
SmallSet<MachineInstr*, 4> ReMatDefs;
// ReloadedSplits - Splits must be reloaded once per MBB. This keeps track
// which have been reloaded.
SmallSet<unsigned, 8> ReloadedSplits;
// Keep track of kill information.
BitVector RegKills(MRI->getNumRegs());
std::vector<MachineOperand*> KillOps;
@@ -963,6 +959,31 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
MachineInstr &MI = *MII;
const TargetInstrDescriptor *TID = MI.getInstrDescriptor();
// Insert restores here if asked to.
if (VRM.isRestorePt(&MI)) {
std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
unsigned VirtReg = RestoreRegs[i];
if (!VRM.getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
unsigned Phys = VRM.getPhys(VirtReg);
MF.setPhysRegUsed(Phys);
if (VRM.isReMaterialized(VirtReg)) {
MRI->reMaterialize(MBB, &MI, Phys,
VRM.getReMaterializedMI(VirtReg));
++NumReMats;
} else {
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), RC);
++NumLoads;
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
UpdateKills(*prior(MII), RegKills, KillOps);
DOUT << '\t' << *prior(MII);
}
}
// Insert spills here if asked to.
if (VRM.isSpillPt(&MI)) {
std::vector<unsigned> &SpillRegs = VRM.getSpillPtSpills(&MI);
@@ -1006,43 +1027,6 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
MF.setPhysRegUsed(Phys);
if (MO.isDef())
ReusedOperands.markClobbered(Phys);
// If it's a split live interval, insert a reload for the first use
// unless it's previously defined in the MBB.
unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
if (SplitReg) {
if (ReloadedSplits.insert(VirtReg)) {
bool HasUse = MO.isUse();
// If it's a def, we don't need to reload the value unless it's
// a two-address code.
if (!HasUse) {
for (unsigned j = i+1; j != e; ++j) {
MachineOperand &MOJ = MI.getOperand(j);
if (MOJ.isRegister() && MOJ.getReg() == VirtReg) {
HasUse = true;
break;
}
}
}
if (HasUse) {
if (VRM.isReMaterialized(VirtReg)) {
MRI->reMaterialize(MBB, &MI, Phys,
VRM.getReMaterializedMI(VirtReg));
++NumReMats;
} else {
const TargetRegisterClass* RC = RegMap->getRegClass(VirtReg);
MRI->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg), RC);
++NumLoads;
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
UpdateKills(*prior(MII), RegKills, KillOps);
DOUT << '\t' << *prior(MII);
}
}
}
unsigned RReg = SubIdx ? MRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
continue;
@@ -1264,12 +1248,6 @@ void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
VirtRegMap::ModRef MR = I->second.second;
DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
// If this is a split live interval, remember we have seen this so
// we do not need to reload it for later uses.
unsigned SplitReg = VRM.getPreSplitReg(VirtReg);
if (SplitReg)
ReloadedSplits.insert(VirtReg);
int SS = VRM.getStackSlot(VirtReg);
if (SS == VirtRegMap::NO_STACK_SLOT)
continue;

@@ -82,6 +82,11 @@ namespace llvm {
/// splitting.
std::map<MachineInstr*, std::vector<unsigned> > SpillPt2VirtMap;
/// RestorePt2VirtMap - This records the virtual registers which should
/// be restored right before the MachineInstr due to live interval
/// splitting.
std::map<MachineInstr*, std::vector<unsigned> > RestorePt2VirtMap;
/// ReMatId - Instead of assigning a stack slot to a to-be-rematerialized
/// virtual register, a unique id is assigned. This keeps track of
/// the highest id used so far. Note, this starts at (1<<18) to avoid
@@ -239,6 +244,41 @@ namespace llvm {
SpillPt2VirtMap.erase(I);
}
/// @brief returns true if the specified MachineInstr is a restore point.
bool isRestorePt(MachineInstr *Pt) const {
return RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end();
}
/// @brief returns the virtual registers that should be restored due to
/// splitting right before the specified MachineInstr.
std::vector<unsigned> &getRestorePtRestores(MachineInstr *Pt) {
return RestorePt2VirtMap[Pt];
}
/// @brief records the specified MachineInstr as a restore point for virtReg.
void addRestorePoint(unsigned virtReg, MachineInstr *Pt) {
if (RestorePt2VirtMap.find(Pt) != RestorePt2VirtMap.end())
RestorePt2VirtMap[Pt].push_back(virtReg);
else {
std::vector<unsigned> Virts;
Virts.push_back(virtReg);
RestorePt2VirtMap.insert(std::make_pair(Pt, Virts));
}
}
void transferRestorePts(MachineInstr *Old, MachineInstr *New) {
std::map<MachineInstr*,std::vector<unsigned> >::iterator I =
RestorePt2VirtMap.find(Old);
if (I == RestorePt2VirtMap.end())
return;
while (!I->second.empty()) {
unsigned virtReg = I->second.back();
I->second.pop_back();
addRestorePoint(virtReg, New);
}
RestorePt2VirtMap.erase(I);
}
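RestorePt2VirtMap mirrors the existing SpillPt2VirtMap: a per-instruction list of virtual registers to reload (or rematerialize) right before that instruction. A stripped-down, self-contained model of the add/transfer operations above — RestoreMap is an illustrative typedef and void* stands in for MachineInstr*:

#include <map>
#include <vector>

typedef std::map<void*, std::vector<unsigned> > RestoreMap;

// addRestorePoint: operator[] default-constructs the vector on first access,
// so one line covers both the "new point" and "existing point" cases.
static void addRestorePoint(RestoreMap &M, unsigned VirtReg, void *Pt) {
  M[Pt].push_back(VirtReg);
}

// transferRestorePts: move every pending restore from Old to New, e.g. when
// folding replaces the instruction the restores were attached to.
static void transferRestorePts(RestoreMap &M, void *Old, void *New) {
  if (Old == New)
    return;                       // nothing to move
  RestoreMap::iterator I = M.find(Old);
  if (I == M.end())
    return;
  std::vector<unsigned> &NewList = M[New];
  NewList.insert(NewList.end(), I->second.begin(), I->second.end());
  M.erase(I);
}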
/// @brief Updates information about the specified virtual register's value
/// folded into newMI machine instruction. The OpNum argument indicates the
/// operand number of OldMI that is folded.
@@ -261,6 +301,7 @@ namespace llvm {
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
MI2VirtMap.erase(MI);
SpillPt2VirtMap.erase(MI);
RestorePt2VirtMap.erase(MI);
}
void print(std::ostream &OS) const;