RABasic is nearly functionally complete. There are a few remaining
benchmarks hitting an assertion.
Adds LiveIntervalUnion::collectInterferingVRegs.
Fixes "late spilling" by checking for any unspillable live vregs among
all physReg aliases.


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@118701 91177308-0d34-0410-b5e6-96231b3b80d8
Andrew Trick 2010-11-10 19:18:47 +00:00
parent 4283f4b81e
commit f4baeaf848
9 changed files with 228 additions and 90 deletions
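The heart of the late-spilling fix is visible in RegAllocBasic.cpp below: before extracting anything from a LiveIntervalUnion, the allocator now collects interferences for the physreg and each of its aliases and refuses to spill if any interfering vreg is unspillable. A condensed, hypothetical sketch of that pattern using only the API this commit adds (canSpillInterferences is an illustrative name, not a function in the patch; queries_ and tri_ are the RegAllocBase members shown in the diff):

// Sketch only: true if every vreg interfering with preg or one of its aliases
// is spillable, i.e. spilling could actually free preg for the current lvr.
bool canSpillInterferences(unsigned preg) {
  queries_[preg].collectInterferingVRegs();
  if (queries_[preg].seenUnspillableVReg())
    return false;
  for (const unsigned *asI = tri_->getAliasSet(preg); *asI; ++asI) {
    queries_[*asI].collectInterferingVRegs();
    if (queries_[*asI].seenUnspillableVReg())
      return false;
  }
  return true;
}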

View File

@@ -270,7 +270,7 @@ namespace llvm {
/// (if any is created) by reference. This is temporary.
std::vector<LiveInterval*>
addIntervalsForSpills(const LiveInterval& i,
SmallVectorImpl<LiveInterval*> &SpillIs,
const SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap& vrm);
/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
@@ -283,7 +283,7 @@ namespace llvm {
/// val# of the specified interval is re-materializable. Also returns true
/// by reference if all of the defs are load instructions.
bool isReMaterializable(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad);
/// isReMaterializable - Returns true if the definition MI of the specified
@@ -360,7 +360,7 @@ namespace llvm {
/// by reference if the def is a load.
bool isReMaterializable(const LiveInterval &li, const VNInfo *ValNo,
MachineInstr *MI,
SmallVectorImpl<LiveInterval*> &SpillIs,
const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad);
/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from

View File

@@ -86,7 +86,7 @@ public:
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &spillIs);
const SmallVectorImpl<LiveInterval*> &spillIs);
void spill(LiveRangeEdit &);
@@ -352,7 +352,7 @@ void InlineSpiller::insertSpill(LiveInterval &NewLI,
void InlineSpiller::spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &spillIs) {
const SmallVectorImpl<LiveInterval*> &spillIs) {
LiveRangeEdit edit(*li, newIntervals, spillIs);
spill(edit);
if (VerifySpills)

View File

@@ -802,10 +802,11 @@ bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
/// isReMaterializable - Returns true if the definition MI of the specified
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI,
SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
const VNInfo *ValNo, MachineInstr *MI,
const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
if (DisableReMat)
return false;
@@ -849,9 +850,10 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li,
/// isReMaterializable - Returns true if every definition of MI of every
/// val# of the specified interval is re-materializable.
bool LiveIntervals::isReMaterializable(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
bool
LiveIntervals::isReMaterializable(const LiveInterval &li,
const SmallVectorImpl<LiveInterval*> &SpillIs,
bool &isLoad) {
isLoad = false;
for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
i != e; ++i) {
@@ -1556,7 +1558,7 @@ LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
std::vector<LiveInterval*> LiveIntervals::
addIntervalsForSpills(const LiveInterval &li,
SmallVectorImpl<LiveInterval*> &SpillIs,
const SmallVectorImpl<LiveInterval*> &SpillIs,
const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
assert(li.isSpillable() && "attempt to spill already spilled interval!");

View File

@@ -164,7 +164,7 @@ void LiveIntervalUnion::Query::findIntersection(InterferenceResult &ir) const {
while (ir.liuSegI_ != liuEnd) {
// Slowly advance the live virtual reg iterator until we surpass the next
// segment in this union. If this is ever used for coalescing of fixed
// registers and we have a LiveInterval with thousands of segments, then use
// registers and we have a live vreg with thousands of segments, then use
// upper bound instead.
while (ir.lvrSegI_ != lvrEnd && ir.lvrSegI_->end <= ir.liuSegI_->start)
++ir.lvrSegI_;
@@ -220,3 +220,73 @@ bool LiveIntervalUnion::Query::nextInterference(InterferenceResult &ir) const {
findIntersection(ir);
return isInterference(ir);
}
// Scan the vector of interfering virtual registers in this union; we assume
// it is quite small.
bool LiveIntervalUnion::Query::isSeenInterference(LiveInterval *lvr) const {
SmallVectorImpl<LiveInterval*>::const_iterator I =
std::find(interferingVRegs_.begin(), interferingVRegs_.end(), lvr);
return I != interferingVRegs_.end();
}
// Count the number of virtual registers in this union that interfere with this
// query's live virtual register.
//
// The number of times that we either advance ir.lvrSegI_ or call
// liu_.upperBound() will be no more than the number of holes in
// lvr_. So each invocation of collectInterferingVRegs() takes
// time proportional to |lvr-holes| * time(liu_.upperBound()).
//
// For comments on how to speed it up, see Query::findIntersection().
unsigned LiveIntervalUnion::Query::
collectInterferingVRegs(unsigned maxInterferingRegs) {
InterferenceResult ir = firstInterference();
LiveInterval::iterator lvrEnd = lvr_->end();
SegmentIter liuEnd = liu_->end();
LiveInterval *recentInterferingVReg = NULL;
while (ir.liuSegI_ != liuEnd) {
// Advance the union's iterator to reach an unseen interfering vreg.
do {
if (ir.liuSegI_->liveVirtReg == recentInterferingVReg)
continue;
if (!isSeenInterference(ir.liuSegI_->liveVirtReg))
break;
// Cache the most recent interfering vreg to bypass isSeenInterference.
recentInterferingVReg = ir.liuSegI_->liveVirtReg;
} while (++ir.liuSegI_ != liuEnd);
if (ir.liuSegI_ == liuEnd)
break;
// Advance the live vreg iterator until surpassing the next
// segment in this union. If this is ever used for coalescing of fixed
// registers and we have a live vreg with thousands of segments, then use
// upper bound instead.
while (ir.lvrSegI_ != lvrEnd && ir.lvrSegI_->end <= ir.liuSegI_->start)
++ir.lvrSegI_;
if (ir.lvrSegI_ == lvrEnd)
break;
// Check for intersection with the union's segment.
if (overlap(*ir.lvrSegI_, *ir.liuSegI_)) {
if (!ir.liuSegI_->liveVirtReg->isSpillable())
seenUnspillableVReg_ = true;
interferingVRegs_.push_back(ir.liuSegI_->liveVirtReg);
if (interferingVRegs_.size() == maxInterferingRegs)
return maxInterferingRegs;
// Cache the most recent interfering vreg to bypass isSeenInterference.
recentInterferingVReg = ir.liuSegI_->liveVirtReg;
++ir.liuSegI_;
continue;
}
// lvrSegI_ may have advanced far beyond liuSegI_, so do a fast intersection
// test to "catch up".
LiveSegment seg(ir.lvrSegI_->start, ir.lvrSegI_->end, lvr_);
ir.liuSegI_ = liu_->upperBound(ir.liuSegI_, seg);
}
return interferingVRegs_.size();
}

View File

@@ -174,10 +174,10 @@ public:
// result has no way to tell if it's valid to dereference them.
// Access the lvr segment.
const LiveInterval::iterator &lvrSegPos() const { return lvrSegI_; }
LiveInterval::iterator lvrSegPos() const { return lvrSegI_; }
// Access the liu segment.
const SegmentIter &liuSegPos() const { return liuSegI_; }
SegmentIter liuSegPos() const { return liuSegI_; }
bool operator==(const InterferenceResult &ir) const {
return lvrSegI_ == ir.lvrSegI_ && liuSegI_ == ir.liuSegI_;
@@ -193,17 +193,21 @@ public:
LiveIntervalUnion *liu_;
LiveInterval *lvr_;
InterferenceResult firstInterference_;
// TBD: interfering vregs
SmallVector<LiveInterval*,4> interferingVRegs_;
bool seenUnspillableVReg_;
public:
Query(): liu_(), lvr_() {}
Query(LiveInterval *lvr, LiveIntervalUnion *liu): liu_(liu), lvr_(lvr) {}
Query(LiveInterval *lvr, LiveIntervalUnion *liu):
liu_(liu), lvr_(lvr), seenUnspillableVReg_(false) {}
void clear() {
liu_ = NULL;
lvr_ = NULL;
firstInterference_ = InterferenceResult();
interferingVRegs_.clear();
seenUnspillableVReg_ = false;
}
void init(LiveInterval *lvr, LiveIntervalUnion *liu) {
@@ -218,6 +222,8 @@ public:
lvr_ = lvr;
// Clear cached results.
firstInterference_ = InterferenceResult();
interferingVRegs_.clear();
seenUnspillableVReg_ = false;
}
LiveInterval &lvr() const { assert(lvr_ && "uninitialized"); return *lvr_; }
@@ -242,9 +248,24 @@ public:
// of segments. Visiting each unique interfering pair means that the same
// lvr or liu segment may be visited multiple times.
bool nextInterference(InterferenceResult &ir) const;
// TBD: bool collectInterferingVirtRegs(unsigned maxInterference)
// Count the virtual registers in this union that interfere with this
// query's live virtual register, up to maxInterferingRegs.
unsigned collectInterferingVRegs(unsigned maxInterferingRegs = UINT_MAX);
// Was this virtual register visited during collectInterferingVRegs?
bool isSeenInterference(LiveInterval *lvr) const;
// Did collectInterferingVRegs encounter an unspillable vreg?
bool seenUnspillableVReg() const {
return seenUnspillableVReg_;
}
// Vector generated by collectInterferingVRegs.
const SmallVectorImpl<LiveInterval*> &interferingVRegs() const {
return interferingVRegs_;
}
private:
// Private interface for queries
void findIntersection(InterferenceResult &ir) const;

View File

@@ -45,6 +45,7 @@ template<typename T> class SmallVectorImpl;
class TargetRegisterInfo;
class VirtRegMap;
class LiveIntervals;
class Spiller;
// Heuristic that determines the priority of assigning virtual to physical
// registers. The main impact of the heuristic is expected to be compile time.
@@ -113,6 +114,9 @@ protected:
// LiveVirtRegQueue.
void allocatePhysRegs();
// Get a temporary reference to a Spiller instance.
virtual Spiller &spiller() = 0;
// A RegAlloc pass should override this to provide the allocation heuristics.
// Each call must guarantee forward progress by returning an available PhysReg
// or new set of split live virtual registers. It is up to the splitter to
@@ -128,18 +132,21 @@ protected:
// exists, return the interfering register, which may be preg or an alias.
unsigned checkPhysRegInterference(LiveInterval& lvr, unsigned preg);
// Helper for spilling all live virtual registers currently unified under preg
// that interfere with the most recently queried lvr. Return true if spilling
// was successful, and append any new spilled/split intervals to splitLVRs.
bool spillInterferences(unsigned preg,
SmallVectorImpl<LiveInterval*> &splitLVRs);
#ifndef NDEBUG
// Verify each LiveIntervalUnion.
void verify();
#endif
// Helper that spills all live virtual registers currently unified under preg
// that interfere with the most recently queried lvr.
void spillInterferences(unsigned preg,
SmallVectorImpl<LiveInterval*> &splitLVRs);
private:
void seedLiveVirtRegs(LiveVirtRegQueue &lvrQ);
void spillReg(unsigned reg, SmallVectorImpl<LiveInterval*> &splitLVRs);
};
} // end namespace llvm

View File

@@ -96,12 +96,11 @@ public:
virtual void releaseMemory();
virtual Spiller &spiller() { return *spiller_; }
virtual unsigned selectOrSplit(LiveInterval &lvr,
SmallVectorImpl<LiveInterval*> &splitLVRs);
void spillInterferences(unsigned preg,
SmallVectorImpl<LiveInterval*> &splitLVRs);
/// Perform register allocation.
virtual bool runOnMachineFunction(MachineFunction &mf);
@@ -326,35 +325,70 @@ unsigned RegAllocBase::checkPhysRegInterference(LiveInterval &lvr,
return 0;
}
// Sort live virtual registers by their register number.
struct LessLiveVirtualReg
: public std::binary_function<LiveInterval, LiveInterval, bool> {
bool operator()(const LiveInterval *left, const LiveInterval *right) const {
return left->reg < right->reg;
}
};
// Spill all interferences currently assigned to this physical register.
void RegAllocBase::spillReg(unsigned reg,
SmallVectorImpl<LiveInterval*> &splitLVRs) {
LiveIntervalUnion::Query &query = queries_[reg];
const SmallVectorImpl<LiveInterval*> &pendingSpills =
query.interferingVRegs();
for (SmallVectorImpl<LiveInterval*>::const_iterator I = pendingSpills.begin(),
E = pendingSpills.end(); I != E; ++I) {
LiveInterval &lvr = **I;
DEBUG(dbgs() <<
"extracting from " << tri_->getName(reg) << " " << lvr << '\n');
// Deallocate the interfering vreg by removing it from the union.
// A LiveInterval instance may not be in a union during modification!
physReg2liu_[reg].extract(lvr);
// After extracting segments, the query's results are invalid.
query.clear();
// Clear the vreg assignment.
vrm_->clearVirt(lvr.reg);
// Spill the extracted interval.
spiller().spill(&lvr, splitLVRs, pendingSpills);
}
}
// Spill or split all live virtual registers currently unified under preg that
// interfere with lvr. The newly spilled or split live intervals are returned by
// appending them to splitLVRs.
void RABasic::spillInterferences(unsigned preg,
bool
RegAllocBase::spillInterferences(unsigned preg,
SmallVectorImpl<LiveInterval*> &splitLVRs) {
SmallPtrSet<LiveInterval*, 8> spilledLVRs;
LiveIntervalUnion::Query &query = queries_[preg];
// Record each interference before mutating either the union or live
// intervals.
LiveIntervalUnion::InterferenceResult ir = query.firstInterference();
assert(query.isInterference(ir) && "expect interference");
do {
spilledLVRs.insert(ir.liuSegPos()->liveVirtReg);
} while (query.nextInterference(ir));
for (SmallPtrSetIterator<LiveInterval*> lvrI = spilledLVRs.begin(),
lvrEnd = spilledLVRs.end();
lvrI != lvrEnd; ++lvrI ) {
LiveInterval& lvr = **lvrI;
// Spill the previously allocated lvr.
DEBUG(dbgs() << "extracting from " << preg << " " << lvr << '\n');
// Deallocate the interfering lvr by removing it from the preg union.
// Live intervals may not be in a union during modification.
physReg2liu_[preg].extract(lvr);
// Spill the extracted interval.
SmallVector<LiveInterval*, 8> spillIs;
spiller_->spill(&lvr, splitLVRs, spillIs);
// Record each interference and determine if all are spillable before mutating
// either the union or live intervals.
std::vector<LiveInterval*> spilledLVRs;
unsigned numInterferences = queries_[preg].collectInterferingVRegs();
if (queries_[preg].seenUnspillableVReg()) {
return false;
}
// After extracting segments, the query's results are invalid.
query.clear();
for (const unsigned *asI = tri_->getAliasSet(preg); *asI; ++asI) {
numInterferences += queries_[*asI].collectInterferingVRegs();
if (queries_[*asI].seenUnspillableVReg()) {
return false;
}
}
DEBUG(dbgs() << "spilling " << tri_->getName(preg) <<
" interferences with " << queries_[preg].lvr() << "\n");
assert(numInterferences > 0 && "expect interference");
// Spill each interfering vreg allocated to preg or an alias.
spillReg(preg, splitLVRs);
for (const unsigned *asI = tri_->getAliasSet(preg); *asI; ++asI)
spillReg(*asI, splitLVRs);
return true;
}
//===----------------------------------------------------------------------===//
@@ -374,53 +408,57 @@ void RABasic::spillInterferences(unsigned preg,
// minimal, there is no value in caching them.
unsigned RABasic::selectOrSplit(LiveInterval &lvr,
SmallVectorImpl<LiveInterval*> &splitLVRs) {
// Accumulate the min spill cost among the interferences, in case we spill.
unsigned minSpillReg = 0;
unsigned minSpillAlias = 0;
float minSpillWeight = lvr.weight;
// Populate a list of physical register spill candidates.
std::vector<unsigned> pregSpillCands;
// Check for an available reg in this class.
// Check for an available register in this class.
const TargetRegisterClass *trc = mri_->getRegClass(lvr.reg);
for (TargetRegisterClass::iterator trcI = trc->allocation_order_begin(*mf_),
trcEnd = trc->allocation_order_end(*mf_);
trcI != trcEnd; ++trcI) {
unsigned preg = *trcI;
// Check interference and initialize queries for this lvr as a side effect.
unsigned interfReg = checkPhysRegInterference(lvr, preg);
if (interfReg == 0) {
// Found an available register.
return preg;
}
LiveIntervalUnion::InterferenceResult interf =
queries_[interfReg].firstInterference();
float interfWeight = interf.liuSegPos()->liveVirtReg->weight;
if (interfWeight < minSpillWeight ) {
minSpillReg = interfReg;
minSpillAlias = preg;
minSpillWeight = interfWeight;
LiveInterval *interferingVirtReg =
queries_[interfReg].firstInterference().liuSegPos()->liveVirtReg;
// The current lvr must either be spillable, or one of its interferences must
// have less spill weight.
if (interferingVirtReg->weight < lvr.weight ) {
pregSpillCands.push_back(preg);
}
}
if (minSpillReg == 0) {
DEBUG(dbgs() << "spilling: " << lvr << '\n');
SmallVector<LiveInterval*, 1> spillIs; // ignored
spiller_->spill(&lvr, splitLVRs, spillIs);
// The live virtual register requesting to be allocated was spilled. So tell
// the caller not to allocate anything for this round.
return 0;
// Try to spill another interfering reg with less spill weight.
//
// FIXME: RAGreedy will sort this list by spill weight.
for (std::vector<unsigned>::iterator pregI = pregSpillCands.begin(),
pregE = pregSpillCands.end(); pregI != pregE; ++pregI) {
if (!spillInterferences(*pregI, splitLVRs)) continue;
unsigned interfReg = checkPhysRegInterference(lvr, *pregI);
if (interfReg != 0) {
const LiveSegment &seg =
*queries_[interfReg].firstInterference().liuSegPos();
dbgs() << "spilling cannot free " << tri_->getName(*pregI) <<
" for " << lvr.reg << " with interference " << seg.liveVirtReg << "\n";
llvm_unreachable("Interference after spill.");
}
// Tell the caller to allocate to this newly freed physical register.
return *pregI;
}
// Free the cheapest physical register.
spillInterferences(minSpillReg, splitLVRs);
// Tell the caller to allocate to this newly freed physical register.
assert(minSpillAlias != 0 && "need a free register after spilling");
// We just spilled the first register that interferes with minSpillAlias. We
// now assume minSpillAlias is free because only one register alias may
// interfere at a time. e.g. we ignore predication.
unsigned interfReg = checkPhysRegInterference(lvr, minSpillAlias);
if (interfReg != 0) {
dbgs() << "spilling cannot free " << tri_->getName(minSpillAlias) <<
" for " << lvr.reg << " with interference " <<
*queries_[interfReg].firstInterference().liuSegPos()->liveVirtReg << "\n";
llvm_unreachable("Interference after spill.");
}
return minSpillAlias;
// No other spill candidates were found, so spill the current lvr.
DEBUG(dbgs() << "spilling: " << lvr << '\n');
SmallVector<LiveInterval*, 1> pendingSpills;
spiller().spill(&lvr, splitLVRs, pendingSpills);
// The live virtual register requesting allocation was spilled, so tell
// the caller not to allocate anything during this round.
return 0;
}
namespace llvm {

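selectOrSplit's contract after this change: return a physreg (freed by spillInterferences if necessary) for the caller to assign, or return 0 after spilling lvr itself, with any new intervals appended to splitLVRs. A hypothetical fragment of the driver side of that contract (allocatePhysRegs is not part of this diff; assignVirt2Phys and unify are assumed to be the existing VirtRegMap and LiveIntervalUnion operations):

// Sketch of the caller's side of the selectOrSplit contract.
unsigned availablePhysReg = selectOrSplit(*lvr, splitLVRs);
if (availablePhysReg) {
  // A nonzero result is a physreg that is now interference-free for lvr.
  vrm_->assignVirt2Phys(lvr->reg, availablePhysReg);
  physReg2liu_[availablePhysReg].unify(*lvr);
}
// A result of 0 means lvr itself was spilled; nothing is assigned this round,
// and the intervals appended to splitLVRs are queued for later allocation.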
View File

@@ -183,7 +183,7 @@ public:
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &) {
const SmallVectorImpl<LiveInterval*> &) {
// Ignore spillIs - we don't use it.
trivialSpillEverywhere(li, newIntervals);
}
@@ -213,7 +213,7 @@ public:
/// Falls back on LiveIntervals::addIntervalsForSpills.
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &spillIs) {
const SmallVectorImpl<LiveInterval*> &spillIs) {
std::vector<LiveInterval*> added =
lis->addIntervalsForSpills(*li, spillIs, loopInfo, *vrm);
newIntervals.insert(newIntervals.end(), added.begin(), added.end());
@@ -250,7 +250,7 @@ public:
void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &spillIs) {
const SmallVectorImpl<LiveInterval*> &spillIs) {
if (worthTryingToSplit(li))
tryVNISplit(li);
else

View File

@@ -36,7 +36,7 @@ namespace llvm {
/// @param newIntervals The newly created intervals will be appended here.
virtual void spill(LiveInterval *li,
SmallVectorImpl<LiveInterval*> &newIntervals,
SmallVectorImpl<LiveInterval*> &spillIs) = 0;
const SmallVectorImpl<LiveInterval*> &spillIs) = 0;
};
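Because spill() is pure virtual, the const change above is source-breaking for any out-of-tree Spiller: an override that keeps the old non-const parameter no longer matches, leaving the class abstract. A hypothetical wrapper showing the updated signature (LoggingSpiller is illustrative and not part of LLVM):

class LoggingSpiller : public Spiller {
  Spiller &impl;
public:
  explicit LoggingSpiller(Spiller &s) : impl(s) {}

  // spillIs is now read-only for the spiller; new intervals still come back
  // through newIntervals.
  void spill(LiveInterval *li,
             SmallVectorImpl<LiveInterval*> &newIntervals,
             const SmallVectorImpl<LiveInterval*> &spillIs) {
    dbgs() << "spilling " << *li << " with " << spillIs.size()
           << " pending spill(s)\n";
    impl.spill(li, newIntervals, spillIs);
  }
};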