The Indexes Patch.

This introduces a new pass, SlotIndexes, which is responsible for numbering instructions for register allocation (and other clients). SlotIndexes numbering is designed to match the existing scheme, so this patch should not cause any changes in the generated code. For consistency, and to avoid naming confusion, LiveIndex has been renamed SlotIndex. The processImplicitDefs method of the LiveIntervals analysis has been moved into its own pass so that it can be run prior to SlotIndexes. This was necessary to match the existing numbering scheme.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@85979 91177308-0d34-0410-b5e6-96231b3b80d8

This commit is contained in:
parent 888acc35a3
commit 233a60ec40
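Reviewer note: the sketch below is not part of the patch; it is a minimal illustration (hypothetical pass and variable names) of how a client MachineFunctionPass might consume the new SlotIndexes analysis, using only interfaces that appear in this diff (addRequired, getMBBStartIdx, getInstructionIndex).

#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

namespace {
  // Hypothetical client pass: prints the slot index assigned to each
  // instruction. Illustration only.
  struct SlotIndexPrinter : public MachineFunctionPass {
    static char ID;
    SlotIndexPrinter() : MachineFunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesAll();
      AU.addRequired<SlotIndexes>();   // make sure the numbering runs first
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      SlotIndexes *SI = &getAnalysis<SlotIndexes>();
      for (MachineFunction::iterator MBB = MF.begin(), MBBE = MF.end();
           MBB != MBBE; ++MBB) {
        errs() << "BB#" << MBB->getNumber() << " starts at "
               << SI->getMBBStartIdx(MBB) << '\n';
        for (MachineBasicBlock::iterator MI = MBB->begin(), MIE = MBB->end();
             MI != MIE; ++MI)
          errs() << "  " << SI->getInstructionIndex(MI) << '\n';
      }
      return false; // read-only, nothing modified
    }
  };

  char SlotIndexPrinter::ID = 0;
}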
@ -21,221 +21,19 @@
|
||||
#ifndef LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
#define LLVM_CODEGEN_LIVEINTERVAL_H
|
||||
|
||||
#include "llvm/ADT/DenseMapInfo.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include "llvm/Support/AlignOf.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include <cassert>
|
||||
#include <climits>
|
||||
|
||||
namespace llvm {
|
||||
class LiveIntervals;
|
||||
class MachineInstr;
|
||||
class MachineRegisterInfo;
|
||||
class TargetRegisterInfo;
|
||||
class raw_ostream;
|
||||
|
||||
/// LiveIndex - An opaque wrapper around machine indexes.
|
||||
class LiveIndex {
|
||||
friend class VNInfo;
|
||||
friend class LiveInterval;
|
||||
friend class LiveIntervals;
|
||||
friend struct DenseMapInfo<LiveIndex>;
|
||||
|
||||
public:
|
||||
|
||||
enum Slot { LOAD, USE, DEF, STORE, NUM };
|
||||
|
||||
private:
|
||||
|
||||
unsigned index;
|
||||
|
||||
static const unsigned PHI_BIT = 1 << 31;
|
||||
|
||||
public:
|
||||
|
||||
/// Construct a default LiveIndex pointing to a reserved index.
|
||||
LiveIndex() : index(0) {}
|
||||
|
||||
/// Construct an index from the given index, pointing to the given slot.
|
||||
LiveIndex(LiveIndex m, Slot s)
|
||||
: index((m.index / NUM) * NUM + s) {}
|
||||
|
||||
/// Print this index to the given raw_ostream.
|
||||
void print(raw_ostream &os) const;
|
||||
|
||||
/// Compare two LiveIndex objects for equality.
|
||||
bool operator==(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) == (other.index & ~PHI_BIT));
|
||||
}
|
||||
/// Compare two LiveIndex objects for inequality.
|
||||
bool operator!=(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) != (other.index & ~PHI_BIT));
|
||||
}
|
||||
|
||||
/// Compare two LiveIndex objects. Return true if the first index
|
||||
/// is strictly lower than the second.
|
||||
bool operator<(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) < (other.index & ~PHI_BIT));
|
||||
}
|
||||
/// Compare two LiveIndex objects. Return true if the first index
|
||||
/// is lower than, or equal to, the second.
|
||||
bool operator<=(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) <= (other.index & ~PHI_BIT));
|
||||
}
|
||||
|
||||
/// Compare two LiveIndex objects. Return true if the first index
|
||||
/// is greater than the second.
|
||||
bool operator>(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) > (other.index & ~PHI_BIT));
|
||||
}
|
||||
|
||||
/// Compare two LiveIndex objects. Return true if the first index
|
||||
/// is greater than, or equal to, the second.
|
||||
bool operator>=(LiveIndex other) const {
|
||||
return ((index & ~PHI_BIT) >= (other.index & ~PHI_BIT));
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a load.
|
||||
bool isLoad() const {
|
||||
return ((index % NUM) == LOAD);
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a use.
|
||||
bool isUse() const {
|
||||
return ((index % NUM) == USE);
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a def.
|
||||
bool isDef() const {
|
||||
return ((index % NUM) == DEF);
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a store.
|
||||
bool isStore() const {
|
||||
return ((index % NUM) == STORE);
|
||||
}
|
||||
|
||||
/// Returns the slot for this LiveIndex.
|
||||
Slot getSlot() const {
|
||||
return static_cast<Slot>(index % NUM);
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a non-PHI use/def.
|
||||
bool isNonPHIIndex() const {
|
||||
return ((index & PHI_BIT) == 0);
|
||||
}
|
||||
|
||||
/// Returns true if this index represents a PHI use/def.
|
||||
bool isPHIIndex() const {
|
||||
return ((index & PHI_BIT) == PHI_BIT);
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
/// Construct an index from the given index, with its PHI kill marker set.
|
||||
LiveIndex(bool phi, LiveIndex o) : index(o.index) {
|
||||
if (phi)
|
||||
index |= PHI_BIT;
|
||||
else
|
||||
index &= ~PHI_BIT;
|
||||
}
|
||||
|
||||
explicit LiveIndex(unsigned idx)
|
||||
: index(idx & ~PHI_BIT) {}
|
||||
|
||||
LiveIndex(bool phi, unsigned idx)
|
||||
: index(idx & ~PHI_BIT) {
|
||||
if (phi)
|
||||
index |= PHI_BIT;
|
||||
}
|
||||
|
||||
LiveIndex(bool phi, unsigned idx, Slot slot)
|
||||
: index(((idx / NUM) * NUM + slot) & ~PHI_BIT) {
|
||||
if (phi)
|
||||
index |= PHI_BIT;
|
||||
}
|
||||
|
||||
LiveIndex nextSlot_() const {
|
||||
assert((index & PHI_BIT) == ((index + 1) & PHI_BIT) &&
|
||||
"Index out of bounds.");
|
||||
return LiveIndex(index + 1);
|
||||
}
|
||||
|
||||
LiveIndex nextIndex_() const {
|
||||
assert((index & PHI_BIT) == ((index + NUM) & PHI_BIT) &&
|
||||
"Index out of bounds.");
|
||||
return LiveIndex(index + NUM);
|
||||
}
|
||||
|
||||
LiveIndex prevSlot_() const {
|
||||
assert((index & PHI_BIT) == ((index - 1) & PHI_BIT) &&
|
||||
"Index out of bounds.");
|
||||
return LiveIndex(index - 1);
|
||||
}
|
||||
|
||||
LiveIndex prevIndex_() const {
|
||||
assert((index & PHI_BIT) == ((index - NUM) & PHI_BIT) &&
|
||||
"Index out of bounds.");
|
||||
return LiveIndex(index - NUM);
|
||||
}
|
||||
|
||||
int distance(LiveIndex other) const {
|
||||
return (other.index & ~PHI_BIT) - (index & ~PHI_BIT);
|
||||
}
|
||||
|
||||
/// Returns an unsigned number suitable as an index into a
|
||||
/// vector over all instructions.
|
||||
unsigned getVecIndex() const {
|
||||
return (index & ~PHI_BIT) / NUM;
|
||||
}
|
||||
|
||||
/// Scale this index by the given factor.
|
||||
LiveIndex scale(unsigned factor) const {
|
||||
unsigned i = (index & ~PHI_BIT) / NUM,
|
||||
o = (index % ~PHI_BIT) % NUM;
|
||||
assert(index <= (~0U & ~PHI_BIT) / (factor * NUM) &&
|
||||
"Rescaled interval would overflow");
|
||||
return LiveIndex(i * NUM * factor, o);
|
||||
}
|
||||
|
||||
static LiveIndex emptyKey() {
|
||||
return LiveIndex(true, 0x7fffffff);
|
||||
}
|
||||
|
||||
static LiveIndex tombstoneKey() {
|
||||
return LiveIndex(true, 0x7ffffffe);
|
||||
}
|
||||
|
||||
static unsigned getHashValue(const LiveIndex &v) {
|
||||
return v.index * 37;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
inline raw_ostream& operator<<(raw_ostream &os, LiveIndex mi) {
|
||||
mi.print(os);
|
||||
return os;
|
||||
}
|
||||
|
||||
/// DenseMap specialization for LiveIndex.
|
||||
template <>
|
||||
struct DenseMapInfo<LiveIndex> {
|
||||
static inline LiveIndex getEmptyKey() {
|
||||
return LiveIndex::emptyKey();
|
||||
}
|
||||
static inline LiveIndex getTombstoneKey() {
|
||||
return LiveIndex::tombstoneKey();
|
||||
}
|
||||
static inline unsigned getHashValue(const LiveIndex &v) {
|
||||
return LiveIndex::getHashValue(v);
|
||||
}
|
||||
static inline bool isEqual(const LiveIndex &LHS,
|
||||
const LiveIndex &RHS) {
|
||||
return (LHS == RHS);
|
||||
}
|
||||
static inline bool isPod() { return true; }
|
||||
};
|
||||
|
||||
|
||||
/// VNInfo - Value Number Information.
|
||||
/// This class holds information about a machine level value, including
|
||||
@ -270,23 +68,25 @@ namespace llvm {
|
||||
|
||||
public:
|
||||
|
||||
typedef SmallVector<LiveIndex, 4> KillSet;
|
||||
typedef SmallVector<SlotIndex, 4> KillSet;
|
||||
|
||||
/// The ID number of this value.
|
||||
unsigned id;
|
||||
|
||||
/// The index of the defining instruction (if isDefAccurate() returns true).
|
||||
LiveIndex def;
|
||||
SlotIndex def;
|
||||
|
||||
KillSet kills;
|
||||
|
||||
VNInfo()
|
||||
: flags(IS_UNUSED), id(~1U) { cr.copy = 0; }
|
||||
/*
|
||||
VNInfo(LiveIntervals &li_)
|
||||
: defflags(IS_UNUSED), id(~1U) { cr.copy = 0; }
|
||||
*/
|
||||
|
||||
/// VNInfo constructor.
|
||||
/// d is presumed to point to the actual defining instr. If it doesn't
|
||||
/// setIsDefAccurate(false) should be called after construction.
|
||||
VNInfo(unsigned i, LiveIndex d, MachineInstr *c)
|
||||
VNInfo(unsigned i, SlotIndex d, MachineInstr *c)
|
||||
: flags(IS_DEF_ACCURATE), id(i), def(d) { cr.copy = c; }
|
||||
|
||||
/// VNInfo constructor; copies values from orig, except for the value number.
|
||||
@ -377,7 +177,7 @@ namespace llvm {
|
||||
}
|
||||
|
||||
/// Returns true if the given index is a kill of this value.
|
||||
bool isKill(LiveIndex k) const {
|
||||
bool isKill(SlotIndex k) const {
|
||||
KillSet::const_iterator
|
||||
i = std::lower_bound(kills.begin(), kills.end(), k);
|
||||
return (i != kills.end() && *i == k);
|
||||
@ -385,7 +185,7 @@ namespace llvm {
|
||||
|
||||
/// addKill - Add a kill instruction index to the specified value
|
||||
/// number.
|
||||
void addKill(LiveIndex k) {
|
||||
void addKill(SlotIndex k) {
|
||||
if (kills.empty()) {
|
||||
kills.push_back(k);
|
||||
} else {
|
||||
@ -397,7 +197,7 @@ namespace llvm {
|
||||
|
||||
/// Remove the specified kill index from this value's kills list.
|
||||
/// Returns true if the value was present, otherwise returns false.
|
||||
bool removeKill(LiveIndex k) {
|
||||
bool removeKill(SlotIndex k) {
|
||||
KillSet::iterator i = std::lower_bound(kills.begin(), kills.end(), k);
|
||||
if (i != kills.end() && *i == k) {
|
||||
kills.erase(i);
|
||||
@ -407,7 +207,7 @@ namespace llvm {
|
||||
}
|
||||
|
||||
/// Remove all kills in the range [s, e).
|
||||
void removeKills(LiveIndex s, LiveIndex e) {
|
||||
void removeKills(SlotIndex s, SlotIndex e) {
|
||||
KillSet::iterator
|
||||
si = std::lower_bound(kills.begin(), kills.end(), s),
|
||||
se = std::upper_bound(kills.begin(), kills.end(), e);
|
||||
@ -421,11 +221,11 @@ namespace llvm {
|
||||
/// program, with an inclusive start point and an exclusive end point.
|
||||
/// These ranges are rendered as [start,end).
|
||||
struct LiveRange {
|
||||
LiveIndex start; // Start point of the interval (inclusive)
|
||||
LiveIndex end; // End point of the interval (exclusive)
|
||||
SlotIndex start; // Start point of the interval (inclusive)
|
||||
SlotIndex end; // End point of the interval (exclusive)
|
||||
VNInfo *valno; // identifier for the value contained in this interval.
|
||||
|
||||
LiveRange(LiveIndex S, LiveIndex E, VNInfo *V)
|
||||
LiveRange(SlotIndex S, SlotIndex E, VNInfo *V)
|
||||
: start(S), end(E), valno(V) {
|
||||
|
||||
assert(S < E && "Cannot create empty or backwards range");
|
||||
@ -433,13 +233,13 @@ namespace llvm {
|
||||
|
||||
/// contains - Return true if the index is covered by this range.
|
||||
///
|
||||
bool contains(LiveIndex I) const {
|
||||
bool contains(SlotIndex I) const {
|
||||
return start <= I && I < end;
|
||||
}
|
||||
|
||||
/// containsRange - Return true if the given range, [S, E), is covered by
|
||||
/// this range.
|
||||
bool containsRange(LiveIndex S, LiveIndex E) const {
|
||||
bool containsRange(SlotIndex S, SlotIndex E) const {
|
||||
assert((S < E) && "Backwards interval?");
|
||||
return (start <= S && S < end) && (start < E && E <= end);
|
||||
}
|
||||
@ -461,11 +261,11 @@ namespace llvm {
|
||||
raw_ostream& operator<<(raw_ostream& os, const LiveRange &LR);
|
||||
|
||||
|
||||
inline bool operator<(LiveIndex V, const LiveRange &LR) {
|
||||
inline bool operator<(SlotIndex V, const LiveRange &LR) {
|
||||
return V < LR.start;
|
||||
}
|
||||
|
||||
inline bool operator<(const LiveRange &LR, LiveIndex V) {
|
||||
inline bool operator<(const LiveRange &LR, SlotIndex V) {
|
||||
return LR.start < V;
|
||||
}
|
||||
|
||||
@ -522,7 +322,7 @@ namespace llvm {
|
||||
/// end of the interval. If no LiveRange contains this position, but the
|
||||
/// position is in a hole, this method returns an iterator pointing to the
|
||||
/// LiveRange immediately after the hole.
|
||||
iterator advanceTo(iterator I, LiveIndex Pos) {
|
||||
iterator advanceTo(iterator I, SlotIndex Pos) {
|
||||
if (Pos >= endIndex())
|
||||
return end();
|
||||
while (I->end <= Pos) ++I;
|
||||
@ -569,7 +369,7 @@ namespace llvm {
|
||||
|
||||
/// getNextValue - Create a new value number and return it. MIIdx specifies
|
||||
/// the instruction that defines the value number.
|
||||
VNInfo *getNextValue(LiveIndex def, MachineInstr *CopyMI,
|
||||
VNInfo *getNextValue(SlotIndex def, MachineInstr *CopyMI,
|
||||
bool isDefAccurate, BumpPtrAllocator &VNInfoAllocator){
|
||||
VNInfo *VNI =
|
||||
static_cast<VNInfo*>(VNInfoAllocator.Allocate((unsigned)sizeof(VNInfo),
|
||||
@ -625,13 +425,15 @@ namespace llvm {
|
||||
/// current interval, but are defined in the Clobbers interval, mark them
|
||||
/// used with an unknown definition value. Caller must pass in reference to
|
||||
/// VNInfoAllocator since it will create a new val#.
|
||||
void MergeInClobberRanges(const LiveInterval &Clobbers,
|
||||
void MergeInClobberRanges(LiveIntervals &li_,
|
||||
const LiveInterval &Clobbers,
|
||||
BumpPtrAllocator &VNInfoAllocator);
|
||||
|
||||
/// MergeInClobberRange - Same as MergeInClobberRanges except it merges in a
|
||||
/// single LiveRange only.
|
||||
void MergeInClobberRange(LiveIndex Start,
|
||||
LiveIndex End,
|
||||
void MergeInClobberRange(LiveIntervals &li_,
|
||||
SlotIndex Start,
|
||||
SlotIndex End,
|
||||
BumpPtrAllocator &VNInfoAllocator);
|
||||
|
||||
/// MergeValueInAsValue - Merge all of the live ranges of a specific val#
|
||||
@ -657,56 +459,54 @@ namespace llvm {
|
||||
bool empty() const { return ranges.empty(); }
|
||||
|
||||
/// beginIndex - Return the lowest numbered slot covered by interval.
|
||||
LiveIndex beginIndex() const {
|
||||
if (empty())
|
||||
return LiveIndex();
|
||||
SlotIndex beginIndex() const {
|
||||
assert(!empty() && "Call to beginIndex() on empty interval.");
|
||||
return ranges.front().start;
|
||||
}
|
||||
|
||||
/// endIndex - Return the maximum point of the interval, exclusive.
||||
LiveIndex endIndex() const {
|
||||
if (empty())
|
||||
return LiveIndex();
|
||||
SlotIndex endIndex() const {
|
||||
assert(!empty() && "Call to endIndex() on empty interval.");
|
||||
return ranges.back().end;
|
||||
}
|
||||
|
||||
bool expiredAt(LiveIndex index) const {
|
||||
bool expiredAt(SlotIndex index) const {
|
||||
return index >= endIndex();
|
||||
}
|
||||
|
||||
bool liveAt(LiveIndex index) const;
|
||||
bool liveAt(SlotIndex index) const;
|
||||
|
||||
// liveBeforeAndAt - Check if the interval is live at the index and the
// index just before it. If index is liveAt, check if it starts a new live
// range. If it does, then check if the previous live range ends at index-1.
|
||||
bool liveBeforeAndAt(LiveIndex index) const;
|
||||
bool liveBeforeAndAt(SlotIndex index) const;
|
||||
|
||||
/// getLiveRangeContaining - Return the live range that contains the
|
||||
/// specified index, or null if there is none.
|
||||
const LiveRange *getLiveRangeContaining(LiveIndex Idx) const {
|
||||
const LiveRange *getLiveRangeContaining(SlotIndex Idx) const {
|
||||
const_iterator I = FindLiveRangeContaining(Idx);
|
||||
return I == end() ? 0 : &*I;
|
||||
}
|
||||
|
||||
/// getLiveRangeContaining - Return the live range that contains the
|
||||
/// specified index, or null if there is none.
|
||||
LiveRange *getLiveRangeContaining(LiveIndex Idx) {
|
||||
LiveRange *getLiveRangeContaining(SlotIndex Idx) {
|
||||
iterator I = FindLiveRangeContaining(Idx);
|
||||
return I == end() ? 0 : &*I;
|
||||
}
|
||||
|
||||
/// FindLiveRangeContaining - Return an iterator to the live range that
|
||||
/// contains the specified index, or end() if there is none.
|
||||
const_iterator FindLiveRangeContaining(LiveIndex Idx) const;
|
||||
const_iterator FindLiveRangeContaining(SlotIndex Idx) const;
|
||||
|
||||
/// FindLiveRangeContaining - Return an iterator to the live range that
|
||||
/// contains the specified index, or end() if there is none.
|
||||
iterator FindLiveRangeContaining(LiveIndex Idx);
|
||||
iterator FindLiveRangeContaining(SlotIndex Idx);
|
||||
|
||||
/// findDefinedVNInfoForRegInt - Find the VNInfo defined by the specified
/// index (register interval).
|
||||
VNInfo *findDefinedVNInfoForRegInt(LiveIndex Idx) const;
|
||||
VNInfo *findDefinedVNInfoForRegInt(SlotIndex Idx) const;
|
||||
|
||||
/// findDefinedVNInfo - Find the VNInfo that's defined by the specified
|
||||
/// register (stack interval only).
|
||||
@ -721,7 +521,7 @@ namespace llvm {
|
||||
|
||||
/// overlaps - Return true if the live interval overlaps a range specified
|
||||
/// by [Start, End).
|
||||
bool overlaps(LiveIndex Start, LiveIndex End) const;
|
||||
bool overlaps(SlotIndex Start, SlotIndex End) const;
|
||||
|
||||
/// overlapsFrom - Return true if the intersection of the two live intervals
|
||||
/// is not empty. The specified iterator is a hint that we can begin
|
||||
@ -738,18 +538,19 @@ namespace llvm {
|
||||
/// join - Join two live intervals (this, and other) together. This applies
|
||||
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
|
||||
/// the intervals are not joinable, this aborts.
|
||||
void join(LiveInterval &Other, const int *ValNoAssignments,
|
||||
void join(LiveInterval &Other,
|
||||
const int *ValNoAssignments,
|
||||
const int *RHSValNoAssignments,
|
||||
SmallVector<VNInfo*, 16> &NewVNInfo,
|
||||
MachineRegisterInfo *MRI);
|
||||
|
||||
/// isInOneLiveRange - Return true if the range specified is entirely in
|
||||
/// a single LiveRange of the live interval.
|
||||
bool isInOneLiveRange(LiveIndex Start, LiveIndex End);
|
||||
bool isInOneLiveRange(SlotIndex Start, SlotIndex End);
|
||||
|
||||
/// removeRange - Remove the specified range from this interval. Note that
|
||||
/// the range must be a single LiveRange in its entirety.
|
||||
void removeRange(LiveIndex Start, LiveIndex End,
|
||||
void removeRange(SlotIndex Start, SlotIndex End,
|
||||
bool RemoveDeadValNo = false);
|
||||
|
||||
void removeRange(LiveRange LR, bool RemoveDeadValNo = false) {
|
||||
@ -773,8 +574,8 @@ namespace llvm {
|
||||
void ComputeJoinedWeight(const LiveInterval &Other);
|
||||
|
||||
bool operator<(const LiveInterval& other) const {
|
||||
const LiveIndex &thisIndex = beginIndex();
|
||||
const LiveIndex &otherIndex = other.beginIndex();
|
||||
const SlotIndex &thisIndex = beginIndex();
|
||||
const SlotIndex &otherIndex = other.beginIndex();
|
||||
return (thisIndex < otherIndex ||
|
||||
(thisIndex == otherIndex && reg < other.reg));
|
||||
}
|
||||
@ -785,8 +586,9 @@ namespace llvm {
|
||||
private:
|
||||
|
||||
Ranges::iterator addRangeFrom(LiveRange LR, Ranges::iterator From);
|
||||
void extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd);
|
||||
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, LiveIndex NewStr);
|
||||
void extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd);
|
||||
Ranges::iterator extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStr);
|
||||
|
||||
LiveInterval& operator=(const LiveInterval& rhs); // DO NOT IMPLEMENT
|
||||
|
||||
};
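Reviewer note (illustration only): with LiveIndex replaced by SlotIndex throughout this header, query code over live intervals is written against the same interface as before, just with the new index type. The helper below is hypothetical and not part of the patch; it uses only members visible in this diff (reg, getLiveRangeContaining, LiveRange::start/end/valno).

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hypothetical helper: report whether LI covers the position Idx, and if so
// which live range and value number it falls into.
static void describeLiveness(const LiveInterval &LI, SlotIndex Idx) {
  const LiveRange *LR = LI.getLiveRangeContaining(Idx);
  if (!LR) {
    errs() << "reg" << LI.reg << " is not live at " << Idx << '\n';
    return;
  }
  errs() << "reg" << LI.reg << " live in [" << LR->start << ',' << LR->end
         << "), value #" << LR->valno->id << '\n';
}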
@ -23,12 +23,14 @@
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/LiveInterval.h"
|
||||
#include "llvm/CodeGen/SlotIndexes.h"
|
||||
#include "llvm/ADT/BitVector.h"
|
||||
#include "llvm/ADT/DenseMap.h"
|
||||
#include "llvm/ADT/SmallPtrSet.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
#include <cmath>
|
||||
#include <iterator>
|
||||
|
||||
namespace llvm {
|
||||
|
||||
@ -40,21 +42,6 @@ namespace llvm {
|
||||
class TargetInstrInfo;
|
||||
class TargetRegisterClass;
|
||||
class VirtRegMap;
|
||||
typedef std::pair<LiveIndex, MachineBasicBlock*> IdxMBBPair;
|
||||
|
||||
inline bool operator<(LiveIndex V, const IdxMBBPair &IM) {
|
||||
return V < IM.first;
|
||||
}
|
||||
|
||||
inline bool operator<(const IdxMBBPair &IM, LiveIndex V) {
|
||||
return IM.first < V;
|
||||
}
|
||||
|
||||
struct Idx2MBBCompare {
|
||||
bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
|
||||
return LHS.first < RHS.first;
|
||||
}
|
||||
};
|
||||
|
||||
class LiveIntervals : public MachineFunctionPass {
|
||||
MachineFunction* mf_;
|
||||
@ -64,33 +51,15 @@ namespace llvm {
|
||||
const TargetInstrInfo* tii_;
|
||||
AliasAnalysis *aa_;
|
||||
LiveVariables* lv_;
|
||||
SlotIndexes* indexes_;
|
||||
|
||||
/// Special pool allocator for VNInfo's (LiveInterval val#).
|
||||
///
|
||||
BumpPtrAllocator VNInfoAllocator;
|
||||
|
||||
/// MBB2IdxMap - The indexes of the first and last instructions in the
|
||||
/// specified basic block.
|
||||
std::vector<std::pair<LiveIndex, LiveIndex> > MBB2IdxMap;
|
||||
|
||||
/// Idx2MBBMap - Sorted list of pairs of index of first instruction
|
||||
/// and MBB id.
|
||||
std::vector<IdxMBBPair> Idx2MBBMap;
|
||||
|
||||
/// FunctionSize - The number of instructions present in the function
|
||||
uint64_t FunctionSize;
|
||||
|
||||
typedef DenseMap<const MachineInstr*, LiveIndex> Mi2IndexMap;
|
||||
Mi2IndexMap mi2iMap_;
|
||||
|
||||
typedef std::vector<MachineInstr*> Index2MiMap;
|
||||
Index2MiMap i2miMap_;
|
||||
|
||||
typedef DenseMap<unsigned, LiveInterval*> Reg2IntervalMap;
|
||||
Reg2IntervalMap r2iMap_;
|
||||
|
||||
DenseMap<MachineBasicBlock*, LiveIndex> terminatorGaps;
|
||||
|
||||
/// phiJoinCopies - Copy instructions which are PHI joins.
|
||||
SmallVector<MachineInstr*, 16> phiJoinCopies;
|
||||
|
||||
@ -100,48 +69,10 @@ namespace llvm {
|
||||
/// CloneMIs - A list of clones as result of re-materialization.
|
||||
std::vector<MachineInstr*> CloneMIs;
|
||||
|
||||
typedef LiveInterval::InstrSlots InstrSlots;
|
||||
|
||||
public:
|
||||
static char ID; // Pass identification, replacement for typeid
|
||||
LiveIntervals() : MachineFunctionPass(&ID) {}
|
||||
|
||||
LiveIndex getBaseIndex(LiveIndex index) {
|
||||
return LiveIndex(index, LiveIndex::LOAD);
|
||||
}
|
||||
LiveIndex getBoundaryIndex(LiveIndex index) {
|
||||
return LiveIndex(index,
|
||||
(LiveIndex::Slot)(LiveIndex::NUM - 1));
|
||||
}
|
||||
LiveIndex getLoadIndex(LiveIndex index) {
|
||||
return LiveIndex(index, LiveIndex::LOAD);
|
||||
}
|
||||
LiveIndex getUseIndex(LiveIndex index) {
|
||||
return LiveIndex(index, LiveIndex::USE);
|
||||
}
|
||||
LiveIndex getDefIndex(LiveIndex index) {
|
||||
return LiveIndex(index, LiveIndex::DEF);
|
||||
}
|
||||
LiveIndex getStoreIndex(LiveIndex index) {
|
||||
return LiveIndex(index, LiveIndex::STORE);
|
||||
}
|
||||
|
||||
LiveIndex getNextSlot(LiveIndex m) const {
|
||||
return m.nextSlot_();
|
||||
}
|
||||
|
||||
LiveIndex getNextIndex(LiveIndex m) const {
|
||||
return m.nextIndex_();
|
||||
}
|
||||
|
||||
LiveIndex getPrevSlot(LiveIndex m) const {
|
||||
return m.prevSlot_();
|
||||
}
|
||||
|
||||
LiveIndex getPrevIndex(LiveIndex m) const {
|
||||
return m.prevIndex_();
|
||||
}
|
||||
|
||||
static float getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
|
||||
return (isDef + isUse) * powf(10.0F, (float)loopDepth);
|
||||
}
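// Reviewer note (not part of the patch): a worked example of the spill
// weight formula above. getSpillWeight(isDef, isUse, loopDepth) evaluates
// (isDef + isUse) * 10^loopDepth, so an operand that is both read and
// written at loop depth 2 contributes (1 + 1) * 10^2 = 200, while a plain
// use outside any loop contributes (0 + 1) * 10^0 = 1.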
|
||||
@ -170,111 +101,18 @@ namespace llvm {
|
||||
return r2iMap_.count(reg);
|
||||
}
|
||||
|
||||
/// getMBBStartIdx - Return the base index of the first instruction in the
|
||||
/// specified MachineBasicBlock.
|
||||
LiveIndex getMBBStartIdx(MachineBasicBlock *MBB) const {
|
||||
return getMBBStartIdx(MBB->getNumber());
|
||||
}
|
||||
LiveIndex getMBBStartIdx(unsigned MBBNo) const {
|
||||
assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
|
||||
return MBB2IdxMap[MBBNo].first;
|
||||
}
|
||||
|
||||
/// getMBBEndIdx - Return the store index of the last instruction in the
|
||||
/// specified MachineBasicBlock.
|
||||
LiveIndex getMBBEndIdx(MachineBasicBlock *MBB) const {
|
||||
return getMBBEndIdx(MBB->getNumber());
|
||||
}
|
||||
LiveIndex getMBBEndIdx(unsigned MBBNo) const {
|
||||
assert(MBBNo < MBB2IdxMap.size() && "Invalid MBB number!");
|
||||
return MBB2IdxMap[MBBNo].second;
|
||||
}
|
||||
|
||||
/// getScaledIntervalSize - get the size of an interval in "units,"
|
||||
/// where every function is composed of one thousand units. This
|
||||
/// measure scales properly with empty index slots in the function.
|
||||
double getScaledIntervalSize(LiveInterval& I) {
|
||||
return (1000.0 / InstrSlots::NUM * I.getSize()) / i2miMap_.size();
|
||||
return (1000.0 * I.getSize()) / indexes_->getIndexesLength();
|
||||
}
|
||||
|
||||
/// getApproximateInstructionCount - computes an estimate of the number
|
||||
/// of instructions in a given LiveInterval.
|
||||
unsigned getApproximateInstructionCount(LiveInterval& I) {
|
||||
double IntervalPercentage = getScaledIntervalSize(I) / 1000.0;
|
||||
return (unsigned)(IntervalPercentage * FunctionSize);
|
||||
}
|
||||
|
||||
/// getMBBFromIndex - given an index in any instruction of an
|
||||
/// MBB, return a pointer to the MBB.
|
||||
MachineBasicBlock* getMBBFromIndex(LiveIndex index) const {
|
||||
std::vector<IdxMBBPair>::const_iterator I =
|
||||
std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), index);
|
||||
// Take the pair containing the index
|
||||
std::vector<IdxMBBPair>::const_iterator J =
|
||||
((I != Idx2MBBMap.end() && I->first > index) ||
|
||||
(I == Idx2MBBMap.end() && Idx2MBBMap.size()>0)) ? (I-1): I;
|
||||
|
||||
assert(J != Idx2MBBMap.end() && J->first <= index &&
|
||||
index <= getMBBEndIdx(J->second) &&
|
||||
"index does not correspond to an MBB");
|
||||
return J->second;
|
||||
}
|
||||
|
||||
/// getInstructionIndex - returns the base index of instr
|
||||
LiveIndex getInstructionIndex(const MachineInstr* instr) const {
|
||||
Mi2IndexMap::const_iterator it = mi2iMap_.find(instr);
|
||||
assert(it != mi2iMap_.end() && "Invalid instruction!");
|
||||
return it->second;
|
||||
}
|
||||
|
||||
/// getInstructionFromIndex - given an index in any slot of an
|
||||
/// instruction, return a pointer to the instruction.
|
||||
MachineInstr* getInstructionFromIndex(LiveIndex index) const {
|
||||
// convert index to vector index
|
||||
unsigned i = index.getVecIndex();
|
||||
assert(i < i2miMap_.size() &&
|
||||
"index does not correspond to an instruction");
|
||||
return i2miMap_[i];
|
||||
}
|
||||
|
||||
/// hasGapBeforeInstr - Return true if the previous instruction slot,
|
||||
/// i.e. Index - InstrSlots::NUM, is not occupied.
|
||||
bool hasGapBeforeInstr(LiveIndex Index) {
|
||||
Index = getBaseIndex(getPrevIndex(Index));
|
||||
return getInstructionFromIndex(Index) == 0;
|
||||
}
|
||||
|
||||
/// hasGapAfterInstr - Return true if the successive instruction slot,
|
||||
/// i.e. Index + InstrSlots::NUM, is not occupied.
|
||||
bool hasGapAfterInstr(LiveIndex Index) {
|
||||
Index = getBaseIndex(getNextIndex(Index));
|
||||
return getInstructionFromIndex(Index) == 0;
|
||||
}
|
||||
|
||||
/// findGapBeforeInstr - Find an empty instruction slot before the
|
||||
/// specified index. If "Furthest" is true, find one that's furthest
|
||||
/// away from the index (but before any index that's occupied).
|
||||
LiveIndex findGapBeforeInstr(LiveIndex Index, bool Furthest = false) {
|
||||
Index = getBaseIndex(getPrevIndex(Index));
|
||||
if (getInstructionFromIndex(Index))
|
||||
return LiveIndex(); // No gap!
|
||||
if (!Furthest)
|
||||
return Index;
|
||||
LiveIndex PrevIndex = getBaseIndex(getPrevIndex(Index));
|
||||
while (getInstructionFromIndex(Index)) {
|
||||
Index = PrevIndex;
|
||||
PrevIndex = getBaseIndex(getPrevIndex(Index));
|
||||
}
|
||||
return Index;
|
||||
}
|
||||
|
||||
/// InsertMachineInstrInMaps - Insert the specified machine instruction
|
||||
/// into the instruction index map at the given index.
|
||||
void InsertMachineInstrInMaps(MachineInstr *MI, LiveIndex Index) {
|
||||
i2miMap_[Index.getVecIndex()] = MI;
|
||||
Mi2IndexMap::iterator it = mi2iMap_.find(MI);
|
||||
assert(it == mi2iMap_.end() && "Already in map!");
|
||||
mi2iMap_[MI] = Index;
|
||||
return (unsigned)(IntervalPercentage * indexes_->getFunctionSize());
|
||||
}
|
||||
|
||||
/// conflictsWithPhysRegDef - Returns true if the specified register
|
||||
@ -288,19 +126,7 @@ namespace llvm {
|
||||
bool CheckUse,
|
||||
SmallPtrSet<MachineInstr*,32> &JoinedCopies);
|
||||
|
||||
/// findLiveInMBBs - Given a live range, if the value of the range
|
||||
/// is live in any MBB returns true as well as the list of basic blocks
|
||||
/// in which the value is live.
|
||||
bool findLiveInMBBs(LiveIndex Start, LiveIndex End,
|
||||
SmallVectorImpl<MachineBasicBlock*> &MBBs) const;
|
||||
|
||||
/// findReachableMBBs - Return a list of MBBs that can be reached via any
/// branches or fall-throughs. Return true if the list is not empty.
|
||||
bool findReachableMBBs(LiveIndex Start, LiveIndex End,
|
||||
SmallVectorImpl<MachineBasicBlock*> &MBBs) const;
|
||||
|
||||
// Interval creation
|
||||
|
||||
LiveInterval &getOrCreateInterval(unsigned reg) {
|
||||
Reg2IntervalMap::iterator I = r2iMap_.find(reg);
|
||||
if (I == r2iMap_.end())
|
||||
@ -325,36 +151,75 @@ namespace llvm {
|
||||
r2iMap_.erase(I);
|
||||
}
|
||||
|
||||
SlotIndex getZeroIndex() const {
|
||||
return indexes_->getZeroIndex();
|
||||
}
|
||||
|
||||
SlotIndex getInvalidIndex() const {
|
||||
return indexes_->getInvalidIndex();
|
||||
}
|
||||
|
||||
/// isNotInMIMap - returns true if the specified machine instr has been
|
||||
/// removed or was never entered in the map.
|
||||
bool isNotInMIMap(MachineInstr* instr) const {
|
||||
return !mi2iMap_.count(instr);
|
||||
bool isNotInMIMap(const MachineInstr* Instr) const {
|
||||
return !indexes_->hasIndex(Instr);
|
||||
}
|
||||
|
||||
/// Returns the base index of the given instruction.
|
||||
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
|
||||
return indexes_->getInstructionIndex(instr);
|
||||
}
|
||||
|
||||
/// Returns the instruction associated with the given index.
|
||||
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
|
||||
return indexes_->getInstructionFromIndex(index);
|
||||
}
|
||||
|
||||
/// Return the first index in the given basic block.
|
||||
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
|
||||
return indexes_->getMBBStartIdx(mbb);
|
||||
}
|
||||
|
||||
/// Return the last index in the given basic block.
|
||||
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
|
||||
return indexes_->getMBBEndIdx(mbb);
|
||||
}
|
||||
|
||||
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
|
||||
return indexes_->getMBBFromIndex(index);
|
||||
}
|
||||
|
||||
bool hasGapBeforeInstr(SlotIndex index) {
|
||||
return indexes_->hasGapBeforeInstr(index);
|
||||
}
|
||||
|
||||
bool hasGapAfterInstr(SlotIndex index) {
|
||||
return indexes_->hasGapAfterInstr(index);
|
||||
}
|
||||
|
||||
SlotIndex findGapBeforeInstr(SlotIndex index, bool furthest = false) {
|
||||
return indexes_->findGapBeforeInstr(index, furthest);
|
||||
}
|
||||
|
||||
void InsertMachineInstrInMaps(MachineInstr *MI, SlotIndex Index) {
|
||||
indexes_->insertMachineInstrInMaps(MI, Index);
|
||||
}
|
||||
|
||||
/// RemoveMachineInstrFromMaps - This marks the specified machine instr as
|
||||
/// deleted.
|
||||
void RemoveMachineInstrFromMaps(MachineInstr *MI) {
|
||||
// remove index -> MachineInstr and
|
||||
// MachineInstr -> index mappings
|
||||
Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
|
||||
if (mi2i != mi2iMap_.end()) {
|
||||
i2miMap_[mi2i->second.index/InstrSlots::NUM] = 0;
|
||||
mi2iMap_.erase(mi2i);
|
||||
}
|
||||
indexes_->removeMachineInstrFromMaps(MI);
|
||||
}
|
||||
|
||||
/// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
|
||||
/// maps used by register allocator.
|
||||
void ReplaceMachineInstrInMaps(MachineInstr *MI, MachineInstr *NewMI) {
|
||||
Mi2IndexMap::iterator mi2i = mi2iMap_.find(MI);
|
||||
if (mi2i == mi2iMap_.end())
|
||||
return;
|
||||
i2miMap_[mi2i->second.index/InstrSlots::NUM] = NewMI;
|
||||
Mi2IndexMap::iterator it = mi2iMap_.find(MI);
|
||||
assert(it != mi2iMap_.end() && "Invalid instruction!");
|
||||
LiveIndex Index = it->second;
|
||||
mi2iMap_.erase(it);
|
||||
mi2iMap_[NewMI] = Index;
|
||||
indexes_->replaceMachineInstrInMaps(MI, NewMI);
|
||||
}
|
||||
|
||||
bool findLiveInMBBs(SlotIndex Start, SlotIndex End,
|
||||
SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
|
||||
return indexes_->findLiveInMBBs(Start, End, MBBs);
|
||||
}
|
||||
|
||||
void renumber() {
|
||||
indexes_->renumber();
|
||||
}
|
||||
|
||||
BumpPtrAllocator& getVNInfoAllocator() { return VNInfoAllocator; }
|
||||
@ -417,13 +282,6 @@ namespace llvm {
|
||||
/// marker to implicit_def defs and their uses.
|
||||
void processImplicitDefs();
|
||||
|
||||
/// computeNumbering - Compute the index numbering.
|
||||
void computeNumbering();
|
||||
|
||||
/// scaleNumbering - Rescale interval numbers to introduce gaps for new
|
||||
/// instructions
|
||||
void scaleNumbering(int factor);
|
||||
|
||||
/// intervalIsInOneMBB - Returns true if the specified interval is entirely
|
||||
/// within a single basic block.
|
||||
bool intervalIsInOneMBB(const LiveInterval &li) const;
|
||||
@ -443,14 +301,14 @@ namespace llvm {
|
||||
/// handleVirtualRegisterDef)
|
||||
void handleRegisterDef(MachineBasicBlock *MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
LiveIndex MIIdx,
|
||||
SlotIndex MIIdx,
|
||||
MachineOperand& MO, unsigned MOIdx);
|
||||
|
||||
/// handleVirtualRegisterDef - update intervals for a virtual
|
||||
/// register def
|
||||
void handleVirtualRegisterDef(MachineBasicBlock *MBB,
|
||||
MachineBasicBlock::iterator MI,
|
||||
LiveIndex MIIdx, MachineOperand& MO,
|
||||
SlotIndex MIIdx, MachineOperand& MO,
|
||||
unsigned MOIdx,
|
||||
LiveInterval& interval);
|
||||
|
||||
@ -458,13 +316,13 @@ namespace llvm {
|
||||
/// def.
|
||||
void handlePhysicalRegisterDef(MachineBasicBlock* mbb,
|
||||
MachineBasicBlock::iterator mi,
|
||||
LiveIndex MIIdx, MachineOperand& MO,
|
||||
SlotIndex MIIdx, MachineOperand& MO,
|
||||
LiveInterval &interval,
|
||||
MachineInstr *CopyMI);
|
||||
|
||||
/// handleLiveInRegister - Create interval for a livein register.
|
||||
void handleLiveInRegister(MachineBasicBlock* mbb,
|
||||
LiveIndex MIIdx,
|
||||
SlotIndex MIIdx,
|
||||
LiveInterval &interval, bool isAlias = false);
|
||||
|
||||
/// getReMatImplicitUse - If the remat definition MI has one (for now, we
|
||||
@ -477,7 +335,7 @@ namespace llvm {
|
||||
/// which reaches the given instruction also reaches the specified use
|
||||
/// index.
|
||||
bool isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI,
|
||||
LiveIndex UseIdx) const;
|
||||
SlotIndex UseIdx) const;
|
||||
|
||||
/// isReMaterializable - Returns true if the definition MI of the specified
|
||||
/// val# of the specified interval is re-materializable. Also returns true
|
||||
@ -492,7 +350,7 @@ namespace llvm {
|
||||
/// MI. If it is successful, MI is updated with the newly created MI and
|
||||
/// returns true.
|
||||
bool tryFoldMemoryOperand(MachineInstr* &MI, VirtRegMap &vrm,
|
||||
MachineInstr *DefMI, LiveIndex InstrIdx,
|
||||
MachineInstr *DefMI, SlotIndex InstrIdx,
|
||||
SmallVector<unsigned, 2> &Ops,
|
||||
bool isSS, int FrameIndex, unsigned Reg);
|
||||
|
||||
@ -506,7 +364,7 @@ namespace llvm {
|
||||
/// VNInfo that's after the specified index but is within the basic block.
|
||||
bool anyKillInMBBAfterIdx(const LiveInterval &li, const VNInfo *VNI,
|
||||
MachineBasicBlock *MBB,
|
||||
LiveIndex Idx) const;
|
||||
SlotIndex Idx) const;
|
||||
|
||||
/// hasAllocatableSuperReg - Return true if the specified physical register
|
||||
/// has any super register that's allocatable.
|
||||
@ -514,17 +372,17 @@ namespace llvm {
|
||||
|
||||
/// SRInfo - Spill / restore info.
|
||||
struct SRInfo {
|
||||
LiveIndex index;
|
||||
SlotIndex index;
|
||||
unsigned vreg;
|
||||
bool canFold;
|
||||
SRInfo(LiveIndex i, unsigned vr, bool f)
|
||||
SRInfo(SlotIndex i, unsigned vr, bool f)
|
||||
: index(i), vreg(vr), canFold(f) {}
|
||||
};
|
||||
|
||||
bool alsoFoldARestore(int Id, LiveIndex index, unsigned vr,
|
||||
bool alsoFoldARestore(int Id, SlotIndex index, unsigned vr,
|
||||
BitVector &RestoreMBBs,
|
||||
DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
|
||||
void eraseRestoreInfo(int Id, LiveIndex index, unsigned vr,
|
||||
void eraseRestoreInfo(int Id, SlotIndex index, unsigned vr,
|
||||
BitVector &RestoreMBBs,
|
||||
DenseMap<unsigned,std::vector<SRInfo> >&RestoreIdxes);
|
||||
|
||||
@ -543,7 +401,7 @@ namespace llvm {
|
||||
/// functions for addIntervalsForSpills to rewrite uses / defs for the given
|
||||
/// live range.
|
||||
bool rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI,
|
||||
bool TrySplit, LiveIndex index, LiveIndex end,
|
||||
bool TrySplit, SlotIndex index, SlotIndex end,
|
||||
MachineInstr *MI, MachineInstr *OrigDefMI, MachineInstr *DefMI,
|
||||
unsigned Slot, int LdSlot,
|
||||
bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
|
||||
|
@ -48,8 +48,6 @@ namespace llvm {
|
||||
iterator begin() { return S2IMap.begin(); }
|
||||
iterator end() { return S2IMap.end(); }
|
||||
|
||||
void scaleNumbering(int factor);
|
||||
|
||||
unsigned getNumIntervals() const { return (unsigned)S2IMap.size(); }
|
||||
|
||||
LiveInterval &getOrCreateInterval(int Slot, const TargetRegisterClass *RC) {
include/llvm/CodeGen/ProcessImplicitDefs.h (new file, 41 lines)
@ -0,0 +1,41 @@
|
||||
//===-------------- llvm/CodeGen/ProcessImplicitDefs.h ----------*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
|
||||
#ifndef LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
|
||||
#define LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
|
||||
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
class MachineInstr;
|
||||
class TargetInstrInfo;
|
||||
|
||||
/// Process IMPLICIT_DEF instructions and make sure there is one implicit_def
|
||||
/// for each use. Add isUndef marker to implicit_def defs and their uses.
|
||||
class ProcessImplicitDefs : public MachineFunctionPass {
|
||||
private:
|
||||
|
||||
bool CanTurnIntoImplicitDef(MachineInstr *MI, unsigned Reg,
|
||||
unsigned OpIdx, const TargetInstrInfo *tii_);
|
||||
|
||||
public:
|
||||
static char ID;
|
||||
|
||||
ProcessImplicitDefs() : MachineFunctionPass(&ID) {}
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &au) const;
|
||||
|
||||
virtual bool runOnMachineFunction(MachineFunction &fn);
|
||||
};
|
||||
|
||||
}
|
||||
|
||||
#endif // LLVM_CODEGEN_PROCESSIMPLICITDEFS_H
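Reviewer note: processImplicitDefs used to be a method on LiveIntervals; as a standalone MachineFunctionPass it can now be scheduled explicitly ahead of the numbering. The fragment below is a hypothetical pipeline sketch, not part of this patch, and addExamplePasses is an invented name.

#include "llvm/PassManager.h"
#include "llvm/CodeGen/ProcessImplicitDefs.h"

using namespace llvm;

// Hypothetical pipeline fragment: run ProcessImplicitDefs before the passes
// that rely on the instruction numbering (SlotIndexes, LiveIntervals, ...),
// which are scheduled later, either explicitly or as required analyses.
static void addExamplePasses(PassManagerBase &PM) {
  PM.add(new ProcessImplicitDefs());
}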
include/llvm/CodeGen/SlotIndexes.h (new file, 775 lines)
@ -0,0 +1,775 @@
|
||||
//===- llvm/CodeGen/SlotIndexes.h - Slot indexes representation -*- C++ -*-===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
//
|
||||
// This file implements SlotIndex and related classes. The purpose of SlotIndex
|
||||
// is to describe a position at which a register can become live, or cease to
|
||||
// be live.
|
||||
//
|
||||
// SlotIndex is mostly a proxy for entries of the SlotIndexList, a class which
|
||||
// is held by the SlotIndexes pass and provides the real numbering. This allows
|
||||
// LiveIntervals to perform largely transparent renumbering. The SlotIndex
|
||||
// class does hold a PHI bit, which determines whether the index relates to a
|
||||
// PHI use or def point, or an actual instruction. See the SlotIndex class
|
||||
// description for further information.
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#ifndef LLVM_CODEGEN_SLOTINDEXES_H
|
||||
#define LLVM_CODEGEN_SLOTINDEXES_H
|
||||
|
||||
#include "llvm/ADT/PointerIntPair.h"
|
||||
#include "llvm/ADT/SmallVector.h"
|
||||
#include "llvm/CodeGen/MachineBasicBlock.h"
|
||||
#include "llvm/CodeGen/MachineFunctionPass.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/Support/Allocator.h"
|
||||
|
||||
namespace llvm {
|
||||
|
||||
/// This class represents an entry in the slot index list held in the
|
||||
/// SlotIndexes pass. It should not be used directly. See the
|
||||
/// SlotIndex & SlotIndexes classes for the public interface to this
|
||||
/// information.
|
||||
class IndexListEntry {
|
||||
friend class SlotIndex;
|
||||
friend class SlotIndexes;
|
||||
|
||||
private:
|
||||
|
||||
IndexListEntry *next, *prev;
|
||||
MachineInstr *mi;
|
||||
unsigned index;
|
||||
|
||||
public:
|
||||
|
||||
IndexListEntry(MachineInstr *mi, unsigned index)
|
||||
: mi(mi), index(index) {}
|
||||
|
||||
MachineInstr* getInstr() const { return mi; }
|
||||
void setInstr(MachineInstr *mi) { this->mi = mi; }
|
||||
|
||||
unsigned getIndex() const { return index; }
|
||||
void setIndex(unsigned index) { this->index = index; }
|
||||
|
||||
IndexListEntry* getNext() { return next; }
|
||||
const IndexListEntry* getNext() const { return next; }
|
||||
void setNext(IndexListEntry *next) { this->next = next; }
|
||||
|
||||
IndexListEntry* getPrev() { return prev; }
|
||||
const IndexListEntry* getPrev() const { return prev; }
|
||||
void setPrev(IndexListEntry *prev) { this->prev = prev; }
|
||||
|
||||
/*
|
||||
bool operator==(const IndexListEntry &other) const {
|
||||
assert(getIndex() != other.getIndex() || this == &other &&
|
||||
"Non-equal index list entries compare equal.");
|
||||
return getIndex() == other.getIndex();
|
||||
}
|
||||
|
||||
bool operator!=(const IndexListEntry &other) const {
|
||||
return getIndex() != other.getIndex();
|
||||
}
|
||||
|
||||
bool operator<(const IndexListEntry &other) const {
|
||||
return getIndex() < other.getIndex();
|
||||
}
|
||||
|
||||
bool operator<=(const IndexListEntry &other) const {
|
||||
return getIndex() <= other.getIndex();
|
||||
}
|
||||
|
||||
bool operator>(const IndexListEntry &other) const {
|
||||
return getIndex() > other.getIndex();
|
||||
}
|
||||
|
||||
bool operator>=(const IndexListEntry &other) const {
|
||||
return getIndex() >= other.getIndex();
|
||||
}
|
||||
|
||||
int distance(const IndexListEntry &other) const {
|
||||
return other.getIndex() - getIndex();
|
||||
}
|
||||
*/
|
||||
};
|
||||
|
||||
// Specialize PointerLikeTypeTraits for IndexListEntry.
|
||||
template <>
|
||||
class PointerLikeTypeTraits<IndexListEntry*> {
|
||||
public:
|
||||
static inline void* getAsVoidPointer(IndexListEntry *p) {
|
||||
return p;
|
||||
}
|
||||
static inline IndexListEntry* getFromVoidPointer(void *p) {
|
||||
return static_cast<IndexListEntry*>(p);
|
||||
}
|
||||
enum { NumLowBitsAvailable = 3 };
|
||||
};
|
||||
|
||||
/// SlotIndex - An opaque wrapper around machine indexes.
|
||||
class SlotIndex {
|
||||
friend class SlotIndexes;
|
||||
friend class DenseMapInfo<SlotIndex>;
|
||||
|
||||
private:
|
||||
|
||||
// FIXME: Is there any way to statically allocate these things and have
|
||||
// them 8-byte aligned?
|
||||
static std::auto_ptr<IndexListEntry> emptyKeyPtr, tombstoneKeyPtr;
|
||||
static const unsigned PHI_BIT = 1 << 2;
|
||||
|
||||
PointerIntPair<IndexListEntry*, 3, unsigned> lie;
|
||||
|
||||
SlotIndex(IndexListEntry *entry, unsigned phiAndSlot)
|
||||
: lie(entry, phiAndSlot) {
|
||||
assert(entry != 0 && "Attempt to construct index with 0 pointer.");
|
||||
}
|
||||
|
||||
IndexListEntry& entry() const {
|
||||
assert(lie.getPointer() != 0 && "Use of invalid index.");
|
||||
return *lie.getPointer();
|
||||
}
|
||||
|
||||
int getIndex() const {
|
||||
return entry().getIndex() | getSlot();
|
||||
}
|
||||
|
||||
static inline unsigned getHashValue(const SlotIndex &v) {
|
||||
IndexListEntry *ptrVal = &v.entry();
|
||||
return (unsigned((intptr_t)ptrVal) >> 4) ^
|
||||
(unsigned((intptr_t)ptrVal) >> 9);
|
||||
}
|
||||
|
||||
public:
|
||||
|
||||
// FIXME: Ugh. This is public because LiveIntervalAnalysis is still using it
|
||||
// for some spill weight stuff. Fix that, then make this private.
|
||||
enum Slot { LOAD, USE, DEF, STORE, NUM };
|
||||
|
||||
static inline SlotIndex getEmptyKey() {
|
||||
// FIXME: How do we guarantee these numbers don't get allocated to
|
||||
// legit indexes?
|
||||
if (emptyKeyPtr.get() == 0)
|
||||
emptyKeyPtr.reset(new IndexListEntry(0, ~0U & ~3U));
|
||||
|
||||
return SlotIndex(emptyKeyPtr.get(), 0);
|
||||
}
|
||||
|
||||
static inline SlotIndex getTombstoneKey() {
|
||||
// FIXME: How do we guarantee these numbers don't get allocated to
|
||||
// legit indexes?
|
||||
if (tombstoneKeyPtr.get() == 0)
|
||||
tombstoneKeyPtr.reset(new IndexListEntry(0, ~0U & ~7U));
|
||||
|
||||
return SlotIndex(tombstoneKeyPtr.get(), 0);
|
||||
}
|
||||
|
||||
/// Construct an invalid index.
|
||||
SlotIndex() : lie(&getEmptyKey().entry(), 0) {}
|
||||
|
||||
// Construct a new slot index from the given one, set the phi flag on the
|
||||
// new index to the value of the phi parameter.
|
||||
SlotIndex(const SlotIndex &li, bool phi)
|
||||
: lie(&li.entry(), phi ? PHI_BIT & li.getSlot() : (unsigned)li.getSlot()){
|
||||
assert(lie.getPointer() != 0 &&
|
||||
"Attempt to construct index with 0 pointer.");
|
||||
}
|
||||
|
||||
// Construct a new slot index from the given one, set the phi flag on the
|
||||
// new index to the value of the phi parameter, and the slot to the new slot.
|
||||
SlotIndex(const SlotIndex &li, bool phi, Slot s)
|
||||
: lie(&li.entry(), phi ? PHI_BIT & s : (unsigned)s) {
|
||||
assert(lie.getPointer() != 0 &&
|
||||
"Attempt to construct index with 0 pointer.");
|
||||
}
|
||||
|
||||
/// Returns true if this is a valid index. Invalid indices do
|
||||
/// not point into an index table, and cannot be compared.
|
||||
bool isValid() const {
|
||||
return (lie.getPointer() != 0) && (lie.getPointer()->getIndex() != 0);
|
||||
}
|
||||
|
||||
/// Print this index to the given raw_ostream.
|
||||
void print(raw_ostream &os) const;
|
||||
|
||||
/// Dump this index to stderr.
|
||||
void dump() const;
|
||||
|
||||
/// Compare two SlotIndex objects for equality.
|
||||
bool operator==(SlotIndex other) const {
|
||||
return getIndex() == other.getIndex();
|
||||
}
|
||||
/// Compare two SlotIndex objects for inequality.
|
||||
bool operator!=(SlotIndex other) const {
|
||||
return getIndex() != other.getIndex();
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is strictly lower than the second.
|
||||
bool operator<(SlotIndex other) const {
|
||||
return getIndex() < other.getIndex();
|
||||
}
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is lower than, or equal to, the second.
|
||||
bool operator<=(SlotIndex other) const {
|
||||
return getIndex() <= other.getIndex();
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is greater than the second.
|
||||
bool operator>(SlotIndex other) const {
|
||||
return getIndex() > other.getIndex();
|
||||
}
|
||||
|
||||
/// Compare two SlotIndex objects. Return true if the first index
|
||||
/// is greater than, or equal to, the second.
|
||||
bool operator>=(SlotIndex other) const {
|
||||
return getIndex() >= other.getIndex();
|
||||
}
|
||||
|
||||
/// Return the distance from this index to the given one.
|
||||
int distance(SlotIndex other) const {
|
||||
return other.getIndex() - getIndex();
|
||||
}
|
||||
|
||||
/// Returns the slot for this SlotIndex.
|
||||
Slot getSlot() const {
|
||||
return static_cast<Slot>(lie.getInt() & ~PHI_BIT);
|
||||
}
|
||||
|
||||
/// Returns the state of the PHI bit.
|
||||
bool isPHI() const {
|
||||
return lie.getInt() & PHI_BIT;
|
||||
}
|
||||
|
||||
/// Returns the base index associated with this index. The base index
|
||||
/// is the one associated with the LOAD slot for the instruction pointed to
|
||||
/// by this index.
|
||||
SlotIndex getBaseIndex() const {
|
||||
return getLoadIndex();
|
||||
}
|
||||
|
||||
/// Returns the boundary index associated with this index. The boundary
/// index is the one associated with the STORE slot for the instruction
|
||||
/// pointed to by this index.
|
||||
SlotIndex getBoundaryIndex() const {
|
||||
return getStoreIndex();
|
||||
}
|
||||
|
||||
/// Returns the index of the LOAD slot for the instruction pointed to by
|
||||
/// this index.
|
||||
SlotIndex getLoadIndex() const {
|
||||
return SlotIndex(&entry(), SlotIndex::LOAD);
|
||||
}
|
||||
|
||||
/// Returns the index of the USE slot for the instruction pointed to by
|
||||
/// this index.
|
||||
SlotIndex getUseIndex() const {
|
||||
return SlotIndex(&entry(), SlotIndex::USE);
|
||||
}
|
||||
|
||||
/// Returns the index of the DEF slot for the instruction pointed to by
|
||||
/// this index.
|
||||
SlotIndex getDefIndex() const {
|
||||
return SlotIndex(&entry(), SlotIndex::DEF);
|
||||
}
|
||||
|
||||
/// Returns the index of the STORE slot for the instruction pointed to by
|
||||
/// this index.
|
||||
SlotIndex getStoreIndex() const {
|
||||
return SlotIndex(&entry(), SlotIndex::STORE);
|
||||
}
|
||||
|
||||
/// Returns the next slot in the index list. This could be either the
|
||||
/// next slot for the instruction pointed to by this index or, if this
|
||||
/// index is a STORE, the first slot for the next instruction.
|
||||
/// WARNING: This method is considerably more expensive than the methods
|
||||
/// that return specific slots (getUseIndex(), etc). If you can - please
|
||||
/// use one of those methods.
|
||||
SlotIndex getNextSlot() const {
|
||||
Slot s = getSlot();
|
||||
if (s == SlotIndex::STORE) {
|
||||
return SlotIndex(entry().getNext(), SlotIndex::LOAD);
|
||||
}
|
||||
return SlotIndex(&entry(), s + 1);
|
||||
}
|
||||
|
||||
/// Returns the next index. This is the index corresponding to this
|
||||
/// index's slot, but for the next instruction.
|
||||
SlotIndex getNextIndex() const {
|
||||
return SlotIndex(entry().getNext(), getSlot());
|
||||
}
|
||||
|
||||
/// Returns the previous slot in the index list. This could be either the
|
||||
/// previous slot for the instruction pointed to by this index or, if this
|
||||
/// index is a LOAD, the last slot for the previous instruction.
|
||||
/// WARNING: This method is considerably more expensive than the methods
|
||||
/// that return specific slots (getUseIndex(), etc). If you can - please
|
||||
/// use one of those methods.
|
||||
SlotIndex getPrevSlot() const {
|
||||
Slot s = getSlot();
|
||||
if (s == SlotIndex::LOAD) {
|
||||
return SlotIndex(entry().getPrev(), SlotIndex::STORE);
|
||||
}
|
||||
return SlotIndex(&entry(), s - 1);
|
||||
}
|
||||
|
||||
/// Returns the previous index. This is the index corresponding to this
|
||||
/// index's slot, but for the previous instruction.
|
||||
SlotIndex getPrevIndex() const {
|
||||
return SlotIndex(entry().getPrev(), getSlot());
|
||||
}
|
||||
|
||||
};
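// Reviewer note (illustration only, not part of this patch): for a single
// instruction the four slots compare as LOAD < USE < DEF < STORE, so
// clients can express read-before-write ordering with plain comparisons:
//
//   SlotIndex UseIdx = MIIdx.getUseIndex();
//   SlotIndex DefIdx = MIIdx.getDefIndex();
//   assert(UseIdx < DefIdx && "uses are read before defs are written");
//
// Consequently a live range that starts at DefIdx does not contain UseIdx
// of the same instruction, which is what lets one instruction kill a value
// and define a new one.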
|
||||
|
||||
/// DenseMapInfo specialization for SlotIndex.
|
||||
template <>
|
||||
struct DenseMapInfo<SlotIndex> {
|
||||
static inline SlotIndex getEmptyKey() {
|
||||
return SlotIndex::getEmptyKey();
|
||||
}
|
||||
static inline SlotIndex getTombstoneKey() {
|
||||
return SlotIndex::getTombstoneKey();
|
||||
}
|
||||
static inline unsigned getHashValue(const SlotIndex &v) {
|
||||
return SlotIndex::getHashValue(v);
|
||||
}
|
||||
static inline bool isEqual(const SlotIndex &LHS, const SlotIndex &RHS) {
|
||||
return (LHS == RHS);
|
||||
}
|
||||
static inline bool isPod() { return false; }
|
||||
};
|
||||
|
||||
inline raw_ostream& operator<<(raw_ostream &os, SlotIndex li) {
|
||||
li.print(os);
|
||||
return os;
|
||||
}
|
||||
|
||||
typedef std::pair<SlotIndex, MachineBasicBlock*> IdxMBBPair;
|
||||
|
||||
inline bool operator<(SlotIndex V, const IdxMBBPair &IM) {
|
||||
return V < IM.first;
|
||||
}
|
||||
|
||||
inline bool operator<(const IdxMBBPair &IM, SlotIndex V) {
|
||||
return IM.first < V;
|
||||
}
|
||||
|
||||
struct Idx2MBBCompare {
|
||||
bool operator()(const IdxMBBPair &LHS, const IdxMBBPair &RHS) const {
|
||||
return LHS.first < RHS.first;
|
||||
}
|
||||
};
|
||||
|
||||
/// SlotIndexes pass.
|
||||
///
|
||||
/// This pass assigns indexes to each instruction.
|
||||
class SlotIndexes : public MachineFunctionPass {
|
||||
private:
|
||||
|
||||
MachineFunction *mf;
IndexListEntry *indexListHead;
unsigned functionSize;

typedef DenseMap<const MachineInstr*, SlotIndex> Mi2IndexMap;
Mi2IndexMap mi2iMap;

/// MBB2IdxMap - The indexes of the first and last instructions in the
/// specified basic block.
typedef DenseMap<const MachineBasicBlock*,
std::pair<SlotIndex, SlotIndex> > MBB2IdxMap;
MBB2IdxMap mbb2IdxMap;

/// Idx2MBBMap - Sorted list of pairs of index of first instruction
/// and MBB id.
std::vector<IdxMBBPair> idx2MBBMap;

typedef DenseMap<const MachineBasicBlock*, SlotIndex> TerminatorGapsMap;
TerminatorGapsMap terminatorGaps;

// IndexListEntry allocator.
BumpPtrAllocator ileAllocator;

IndexListEntry* createEntry(MachineInstr *mi, unsigned index) {
IndexListEntry *entry =
static_cast<IndexListEntry*>(
ileAllocator.Allocate(sizeof(IndexListEntry),
alignof<IndexListEntry>()));

new (entry) IndexListEntry(mi, index);

return entry;
}

void initList() {
assert(indexListHead == 0 && "Zero entry non-null at initialisation.");
indexListHead = createEntry(0, ~0U);
indexListHead->setNext(0);
indexListHead->setPrev(indexListHead);
}

void clearList() {
indexListHead = 0;
ileAllocator.Reset();
}

IndexListEntry* getTail() {
assert(indexListHead != 0 && "Call to getTail on uninitialized list.");
return indexListHead->getPrev();
}

const IndexListEntry* getTail() const {
assert(indexListHead != 0 && "Call to getTail on uninitialized list.");
return indexListHead->getPrev();
}

// Returns true if the index list is empty.
bool empty() const { return (indexListHead == getTail()); }

IndexListEntry* front() {
assert(!empty() && "front() called on empty index list.");
return indexListHead;
}

const IndexListEntry* front() const {
assert(!empty() && "front() called on empty index list.");
return indexListHead;
}

IndexListEntry* back() {
assert(!empty() && "back() called on empty index list.");
return getTail()->getPrev();
}

const IndexListEntry* back() const {
assert(!empty() && "back() called on empty index list.");
return getTail()->getPrev();
}

/// Insert a new entry before itr.
void insert(IndexListEntry *itr, IndexListEntry *val) {
assert(itr != 0 && "itr should not be null.");
IndexListEntry *prev = itr->getPrev();
val->setNext(itr);
val->setPrev(prev);

if (itr != indexListHead) {
prev->setNext(val);
}
else {
indexListHead = val;
}
itr->setPrev(val);
}

/// Push a new entry on to the end of the list.
void push_back(IndexListEntry *val) {
insert(getTail(), val);
}

public:
static char ID;

SlotIndexes() : MachineFunctionPass(&ID), indexListHead(0) {}

virtual void getAnalysisUsage(AnalysisUsage &au) const;
virtual void releaseMemory();

virtual bool runOnMachineFunction(MachineFunction &fn);

/// Dump the indexes.
void dump() const;

/// Renumber the index list, providing space for new instructions.
void renumber();

/// Returns the zero index for this analysis.
SlotIndex getZeroIndex() {
assert(front()->getIndex() == 0 && "First index is not 0?");
return SlotIndex(front(), 0);
}

/// Returns the invalid index marker for this analysis.
SlotIndex getInvalidIndex() {
return getZeroIndex();
}

/// Returns the distance between the highest and lowest indexes allocated
/// so far.
unsigned getIndexesLength() const {
assert(front()->getIndex() == 0 &&
"Initial index isn't zero?");

return back()->getIndex();
}

/// Returns the number of instructions in the function.
unsigned getFunctionSize() const {
return functionSize;
}

/// Returns true if the given machine instr is mapped to an index,
/// otherwise returns false.
bool hasIndex(const MachineInstr *instr) const {
return (mi2iMap.find(instr) != mi2iMap.end());
}

/// Returns the base index for the given instruction.
SlotIndex getInstructionIndex(const MachineInstr *instr) const {
Mi2IndexMap::const_iterator itr = mi2iMap.find(instr);
assert(itr != mi2iMap.end() && "Instruction not found in maps.");
return itr->second;
}

/// Returns the instruction for the given index, or null if the given
/// index has no instruction associated with it.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return index.entry().getInstr();
}

/// Returns the next non-null index.
SlotIndex getNextNonNullIndex(SlotIndex index) {
SlotIndex nextNonNull = index.getNextIndex();

while (&nextNonNull.entry() != getTail() &&
getInstructionFromIndex(nextNonNull) == 0) {
nextNonNull = nextNonNull.getNextIndex();
}

return nextNonNull;
}

/// Returns the first index in the given basic block.
SlotIndex getMBBStartIdx(const MachineBasicBlock *mbb) const {
MBB2IdxMap::const_iterator itr = mbb2IdxMap.find(mbb);
assert(itr != mbb2IdxMap.end() && "MBB not found in maps.");
return itr->second.first;
}

/// Returns the last index in the given basic block.
SlotIndex getMBBEndIdx(const MachineBasicBlock *mbb) const {
MBB2IdxMap::const_iterator itr = mbb2IdxMap.find(mbb);
assert(itr != mbb2IdxMap.end() && "MBB not found in maps.");
return itr->second.second;
}

/// Returns the terminator gap for the given index.
SlotIndex getTerminatorGap(const MachineBasicBlock *mbb) {
TerminatorGapsMap::iterator itr = terminatorGaps.find(mbb);
assert(itr != terminatorGaps.end() &&
"All MBBs should have terminator gaps in their indexes.");
return itr->second;
}

/// Returns the basic block which the given index falls in.
MachineBasicBlock* getMBBFromIndex(SlotIndex index) const {
std::vector<IdxMBBPair>::const_iterator I =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), index);
// Take the pair containing the index
std::vector<IdxMBBPair>::const_iterator J =
((I != idx2MBBMap.end() && I->first > index) ||
(I == idx2MBBMap.end() && idx2MBBMap.size()>0)) ? (I-1): I;

assert(J != idx2MBBMap.end() && J->first <= index &&
index <= getMBBEndIdx(J->second) &&
"index does not correspond to an MBB");
return J->second;
}

bool findLiveInMBBs(SlotIndex start, SlotIndex end,
SmallVectorImpl<MachineBasicBlock*> &mbbs) const {
std::vector<IdxMBBPair>::const_iterator itr =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);
bool resVal = false;

while (itr != idx2MBBMap.end()) {
if (itr->first >= end)
break;
mbbs.push_back(itr->second);
resVal = true;
++itr;
}
return resVal;
}

/// Return a list of MBBs that can be reach via any branches or
/// fall-throughs.
bool findReachableMBBs(SlotIndex start, SlotIndex end,
SmallVectorImpl<MachineBasicBlock*> &mbbs) const {
std::vector<IdxMBBPair>::const_iterator itr =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);

bool resVal = false;
while (itr != idx2MBBMap.end()) {
if (itr->first > end)
break;
MachineBasicBlock *mbb = itr->second;
if (getMBBEndIdx(mbb) > end)
break;
for (MachineBasicBlock::succ_iterator si = mbb->succ_begin(),
se = mbb->succ_end(); si != se; ++si)
mbbs.push_back(*si);
resVal = true;
++itr;
}
return resVal;
}

/// Returns the MBB covering the given range, or null if the range covers
/// more than one basic block.
MachineBasicBlock* getMBBCoveringRange(SlotIndex start, SlotIndex end) const {

assert(start < end && "Backwards ranges not allowed.");

std::vector<IdxMBBPair>::const_iterator itr =
std::lower_bound(idx2MBBMap.begin(), idx2MBBMap.end(), start);

if (itr == idx2MBBMap.end()) {
itr = prior(itr);
return itr->second;
}

// Check that we don't cross the boundary into this block.
if (itr->first < end)
return 0;

itr = prior(itr);

if (itr->first <= start)
return itr->second;

return 0;
}

/// Returns true if there is a gap in the numbering before the given index.
bool hasGapBeforeInstr(SlotIndex index) {
index = index.getBaseIndex();
SlotIndex prevIndex = index.getPrevIndex();

if (prevIndex == getZeroIndex())
return false;

if (getInstructionFromIndex(prevIndex) == 0)
return true;

if (prevIndex.distance(index) >= 2 * SlotIndex::NUM)
return true;

return false;
}

/// Returns true if there is a gap in the numbering after the given index.
bool hasGapAfterInstr(SlotIndex index) const {
// Not implemented yet.
assert(false &&
"SlotIndexes::hasGapAfterInstr(SlotIndex) not implemented yet.");
return false;
}

/// findGapBeforeInstr - Find an empty instruction slot before the
/// specified index. If "Furthest" is true, find one that's furthest
/// away from the index (but before any index that's occupied).
// FIXME: This whole method should go away in future. It should
// always be possible to insert code between existing indices.
SlotIndex findGapBeforeInstr(SlotIndex index, bool furthest = false) {
if (index == getZeroIndex())
return getInvalidIndex();

index = index.getBaseIndex();
SlotIndex prevIndex = index.getPrevIndex();

if (prevIndex == getZeroIndex())
return getInvalidIndex();

// Try to reuse existing index objects with null-instrs.
if (getInstructionFromIndex(prevIndex) == 0) {
if (furthest) {
while (getInstructionFromIndex(prevIndex) == 0 &&
prevIndex != getZeroIndex()) {
prevIndex = prevIndex.getPrevIndex();
}

prevIndex = prevIndex.getNextIndex();
}

assert(getInstructionFromIndex(prevIndex) == 0 && "Index list is broken.");

return prevIndex;
}

int dist = prevIndex.distance(index);

// Double check that the spacing between this instruction and
// the last is sane.
assert(dist >= SlotIndex::NUM &&
"Distance between indexes too small.");

// If there's no gap return an invalid index.
if (dist < 2*SlotIndex::NUM) {
return getInvalidIndex();
}

// Otherwise insert new index entries into the list using the
// gap in the numbering.
IndexListEntry *newEntry =
createEntry(0, prevIndex.entry().getIndex() + SlotIndex::NUM);

insert(&index.entry(), newEntry);

// And return a pointer to the entry at the start of the gap.
return index.getPrevIndex();
}

/// Insert the given machine instruction into the mapping at the given
/// index.
void insertMachineInstrInMaps(MachineInstr *mi, SlotIndex index) {
index = index.getBaseIndex();
IndexListEntry *miEntry = &index.entry();
assert(miEntry->getInstr() == 0 && "Index already in use.");
miEntry->setInstr(mi);

assert(mi2iMap.find(mi) == mi2iMap.end() &&
"MachineInstr already has an index.");

mi2iMap.insert(std::make_pair(mi, index));
}

/// Remove the given machine instruction from the mapping.
void removeMachineInstrFromMaps(MachineInstr *mi) {
// remove index -> MachineInstr and
// MachineInstr -> index mappings
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
if (mi2iItr != mi2iMap.end()) {
IndexListEntry *miEntry(&mi2iItr->second.entry());
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
// FIXME: Eventually we want to actually delete these indexes.
miEntry->setInstr(0);
mi2iMap.erase(mi2iItr);
}
}

/// ReplaceMachineInstrInMaps - Replacing a machine instr with a new one in
/// maps used by register allocator.
void replaceMachineInstrInMaps(MachineInstr *mi, MachineInstr *newMI) {
Mi2IndexMap::iterator mi2iItr = mi2iMap.find(mi);
if (mi2iItr == mi2iMap.end())
return;
SlotIndex replaceBaseIndex = mi2iItr->second;
IndexListEntry *miEntry(&replaceBaseIndex.entry());
assert(miEntry->getInstr() == mi &&
"Mismatched instruction in index tables.");
miEntry->setInstr(newMI);
mi2iMap.erase(mi2iItr);
mi2iMap.insert(std::make_pair(newMI, replaceBaseIndex));
}

};

}

#endif // LLVM_CODEGEN_LIVEINDEX_H
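Before the implementation changes, a brief usage sketch may help. The free function below is illustrative only and is not part of this commit; its name and counting logic are invented, and only the SlotIndexes and SlotIndex calls are taken from the declarations above.

#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/SlotIndexes.h"

// Count the instructions whose slot index falls inside their block's
// [start, end] index range. Purely a usage illustration of the new API.
static unsigned countNumberedInstrs(llvm::SlotIndexes &SIs,
                                    llvm::MachineFunction &MF) {
  unsigned Numbered = 0;
  for (llvm::MachineFunction::iterator MBBI = MF.begin(), MBBE = MF.end();
       MBBI != MBBE; ++MBBI) {
    llvm::MachineBasicBlock *MBB = &*MBBI;
    llvm::SlotIndex Start = SIs.getMBBStartIdx(MBB);   // first index in block
    llvm::SlotIndex End = SIs.getMBBEndIdx(MBB);       // last index in block
    for (llvm::MachineBasicBlock::iterator MII = MBB->begin(),
         MIE = MBB->end(); MII != MIE; ++MII) {
      if (!SIs.hasIndex(&*MII))
        continue; // not every instruction need be numbered
      llvm::SlotIndex Idx = SIs.getInstructionIndex(&*MII);
      if (Start <= Idx && Idx <= End)
        ++Numbered;
    }
  }
  return Numbered;
}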
@@ -19,6 +19,7 @@
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallSet.h"
@@ -28,11 +29,6 @@
#include <algorithm>
using namespace llvm;

// Print a LiveIndex to a raw_ostream.
void LiveIndex::print(raw_ostream &os) const {
os << (index & ~PHI_BIT);
}

// An example for liveAt():
//
// this = [1,4), liveAt(0) will return false. The instruction defining this
@@ -40,7 +36,7 @@ void LiveIndex::print(raw_ostream &os) const {
// variable it represents. This is because slot 1 is used (def slot) and spans
// up to slot 3 (store slot).
//
bool LiveInterval::liveAt(LiveIndex I) const {
bool LiveInterval::liveAt(SlotIndex I) const {
Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);

if (r == ranges.begin())
@@ -53,7 +49,7 @@ bool LiveInterval::liveAt(LiveIndex I) const {
// liveBeforeAndAt - Check if the interval is live at the index and the index
// just before it. If index is liveAt, check if it starts a new live range.
// If it does, then check if the previous live range ends at index-1.
bool LiveInterval::liveBeforeAndAt(LiveIndex I) const {
bool LiveInterval::liveBeforeAndAt(SlotIndex I) const {
Ranges::const_iterator r = std::upper_bound(ranges.begin(), ranges.end(), I);

if (r == ranges.begin())
@@ -131,7 +127,7 @@ bool LiveInterval::overlapsFrom(const LiveInterval& other,

/// overlaps - Return true if the live interval overlaps a range specified
/// by [Start, End).
bool LiveInterval::overlaps(LiveIndex Start, LiveIndex End) const {
bool LiveInterval::overlaps(SlotIndex Start, SlotIndex End) const {
assert(Start < End && "Invalid range");
const_iterator I = begin();
const_iterator E = end();
@@ -149,10 +145,10 @@ bool LiveInterval::overlaps(LiveIndex Start, LiveIndex End) const {
/// specified by I to end at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with. The iterator is
/// not invalidated.
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
void LiveInterval::extendIntervalEndTo(Ranges::iterator I, SlotIndex NewEnd) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;
LiveIndex OldEnd = I->end;
SlotIndex OldEnd = I->end;

// Search for the first interval that we can't merge with.
Ranges::iterator MergeTo = next(I);
@@ -167,7 +163,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
ranges.erase(next(I), MergeTo);

// Update kill info.
ValNo->removeKills(OldEnd, I->end.prevSlot_());
ValNo->removeKills(OldEnd, I->end.getPrevSlot());

// If the newly formed range now touches the range after it and if they have
// the same value number, merge the two ranges into one range.
@@ -183,7 +179,7 @@ void LiveInterval::extendIntervalEndTo(Ranges::iterator I, LiveIndex NewEnd) {
/// specified by I to start at the specified endpoint. To do this, we should
/// merge and eliminate all ranges that this will overlap with.
LiveInterval::Ranges::iterator
LiveInterval::extendIntervalStartTo(Ranges::iterator I, LiveIndex NewStart) {
LiveInterval::extendIntervalStartTo(Ranges::iterator I, SlotIndex NewStart) {
assert(I != ranges.end() && "Not a valid interval!");
VNInfo *ValNo = I->valno;

@@ -216,7 +212,7 @@ LiveInterval::extendIntervalStartTo(Ranges::iterator I, LiveIndex NewStart) {

LiveInterval::iterator
LiveInterval::addRangeFrom(LiveRange LR, iterator From) {
LiveIndex Start = LR.start, End = LR.end;
SlotIndex Start = LR.start, End = LR.end;
iterator it = std::upper_bound(From, ranges.end(), Start);

// If the inserted interval starts in the middle or right at the end of
@@ -268,7 +264,7 @@ LiveInterval::addRangeFrom(LiveRange LR, iterator From) {

/// isInOneLiveRange - Return true if the range specified is entirely in
/// a single LiveRange of the live interval.
bool LiveInterval::isInOneLiveRange(LiveIndex Start, LiveIndex End) {
bool LiveInterval::isInOneLiveRange(SlotIndex Start, SlotIndex End) {
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
if (I == ranges.begin())
return false;
@@ -279,7 +275,7 @@ bool LiveInterval::isInOneLiveRange(LiveIndex Start, LiveIndex End) {

/// removeRange - Remove the specified range from this interval. Note that
/// the range must be in a single LiveRange in its entirety.
void LiveInterval::removeRange(LiveIndex Start, LiveIndex End,
void LiveInterval::removeRange(SlotIndex Start, SlotIndex End,
bool RemoveDeadValNo) {
// Find the LiveRange containing this span.
Ranges::iterator I = std::upper_bound(ranges.begin(), ranges.end(), Start);
@@ -331,7 +327,7 @@ void LiveInterval::removeRange(LiveIndex Start, LiveIndex End,
}

// Otherwise, we are splitting the LiveRange into two pieces.
LiveIndex OldEnd = I->end;
SlotIndex OldEnd = I->end;
I->end = Start; // Trim the old interval.

// Insert the new one.
@@ -362,36 +358,11 @@ void LiveInterval::removeValNo(VNInfo *ValNo) {
ValNo->setIsUnused(true);
}
}

/// scaleNumbering - Renumber VNI and ranges to provide gaps for new
/// instructions.

void LiveInterval::scaleNumbering(unsigned factor) {
// Scale ranges.
for (iterator RI = begin(), RE = end(); RI != RE; ++RI) {
RI->start = RI->start.scale(factor);
RI->end = RI->end.scale(factor);
}

// Scale VNI info.
for (vni_iterator VNI = vni_begin(), VNIE = vni_end(); VNI != VNIE; ++VNI) {
VNInfo *vni = *VNI;

if (vni->isDefAccurate())
vni->def = vni->def.scale(factor);

for (unsigned i = 0; i < vni->kills.size(); ++i) {
if (!vni->kills[i].isPHIIndex())
vni->kills[i] = vni->kills[i].scale(factor);
}
}
}

/// getLiveRangeContaining - Return the live range that contains the
/// specified index, or null if there is none.
LiveInterval::const_iterator
LiveInterval::FindLiveRangeContaining(LiveIndex Idx) const {
LiveInterval::FindLiveRangeContaining(SlotIndex Idx) const {
const_iterator It = std::upper_bound(begin(), end(), Idx);
if (It != ranges.begin()) {
--It;
@@ -403,7 +374,7 @@ LiveInterval::FindLiveRangeContaining(LiveIndex Idx) const {
}

LiveInterval::iterator
LiveInterval::FindLiveRangeContaining(LiveIndex Idx) {
LiveInterval::FindLiveRangeContaining(SlotIndex Idx) {
iterator It = std::upper_bound(begin(), end(), Idx);
if (It != begin()) {
--It;
@@ -416,7 +387,7 @@ LiveInterval::FindLiveRangeContaining(LiveIndex Idx) {

/// findDefinedVNInfo - Find the VNInfo defined by the specified
/// index (register interval).
VNInfo *LiveInterval::findDefinedVNInfoForRegInt(LiveIndex Idx) const {
VNInfo *LiveInterval::findDefinedVNInfoForRegInt(SlotIndex Idx) const {
for (LiveInterval::const_vni_iterator i = vni_begin(), e = vni_end();
i != e; ++i) {
if ((*i)->def == Idx)
@@ -440,7 +411,8 @@ VNInfo *LiveInterval::findDefinedVNInfoForStackInt(unsigned reg) const {
/// join - Join two live intervals (this, and other) together. This applies
/// mappings to the value numbers in the LHS/RHS intervals as specified. If
/// the intervals are not joinable, this aborts.
void LiveInterval::join(LiveInterval &Other, const int *LHSValNoAssignments,
void LiveInterval::join(LiveInterval &Other,
const int *LHSValNoAssignments,
const int *RHSValNoAssignments,
SmallVector<VNInfo*, 16> &NewVNInfo,
MachineRegisterInfo *MRI) {
@@ -554,14 +526,15 @@ void LiveInterval::MergeRangesInAsValue(const LiveInterval &RHS,
/// The LiveRanges in RHS are allowed to overlap with LiveRanges in the
/// current interval, it will replace the value numbers of the overlaped
/// live ranges with the specified value number.
void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
const VNInfo *RHSValNo, VNInfo *LHSValNo) {
void LiveInterval::MergeValueInAsValue(
const LiveInterval &RHS,
const VNInfo *RHSValNo, VNInfo *LHSValNo) {
SmallVector<VNInfo*, 4> ReplacedValNos;
iterator IP = begin();
for (const_iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
if (I->valno != RHSValNo)
continue;
LiveIndex Start = I->start, End = I->end;
SlotIndex Start = I->start, End = I->end;
IP = std::upper_bound(IP, end(), Start);
// If the start of this range overlaps with an existing liverange, trim it.
if (IP != begin() && IP[-1].end > Start) {
@@ -621,7 +594,8 @@ void LiveInterval::MergeValueInAsValue(const LiveInterval &RHS,
/// MergeInClobberRanges - For any live ranges that are not defined in the
/// current interval, but are defined in the Clobbers interval, mark them
/// used with an unknown definition value.
void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
void LiveInterval::MergeInClobberRanges(LiveIntervals &li_,
const LiveInterval &Clobbers,
BumpPtrAllocator &VNInfoAllocator) {
if (Clobbers.empty()) return;

@@ -638,20 +612,20 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
ClobberValNo = UnusedValNo;
else {
UnusedValNo = ClobberValNo =
getNextValue(LiveIndex(), 0, false, VNInfoAllocator);
getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);
ValNoMaps.insert(std::make_pair(I->valno, ClobberValNo));
}

bool Done = false;
LiveIndex Start = I->start, End = I->end;
SlotIndex Start = I->start, End = I->end;
// If a clobber range starts before an existing range and ends after
// it, the clobber range will need to be split into multiple ranges.
// Loop until the entire clobber range is handled.
while (!Done) {
Done = true;
IP = std::upper_bound(IP, end(), Start);
LiveIndex SubRangeStart = Start;
LiveIndex SubRangeEnd = End;
SlotIndex SubRangeStart = Start;
SlotIndex SubRangeEnd = End;

// If the start of this range overlaps with an existing liverange, trim it.
if (IP != begin() && IP[-1].end > SubRangeStart) {
@@ -687,13 +661,14 @@ void LiveInterval::MergeInClobberRanges(const LiveInterval &Clobbers,
}
}

void LiveInterval::MergeInClobberRange(LiveIndex Start,
LiveIndex End,
void LiveInterval::MergeInClobberRange(LiveIntervals &li_,
SlotIndex Start,
SlotIndex End,
BumpPtrAllocator &VNInfoAllocator) {
// Find a value # to use for the clobber ranges. If there is already a value#
// for unknown values, use it.
VNInfo *ClobberValNo =
getNextValue(LiveIndex(), 0, false, VNInfoAllocator);
getNextValue(li_.getInvalidIndex(), 0, false, VNInfoAllocator);

iterator IP = begin();
IP = std::upper_bound(IP, end(), Start);
@@ -881,8 +856,6 @@ void LiveInterval::print(raw_ostream &OS, const TargetRegisterInfo *TRI) const {
OS << "-(";
for (unsigned j = 0; j != ee; ++j) {
OS << vni->kills[j];
if (vni->kills[j].isPHIIndex())
OS << "*";
if (j != ee-1)
OS << " ";
}
File diff suppressed because it is too large
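The per-file hunks that follow repeat one mechanical pattern: LiveIndex becomes SlotIndex, and slot arithmetic that used to go through LiveIntervals helpers moves onto SlotIndex itself. The condensed before/after sketch below summarises that pattern; it is not a hunk from the commit, and the identifiers (LIs, MI, MBB) simply follow the surrounding code.

// Before: indexes are opaque; LiveIntervals performs the slot math.
LiveIndex Idx = LIs->getInstructionIndex(MI);
LiveIndex Def = LIs->getDefIndex(Idx);
LiveIndex Use = LIs->getUseIndex(Idx);
LiveIndex Next = LIs->getNextSlot(LIs->getMBBEndIdx(MBB));

// After: SlotIndex carries the slot math itself.
SlotIndex Idx2 = LIs->getInstructionIndex(MI);
SlotIndex Def2 = Idx2.getDefIndex();
SlotIndex Use2 = Idx2.getUseIndex();
SlotIndex Next2 = LIs->getMBBEndIdx(MBB).getNextSlot();

// A default-constructed SlotIndex() stays in use as the "no index" sentinel,
// exactly as LiveIndex() was used before (see the findGapBeforeInstr callers
// and the vni->def checks below).
if (Idx2 != SlotIndex()) {
  // Idx2 maps to a real instruction slot.
}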
@@ -27,15 +27,10 @@ using namespace llvm;
char LiveStacks::ID = 0;
static RegisterPass<LiveStacks> X("livestacks", "Live Stack Slot Analysis");

void LiveStacks::scaleNumbering(int factor) {
// Scale the intervals.
for (iterator LI = begin(), LE = end(); LI != LE; ++LI) {
LI->second.scaleNumbering(factor);
}
}

void LiveStacks::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addPreserved<SlotIndexes>();
AU.addRequiredTransitive<SlotIndexes>();
MachineFunctionPass::getAnalysisUsage(AU);
}

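Any pass that only reads the numbering declares its dependence the same way. The sketch below shows that declaration for a hypothetical read-only client; the pass name is invented, and the calls mirror LiveStacks::getAnalysisUsage above.

#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/SlotIndexes.h"
using namespace llvm;

namespace {
  // Hypothetical read-only client of SlotIndexes.
  struct IndexReader : public MachineFunctionPass {
    static char ID;
    IndexReader() : MachineFunctionPass(&ID) {}

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesAll();                    // nothing is modified here
      AU.addRequiredTransitive<SlotIndexes>(); // keep the numbering alive
      AU.addPreserved<SlotIndexes>();          // and do not invalidate it
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual bool runOnMachineFunction(MachineFunction &MF) {
      SlotIndexes &SIs = getAnalysis<SlotIndexes>();
      (void)SIs.getFunctionSize(); // query the numbering here
      return false;                // read-only: nothing changed
    }
  };
  char IndexReader::ID = 0;
}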
@@ -57,6 +57,7 @@ namespace {
const TargetRegisterInfo* TRI;
MachineFrameInfo *MFI;
MachineRegisterInfo *MRI;
SlotIndexes *SIs;
LiveIntervals *LIs;
LiveStacks *LSs;
VirtRegMap *VRM;
@@ -68,7 +69,7 @@ namespace {
MachineBasicBlock *BarrierMBB;

// Barrier - Current barrier index.
LiveIndex BarrierIdx;
SlotIndex BarrierIdx;

// CurrLI - Current live interval being split.
LiveInterval *CurrLI;
@@ -83,16 +84,19 @@ namespace {
DenseMap<unsigned, int> IntervalSSMap;

// Def2SpillMap - A map from a def instruction index to spill index.
DenseMap<LiveIndex, LiveIndex> Def2SpillMap;
DenseMap<SlotIndex, SlotIndex> Def2SpillMap;

public:
static char ID;
PreAllocSplitting() : MachineFunctionPass(&ID) {}
PreAllocSplitting()
: MachineFunctionPass(&ID) {}

virtual bool runOnMachineFunction(MachineFunction &MF);

virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesCFG();
AU.addRequired<SlotIndexes>();
AU.addPreserved<SlotIndexes>();
AU.addRequired<LiveIntervals>();
AU.addPreserved<LiveIntervals>();
AU.addRequired<LiveStacks>();
@@ -129,23 +133,23 @@ namespace {
private:
MachineBasicBlock::iterator
findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
LiveIndex&);
SlotIndex&);

MachineBasicBlock::iterator
findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
SmallPtrSet<MachineInstr*, 4>&, LiveIndex&);
SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);

MachineBasicBlock::iterator
findRestorePoint(MachineBasicBlock*, MachineInstr*, LiveIndex,
SmallPtrSet<MachineInstr*, 4>&, LiveIndex&);
findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);

int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);

bool IsAvailableInStack(MachineBasicBlock*, unsigned,
LiveIndex, LiveIndex,
LiveIndex&, int&) const;
SlotIndex, SlotIndex,
SlotIndex&, int&) const;

void UpdateSpillSlotInterval(VNInfo*, LiveIndex, LiveIndex);
void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);

bool SplitRegLiveInterval(LiveInterval*);

@@ -157,7 +161,7 @@ namespace {
bool Rematerialize(unsigned vreg, VNInfo* ValNo,
MachineInstr* DefMI,
MachineBasicBlock::iterator RestorePt,
LiveIndex RestoreIdx,
SlotIndex RestoreIdx,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
MachineInstr* DefMI,
@@ -209,12 +213,12 @@ const PassInfo *const llvm::PreAllocSplittingID = &X;
/// instruction index map. If there isn't one, return end().
MachineBasicBlock::iterator
PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
LiveIndex &SpotIndex) {
SlotIndex &SpotIndex) {
MachineBasicBlock::iterator MII = MI;
if (++MII != MBB->end()) {
LiveIndex Index =
SlotIndex Index =
LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
if (Index != LiveIndex()) {
if (Index != SlotIndex()) {
SpotIndex = Index;
return MII;
}
@@ -230,7 +234,7 @@ MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
MachineInstr *DefMI,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
LiveIndex &SpillIndex) {
SlotIndex &SpillIndex) {
MachineBasicBlock::iterator Pt = MBB->begin();

MachineBasicBlock::iterator MII = MI;
@@ -243,7 +247,7 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;

while (MII != EndPt && !RefsInMBB.count(MII)) {
LiveIndex Index = LIs->getInstructionIndex(MII);
SlotIndex Index = LIs->getInstructionIndex(MII);

// We can't insert the spill between the barrier (a call), and its
// corresponding call frame setup.
@@ -276,9 +280,9 @@ PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
LiveIndex LastIdx,
SlotIndex LastIdx,
SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
LiveIndex &RestoreIndex) {
SlotIndex &RestoreIndex) {
// FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
// begin index accordingly.
MachineBasicBlock::iterator Pt = MBB->end();
@@ -299,10 +303,10 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
// FIXME: Limit the number of instructions to examine to reduce
// compile time?
while (MII != EndPt) {
LiveIndex Index = LIs->getInstructionIndex(MII);
SlotIndex Index = LIs->getInstructionIndex(MII);
if (Index > LastIdx)
break;
LiveIndex Gap = LIs->findGapBeforeInstr(Index);
SlotIndex Gap = LIs->findGapBeforeInstr(Index);

// We can't insert a restore between the barrier (a call) and its
// corresponding call frame teardown.
@@ -311,7 +315,7 @@ PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
++MII;
} while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
} else if (Gap != LiveIndex()) {
} else if (Gap != SlotIndex()) {
Pt = MII;
RestoreIndex = Gap;
}
@@ -344,7 +348,7 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
CurrSValNo = CurrSLI->getNextValue(LiveIndex(), 0, false,
CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
LSs->getVNInfoAllocator());
return SS;
}
@@ -353,9 +357,9 @@ int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
unsigned Reg, LiveIndex DefIndex,
LiveIndex RestoreIndex,
LiveIndex &SpillIndex,
unsigned Reg, SlotIndex DefIndex,
SlotIndex RestoreIndex,
SlotIndex &SpillIndex,
int& SS) const {
if (!DefMBB)
return false;
@@ -363,7 +367,7 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
if (I == IntervalSSMap.end())
return false;
DenseMap<LiveIndex, LiveIndex>::iterator
DenseMap<SlotIndex, SlotIndex>::iterator
II = Def2SpillMap.find(DefIndex);
if (II == Def2SpillMap.end())
return false;
@@ -384,8 +388,8 @@ PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
/// interval being split, and the spill and restore indicies, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
LiveIndex RestoreIndex) {
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
SlotIndex RestoreIndex) {
assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
"Expect restore in the barrier mbb");

@@ -398,8 +402,8 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
}

SmallPtrSet<MachineBasicBlock*, 4> Processed;
LiveIndex EndIdx = LIs->getMBBEndIdx(MBB);
LiveRange SLR(SpillIndex, LIs->getNextSlot(EndIdx), CurrSValNo);
SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
LiveRange SLR(SpillIndex, EndIdx.getNextSlot(), CurrSValNo);
CurrSLI->addRange(SLR);
Processed.insert(MBB);

@@ -418,7 +422,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
WorkList.pop_back();
if (Processed.count(MBB))
continue;
LiveIndex Idx = LIs->getMBBStartIdx(MBB);
SlotIndex Idx = LIs->getMBBStartIdx(MBB);
LR = CurrLI->getLiveRangeContaining(Idx);
if (LR && LR->valno == ValNo) {
EndIdx = LIs->getMBBEndIdx(MBB);
@@ -428,7 +432,7 @@ PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, LiveIndex SpillIndex,
CurrSLI->addRange(SLR);
} else if (LR->end > EndIdx) {
// Live range extends beyond end of mbb, process successors.
LiveRange SLR(Idx, LIs->getNextIndex(EndIdx), CurrSValNo);
LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
CurrSLI->addRange(SLR);
for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
SE = MBB->succ_end(); SI != SE; ++SI)
@@ -491,12 +495,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
}

// Once we've found it, extend its VNInfo to our instruction.
LiveIndex DefIndex = LIs->getInstructionIndex(Walker);
DefIndex = LIs->getDefIndex(DefIndex);
LiveIndex EndIndex = LIs->getMBBEndIdx(MBB);
SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
DefIndex = DefIndex.getDefIndex();
SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);

RetVNI = NewVNs[Walker];
LI->addRange(LiveRange(DefIndex, LIs->getNextSlot(EndIndex), RetVNI));
LI->addRange(LiveRange(DefIndex, EndIndex.getNextSlot(), RetVNI));
} else if (!ContainsDefs && ContainsUses) {
SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

@@ -528,12 +532,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}

LiveIndex UseIndex = LIs->getInstructionIndex(Walker);
UseIndex = LIs->getUseIndex(UseIndex);
LiveIndex EndIndex;
SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
UseIndex = UseIndex.getUseIndex();
SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LIs->getUseIndex(EndIndex);
EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);

@@ -542,7 +546,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);

LI->addRange(LiveRange(UseIndex, LIs->getNextSlot(EndIndex), RetVNI));
LI->addRange(LiveRange(UseIndex, EndIndex.getNextSlot(), RetVNI));

// FIXME: Need to set kills properly for inter-block stuff.
if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
@@ -588,13 +592,12 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
IsTopLevel, IsIntraBlock);
}

LiveIndex StartIndex = LIs->getInstructionIndex(Walker);
StartIndex = foundDef ? LIs->getDefIndex(StartIndex) :
LIs->getUseIndex(StartIndex);
LiveIndex EndIndex;
SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LIs->getUseIndex(EndIndex);
EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);

@@ -604,7 +607,7 @@ PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
NewVNs, LiveOut, Phis, false, true);

LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));
LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));

if (foundUse && RetVNI->isKill(StartIndex))
RetVNI->removeKill(StartIndex);
@@ -640,9 +643,9 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
// assume that we are not intrablock here.
if (Phis.count(MBB)) return Phis[MBB];

LiveIndex StartIndex = LIs->getMBBStartIdx(MBB);
SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
VNInfo *RetVNI = Phis[MBB] =
LI->getNextValue(LiveIndex(), /*FIXME*/ 0, false,
LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
LIs->getVNInfoAllocator());

if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
@@ -685,19 +688,19 @@ PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator Us
for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
I->second->setHasPHIKill(true);
LiveIndex KillIndex = LIs->getMBBEndIdx(I->first);
SlotIndex KillIndex = LIs->getMBBEndIdx(I->first);
if (!I->second->isKill(KillIndex))
I->second->addKill(KillIndex);
}
}

LiveIndex EndIndex;
SlotIndex EndIndex;
if (IsIntraBlock) {
EndIndex = LIs->getInstructionIndex(UseI);
EndIndex = LIs->getUseIndex(EndIndex);
EndIndex = EndIndex.getUseIndex();
} else
EndIndex = LIs->getMBBEndIdx(MBB);
LI->addRange(LiveRange(StartIndex, LIs->getNextSlot(EndIndex), RetVNI));
LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));
if (IsIntraBlock)
RetVNI->addKill(EndIndex);

@@ -733,8 +736,8 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
DE = MRI->def_end(); DI != DE; ++DI) {
Defs[(*DI).getParent()].insert(&*DI);

LiveIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LIs->getDefIndex(DefIdx);
SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = DefIdx.getDefIndex();

assert(DI->getOpcode() != TargetInstrInfo::PHI &&
"Following NewVN isPHIDef flag incorrect. Fix me!");
@@ -769,13 +772,13 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
// Add ranges for dead defs
for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
DE = MRI->def_end(); DI != DE; ++DI) {
LiveIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = LIs->getDefIndex(DefIdx);
SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
DefIdx = DefIdx.getDefIndex();

if (LI->liveAt(DefIdx)) continue;

VNInfo* DeadVN = NewVNs[&*DI];
LI->addRange(LiveRange(DefIdx, LIs->getNextSlot(DefIdx), DeadVN));
LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
DeadVN->addKill(DefIdx);
}

@@ -784,8 +787,8 @@ void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
VI != VE; ++VI) {
VNInfo* VNI = *VI;
for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
LiveIndex KillIdx = VNI->kills[i];
if (KillIdx.isPHIIndex())
SlotIndex KillIdx = VNI->kills[i];
if (KillIdx.isPHI())
continue;
MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
if (KillMI) {
@@ -826,14 +829,14 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
// Locate two-address redefinitions
for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
KE = OldVN->kills.end(); KI != KE; ++KI) {
assert(!KI->isPHIIndex() &&
assert(!KI->isPHI() &&
"VN previously reported having no PHI kills.");
MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
if (DefIdx == ~0U) continue;
if (MI->isRegTiedToUseOperand(DefIdx)) {
VNInfo* NextVN =
CurrLI->findDefinedVNInfoForRegInt(LIs->getDefIndex(*KI));
CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
if (NextVN == OldVN) continue;
Stack.push_back(NextVN);
}
@@ -865,10 +868,10 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
E = MRI->reg_end(); I != E; ++I) {
MachineOperand& MO = I.getOperand();
LiveIndex InstrIdx = LIs->getInstructionIndex(&*I);
SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);

if ((MO.isUse() && NewLI.liveAt(LIs->getUseIndex(InstrIdx))) ||
(MO.isDef() && NewLI.liveAt(LIs->getDefIndex(InstrIdx))))
if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
(MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
}

@@ -893,12 +896,12 @@ void PreAllocSplitting::RenumberValno(VNInfo* VN) {
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
MachineInstr* DefMI,
MachineBasicBlock::iterator RestorePt,
LiveIndex RestoreIdx,
SlotIndex RestoreIdx,
SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
MachineBasicBlock& MBB = *RestorePt->getParent();

MachineBasicBlock::iterator KillPt = BarrierMBB->end();
LiveIndex KillIdx;
SlotIndex KillIdx;
if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
else
@@ -911,8 +914,8 @@ bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);

ReconstructLiveInterval(CurrLI);
LiveIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
RematIdx = LIs->getDefIndex(RematIdx);
SlotIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
RematIdx = RematIdx.getDefIndex();
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));

++NumSplits;
@@ -968,7 +971,7 @@ MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
if (CurrSLI->hasAtLeastOneValue())
CurrSValNo = CurrSLI->getValNumInfo(0);
else
CurrSValNo = CurrSLI->getNextValue(LiveIndex(), 0, false,
CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
LSs->getVNInfoAllocator());
}

@@ -1052,11 +1055,14 @@ MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
/// so it would not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
DEBUG(errs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
<< " result: ");

CurrLI = LI;

// Find live range where current interval cross the barrier.
LiveInterval::iterator LR =
CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
VNInfo *ValNo = LR->valno;

assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
@@ -1065,8 +1071,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
? LIs->getInstructionFromIndex(ValNo->def) : NULL;

// If this would create a new join point, do not split.
if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent()))
if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
DEBUG(errs() << "FAILED (would create a new join point).\n");
return false;
}

// Find all references in the barrier mbb.
SmallPtrSet<MachineInstr*, 4> RefsInMBB;
@@ -1078,21 +1086,25 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
}

// Find a point to restore the value after the barrier.
LiveIndex RestoreIndex;
SlotIndex RestoreIndex;
MachineBasicBlock::iterator RestorePt =
findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
if (RestorePt == BarrierMBB->end())
if (RestorePt == BarrierMBB->end()) {
DEBUG(errs() << "FAILED (could not find a suitable restore point).\n");
return false;
}

if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt,
RestoreIndex, RefsInMBB))
return true;
RestoreIndex, RefsInMBB)) {
DEBUG(errs() << "success (remat).\n");
return true;
}

// Add a spill either before the barrier or after the definition.
MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
LiveIndex SpillIndex;
SlotIndex SpillIndex;
MachineInstr *SpillMI = NULL;
int SS = -1;
if (!ValNo->isDefAccurate()) {
@@ -1103,8 +1115,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
} else {
MachineBasicBlock::iterator SpillPt =
findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, SpillIndex);
if (SpillPt == BarrierMBB->begin())
if (SpillPt == BarrierMBB->begin()) {
DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
}
// Add spill.

SS = CreateSpillStackSlot(CurrLI->reg, RC);
@@ -1116,8 +1130,10 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
RestoreIndex, SpillIndex, SS)) {
// If it's already split, just restore the value. There is no need to spill
// the def again.
if (!DefMI)
if (!DefMI) {
DEBUG(errs() << "FAILED (def is dead).\n");
return false; // Def is dead. Do nothing.
}

if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
BarrierMBB, SS, RefsInMBB))) {
@@ -1129,12 +1145,16 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
// Add spill after the def and the last use before the barrier.
SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
RefsInMBB, SpillIndex);
if (SpillPt == DefMBB->begin())
if (SpillPt == DefMBB->begin()) {
DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
}
} else {
SpillPt = findNextEmptySlot(DefMBB, DefMI, SpillIndex);
if (SpillPt == DefMBB->end())
if (SpillPt == DefMBB->end()) {
DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
return false; // No gap to insert spill.
}
}
// Add spill.
SS = CreateSpillStackSlot(CurrLI->reg, RC);
@@ -1162,18 +1182,19 @@ bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
}

// Update spill stack slot live interval.
UpdateSpillSlotInterval(ValNo, LIs->getNextSlot(LIs->getUseIndex(SpillIndex)),
LIs->getDefIndex(RestoreIndex));
UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
RestoreIndex.getDefIndex());

ReconstructLiveInterval(CurrLI);

if (!FoldedRestore) {
LiveIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
RestoreIdx = LIs->getDefIndex(RestoreIdx);
SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
RestoreIdx = RestoreIdx.getDefIndex();
RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
}

++NumSplits;
DEBUG(errs() << "success.\n");
return true;
}

@@ -1254,8 +1275,8 @@ bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
// reaching definition (VNInfo).
for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
UE = MRI->use_end(); UI != UE; ++UI) {
LiveIndex index = LIs->getInstructionIndex(&*UI);
index = LIs->getUseIndex(index);
SlotIndex index = LIs->getInstructionIndex(&*UI);
index = index.getUseIndex();

const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
VNUseCount[LR->valno].insert(&*UI);
@@ -1404,7 +1425,7 @@ bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
if (LR->valno->hasPHIKill())
return false;

LiveIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
if (LR->end < MBBEnd)
return false;

@@ -1467,6 +1488,7 @@ bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
TII = TM->getInstrInfo();
MFI = MF.getFrameInfo();
MRI = &MF.getRegInfo();
SIs = &getAnalysis<SlotIndexes>();
LIs = &getAnalysis<LiveIntervals>();
LSs = &getAnalysis<LiveStacks>();
VRM = &getAnalysis<VirtRegMap>();

lib/CodeGen/ProcessImplicitDefs.cpp (new file, 231 lines)
@ -0,0 +1,231 @@
|
||||
//===---------------------- ProcessImplicitDefs.cpp -----------------------===//
|
||||
//
|
||||
// The LLVM Compiler Infrastructure
|
||||
//
|
||||
// This file is distributed under the University of Illinois Open Source
|
||||
// License. See LICENSE.TXT for details.
|
||||
//
|
||||
//===----------------------------------------------------------------------===//
|
||||
|
||||
#define DEBUG_TYPE "processimplicitdefs"
|
||||
|
||||
#include "llvm/CodeGen/ProcessImplicitDefs.h"
|
||||
|
||||
#include "llvm/ADT/DepthFirstIterator.h"
|
||||
#include "llvm/ADT/SmallSet.h"
|
||||
#include "llvm/Analysis/AliasAnalysis.h"
|
||||
#include "llvm/CodeGen/LiveVariables.h"
|
||||
#include "llvm/CodeGen/MachineInstr.h"
|
||||
#include "llvm/CodeGen/MachineRegisterInfo.h"
|
||||
#include "llvm/CodeGen/Passes.h"
|
||||
#include "llvm/Support/Debug.h"
|
||||
#include "llvm/Target/TargetInstrInfo.h"
|
||||
#include "llvm/Target/TargetRegisterInfo.h"
|
||||
|
||||
|
||||
using namespace llvm;
|
||||
|
||||
char ProcessImplicitDefs::ID = 0;
|
||||
static RegisterPass<ProcessImplicitDefs> X("processimpdefs",
|
||||
"Process Implicit Definitions.");
|
||||
|
||||
void ProcessImplicitDefs::getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
AU.setPreservesCFG();
|
||||
AU.addPreserved<AliasAnalysis>();
|
||||
AU.addPreserved<LiveVariables>();
|
||||
AU.addRequired<LiveVariables>();
|
||||
AU.addPreservedID(MachineLoopInfoID);
|
||||
AU.addPreservedID(MachineDominatorsID);
|
||||
AU.addPreservedID(TwoAddressInstructionPassID);
|
||||
AU.addPreservedID(PHIEliminationID);
|
||||
MachineFunctionPass::getAnalysisUsage(AU);
|
||||
}
|
||||
|
||||
bool ProcessImplicitDefs::CanTurnIntoImplicitDef(MachineInstr *MI,
|
||||
unsigned Reg, unsigned OpIdx,
|
||||
const TargetInstrInfo *tii_) {
|
||||
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
|
||||
if (tii_->isMoveInstr(*MI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
|
||||
Reg == SrcReg)
|
||||
return true;
|
||||
|
||||
if (OpIdx == 2 && MI->getOpcode() == TargetInstrInfo::SUBREG_TO_REG)
|
||||
return true;
|
||||
if (OpIdx == 1 && MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
|
||||
return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
/// processImplicitDefs - Process IMPLICIT_DEF instructions and make sure
|
||||
/// there is one implicit_def for each use. Add isUndef marker to
|
||||
/// implicit_def defs and their uses.
|
||||
bool ProcessImplicitDefs::runOnMachineFunction(MachineFunction &fn) {
|
||||
|
||||
DEBUG(errs() << "********** PROCESS IMPLICIT DEFS **********\n"
|
||||
<< "********** Function: "
|
||||
<< ((Value*)fn.getFunction())->getName() << '\n');
|
||||
|
||||
bool Changed = false;
|
||||
|
||||
const TargetInstrInfo *tii_ = fn.getTarget().getInstrInfo();
|
||||
const TargetRegisterInfo *tri_ = fn.getTarget().getRegisterInfo();
|
||||
MachineRegisterInfo *mri_ = &fn.getRegInfo();
|
||||
|
||||
LiveVariables *lv_ = &getAnalysis<LiveVariables>();
|
||||
|
||||
SmallSet<unsigned, 8> ImpDefRegs;
|
||||
SmallVector<MachineInstr*, 8> ImpDefMIs;
|
||||
MachineBasicBlock *Entry = fn.begin();
|
||||
SmallPtrSet<MachineBasicBlock*,16> Visited;
|
||||
|
||||
for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
|
||||
DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
|
||||
DFI != E; ++DFI) {
|
||||
MachineBasicBlock *MBB = *DFI;
|
||||
for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
|
||||
I != E; ) {
|
||||
MachineInstr *MI = &*I;
|
||||
++I;
|
||||
if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF) {
|
||||
unsigned Reg = MI->getOperand(0).getReg();
|
||||
ImpDefRegs.insert(Reg);
|
||||
if (TargetRegisterInfo::isPhysicalRegister(Reg)) {
|
||||
for (const unsigned *SS = tri_->getSubRegisters(Reg); *SS; ++SS)
|
||||
ImpDefRegs.insert(*SS);
|
||||
}
|
||||
ImpDefMIs.push_back(MI);
|
||||
continue;
|
||||
}
|
||||
|
||||
if (MI->getOpcode() == TargetInstrInfo::INSERT_SUBREG) {
|
||||
MachineOperand &MO = MI->getOperand(2);
|
||||
if (ImpDefRegs.count(MO.getReg())) {
|
||||
// %reg1032<def> = INSERT_SUBREG %reg1032, undef, 2
|
||||
// This is an identity copy, eliminate it now.
|
||||
if (MO.isKill()) {
|
||||
LiveVariables::VarInfo& vi = lv_->getVarInfo(MO.getReg());
|
||||
vi.removeKill(MI);
|
||||
}
|
||||
MI->eraseFromParent();
|
||||
Changed = true;
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
bool ChangedToImpDef = false;
|
||||
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
|
||||
MachineOperand& MO = MI->getOperand(i);
|
||||
if (!MO.isReg() || !MO.isUse() || MO.isUndef())
|
||||
continue;
|
||||
unsigned Reg = MO.getReg();
|
||||
if (!Reg)
|
||||
continue;
|
||||
if (!ImpDefRegs.count(Reg))
|
||||
continue;
|
||||
// Use is a copy, just turn it into an implicit_def.
|
||||
if (CanTurnIntoImplicitDef(MI, Reg, i, tii_)) {
|
||||
bool isKill = MO.isKill();
|
||||
MI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
|
||||
for (int j = MI->getNumOperands() - 1, ee = 0; j > ee; --j)
|
||||
MI->RemoveOperand(j);
|
||||
if (isKill) {
|
||||
ImpDefRegs.erase(Reg);
|
||||
LiveVariables::VarInfo& vi = lv_->getVarInfo(Reg);
|
||||
vi.removeKill(MI);
|
||||
}
|
||||
ChangedToImpDef = true;
|
||||
Changed = true;
|
||||
break;
|
||||
}
|
||||
|
||||
Changed = true;
|
||||
MO.setIsUndef();
|
||||
if (MO.isKill() || MI->isRegTiedToDefOperand(i)) {
|
||||
// Make sure other uses of
|
||||
for (unsigned j = i+1; j != e; ++j) {
|
||||
MachineOperand &MOJ = MI->getOperand(j);
|
||||
if (MOJ.isReg() && MOJ.isUse() && MOJ.getReg() == Reg)
|
||||
MOJ.setIsUndef();
|
||||
}
|
||||
ImpDefRegs.erase(Reg);
|
||||
}
|
||||
}
|
||||
|
||||
if (ChangedToImpDef) {
|
||||
// Backtrack to process this new implicit_def.
|
||||
--I;
|
||||
} else {
|
||||
for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
|
||||
MachineOperand& MO = MI->getOperand(i);
|
||||
if (!MO.isReg() || !MO.isDef())
|
||||
continue;
|
||||
ImpDefRegs.erase(MO.getReg());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Any outstanding liveout implicit_def's?
|
||||
for (unsigned i = 0, e = ImpDefMIs.size(); i != e; ++i) {
|
||||
MachineInstr *MI = ImpDefMIs[i];
|
||||
unsigned Reg = MI->getOperand(0).getReg();
|
||||
if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
|
||||
!ImpDefRegs.count(Reg)) {
|
||||
// Delete all "local" implicit_def's. That include those which define
|
||||
// physical registers since they cannot be liveout.
|
||||
MI->eraseFromParent();
|
||||
Changed = true;
|
||||
continue;
|
||||
}
|
||||
|
||||
// If there are multiple defs of the same register and at least one
|
||||
// is not an implicit_def, do not insert implicit_def's before the
|
||||
// uses.
|
||||
bool Skip = false;
|
||||
for (MachineRegisterInfo::def_iterator DI = mri_->def_begin(Reg),
|
||||
DE = mri_->def_end(); DI != DE; ++DI) {
|
||||
if (DI->getOpcode() != TargetInstrInfo::IMPLICIT_DEF) {
|
||||
Skip = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (Skip)
|
||||
continue;
|
||||
|
||||
// The only implicit_def which we want to keep are those that are live
|
||||
// out of its block.
|
||||
MI->eraseFromParent();
|
||||
Changed = true;
|
||||
|
||||
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(Reg),
|
||||
UE = mri_->use_end(); UI != UE; ) {
|
||||
MachineOperand &RMO = UI.getOperand();
|
||||
MachineInstr *RMI = &*UI;
|
||||
++UI;
|
||||
MachineBasicBlock *RMBB = RMI->getParent();
|
||||
if (RMBB == MBB)
|
||||
continue;
|
||||
|
||||
// Turn a copy use into an implicit_def.
|
||||
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
|
||||
if (tii_->isMoveInstr(*RMI, SrcReg, DstReg, SrcSubReg, DstSubReg) &&
|
||||
Reg == SrcReg) {
|
||||
RMI->setDesc(tii_->get(TargetInstrInfo::IMPLICIT_DEF));
|
||||
for (int j = RMI->getNumOperands() - 1, ee = 0; j > ee; --j)
|
||||
RMI->RemoveOperand(j);
|
||||
continue;
|
||||
}
|
||||
|
||||
const TargetRegisterClass* RC = mri_->getRegClass(Reg);
|
||||
unsigned NewVReg = mri_->createVirtualRegister(RC);
|
||||
RMO.setReg(NewVReg);
|
||||
RMO.setIsUndef();
|
||||
RMO.setIsKill();
|
||||
}
|
||||
}
|
||||
ImpDefRegs.clear();
|
||||
ImpDefMIs.clear();
|
||||
}
|
||||
|
||||
return Changed;
|
||||
}
|
||||
|
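The loop above either rewrites a copy of an implicitly defined register into a fresh IMPLICIT_DEF or marks the remaining use operands undef and, on a kill, stops tracking the register. A rough standalone sketch of just that operand-marking step (a toy model for illustration only; ToyOperand, ToyInstr and markImplicitDefUses are invented names, not LLVM types):

#include <cassert>
#include <set>
#include <vector>

// Toy stand-ins for the machine operand/instruction types, for illustration.
struct ToyOperand {
  unsigned reg;
  bool isUse;
  bool isKill;
  bool isUndef;
};

struct ToyInstr {
  std::vector<ToyOperand> operands;
};

// Mark every use of an implicitly defined register as undef; a kill of such a
// register ends its "implicitly defined" status (compare the erase() above).
static void markImplicitDefUses(ToyInstr &mi, std::set<unsigned> &impDefRegs) {
  for (size_t i = 0, e = mi.operands.size(); i != e; ++i) {
    ToyOperand &op = mi.operands[i];
    if (!op.isUse || op.reg == 0 || !impDefRegs.count(op.reg))
      continue;
    op.isUndef = true;
    if (op.isKill)
      impDefRegs.erase(op.reg);
  }
}

int main() {
  std::set<unsigned> impDefRegs;
  impDefRegs.insert(1027);

  ToyInstr mi;
  ToyOperand use = { 1027, true, true, false };
  mi.operands.push_back(use);

  markImplicitDefUses(mi, impDefRegs);
  assert(mi.operands[0].isUndef && impDefRegs.empty());
  return 0;
}

The real pass works on MachineOperand flags rather than a toy struct, but the bookkeeping is the same: undef uses no longer pin a value, and a kill drops the register from the implicit-def set.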
@ -145,6 +145,7 @@ namespace {
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
AU.setPreservesCFG();
|
||||
AU.addRequired<LiveIntervals>();
|
||||
AU.addPreserved<SlotIndexes>();
|
||||
if (StrongPHIElim)
|
||||
AU.addRequiredID(StrongPHIEliminationID);
|
||||
// Make sure PassManager knows which analyses to make available
|
||||
@ -175,11 +176,11 @@ namespace {
|
||||
|
||||
/// processActiveIntervals - expire old intervals and move non-overlapping
|
||||
/// ones to the inactive list.
|
||||
void processActiveIntervals(LiveIndex CurPoint);
|
||||
void processActiveIntervals(SlotIndex CurPoint);
|
||||
|
||||
/// processInactiveIntervals - expire old intervals and move overlapping
|
||||
/// ones to the active list.
|
||||
void processInactiveIntervals(LiveIndex CurPoint);
|
||||
void processInactiveIntervals(SlotIndex CurPoint);
|
||||
|
||||
/// hasNextReloadInterval - Return the next liveinterval that's being
|
||||
/// defined by a reload from the same SS as the specified one.
|
||||
@ -365,7 +366,7 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
|
||||
return Reg;
|
||||
|
||||
VNInfo *vni = cur.begin()->valno;
|
||||
if ((vni->def == LiveIndex()) ||
|
||||
if ((vni->def == SlotIndex()) ||
|
||||
vni->isUnused() || !vni->isDefAccurate())
|
||||
return Reg;
|
||||
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
|
||||
@ -402,7 +403,7 @@ unsigned RALinScan::attemptTrivialCoalescing(LiveInterval &cur, unsigned Reg) {
|
||||
if (!O.isKill())
|
||||
continue;
|
||||
MachineInstr *MI = &*I;
|
||||
if (SrcLI.liveAt(li_->getDefIndex(li_->getInstructionIndex(MI))))
|
||||
if (SrcLI.liveAt(li_->getInstructionIndex(MI).getDefIndex()))
|
||||
O.setIsKill(false);
|
||||
}
|
||||
}
|
||||
@ -479,10 +480,17 @@ void RALinScan::initIntervalSets()
|
||||
|
||||
for (LiveIntervals::iterator i = li_->begin(), e = li_->end(); i != e; ++i) {
|
||||
if (TargetRegisterInfo::isPhysicalRegister(i->second->reg)) {
|
||||
mri_->setPhysRegUsed(i->second->reg);
|
||||
fixed_.push_back(std::make_pair(i->second, i->second->begin()));
|
||||
} else
|
||||
unhandled_.push(i->second);
|
||||
if (!i->second->empty()) {
|
||||
mri_->setPhysRegUsed(i->second->reg);
|
||||
fixed_.push_back(std::make_pair(i->second, i->second->begin()));
|
||||
}
|
||||
} else {
|
||||
if (i->second->empty()) {
|
||||
assignRegOrStackSlotAtInterval(i->second);
|
||||
}
|
||||
else
|
||||
unhandled_.push(i->second);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -502,13 +510,13 @@ void RALinScan::linearScan() {
|
||||
++NumIters;
|
||||
DEBUG(errs() << "\n*** CURRENT ***: " << *cur << '\n');
|
||||
|
||||
if (!cur->empty()) {
|
||||
processActiveIntervals(cur->beginIndex());
|
||||
processInactiveIntervals(cur->beginIndex());
|
||||
assert(!cur->empty() && "Empty interval in unhandled set.");
|
||||
|
||||
assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
|
||||
"Can only allocate virtual registers!");
|
||||
}
|
||||
processActiveIntervals(cur->beginIndex());
|
||||
processInactiveIntervals(cur->beginIndex());
|
||||
|
||||
assert(TargetRegisterInfo::isVirtualRegister(cur->reg) &&
|
||||
"Can only allocate virtual registers!");
|
||||
|
||||
// Allocating a virtual register. Try to find a free
|
||||
// physical register or spill an interval (possibly this one) in order to
|
||||
@ -585,7 +593,7 @@ void RALinScan::linearScan() {
|
||||
|
||||
/// processActiveIntervals - expire old intervals and move non-overlapping ones
|
||||
/// to the inactive list.
|
||||
void RALinScan::processActiveIntervals(LiveIndex CurPoint)
|
||||
void RALinScan::processActiveIntervals(SlotIndex CurPoint)
|
||||
{
|
||||
DEBUG(errs() << "\tprocessing active intervals:\n");
|
||||
|
||||
@ -631,7 +639,7 @@ void RALinScan::processActiveIntervals(LiveIndex CurPoint)
|
||||
|
||||
/// processInactiveIntervals - expire old intervals and move overlapping
|
||||
/// ones to the active list.
|
||||
void RALinScan::processInactiveIntervals(LiveIndex CurPoint)
|
||||
void RALinScan::processInactiveIntervals(SlotIndex CurPoint)
|
||||
{
|
||||
DEBUG(errs() << "\tprocessing inactive intervals:\n");
|
||||
|
||||
@ -712,7 +720,7 @@ FindIntervalInVector(RALinScan::IntervalPtrs &IP, LiveInterval *LI) {
|
||||
return IP.end();
|
||||
}
|
||||
|
||||
static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, LiveIndex Point){
|
||||
static void RevertVectorIteratorsTo(RALinScan::IntervalPtrs &V, SlotIndex Point){
|
||||
for (unsigned i = 0, e = V.size(); i != e; ++i) {
|
||||
RALinScan::IntervalPtr &IP = V[i];
|
||||
LiveInterval::iterator I = std::upper_bound(IP.first->begin(),
|
||||
@ -738,7 +746,7 @@ static void addStackInterval(LiveInterval *cur, LiveStacks *ls_,
|
||||
if (SI.hasAtLeastOneValue())
|
||||
VNI = SI.getValNumInfo(0);
|
||||
else
|
||||
VNI = SI.getNextValue(LiveIndex(), 0, false,
|
||||
VNI = SI.getNextValue(SlotIndex(), 0, false,
|
||||
ls_->getVNInfoAllocator());
|
||||
|
||||
LiveInterval &RI = li_->getInterval(cur->reg);
|
||||
@ -906,7 +914,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
backUpRegUses();
|
||||
|
||||
std::vector<std::pair<unsigned, float> > SpillWeightsToAdd;
|
||||
LiveIndex StartPosition = cur->beginIndex();
|
||||
SlotIndex StartPosition = cur->beginIndex();
|
||||
const TargetRegisterClass *RCLeader = RelatedRegClasses.getLeaderValue(RC);
|
||||
|
||||
// If start of this live interval is defined by a move instruction and its
|
||||
@ -916,7 +924,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
// one, e.g. X86::mov32to32_. These move instructions are not coalescable.
|
||||
if (!vrm_->getRegAllocPref(cur->reg) && cur->hasAtLeastOneValue()) {
|
||||
VNInfo *vni = cur->begin()->valno;
|
||||
if ((vni->def != LiveIndex()) && !vni->isUnused() &&
|
||||
if ((vni->def != SlotIndex()) && !vni->isUnused() &&
|
||||
vni->isDefAccurate()) {
|
||||
MachineInstr *CopyMI = li_->getInstructionFromIndex(vni->def);
|
||||
unsigned SrcReg, DstReg, SrcSubReg, DstSubReg;
|
||||
@ -1118,6 +1126,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
DowngradedRegs.clear();
|
||||
assignRegOrStackSlotAtInterval(cur);
|
||||
} else {
|
||||
assert(false && "Ran out of registers during register allocation!");
|
||||
llvm_report_error("Ran out of registers during register allocation!");
|
||||
}
|
||||
return;
|
||||
@ -1172,7 +1181,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
LiveInterval *ReloadLi = added[i];
|
||||
if (ReloadLi->weight == HUGE_VALF &&
|
||||
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
|
||||
LiveIndex ReloadIdx = ReloadLi->beginIndex();
|
||||
SlotIndex ReloadIdx = ReloadLi->beginIndex();
|
||||
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
|
||||
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
|
||||
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
|
||||
@ -1242,7 +1251,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
spilled.insert(sli->reg);
|
||||
}
|
||||
|
||||
LiveIndex earliestStart = earliestStartInterval->beginIndex();
|
||||
SlotIndex earliestStart = earliestStartInterval->beginIndex();
|
||||
|
||||
DEBUG(errs() << "\t\trolling back to: " << earliestStart << '\n');
|
||||
|
||||
@ -1323,7 +1332,7 @@ void RALinScan::assignRegOrStackSlotAtInterval(LiveInterval* cur) {
|
||||
LiveInterval *ReloadLi = added[i];
|
||||
if (ReloadLi->weight == HUGE_VALF &&
|
||||
li_->getApproximateInstructionCount(*ReloadLi) == 0) {
|
||||
LiveIndex ReloadIdx = ReloadLi->beginIndex();
|
||||
SlotIndex ReloadIdx = ReloadLi->beginIndex();
|
||||
MachineBasicBlock *ReloadMBB = li_->getMBBFromIndex(ReloadIdx);
|
||||
int ReloadSS = vrm_->getStackSlot(ReloadLi->reg);
|
||||
if (LastReloadMBB == ReloadMBB && LastReloadSS == ReloadSS) {
|
||||
|
@ -85,6 +85,8 @@ namespace {
|
||||
|
||||
/// PBQP analysis usage.
|
||||
virtual void getAnalysisUsage(AnalysisUsage &au) const {
|
||||
au.addRequired<SlotIndexes>();
|
||||
au.addPreserved<SlotIndexes>();
|
||||
au.addRequired<LiveIntervals>();
|
||||
//au.addRequiredID(SplitCriticalEdgesID);
|
||||
au.addRequired<RegisterCoalescer>();
|
||||
@ -684,7 +686,7 @@ void PBQPRegAlloc::addStackInterval(const LiveInterval *spilled,
|
||||
vni = stackInterval.getValNumInfo(0);
|
||||
else
|
||||
vni = stackInterval.getNextValue(
|
||||
LiveIndex(), 0, false, lss->getVNInfoAllocator());
|
||||
SlotIndex(), 0, false, lss->getVNInfoAllocator());
|
||||
|
||||
LiveInterval &rhsInterval = lis->getInterval(spilled->reg);
|
||||
stackInterval.MergeRangesInAsValue(rhsInterval, vni);
|
||||
@ -832,7 +834,7 @@ bool PBQPRegAlloc::runOnMachineFunction(MachineFunction &MF) {
|
||||
tm = &mf->getTarget();
|
||||
tri = tm->getRegisterInfo();
|
||||
tii = tm->getInstrInfo();
|
||||
mri = &mf->getRegInfo();
|
||||
mri = &mf->getRegInfo();
|
||||
|
||||
lis = &getAnalysis<LiveIntervals>();
|
||||
lss = &getAnalysis<LiveStacks>();
|
||||
|
@ -76,6 +76,7 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
AU.addRequired<AliasAnalysis>();
|
||||
AU.addRequired<LiveIntervals>();
|
||||
AU.addPreserved<LiveIntervals>();
|
||||
AU.addPreserved<SlotIndexes>();
|
||||
AU.addRequired<MachineLoopInfo>();
|
||||
AU.addPreserved<MachineLoopInfo>();
|
||||
AU.addPreservedID(MachineDominatorsID);
|
||||
@ -105,7 +106,7 @@ void SimpleRegisterCoalescing::getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
|
||||
LiveInterval &IntB,
|
||||
MachineInstr *CopyMI) {
|
||||
LiveIndex CopyIdx = li_->getDefIndex(li_->getInstructionIndex(CopyMI));
|
||||
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getDefIndex();
|
||||
|
||||
// BValNo is a value number in B that is defined by a copy from A. 'B3' in
|
||||
// the example above.
|
||||
@ -120,7 +121,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
|
||||
assert(BValNo->def == CopyIdx && "Copy doesn't define the value?");
|
||||
|
||||
// AValNo is the value number in A that defines the copy, A3 in the example.
|
||||
LiveIndex CopyUseIdx = li_->getUseIndex(CopyIdx);
|
||||
SlotIndex CopyUseIdx = CopyIdx.getUseIndex();
|
||||
LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
|
||||
assert(ALR != IntA.end() && "Live range not found!");
|
||||
VNInfo *AValNo = ALR->valno;
|
||||
@ -158,13 +159,13 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
|
||||
|
||||
// Get the LiveRange in IntB that this value number starts with.
|
||||
LiveInterval::iterator ValLR =
|
||||
IntB.FindLiveRangeContaining(li_->getPrevSlot(AValNo->def));
|
||||
IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
|
||||
assert(ValLR != IntB.end() && "Live range not found!");
|
||||
|
||||
// Make sure that the end of the live range is inside the same block as
|
||||
// CopyMI.
|
||||
MachineInstr *ValLREndInst =
|
||||
li_->getInstructionFromIndex(li_->getPrevSlot(ValLR->end));
|
||||
li_->getInstructionFromIndex(ValLR->end.getPrevSlot());
|
||||
if (!ValLREndInst ||
|
||||
ValLREndInst->getParent() != CopyMI->getParent()) return false;
|
||||
|
||||
@ -193,7 +194,7 @@ bool SimpleRegisterCoalescing::AdjustCopiesBackFrom(LiveInterval &IntA,
|
||||
IntB.print(errs(), tri_);
|
||||
});
|
||||
|
||||
LiveIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
|
||||
SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start;
|
||||
// We are about to delete CopyMI, so need to remove it as the 'instruction
|
||||
// that defines this value #'. Update the valnum with the new defining
|
||||
// instruction #.
|
||||
@ -306,8 +307,8 @@ TransferImplicitOps(MachineInstr *MI, MachineInstr *NewMI) {
|
||||
bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
LiveInterval &IntB,
|
||||
MachineInstr *CopyMI) {
|
||||
LiveIndex CopyIdx =
|
||||
li_->getDefIndex(li_->getInstructionIndex(CopyMI));
|
||||
SlotIndex CopyIdx =
|
||||
li_->getInstructionIndex(CopyMI).getDefIndex();
|
||||
|
||||
// FIXME: For now, only eliminate the copy by commuting its def when the
|
||||
// source register is a virtual register. We want to guard against cases
|
||||
@ -330,7 +331,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
|
||||
// AValNo is the value number in A that defines the copy, A3 in the example.
|
||||
LiveInterval::iterator ALR =
|
||||
IntA.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));
|
||||
IntA.FindLiveRangeContaining(CopyIdx.getUseIndex()); //
|
||||
|
||||
assert(ALR != IntA.end() && "Live range not found!");
|
||||
VNInfo *AValNo = ALR->valno;
|
||||
@ -376,7 +377,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
for (MachineRegisterInfo::use_iterator UI = mri_->use_begin(IntA.reg),
|
||||
UE = mri_->use_end(); UI != UE; ++UI) {
|
||||
MachineInstr *UseMI = &*UI;
|
||||
LiveIndex UseIdx = li_->getInstructionIndex(UseMI);
|
||||
SlotIndex UseIdx = li_->getInstructionIndex(UseMI);
|
||||
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
|
||||
if (ULR == IntA.end())
|
||||
continue;
|
||||
@ -401,7 +402,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
bool BHasPHIKill = BValNo->hasPHIKill();
|
||||
SmallVector<VNInfo*, 4> BDeadValNos;
|
||||
VNInfo::KillSet BKills;
|
||||
std::map<LiveIndex, LiveIndex> BExtend;
|
||||
std::map<SlotIndex, SlotIndex> BExtend;
|
||||
|
||||
// If ALR and BLR overlaps and end of BLR extends beyond end of ALR, e.g.
|
||||
// A = or A, B
|
||||
@ -428,7 +429,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
++UI;
|
||||
if (JoinedCopies.count(UseMI))
|
||||
continue;
|
||||
LiveIndex UseIdx= li_->getUseIndex(li_->getInstructionIndex(UseMI));
|
||||
SlotIndex UseIdx = li_->getInstructionIndex(UseMI).getUseIndex();
|
||||
LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx);
|
||||
if (ULR == IntA.end() || ULR->valno != AValNo)
|
||||
continue;
|
||||
@ -439,7 +440,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
if (Extended)
|
||||
UseMO.setIsKill(false);
|
||||
else
|
||||
BKills.push_back(li_->getNextSlot(UseIdx));
|
||||
BKills.push_back(UseIdx.getDefIndex());
|
||||
}
|
||||
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
|
||||
if (!tii_->isMoveInstr(*UseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
|
||||
@ -448,7 +449,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
// This copy will become a noop. If it's defining a new val#,
|
||||
// remove that val# as well. However this live range is being
|
||||
// extended to the end of the existing live range defined by the copy.
|
||||
LiveIndex DefIdx = li_->getDefIndex(UseIdx);
|
||||
SlotIndex DefIdx = UseIdx.getDefIndex();
|
||||
const LiveRange *DLR = IntB.getLiveRangeContaining(DefIdx);
|
||||
BHasPHIKill |= DLR->valno->hasPHIKill();
|
||||
assert(DLR->valno->def == DefIdx);
|
||||
@ -495,8 +496,8 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end();
|
||||
AI != AE; ++AI) {
|
||||
if (AI->valno != AValNo) continue;
|
||||
LiveIndex End = AI->end;
|
||||
std::map<LiveIndex, LiveIndex>::iterator
|
||||
SlotIndex End = AI->end;
|
||||
std::map<SlotIndex, SlotIndex>::iterator
|
||||
EI = BExtend.find(End);
|
||||
if (EI != BExtend.end())
|
||||
End = EI->second;
|
||||
@ -507,7 +508,7 @@ bool SimpleRegisterCoalescing::RemoveCopyByCommutingDef(LiveInterval &IntA,
|
||||
if (BHasSubRegs) {
|
||||
for (const unsigned *SR = tri_->getSubRegisters(IntB.reg); *SR; ++SR) {
|
||||
LiveInterval &SRLI = li_->getInterval(*SR);
|
||||
SRLI.MergeInClobberRange(AI->start, End, li_->getVNInfoAllocator());
|
||||
SRLI.MergeInClobberRange(*li_, AI->start, End, li_->getVNInfoAllocator());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -551,7 +552,7 @@ static bool isSameOrFallThroughBB(MachineBasicBlock *MBB,
|
||||
/// from a physical register live interval as well as from the live intervals
|
||||
/// of its sub-registers.
|
||||
static void removeRange(LiveInterval &li,
|
||||
LiveIndex Start, LiveIndex End,
|
||||
SlotIndex Start, SlotIndex End,
|
||||
LiveIntervals *li_, const TargetRegisterInfo *tri_) {
|
||||
li.removeRange(Start, End, true);
|
||||
if (TargetRegisterInfo::isPhysicalRegister(li.reg)) {
|
||||
@ -559,8 +560,9 @@ static void removeRange(LiveInterval &li,
|
||||
if (!li_->hasInterval(*SR))
|
||||
continue;
|
||||
LiveInterval &sli = li_->getInterval(*SR);
|
||||
LiveIndex RemoveStart = Start;
|
||||
LiveIndex RemoveEnd = Start;
|
||||
SlotIndex RemoveStart = Start;
|
||||
SlotIndex RemoveEnd = Start;
|
||||
|
||||
while (RemoveEnd != End) {
|
||||
LiveInterval::iterator LR = sli.FindLiveRangeContaining(RemoveStart);
|
||||
if (LR == sli.end())
|
||||
@ -577,14 +579,14 @@ static void removeRange(LiveInterval &li,
|
||||
/// as the copy instruction, trim the live interval to the last use and return
|
||||
/// true.
|
||||
bool
|
||||
SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
|
||||
SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
|
||||
MachineBasicBlock *CopyMBB,
|
||||
LiveInterval &li,
|
||||
const LiveRange *LR) {
|
||||
LiveIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
|
||||
LiveIndex LastUseIdx;
|
||||
SlotIndex MBBStart = li_->getMBBStartIdx(CopyMBB);
|
||||
SlotIndex LastUseIdx;
|
||||
MachineOperand *LastUse =
|
||||
lastRegisterUse(LR->start, li_->getPrevSlot(CopyIdx), li.reg, LastUseIdx);
|
||||
lastRegisterUse(LR->start, CopyIdx.getPrevSlot(), li.reg, LastUseIdx);
|
||||
if (LastUse) {
|
||||
MachineInstr *LastUseMI = LastUse->getParent();
|
||||
if (!isSameOrFallThroughBB(LastUseMI->getParent(), CopyMBB, tii_)) {
|
||||
@ -603,8 +605,8 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
|
||||
// There are uses before the copy, just shorten the live range to the end
|
||||
// of last use.
|
||||
LastUse->setIsKill();
|
||||
removeRange(li, li_->getDefIndex(LastUseIdx), LR->end, li_, tri_);
|
||||
LR->valno->addKill(li_->getNextSlot(LastUseIdx));
|
||||
removeRange(li, LastUseIdx.getDefIndex(), LR->end, li_, tri_);
|
||||
LR->valno->addKill(LastUseIdx.getDefIndex());
|
||||
unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
|
||||
if (tii_->isMoveInstr(*LastUseMI, SrcReg, DstReg, SrcSubIdx, DstSubIdx) &&
|
||||
DstReg == li.reg) {
|
||||
@ -617,7 +619,7 @@ SimpleRegisterCoalescing::TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
|
||||
|
||||
// Is it livein?
|
||||
if (LR->start <= MBBStart && LR->end > MBBStart) {
|
||||
if (LR->start == LiveIndex()) {
|
||||
if (LR->start == li_->getZeroIndex()) {
|
||||
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
|
||||
// Live-in to the function but dead. Remove it from entry live-in set.
|
||||
mf_->begin()->removeLiveIn(li.reg);
|
||||
@ -634,7 +636,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
|
||||
unsigned DstReg,
|
||||
unsigned DstSubIdx,
|
||||
MachineInstr *CopyMI) {
|
||||
LiveIndex CopyIdx = li_->getUseIndex(li_->getInstructionIndex(CopyMI));
|
||||
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI).getUseIndex();
|
||||
LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx);
|
||||
assert(SrcLR != SrcInt.end() && "Live range not found!");
|
||||
VNInfo *ValNo = SrcLR->valno;
|
||||
@ -683,7 +685,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
|
||||
return false;
|
||||
}
|
||||
|
||||
LiveIndex DefIdx = li_->getDefIndex(CopyIdx);
|
||||
SlotIndex DefIdx = CopyIdx.getDefIndex();
|
||||
const LiveRange *DLR= li_->getInterval(DstReg).getLiveRangeContaining(DefIdx);
|
||||
DLR->valno->setCopy(0);
|
||||
// Don't forget to update sub-register intervals.
|
||||
@ -716,7 +718,7 @@ bool SimpleRegisterCoalescing::ReMaterializeTrivialDef(LiveInterval &SrcInt,
|
||||
// should mark it dead:
|
||||
if (DefMI->getParent() == MBB) {
|
||||
DefMI->addRegisterDead(SrcInt.reg, tri_);
|
||||
SrcLR->end = li_->getNextSlot(SrcLR->start);
|
||||
SrcLR->end = SrcLR->start.getNextSlot();
|
||||
}
|
||||
}
|
||||
|
||||
@ -815,8 +817,8 @@ SimpleRegisterCoalescing::UpdateRegDefsUses(unsigned SrcReg, unsigned DstReg,
|
||||
(TargetRegisterInfo::isVirtualRegister(CopyDstReg) ||
|
||||
allocatableRegs_[CopyDstReg])) {
|
||||
LiveInterval &LI = li_->getInterval(CopyDstReg);
|
||||
LiveIndex DefIdx =
|
||||
li_->getDefIndex(li_->getInstructionIndex(UseMI));
|
||||
SlotIndex DefIdx =
|
||||
li_->getInstructionIndex(UseMI).getDefIndex();
|
||||
if (const LiveRange *DLR = LI.getLiveRangeContaining(DefIdx)) {
|
||||
if (DLR->valno->def == DefIdx)
|
||||
DLR->valno->setCopy(UseMI);
|
||||
@ -835,12 +837,12 @@ void SimpleRegisterCoalescing::RemoveUnnecessaryKills(unsigned Reg,
|
||||
if (!UseMO.isKill())
|
||||
continue;
|
||||
MachineInstr *UseMI = UseMO.getParent();
|
||||
LiveIndex UseIdx =
|
||||
li_->getUseIndex(li_->getInstructionIndex(UseMI));
|
||||
SlotIndex UseIdx =
|
||||
li_->getInstructionIndex(UseMI).getUseIndex();
|
||||
const LiveRange *LR = LI.getLiveRangeContaining(UseIdx);
|
||||
if (!LR ||
|
||||
(!LR->valno->isKill(li_->getNextSlot(UseIdx)) &&
|
||||
LR->valno->def != li_->getNextSlot(UseIdx))) {
|
||||
(!LR->valno->isKill(UseIdx.getDefIndex()) &&
|
||||
LR->valno->def != UseIdx.getDefIndex())) {
|
||||
// Interesting problem. After coalescing reg1027's def and kill are both
|
||||
// at the same point: %reg1027,0.000000e+00 = [56,814:0) 0@70-(814)
|
||||
//
|
||||
@ -881,16 +883,16 @@ static bool removeIntervalIfEmpty(LiveInterval &li, LiveIntervals *li_,
|
||||
/// Return true if live interval is removed.
|
||||
bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
|
||||
MachineInstr *CopyMI) {
|
||||
LiveIndex CopyIdx = li_->getInstructionIndex(CopyMI);
|
||||
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
|
||||
LiveInterval::iterator MLR =
|
||||
li.FindLiveRangeContaining(li_->getDefIndex(CopyIdx));
|
||||
li.FindLiveRangeContaining(CopyIdx.getDefIndex());
|
||||
if (MLR == li.end())
|
||||
return false; // Already removed by ShortenDeadCopySrcLiveRange.
|
||||
LiveIndex RemoveStart = MLR->start;
|
||||
LiveIndex RemoveEnd = MLR->end;
|
||||
LiveIndex DefIdx = li_->getDefIndex(CopyIdx);
|
||||
SlotIndex RemoveStart = MLR->start;
|
||||
SlotIndex RemoveEnd = MLR->end;
|
||||
SlotIndex DefIdx = CopyIdx.getDefIndex();
|
||||
// Remove the liverange that's defined by this.
|
||||
if (RemoveStart == DefIdx && RemoveEnd == li_->getNextSlot(DefIdx)) {
|
||||
if (RemoveStart == DefIdx && RemoveEnd == DefIdx.getStoreIndex()) {
|
||||
removeRange(li, RemoveStart, RemoveEnd, li_, tri_);
|
||||
return removeIntervalIfEmpty(li, li_, tri_);
|
||||
}
|
||||
@ -901,7 +903,7 @@ bool SimpleRegisterCoalescing::ShortenDeadCopyLiveRange(LiveInterval &li,
|
||||
/// the val# it defines. If the live interval becomes empty, remove it as well.
|
||||
bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
|
||||
MachineInstr *DefMI) {
|
||||
LiveIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(DefMI));
|
||||
SlotIndex DefIdx = li_->getInstructionIndex(DefMI).getDefIndex();
|
||||
LiveInterval::iterator MLR = li.FindLiveRangeContaining(DefIdx);
|
||||
if (DefIdx != MLR->valno->def)
|
||||
return false;
|
||||
@ -912,10 +914,10 @@ bool SimpleRegisterCoalescing::RemoveDeadDef(LiveInterval &li,
|
||||
/// PropagateDeadness - Propagate the dead marker to the instruction which
|
||||
/// defines the val#.
|
||||
static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
|
||||
LiveIndex &LRStart, LiveIntervals *li_,
|
||||
SlotIndex &LRStart, LiveIntervals *li_,
|
||||
const TargetRegisterInfo* tri_) {
|
||||
MachineInstr *DefMI =
|
||||
li_->getInstructionFromIndex(li_->getDefIndex(LRStart));
|
||||
li_->getInstructionFromIndex(LRStart.getDefIndex());
|
||||
if (DefMI && DefMI != CopyMI) {
|
||||
int DeadIdx = DefMI->findRegisterDefOperandIdx(li.reg, false);
|
||||
if (DeadIdx != -1)
|
||||
@ -923,7 +925,7 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
|
||||
else
|
||||
DefMI->addOperand(MachineOperand::CreateReg(li.reg,
|
||||
/*def*/true, /*implicit*/true, /*kill*/false, /*dead*/true));
|
||||
LRStart = li_->getNextSlot(LRStart);
|
||||
LRStart = LRStart.getNextSlot();
|
||||
}
|
||||
}
|
||||
|
||||
@ -934,8 +936,8 @@ static void PropagateDeadness(LiveInterval &li, MachineInstr *CopyMI,
|
||||
bool
|
||||
SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
|
||||
MachineInstr *CopyMI) {
|
||||
LiveIndex CopyIdx = li_->getInstructionIndex(CopyMI);
|
||||
if (CopyIdx == LiveIndex()) {
|
||||
SlotIndex CopyIdx = li_->getInstructionIndex(CopyMI);
|
||||
if (CopyIdx == SlotIndex()) {
|
||||
// FIXME: special case: function live in. It can be a general case if the
|
||||
// first instruction index starts at > 0 value.
|
||||
assert(TargetRegisterInfo::isPhysicalRegister(li.reg));
|
||||
@ -948,13 +950,13 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
|
||||
}
|
||||
|
||||
LiveInterval::iterator LR =
|
||||
li.FindLiveRangeContaining(li_->getPrevSlot(CopyIdx));
|
||||
li.FindLiveRangeContaining(CopyIdx.getPrevIndex().getStoreIndex());
|
||||
if (LR == li.end())
|
||||
// Livein but defined by a phi.
|
||||
return false;
|
||||
|
||||
LiveIndex RemoveStart = LR->start;
|
||||
LiveIndex RemoveEnd = li_->getNextSlot(li_->getDefIndex(CopyIdx));
|
||||
SlotIndex RemoveStart = LR->start;
|
||||
SlotIndex RemoveEnd = CopyIdx.getStoreIndex();
|
||||
if (LR->end > RemoveEnd)
|
||||
// More uses past this copy? Nothing to do.
|
||||
return false;
|
||||
@ -974,7 +976,7 @@ SimpleRegisterCoalescing::ShortenDeadCopySrcLiveRange(LiveInterval &li,
|
||||
// If the live range starts in another mbb and the copy mbb is not a fall
|
||||
// through mbb, then we can only cut the range from the beginning of the
|
||||
// copy mbb.
|
||||
RemoveStart = li_->getNextSlot(li_->getMBBStartIdx(CopyMBB));
|
||||
RemoveStart = li_->getMBBStartIdx(CopyMBB).getNextIndex().getBaseIndex();
|
||||
|
||||
if (LR->valno->def == RemoveStart) {
|
||||
// If the def MI defines the val# and this copy is the only kill of the
|
||||
@ -1030,14 +1032,14 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
|
||||
|
||||
// If the virtual register live interval extends into a loop, turn down
|
||||
// aggressiveness.
|
||||
LiveIndex CopyIdx =
|
||||
li_->getDefIndex(li_->getInstructionIndex(CopyMI));
|
||||
SlotIndex CopyIdx =
|
||||
li_->getInstructionIndex(CopyMI).getDefIndex();
|
||||
const MachineLoop *L = loopInfo->getLoopFor(CopyMBB);
|
||||
if (!L) {
|
||||
// Let's see if the virtual register live interval extends into the loop.
|
||||
LiveInterval::iterator DLR = DstInt.FindLiveRangeContaining(CopyIdx);
|
||||
assert(DLR != DstInt.end() && "Live range not found!");
|
||||
DLR = DstInt.FindLiveRangeContaining(li_->getNextSlot(DLR->end));
|
||||
DLR = DstInt.FindLiveRangeContaining(DLR->end.getNextSlot());
|
||||
if (DLR != DstInt.end()) {
|
||||
CopyMBB = li_->getMBBFromIndex(DLR->start);
|
||||
L = loopInfo->getLoopFor(CopyMBB);
|
||||
@ -1047,7 +1049,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
|
||||
if (!L || Length <= Threshold)
|
||||
return true;
|
||||
|
||||
LiveIndex UseIdx = li_->getUseIndex(CopyIdx);
|
||||
SlotIndex UseIdx = CopyIdx.getUseIndex();
|
||||
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
|
||||
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
|
||||
if (loopInfo->getLoopFor(SMBB) != L) {
|
||||
@ -1060,7 +1062,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithSrcPhysReg(MachineInstr *CopyMI,
|
||||
if (SuccMBB == CopyMBB)
|
||||
continue;
|
||||
if (DstInt.overlaps(li_->getMBBStartIdx(SuccMBB),
|
||||
li_->getNextSlot(li_->getMBBEndIdx(SuccMBB))))
|
||||
li_->getMBBEndIdx(SuccMBB).getNextIndex().getBaseIndex()))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -1091,12 +1093,12 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
|
||||
|
||||
// If the virtual register live interval is defined or cross a loop, turn
|
||||
// down aggressiveness.
|
||||
LiveIndex CopyIdx =
|
||||
li_->getDefIndex(li_->getInstructionIndex(CopyMI));
|
||||
LiveIndex UseIdx = li_->getUseIndex(CopyIdx);
|
||||
SlotIndex CopyIdx =
|
||||
li_->getInstructionIndex(CopyMI).getDefIndex();
|
||||
SlotIndex UseIdx = CopyIdx.getUseIndex();
|
||||
LiveInterval::iterator SLR = SrcInt.FindLiveRangeContaining(UseIdx);
|
||||
assert(SLR != SrcInt.end() && "Live range not found!");
|
||||
SLR = SrcInt.FindLiveRangeContaining(li_->getPrevSlot(SLR->start));
|
||||
SLR = SrcInt.FindLiveRangeContaining(SLR->start.getPrevSlot());
|
||||
if (SLR == SrcInt.end())
|
||||
return true;
|
||||
MachineBasicBlock *SMBB = li_->getMBBFromIndex(SLR->start);
|
||||
@ -1116,7 +1118,7 @@ SimpleRegisterCoalescing::isWinToJoinVRWithDstPhysReg(MachineInstr *CopyMI,
|
||||
if (PredMBB == SMBB)
|
||||
continue;
|
||||
if (SrcInt.overlaps(li_->getMBBStartIdx(PredMBB),
|
||||
li_->getNextSlot(li_->getMBBEndIdx(PredMBB))))
|
||||
li_->getMBBEndIdx(PredMBB).getNextIndex().getBaseIndex()))
|
||||
return false;
|
||||
}
|
||||
}
|
||||
@ -1705,7 +1707,7 @@ bool SimpleRegisterCoalescing::JoinCopy(CopyRec &TheCopy, bool &Again) {
|
||||
|
||||
// Update the liveintervals of sub-registers.
|
||||
for (const unsigned *AS = tri_->getSubRegisters(DstReg); *AS; ++AS)
|
||||
li_->getOrCreateInterval(*AS).MergeInClobberRanges(*ResSrcInt,
|
||||
li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, *ResSrcInt,
|
||||
li_->getVNInfoAllocator());
|
||||
}
|
||||
|
||||
@ -1867,7 +1869,7 @@ bool SimpleRegisterCoalescing::RangeIsDefinedByCopyFromReg(LiveInterval &li,
|
||||
/// is live at the given point.
|
||||
bool SimpleRegisterCoalescing::ValueLiveAt(LiveInterval::iterator LRItr,
|
||||
LiveInterval::iterator LREnd,
|
||||
LiveIndex defPoint) const {
|
||||
SlotIndex defPoint) const {
|
||||
for (const VNInfo *valno = LRItr->valno;
|
||||
(LRItr != LREnd) && (LRItr->valno == valno); ++LRItr) {
|
||||
if (LRItr->contains(defPoint))
|
||||
@ -2047,7 +2049,7 @@ bool SimpleRegisterCoalescing::SimpleJoin(LiveInterval &LHS, LiveInterval &RHS){
|
||||
// Update the liveintervals of sub-registers.
|
||||
if (TargetRegisterInfo::isPhysicalRegister(LHS.reg))
|
||||
for (const unsigned *AS = tri_->getSubRegisters(LHS.reg); *AS; ++AS)
|
||||
li_->getOrCreateInterval(*AS).MergeInClobberRanges(LHS,
|
||||
li_->getOrCreateInterval(*AS).MergeInClobberRanges(*li_, LHS,
|
||||
li_->getVNInfoAllocator());
|
||||
|
||||
return true;
|
||||
@ -2148,7 +2150,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
|
||||
} else {
|
||||
// It was defined as a copy from the LHS, find out what value # it is.
|
||||
RHSValNoInfo =
|
||||
LHS.getLiveRangeContaining(li_->getPrevSlot(RHSValNoInfo0->def))->valno;
|
||||
LHS.getLiveRangeContaining(RHSValNoInfo0->def.getPrevSlot())->valno;
|
||||
RHSValID = RHSValNoInfo->id;
|
||||
RHSVal0DefinedFromLHS = RHSValID;
|
||||
}
|
||||
@ -2212,7 +2214,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
|
||||
|
||||
// Figure out the value # from the RHS.
|
||||
LHSValsDefinedFromRHS[VNI]=
|
||||
RHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
|
||||
RHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno;
|
||||
}
|
||||
|
||||
// Loop over the value numbers of the RHS, seeing if any are defined from
|
||||
@ -2230,7 +2232,7 @@ SimpleRegisterCoalescing::JoinIntervals(LiveInterval &LHS, LiveInterval &RHS,
|
||||
|
||||
// Figure out the value # from the LHS.
|
||||
RHSValsDefinedFromLHS[VNI]=
|
||||
LHS.getLiveRangeContaining(li_->getPrevSlot(VNI->def))->valno;
|
||||
LHS.getLiveRangeContaining(VNI->def.getPrevSlot())->valno;
|
||||
}
|
||||
|
||||
LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
|
||||
@ -2494,11 +2496,11 @@ SimpleRegisterCoalescing::differingRegisterClasses(unsigned RegA,
|
||||
/// lastRegisterUse - Returns the last use of the specific register between
|
||||
/// cycles Start and End or NULL if there are no uses.
|
||||
MachineOperand *
|
||||
SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
|
||||
LiveIndex End,
|
||||
SimpleRegisterCoalescing::lastRegisterUse(SlotIndex Start,
|
||||
SlotIndex End,
|
||||
unsigned Reg,
|
||||
LiveIndex &UseIdx) const{
|
||||
UseIdx = LiveIndex();
|
||||
SlotIndex &UseIdx) const{
|
||||
UseIdx = SlotIndex();
|
||||
if (TargetRegisterInfo::isVirtualRegister(Reg)) {
|
||||
MachineOperand *LastUse = NULL;
|
||||
for (MachineRegisterInfo::use_iterator I = mri_->use_begin(Reg),
|
||||
@ -2510,22 +2512,24 @@ SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
|
||||
SrcReg == DstReg)
|
||||
// Ignore identity copies.
|
||||
continue;
|
||||
LiveIndex Idx = li_->getInstructionIndex(UseMI);
|
||||
SlotIndex Idx = li_->getInstructionIndex(UseMI);
|
||||
// FIXME: Should this be Idx != UseIdx? SlotIndex() will return something
|
||||
// that compares higher than any other interval.
|
||||
if (Idx >= Start && Idx < End && Idx >= UseIdx) {
|
||||
LastUse = &Use;
|
||||
UseIdx = li_->getUseIndex(Idx);
|
||||
UseIdx = Idx.getUseIndex();
|
||||
}
|
||||
}
|
||||
return LastUse;
|
||||
}
|
||||
|
||||
LiveIndex s = Start;
|
||||
LiveIndex e = li_->getBaseIndex(li_->getPrevSlot(End));
|
||||
SlotIndex s = Start;
|
||||
SlotIndex e = End.getPrevSlot().getBaseIndex();
|
||||
while (e >= s) {
|
||||
// Skip deleted instructions
|
||||
MachineInstr *MI = li_->getInstructionFromIndex(e);
|
||||
while (e != LiveIndex() && li_->getPrevIndex(e) >= s && !MI) {
|
||||
e = li_->getPrevIndex(e);
|
||||
while (e != SlotIndex() && e.getPrevIndex() >= s && !MI) {
|
||||
e = e.getPrevIndex();
|
||||
MI = li_->getInstructionFromIndex(e);
|
||||
}
|
||||
if (e < s || MI == NULL)
|
||||
@ -2539,12 +2543,12 @@ SimpleRegisterCoalescing::lastRegisterUse(LiveIndex Start,
|
||||
MachineOperand &Use = MI->getOperand(i);
|
||||
if (Use.isReg() && Use.isUse() && Use.getReg() &&
|
||||
tri_->regsOverlap(Use.getReg(), Reg)) {
|
||||
UseIdx = li_->getUseIndex(e);
|
||||
UseIdx = e.getUseIndex();
|
||||
return &Use;
|
||||
}
|
||||
}
|
||||
|
||||
e = li_->getPrevIndex(e);
|
||||
e = e.getPrevIndex();
|
||||
}
|
||||
|
||||
return NULL;
|
||||
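lastRegisterUse above scans slot indexes backwards from End to Start and has to skip indexes whose instruction has been deleted (getInstructionFromIndex returns null for them). A minimal standalone sketch of that backward scan over possibly-empty slots (toy code under that assumption, not the LLVM API):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy backward scan: walk slots from index e down to index s, skipping slots
// whose instruction was deleted (null), and return the nearest survivor.
static const char *lastSurvivingInstr(const std::vector<const char*> &slots,
                                      std::size_t s, std::size_t e) {
  for (std::size_t i = e + 1; i > s; --i) {
    if (const char *mi = slots[i - 1])
      return mi;
  }
  return 0; // every slot in [s, e] was deleted
}

int main() {
  // Slots 0..4; the instructions at 3 and 4 have been deleted.
  std::vector<const char*> slots;
  slots.push_back("a");
  slots.push_back("b");
  slots.push_back("c");
  slots.push_back(0);
  slots.push_back(0);

  assert(lastSurvivingInstr(slots, 1, 4) == slots[2]); // skips the two holes
  assert(lastSurvivingInstr(slots, 3, 4) == 0);        // nothing survives
  return 0;
}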
@ -2568,7 +2572,7 @@ void SimpleRegisterCoalescing::releaseMemory() {
|
||||
static bool isZeroLengthInterval(LiveInterval *li, LiveIntervals *li_) {
|
||||
for (LiveInterval::Ranges::const_iterator
|
||||
i = li->ranges.begin(), e = li->ranges.end(); i != e; ++i)
|
||||
if (li_->getPrevIndex(i->end) > i->start)
|
||||
if (i->end.getPrevIndex() > i->start)
|
||||
return false;
|
||||
return true;
|
||||
}
|
||||
@ -2579,7 +2583,7 @@ void SimpleRegisterCoalescing::CalculateSpillWeights() {
|
||||
for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
|
||||
mbbi != mbbe; ++mbbi) {
|
||||
MachineBasicBlock* MBB = mbbi;
|
||||
LiveIndex MBBEnd = li_->getMBBEndIdx(MBB);
|
||||
SlotIndex MBBEnd = li_->getMBBEndIdx(MBB);
|
||||
MachineLoop* loop = loopInfo->getLoopFor(MBB);
|
||||
unsigned loopDepth = loop ? loop->getLoopDepth() : 0;
|
||||
bool isExiting = loop ? loop->isLoopExiting(MBB) : false;
|
||||
@ -2621,7 +2625,7 @@ void SimpleRegisterCoalescing::CalculateSpillWeights() {
|
||||
float Weight = li_->getSpillWeight(HasDef, HasUse, loopDepth);
|
||||
if (HasDef && isExiting) {
|
||||
// Looks like this is a loop count variable update.
|
||||
LiveIndex DefIdx = li_->getDefIndex(li_->getInstructionIndex(MI));
|
||||
SlotIndex DefIdx = li_->getInstructionIndex(MI).getDefIndex();
|
||||
const LiveRange *DLR =
|
||||
li_->getInterval(Reg).getLiveRangeContaining(DefIdx);
|
||||
if (DLR->end > MBBEnd)
|
||||
|
@ -146,7 +146,7 @@ namespace llvm {
|
||||
/// TrimLiveIntervalToLastUse - If there is a last use in the same basic
|
||||
/// block as the copy instruction, trim the live interval to the last use
|
||||
/// and return true.
|
||||
bool TrimLiveIntervalToLastUse(LiveIndex CopyIdx,
|
||||
bool TrimLiveIntervalToLastUse(SlotIndex CopyIdx,
|
||||
MachineBasicBlock *CopyMBB,
|
||||
LiveInterval &li, const LiveRange *LR);
|
||||
|
||||
@ -205,7 +205,7 @@ namespace llvm {
|
||||
/// iterator, or any subsequent range with the same value number,
|
||||
/// is live at the given point.
|
||||
bool ValueLiveAt(LiveInterval::iterator LRItr, LiveInterval::iterator LREnd,
|
||||
LiveIndex defPoint) const;
|
||||
SlotIndex defPoint) const;
|
||||
|
||||
/// RangeIsDefinedByCopyFromReg - Return true if the specified live range of
|
||||
/// the specified live interval is defined by a copy from the specified
|
||||
@ -241,9 +241,8 @@ namespace llvm {
|
||||
|
||||
/// lastRegisterUse - Returns the last use of the specific register between
|
||||
/// cycles Start and End or NULL if there are no uses.
|
||||
MachineOperand *lastRegisterUse(LiveIndex Start,
|
||||
LiveIndex End, unsigned Reg,
|
||||
LiveIndex &LastUseIdx) const;
|
||||
MachineOperand *lastRegisterUse(SlotIndex Start, SlotIndex End,
|
||||
unsigned Reg, SlotIndex &LastUseIdx) const;
|
||||
|
||||
/// CalculateSpillWeights - Compute spill weights for all virtual register
|
||||
/// live intervals.
|
||||
|
189
lib/CodeGen/SlotIndexes.cpp
Normal file
@ -0,0 +1,189 @@
//===-- SlotIndexes.cpp - Slot Indexes Pass ------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "slotindexes"

#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

std::auto_ptr<IndexListEntry> SlotIndex::emptyKeyPtr(0),
                              SlotIndex::tombstoneKeyPtr(0);

char SlotIndexes::ID = 0;
static RegisterPass<SlotIndexes> X("slotindexes", "Slot index numbering");

void SlotIndexes::getAnalysisUsage(AnalysisUsage &au) const {
  au.setPreservesAll();
  MachineFunctionPass::getAnalysisUsage(au);
}

void SlotIndexes::releaseMemory() {
  mi2iMap.clear();
  mbb2IdxMap.clear();
  idx2MBBMap.clear();
  terminatorGaps.clear();
  clearList();
}

bool SlotIndexes::runOnMachineFunction(MachineFunction &fn) {

  // Compute numbering as follows:
  // Grab an iterator to the start of the index list.
  // Iterate over all MBBs, and within each MBB all MIs, keeping the MI
  // iterator in lock-step (though skipping it over indexes which have
  // null pointers in the instruction field).
  // At each iteration assert that the instruction pointed to in the index
  // is the same one pointed to by the MI iterator. This

  // FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
  // only need to be set up once after the first numbering is computed.

  mf = &fn;
  initList();

  const unsigned gap = 1;

  // Check that the list contains only the sentinel.
  assert(indexListHead->getNext() == 0 &&
         "Index list non-empty at initial numbering?");
  assert(idx2MBBMap.empty() &&
         "Index -> MBB mapping non-empty at initial numbering?");
  assert(mbb2IdxMap.empty() &&
         "MBB -> Index mapping non-empty at initial numbering?");
  assert(mi2iMap.empty() &&
         "MachineInstr -> Index mapping non-empty at initial numbering?");

  functionSize = 0;
  /*
  for (unsigned s = 0; s < SlotIndex::NUM; ++s) {
    indexList.push_back(createEntry(0, s));
  }

  unsigned index = gap * SlotIndex::NUM;
  */

  unsigned index = 0;

  // Iterate over the function.
  for (MachineFunction::iterator mbbItr = mf->begin(), mbbEnd = mf->end();
       mbbItr != mbbEnd; ++mbbItr) {
    MachineBasicBlock *mbb = &*mbbItr;

    // Insert an index for the MBB start.
    push_back(createEntry(0, index));
    SlotIndex blockStartIndex(back(), SlotIndex::LOAD);

    index += gap * SlotIndex::NUM;

    for (MachineBasicBlock::iterator miItr = mbb->begin(), miEnd = mbb->end();
         miItr != miEnd; ++miItr) {
      MachineInstr *mi = &*miItr;

      if (miItr == mbb->getFirstTerminator()) {
        push_back(createEntry(0, index));
        terminatorGaps.insert(
          std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
        index += gap * SlotIndex::NUM;
      }

      // Insert a store index for the instr.
      push_back(createEntry(mi, index));

      // Save this base index in the maps.
      mi2iMap.insert(
        std::make_pair(mi, SlotIndex(back(), SlotIndex::LOAD)));

      ++functionSize;

      unsigned Slots = mi->getDesc().getNumDefs();
      if (Slots == 0)
        Slots = 1;

      index += (Slots + 1) * gap * SlotIndex::NUM;
    }

    if (mbb->getFirstTerminator() == mbb->end()) {
      push_back(createEntry(0, index));
      terminatorGaps.insert(
        std::make_pair(mbb, SlotIndex(back(), SlotIndex::PHI_BIT)));
      index += gap * SlotIndex::NUM;
    }

    SlotIndex blockEndIndex(back(), SlotIndex::STORE);
    mbb2IdxMap.insert(
      std::make_pair(mbb, std::make_pair(blockStartIndex, blockEndIndex)));

    idx2MBBMap.push_back(IdxMBBPair(blockStartIndex, mbb));
  }

  // One blank instruction at the end.
  push_back(createEntry(0, index));

  // Sort the Idx2MBBMap
  std::sort(idx2MBBMap.begin(), idx2MBBMap.end(), Idx2MBBCompare());

  DEBUG(dump());

  // And we're done!
  return false;
}

void SlotIndexes::renumber() {
  assert(false && "SlotIndexes::renumber is not fully implemented yet.");

  // Compute numbering as follows:
  // Grab an iterator to the start of the index list.
  // Iterate over all MBBs, and within each MBB all MIs, keeping the MI
  // iterator in lock-step (though skipping it over indexes which have
  // null pointers in the instruction field).
  // At each iteration assert that the instruction pointed to in the index
  // is the same one pointed to by the MI iterator. This

  // FIXME: This can be simplified. The mi2iMap_, Idx2MBBMap, etc. should
  // only need to be set up once - when the first numbering is computed.

  assert(false && "Renumbering not supported yet.");
}

void SlotIndexes::dump() const {
  for (const IndexListEntry *itr = front(); itr != getTail();
       itr = itr->getNext()) {
    errs() << itr->getIndex() << " ";

    if (itr->getInstr() != 0) {
      errs() << *itr->getInstr();
    } else {
      errs() << "\n";
    }
  }

  for (MBB2IdxMap::iterator itr = mbb2IdxMap.begin();
       itr != mbb2IdxMap.end(); ++itr) {
    errs() << "MBB " << itr->first->getNumber() << " (" << itr->first << ") - ["
           << itr->second.first << ", " << itr->second.second << "]\n";
  }
}

// Print a SlotIndex to a raw_ostream.
void SlotIndex::print(raw_ostream &os) const {
  os << getIndex();
  if (isPHI())
    os << "*";
}

// Dump a SlotIndex to stderr.
void SlotIndex::dump() const {
  print(errs());
  errs() << "\n";
}
|
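runOnMachineFunction above hands out one block of SlotIndex::NUM numbers per instruction (scaled by gap), plus extra entries for block starts and terminator gaps, the latter tagged with PHI_BIT. The following standalone toy (an invented class, not the SlotIndex implementation in this patch) models that scheme: slot conversions such as getUseIndex()/getDefIndex() are plain arithmetic within an instruction's block, and the PHI marker stays out of the numeric value used for ordering.

#include <cassert>

// Toy model of the slot numbering in this patch: four slots per instruction,
// with an optional PHI marker bit that is ignored by comparisons.
class ToySlotIndex {
public:
  enum Slot { LOAD, USE, DEF, STORE, NUM };

private:
  static const unsigned PHI_BIT = 1u << 31;
  unsigned index;

public:
  explicit ToySlotIndex(unsigned base, Slot s, bool isPHI = false)
    : index((base / NUM) * NUM + s + (isPHI ? PHI_BIT : 0)) {}

  unsigned getIndex() const { return index & ~PHI_BIT; }
  bool isPHI() const { return (index & PHI_BIT) != 0; }

  // Slot conversions are arithmetic within the instruction's block of NUM.
  ToySlotIndex getLoadIndex()  const { return ToySlotIndex(getIndex(), LOAD); }
  ToySlotIndex getUseIndex()   const { return ToySlotIndex(getIndex(), USE); }
  ToySlotIndex getDefIndex()   const { return ToySlotIndex(getIndex(), DEF); }
  ToySlotIndex getStoreIndex() const { return ToySlotIndex(getIndex(), STORE); }

  // The PHI marker never affects ordering.
  bool operator<(ToySlotIndex other) const { return getIndex() < other.getIndex(); }
  bool operator==(ToySlotIndex other) const { return getIndex() == other.getIndex(); }
};

int main() {
  // Instruction at base number 8 (gap = 1, so bases advance by NUM = 4).
  ToySlotIndex base(8, ToySlotIndex::LOAD);
  assert(base.getUseIndex().getIndex() == 9);
  assert(base.getDefIndex().getIndex() == 10);
  assert(base.getStoreIndex().getIndex() == 11);

  // A terminator-gap index tagged with the PHI marker still compares normally.
  ToySlotIndex gap(12, ToySlotIndex::LOAD, /*isPHI=*/true);
  assert(gap.isPHI() && base.getDefIndex() < gap);
  return 0;
}

With gap = 1 this mirrors the spacing handed out above: each instruction's base index advances by a multiple of SlotIndex::NUM, leaving room for its per-instruction slots.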
@ -51,13 +51,15 @@ protected:
|
||||
|
||||
/// Ensures there is space before the given machine instruction and returns the
|
||||
/// instruction's new number.
|
||||
LiveIndex makeSpaceBefore(MachineInstr *mi) {
|
||||
SlotIndex makeSpaceBefore(MachineInstr *mi) {
|
||||
if (!lis->hasGapBeforeInstr(lis->getInstructionIndex(mi))) {
|
||||
lis->scaleNumbering(2);
|
||||
ls->scaleNumbering(2);
|
||||
// FIXME: Should be updated to use rewrite-in-place methods when they're
|
||||
// introduced. Currently broken.
|
||||
//lis->scaleNumbering(2);
|
||||
//ls->scaleNumbering(2);
|
||||
}
|
||||
|
||||
LiveIndex miIdx = lis->getInstructionIndex(mi);
|
||||
SlotIndex miIdx = lis->getInstructionIndex(mi);
|
||||
|
||||
assert(lis->hasGapBeforeInstr(miIdx));
|
||||
|
||||
@ -66,13 +68,15 @@ protected:
|
||||
|
||||
/// Ensures there is space after the given machine instruction and returns the
|
||||
/// instruction's new number.
|
||||
LiveIndex makeSpaceAfter(MachineInstr *mi) {
|
||||
SlotIndex makeSpaceAfter(MachineInstr *mi) {
|
||||
if (!lis->hasGapAfterInstr(lis->getInstructionIndex(mi))) {
|
||||
lis->scaleNumbering(2);
|
||||
ls->scaleNumbering(2);
|
||||
// FIXME: Should be updated to use rewrite-in-place methods when they're
|
||||
// introduced. Currently broken.
|
||||
// lis->scaleNumbering(2);
|
||||
// ls->scaleNumbering(2);
|
||||
}
|
||||
|
||||
LiveIndex miIdx = lis->getInstructionIndex(mi);
|
||||
SlotIndex miIdx = lis->getInstructionIndex(mi);
|
||||
|
||||
assert(lis->hasGapAfterInstr(miIdx));
|
||||
|
||||
@ -83,19 +87,19 @@ protected:
|
||||
/// after the given instruction. Returns the base index of the inserted
|
||||
/// instruction. The caller is responsible for adding an appropriate
|
||||
/// LiveInterval to the LiveIntervals analysis.
|
||||
LiveIndex insertStoreAfter(MachineInstr *mi, unsigned ss,
|
||||
SlotIndex insertStoreAfter(MachineInstr *mi, unsigned ss,
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
|
||||
MachineBasicBlock::iterator nextInstItr(next(mi));
|
||||
|
||||
LiveIndex miIdx = makeSpaceAfter(mi);
|
||||
SlotIndex miIdx = makeSpaceAfter(mi);
|
||||
|
||||
tii->storeRegToStackSlot(*mi->getParent(), nextInstItr, vreg,
|
||||
true, ss, trc);
|
||||
MachineBasicBlock::iterator storeInstItr(next(mi));
|
||||
MachineInstr *storeInst = &*storeInstItr;
|
||||
LiveIndex storeInstIdx = lis->getNextIndex(miIdx);
|
||||
SlotIndex storeInstIdx = miIdx.getNextIndex();
|
||||
|
||||
assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
|
||||
"Store inst index already in use.");
|
||||
@ -108,15 +112,15 @@ protected:
|
||||
/// Insert a store of the given vreg to the given stack slot immediately
|
||||
/// before the given instruction. Returns the base index of the inserted
|
||||
/// Instruction.
|
||||
LiveIndex insertStoreBefore(MachineInstr *mi, unsigned ss,
|
||||
SlotIndex insertStoreBefore(MachineInstr *mi, unsigned ss,
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
LiveIndex miIdx = makeSpaceBefore(mi);
|
||||
SlotIndex miIdx = makeSpaceBefore(mi);
|
||||
|
||||
tii->storeRegToStackSlot(*mi->getParent(), mi, vreg, true, ss, trc);
|
||||
MachineBasicBlock::iterator storeInstItr(prior(mi));
|
||||
MachineInstr *storeInst = &*storeInstItr;
|
||||
LiveIndex storeInstIdx = lis->getPrevIndex(miIdx);
|
||||
SlotIndex storeInstIdx = miIdx.getPrevIndex();
|
||||
|
||||
assert(lis->getInstructionFromIndex(storeInstIdx) == 0 &&
|
||||
"Store inst index already in use.");
|
||||
@ -131,9 +135,9 @@ protected:
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
|
||||
LiveIndex storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
|
||||
LiveIndex start = lis->getDefIndex(lis->getInstructionIndex(mi)),
|
||||
end = lis->getUseIndex(storeInstIdx);
|
||||
SlotIndex storeInstIdx = insertStoreAfter(mi, ss, vreg, trc);
|
||||
SlotIndex start = lis->getInstructionIndex(mi).getDefIndex(),
|
||||
end = storeInstIdx.getUseIndex();
|
||||
|
||||
VNInfo *vni =
|
||||
li->getNextValue(storeInstIdx, 0, true, lis->getVNInfoAllocator());
|
||||
@ -149,18 +153,18 @@ protected:
|
||||
/// after the given instruction. Returns the base index of the inserted
|
||||
/// instruction. The caller is responsible for adding/removing an appropriate
/// range to the vreg's LiveInterval.
|
||||
LiveIndex insertLoadAfter(MachineInstr *mi, unsigned ss,
|
||||
SlotIndex insertLoadAfter(MachineInstr *mi, unsigned ss,
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
|
||||
MachineBasicBlock::iterator nextInstItr(next(mi));
|
||||
|
||||
LiveIndex miIdx = makeSpaceAfter(mi);
|
||||
SlotIndex miIdx = makeSpaceAfter(mi);
|
||||
|
||||
tii->loadRegFromStackSlot(*mi->getParent(), nextInstItr, vreg, ss, trc);
|
||||
MachineBasicBlock::iterator loadInstItr(next(mi));
|
||||
MachineInstr *loadInst = &*loadInstItr;
|
||||
LiveIndex loadInstIdx = lis->getNextIndex(miIdx);
|
||||
SlotIndex loadInstIdx = miIdx.getNextIndex();
|
||||
|
||||
assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
|
||||
"Store inst index already in use.");
|
||||
@ -174,15 +178,15 @@ protected:
|
||||
/// before the given instruction. Returns the base index of the inserted
|
||||
/// instruction. The caller is responsible for adding an appropriate
|
||||
/// LiveInterval to the LiveIntervals analysis.
|
||||
LiveIndex insertLoadBefore(MachineInstr *mi, unsigned ss,
|
||||
SlotIndex insertLoadBefore(MachineInstr *mi, unsigned ss,
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
LiveIndex miIdx = makeSpaceBefore(mi);
|
||||
SlotIndex miIdx = makeSpaceBefore(mi);
|
||||
|
||||
tii->loadRegFromStackSlot(*mi->getParent(), mi, vreg, ss, trc);
|
||||
MachineBasicBlock::iterator loadInstItr(prior(mi));
|
||||
MachineInstr *loadInst = &*loadInstItr;
|
||||
LiveIndex loadInstIdx = lis->getPrevIndex(miIdx);
|
||||
SlotIndex loadInstIdx = miIdx.getPrevIndex();
|
||||
|
||||
assert(lis->getInstructionFromIndex(loadInstIdx) == 0 &&
|
||||
"Load inst index already in use.");
|
||||
@ -197,9 +201,9 @@ protected:
|
||||
unsigned vreg,
|
||||
const TargetRegisterClass *trc) {
|
||||
|
||||
LiveIndex loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
|
||||
LiveIndex start = lis->getDefIndex(loadInstIdx),
|
||||
end = lis->getUseIndex(lis->getInstructionIndex(mi));
|
||||
SlotIndex loadInstIdx = insertLoadBefore(mi, ss, vreg, trc);
|
||||
SlotIndex start = loadInstIdx.getDefIndex(),
|
||||
end = lis->getInstructionIndex(mi).getUseIndex();
|
||||
|
||||
VNInfo *vni =
|
||||
li->getNextValue(loadInstIdx, 0, true, lis->getVNInfoAllocator());
|
||||
@ -321,21 +325,21 @@ public:
|
||||
vrm->assignVirt2StackSlot(li->reg, ss);
|
||||
|
||||
MachineInstr *mi = 0;
|
||||
LiveIndex storeIdx = LiveIndex();
|
||||
SlotIndex storeIdx = SlotIndex();
|
||||
|
||||
if (valno->isDefAccurate()) {
|
||||
// If we have an accurate def we can just grab an iterator to the instr
|
||||
// after the def.
|
||||
mi = lis->getInstructionFromIndex(valno->def);
|
||||
storeIdx = lis->getDefIndex(insertStoreAfter(mi, ss, li->reg, trc));
|
||||
storeIdx = insertStoreAfter(mi, ss, li->reg, trc).getDefIndex();
|
||||
} else {
|
||||
// if we get here we have a PHI def.
|
||||
mi = &lis->getMBBFromIndex(valno->def)->front();
|
||||
storeIdx = lis->getDefIndex(insertStoreBefore(mi, ss, li->reg, trc));
|
||||
storeIdx = insertStoreBefore(mi, ss, li->reg, trc).getDefIndex();
|
||||
}
|
||||
|
||||
MachineBasicBlock *defBlock = mi->getParent();
|
||||
LiveIndex loadIdx = LiveIndex();
|
||||
SlotIndex loadIdx = SlotIndex();
|
||||
|
||||
// Now we need to find the load...
|
||||
MachineBasicBlock::iterator useItr(mi);
|
||||
@ -343,11 +347,11 @@ public:
|
||||
|
||||
if (useItr != defBlock->end()) {
|
||||
MachineInstr *loadInst = useItr;
|
||||
loadIdx = lis->getUseIndex(insertLoadBefore(loadInst, ss, li->reg, trc));
|
||||
loadIdx = insertLoadBefore(loadInst, ss, li->reg, trc).getUseIndex();
|
||||
}
|
||||
else {
|
||||
MachineInstr *loadInst = &defBlock->back();
|
||||
loadIdx = lis->getUseIndex(insertLoadAfter(loadInst, ss, li->reg, trc));
|
||||
loadIdx = insertLoadAfter(loadInst, ss, li->reg, trc).getUseIndex();
|
||||
}
|
||||
|
||||
li->removeRange(storeIdx, loadIdx, true);
|
||||
|
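The spill path above inserts a store right after the def, a reload right before the use, and then calls li->removeRange(storeIdx, loadIdx, true) to punch that stretch out of the interval. As a rough illustration of what such a removal does to a set of half-open ranges (a toy model, not LLVM's LiveInterval):

#include <cassert>
#include <utility>
#include <vector>

// Minimal interval model: a list of half-open [start, end) ranges. This only
// sketches the effect of removeRange(...) on a live interval; it is not the
// LLVM implementation.
typedef std::pair<unsigned, unsigned> ToyRange;

static void removeRange(std::vector<ToyRange> &ranges,
                        unsigned start, unsigned end) {
  std::vector<ToyRange> out;
  for (size_t i = 0; i < ranges.size(); ++i) {
    ToyRange r = ranges[i];
    if (r.second <= start || r.first >= end) { // no overlap with the cut
      out.push_back(r);
      continue;
    }
    if (r.first < start)                       // keep the piece before the cut
      out.push_back(ToyRange(r.first, start));
    if (r.second > end)                        // keep the piece after the cut
      out.push_back(ToyRange(end, r.second));
  }
  ranges.swap(out);
}

int main() {
  // One range [10, 50); remove the spilled stretch [14, 41) between the
  // inserted store and the reload.
  std::vector<ToyRange> li;
  li.push_back(ToyRange(10, 50));
  removeRange(li, 14, 41);
  assert(li.size() == 2);
  assert(li[0] == ToyRange(10, 14) && li[1] == ToyRange(41, 50));
  return 0;
}

In the patch the endpoints come from the inserted instructions' def and use slots (storeIdx and loadIdx above); here they are just numbers.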
@ -98,6 +98,8 @@ namespace {
|
||||
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
AU.setPreservesCFG();
|
||||
AU.addRequired<SlotIndexes>();
|
||||
AU.addPreserved<SlotIndexes>();
|
||||
AU.addRequired<LiveStacks>();
|
||||
AU.addRequired<VirtRegMap>();
|
||||
AU.addPreserved<VirtRegMap>();
|
||||
|
@ -72,6 +72,8 @@ namespace {
|
||||
virtual void getAnalysisUsage(AnalysisUsage &AU) const {
|
||||
AU.setPreservesCFG();
|
||||
AU.addRequired<MachineDominatorTree>();
|
||||
AU.addRequired<SlotIndexes>();
|
||||
AU.addPreserved<SlotIndexes>();
|
||||
AU.addRequired<LiveIntervals>();
|
||||
|
||||
// TODO: Actually make this true.
|
||||
@ -294,7 +296,7 @@ StrongPHIElimination::computeDomForest(
|
||||
static bool isLiveIn(unsigned r, MachineBasicBlock* MBB,
|
||||
LiveIntervals& LI) {
|
||||
LiveInterval& I = LI.getOrCreateInterval(r);
|
||||
LiveIndex idx = LI.getMBBStartIdx(MBB);
|
||||
SlotIndex idx = LI.getMBBStartIdx(MBB);
|
||||
return I.liveAt(idx);
|
||||
}
|
||||
|
||||
@ -427,7 +429,7 @@ void StrongPHIElimination::processBlock(MachineBasicBlock* MBB) {
|
||||
}
|
||||
|
||||
LiveInterval& PI = LI.getOrCreateInterval(DestReg);
|
||||
LiveIndex pIdx = LI.getDefIndex(LI.getInstructionIndex(P));
|
||||
SlotIndex pIdx = LI.getInstructionIndex(P).getDefIndex();
|
||||
VNInfo* PVN = PI.getLiveRangeContaining(pIdx)->valno;
|
||||
PhiValueNumber.insert(std::make_pair(DestReg, PVN->id));
|
||||
|
||||
@ -747,7 +749,7 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
|
||||
|
||||
LiveInterval& I = LI.getInterval(curr.second);
|
||||
MachineBasicBlock::iterator term = MBB->getFirstTerminator();
|
||||
LiveIndex endIdx = LiveIndex();
|
||||
SlotIndex endIdx = SlotIndex();
|
||||
if (term != MBB->end())
|
||||
endIdx = LI.getInstructionIndex(term);
|
||||
else
|
||||
@ -771,7 +773,7 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
|
||||
|
||||
// Renumber the instructions so that we can perform the index computations
|
||||
// needed to create new live intervals.
|
||||
LI.computeNumbering();
|
||||
LI.renumber();
|
||||
|
||||
// For copies that we inserted at the ends of predecessors, we construct
|
||||
// live intervals. This is pretty easy, since we know that the destination
|
||||
@ -783,15 +785,15 @@ void StrongPHIElimination::ScheduleCopies(MachineBasicBlock* MBB,
|
||||
InsertedPHIDests.begin(), E = InsertedPHIDests.end(); I != E; ++I) {
|
||||
if (RegHandled.insert(I->first).second) {
|
||||
LiveInterval& Int = LI.getOrCreateInterval(I->first);
|
||||
LiveIndex instrIdx = LI.getInstructionIndex(I->second);
|
||||
if (Int.liveAt(LI.getDefIndex(instrIdx)))
|
||||
Int.removeRange(LI.getDefIndex(instrIdx),
|
||||
LI.getNextSlot(LI.getMBBEndIdx(I->second->getParent())),
|
||||
SlotIndex instrIdx = LI.getInstructionIndex(I->second);
|
||||
if (Int.liveAt(instrIdx.getDefIndex()))
|
||||
Int.removeRange(instrIdx.getDefIndex(),
|
||||
LI.getMBBEndIdx(I->second->getParent()).getNextSlot(),
|
||||
true);
|
||||
|
||||
LiveRange R = LI.addLiveRangeToEndOfBlock(I->first, I->second);
|
||||
R.valno->setCopy(I->second);
|
||||
R.valno->def = LI.getDefIndex(LI.getInstructionIndex(I->second));
|
||||
R.valno->def = LI.getInstructionIndex(I->second).getDefIndex();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -816,8 +818,8 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
|
||||
Stacks[I->getOperand(i).getReg()].size()) {
|
||||
// Remove the live range for the old vreg.
|
||||
LiveInterval& OldInt = LI.getInterval(I->getOperand(i).getReg());
|
||||
LiveInterval::iterator OldLR = OldInt.FindLiveRangeContaining(
|
||||
LI.getUseIndex(LI.getInstructionIndex(I)));
|
||||
LiveInterval::iterator OldLR =
|
||||
OldInt.FindLiveRangeContaining(LI.getInstructionIndex(I).getUseIndex());
|
||||
if (OldLR != OldInt.end())
|
||||
OldInt.removeRange(*OldLR, true);
|
||||
|
||||
@ -829,11 +831,10 @@ void StrongPHIElimination::InsertCopies(MachineDomTreeNode* MDTN,
VNInfo* FirstVN = *Int.vni_begin();
FirstVN->setHasPHIKill(false);
if (I->getOperand(i).isKill())
-   FirstVN->addKill(
-     LI.getUseIndex(LI.getInstructionIndex(I)));
+   FirstVN->addKill(LI.getInstructionIndex(I).getUseIndex());

LiveRange LR (LI.getMBBStartIdx(I->getParent()),
-             LI.getNextSlot(LI.getUseIndex(LI.getInstructionIndex(I))),
+             LI.getInstructionIndex(I).getUseIndex().getNextSlot(),
              FirstVN);

Int.addRange(LR);
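The same rename applied to the use side: the kill is recorded at the instruction's use slot, and the covering range runs from the block entry to one slot past that use. A small sketch follows; the function name and parameters are illustrative, the calls mirror the hunk above, and the same headers as the first sketch are assumed.

```cpp
// Sketch of the use-slot handling above; not part of the commit.
// 'Int', 'VN', 'MI' and 'IsKill' are supplied by the caller.
static void extendToUse(LiveIntervals &LI, LiveInterval &Int, VNInfo *VN,
                        MachineInstr *MI, bool IsKill) {
  SlotIndex UseIdx = LI.getInstructionIndex(MI).getUseIndex();
  if (IsKill)
    VN->addKill(UseIdx);                       // kill recorded at the use slot

  // Live from the start of the block up to (one slot past) the use.
  LiveRange LR(LI.getMBBStartIdx(MI->getParent()), UseIdx.getNextSlot(), VN);
  Int.addRange(LR);
}
```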
@ -862,14 +863,14 @@ bool StrongPHIElimination::mergeLiveIntervals(unsigned primary,
LiveInterval& LHS = LI.getOrCreateInterval(primary);
LiveInterval& RHS = LI.getOrCreateInterval(secondary);

- LI.computeNumbering();
+ LI.renumber();

DenseMap<VNInfo*, VNInfo*> VNMap;
for (LiveInterval::iterator I = RHS.begin(), E = RHS.end(); I != E; ++I) {
LiveRange R = *I;

- LiveIndex Start = R.start;
- LiveIndex End = R.end;
+ SlotIndex Start = R.start;
+ SlotIndex End = R.end;
if (LHS.getLiveRangeContaining(Start))
  return false;

@ -963,19 +964,19 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
TII->copyRegToReg(*SI->second, SI->second->getFirstTerminator(),
                  I->first, SI->first, RC, RC);

- LI.computeNumbering();
+ LI.renumber();

LiveInterval& Int = LI.getOrCreateInterval(I->first);
- LiveIndex instrIdx =
+ SlotIndex instrIdx =
    LI.getInstructionIndex(--SI->second->getFirstTerminator());
- if (Int.liveAt(LI.getDefIndex(instrIdx)))
-   Int.removeRange(LI.getDefIndex(instrIdx),
-                   LI.getNextSlot(LI.getMBBEndIdx(SI->second)), true);
+ if (Int.liveAt(instrIdx.getDefIndex()))
+   Int.removeRange(instrIdx.getDefIndex(),
+                   LI.getMBBEndIdx(SI->second).getNextSlot(), true);

LiveRange R = LI.addLiveRangeToEndOfBlock(I->first,
                                          --SI->second->getFirstTerminator());
R.valno->setCopy(--SI->second->getFirstTerminator());
- R.valno->def = LI.getDefIndex(instrIdx);
+ R.valno->def = instrIdx.getDefIndex();

DEBUG(errs() << "Renaming failed: " << SI->first << " -> "
             << I->first << "\n");
@ -1010,7 +1011,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
if (PI.containsOneValue()) {
  LI.removeInterval(DestReg);
} else {
-   LiveIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
+   SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
  PI.removeRange(*PI.getLiveRangeContaining(idx), true);
}
} else {
@ -1024,7 +1025,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
LiveInterval& InputI = LI.getInterval(reg);
if (MBB != PInstr->getParent() &&
    InputI.liveAt(LI.getMBBStartIdx(PInstr->getParent())) &&
-   InputI.expiredAt(LI.getNextIndex(LI.getInstructionIndex(PInstr))))
+   InputI.expiredAt(LI.getInstructionIndex(PInstr).getNextIndex()))
  InputI.removeRange(LI.getMBBStartIdx(PInstr->getParent()),
                     LI.getInstructionIndex(PInstr),
                     true);
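Note the helper in this hunk is `getNextIndex()`, not the `getNextSlot()` used earlier. My reading, based on the four-slot layout (LOAD, USE, DEF, STORE) that this patch carries over, is that the former steps to the next numbered instruction while the latter steps a single slot; treat the comments in the sketch below as an assumption inferred from the call sites, not documented semantics. Same headers as the first sketch.

```cpp
// Hedged sketch of the two stepping helpers seen in this diff; the calls are
// taken from the patch, the comments are my assumed semantics.
static void stepExamples(LiveIntervals &LI, MachineInstr *MI) {
  SlotIndex S = LI.getInstructionIndex(MI);
  SlotIndex NextSlot  = S.getNextSlot();   // assumed: advance by one slot
  SlotIndex NextInstr = S.getNextIndex();  // assumed: advance to the next instruction's index
  (void)NextSlot;
  (void)NextInstr;
}
```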
@ -1032,7 +1033,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {

// If the PHI is not dead, then the valno defined by the PHI
// now has an unknown def.
- LiveIndex idx = LI.getDefIndex(LI.getInstructionIndex(PInstr));
+ SlotIndex idx = LI.getInstructionIndex(PInstr).getDefIndex();
const LiveRange* PLR = PI.getLiveRangeContaining(idx);
PLR->valno->setIsPHIDef(true);
LiveRange R (LI.getMBBStartIdx(PInstr->getParent()),
@ -1044,7 +1045,7 @@ bool StrongPHIElimination::runOnMachineFunction(MachineFunction &Fn) {
PInstr->eraseFromParent();
}

- LI.computeNumbering();
+ LI.renumber();

return true;
}
@ -56,7 +56,7 @@ bool VirtRegMap::runOnMachineFunction(MachineFunction &mf) {
TII = mf.getTarget().getInstrInfo();
TRI = mf.getTarget().getRegisterInfo();
MF = &mf;

ReMatId = MAX_STACK_SLOT+1;
LowSpillSlot = HighSpillSlot = NO_STACK_SLOT;

@ -80,7 +80,7 @@ namespace llvm {

/// Virt2SplitKillMap - This is splitted virtual register to its last use
/// (kill) index mapping.
- IndexedMap<LiveIndex> Virt2SplitKillMap;
+ IndexedMap<SlotIndex> Virt2SplitKillMap;

/// ReMatMap - This is virtual register to re-materialized instruction
/// mapping. Each virtual register whose definition is going to be
@ -142,7 +142,7 @@ namespace llvm {
VirtRegMap() : MachineFunctionPass(&ID), Virt2PhysMap(NO_PHYS_REG),
               Virt2StackSlotMap(NO_STACK_SLOT),
               Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
-              Virt2SplitKillMap(LiveIndex()), ReMatMap(NULL),
+              Virt2SplitKillMap(SlotIndex()), ReMatMap(NULL),
               ReMatId(MAX_STACK_SLOT+1),
               LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) { }
virtual bool runOnMachineFunction(MachineFunction &MF);
@ -266,17 +266,17 @@ namespace llvm {
}

/// @brief record the last use (kill) of a split virtual register.
- void addKillPoint(unsigned virtReg, LiveIndex index) {
+ void addKillPoint(unsigned virtReg, SlotIndex index) {
  Virt2SplitKillMap[virtReg] = index;
}

- LiveIndex getKillPoint(unsigned virtReg) const {
+ SlotIndex getKillPoint(unsigned virtReg) const {
  return Virt2SplitKillMap[virtReg];
}

/// @brief remove the last use (kill) of a split virtual register.
void removeKillPoint(unsigned virtReg) {
- Virt2SplitKillMap[virtReg] = LiveIndex();
+ Virt2SplitKillMap[virtReg] = SlotIndex();
}

/// @brief returns true if the specified MachineInstr is a spill point.
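The VirtRegMap side of the rename is purely mechanical: the split-kill map now stores SlotIndex, and a default-constructed SlotIndex() keeps the "no kill recorded" sentinel role that LiveIndex() played before. A hypothetical usage sketch follows; the names `VRM`, `SplitReg` and `KillMI` are illustrative and not from the patch, and the VirtRegMap declaration above is assumed to be in scope along with the headers from the first sketch.

```cpp
// Hypothetical client of the renamed VirtRegMap interface shown above.
static void recordSplitKill(VirtRegMap &VRM, LiveIntervals &LI,
                            unsigned SplitReg, MachineInstr *KillMI) {
  // Record the last use (kill) of the split vreg at its use slot.
  VRM.addKillPoint(SplitReg, LI.getInstructionIndex(KillMI).getUseIndex());

  // Later: a default-constructed SlotIndex() marks "no kill recorded".
  if (VRM.getKillPoint(SplitReg) != SlotIndex())
    VRM.removeKillPoint(SplitReg);
}
```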