Introduce a new data structure, the SparseMultiSet, and change the MI scheduler to use it.

A SparseMultiSet adds multiset behavior to SparseSet while retaining SparseSet's desirable properties. Essentially, it provides multiset behavior by storing its dense data in doubly linked lists that are inlined into the dense vector. This gives it good data locality as well as vector-like constant-time clear() and fast constant-time find(), insert(), and erase(). It also allows SparseMultiSet to have a built-in recycler rather than keeping SparseSet's behavior of always swapping upon removal, which lets it preserve more iterators. It is often a better alternative to a SparseSet of growable containers or a vector of vectors.
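A minimal usage sketch (illustrative only; use() is a placeholder for the
caller's processing):

  SparseMultiSet<unsigned> Set;
  Set.setUniverse(64);   // all keys must be < 64
  Set.insert(7);
  Set.insert(7);         // duplicate keys are allowed
  Set.insert(3);
  // Visit every value sharing key 7, in insertion order.
  for (SparseMultiSet<unsigned>::iterator I = Set.find(7), E = Set.end();
       I != E; ++I)
    use(*I);
  Set.eraseAll(7);       // erase all values with key 7
  Set.clear();           // constant time; storage is retained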



git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@173064 91177308-0d34-0410-b5e6-96231b3b80d8
Michael Ilseman 2013-01-21 18:18:53 +00:00
parent 47543a8a66
commit afe77f33b2
5 changed files with 803 additions and 90 deletions

include/llvm/ADT/SparseMultiSet.h

@@ -0,0 +1,526 @@
//===--- llvm/ADT/SparseMultiSet.h - Sparse multiset ------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the SparseMultiSet class, which adds multiset behavior to
// the SparseSet.
//
// A sparse multiset holds a small number of objects identified by integer keys
// from a moderately sized universe. The sparse multiset uses more memory than
// other containers in order to provide faster operations. Any key can map to
// multiple values, which are stored in doubly linked lists inlined into the
// dense vector.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_ADT_SPARSEMULTISET_H
#define LLVM_ADT_SPARSEMULTISET_H
#include "llvm/ADT/SparseSet.h"
namespace llvm {
/// Fast multiset implementation for objects that can be identified by small
/// unsigned keys.
///
/// SparseMultiSet allocates memory proportional to the size of the key
/// universe, so it is not recommended for building composite data structures.
/// It is useful for algorithms that require a single set with fast operations.
///
/// Compared to DenseSet and DenseMap, SparseMultiSet provides a constant-time
/// clear() that is as fast as a vector's. The find(), insert(), and erase()
/// operations are all constant time, and typically faster than a hash table.
/// The iteration order doesn't depend on numerical key values; it depends only
/// on the order of insert() and erase() operations, so elements sharing a key
/// are visited in insertion order. Iteration is only provided over elements of
/// equivalent keys, but iterators are bidirectional.
///
/// Compared to BitVector, SparseMultiSet<unsigned> uses 8x-40x more memory, but
/// offers constant-time clear() and size() operations as well as fast iteration
/// independent of the size of the universe.
///
/// SparseMultiSet contains a dense vector holding all the objects and a sparse
/// array holding indexes into the dense vector. Most of the memory is used by
/// the sparse array which is the size of the key universe. The SparseT template
/// parameter provides a space/speed tradeoff for sets holding many elements.
///
/// When SparseT is uint32_t, find() only touches up to 3 cache lines, but the
/// sparse array uses 4 x Universe bytes.
///
/// When SparseT is uint8_t (the default), find() touches up to 3+[N/256] cache
/// lines, but the sparse array is 4x smaller. N is the number of elements in
/// the set.
///
/// For sets that may grow to thousands of elements, SparseT should be set to
/// uint16_t or uint32_t.
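///
/// For example, a set keyed by physical register number that may hold a few
/// thousand elements could be declared as follows (illustrative; MyValue is
/// any type providing getSparseSetIndex()):
///
///   SparseMultiSet<MyValue, llvm::identity<unsigned>, uint16_t> Set;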
///
/// Multiset behavior is implemented by inlining doubly linked lists of values
/// sharing a key into the dense vector. SparseMultiSet is a good choice when
/// one desires a growable number of entries per key, as it retains the
/// SparseSet algorithmic properties despite being growable. Thus, it is often
/// a better choice than a SparseSet of growable containers or a vector of
/// vectors. SparseMultiSet also keeps iterators valid after erasure (provided
/// the iterators don't point to the erased element), allowing for more
/// intuitive and fast removal.
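///
/// For illustration, erasing one element leaves iterators to other elements
/// of the same key valid (sketch):
///
///   SparseMultiSet<unsigned> Set;
///   Set.setUniverse(32);
///   Set.insert(5);
///   SparseMultiSet<unsigned>::iterator I = Set.insert(5);
///   Set.erase(Set.find(5));   // erase the first 5
///   // I still points at the remaining 5.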
///
/// @tparam ValueT The type of objects in the set.
/// @tparam KeyFunctorT A functor that computes an unsigned index from KeyT.
/// @tparam SparseT An unsigned integer type. See above.
///
template<typename ValueT,
typename KeyFunctorT = llvm::identity<unsigned>,
typename SparseT = uint8_t>
class SparseMultiSet {
/// The actual data that's stored, as a doubly-linked list implemented via
/// indices into the DenseVector. The list is circular in its Prev indices and
/// INVALID-terminated in its Next indices, which provides efficient access to
/// list tails. These nodes can also be tombstones, in which case they are
/// actually nodes in a singly-linked freelist of recyclable slots.
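/// For illustration, a three-element list at dense indices h, m, and t links
/// up as follows (the head's Prev gives O(1) access to the tail):
///
///   Dense[h]: Prev = t, Next = m
///   Dense[m]: Prev = h, Next = t
///   Dense[t]: Prev = m, Next = INVALID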
struct SMSNode {
static const unsigned INVALID = ~0U;
ValueT Data;
unsigned Prev;
unsigned Next;
SMSNode(ValueT D, unsigned P, unsigned N) : Data(D), Prev(P), Next(N) { }
/// List tails have invalid Nexts.
bool isTail() const {
return Next == INVALID;
}
/// Whether this node is a tombstone node, and thus is in our freelist.
bool isTombstone() const {
return Prev == INVALID;
}
/// Since the list is circular in Prev, all non-tombstone nodes have a valid
/// Prev.
bool isValid() const { return Prev != INVALID; }
};
typedef typename KeyFunctorT::argument_type KeyT;
typedef SmallVector<SMSNode, 8> DenseT;
DenseT Dense;
SparseT *Sparse;
unsigned Universe;
KeyFunctorT KeyIndexOf;
SparseSetValFunctor<KeyT, ValueT, KeyFunctorT> ValIndexOf;
/// We have a built-in recycler for reusing tombstone slots. This recycler
/// puts a singly-linked free list into tombstone slots, giving us quick
/// erasure, iterator preservation, and a dense vector whose slots are reused
/// rather than compacted.
unsigned FreelistIdx;
unsigned NumFree;
unsigned sparseIndex(const ValueT &Val) const {
assert(ValIndexOf(Val) < Universe &&
"Invalid key in set. Did object mutate?");
return ValIndexOf(Val);
}
unsigned sparseIndex(const SMSNode &N) const { return sparseIndex(N.Data); }
// Disable copy construction and assignment.
// This data structure is not meant to be used that way.
SparseMultiSet(const SparseMultiSet&) LLVM_DELETED_FUNCTION;
SparseMultiSet &operator=(const SparseMultiSet&) LLVM_DELETED_FUNCTION;
/// Whether the given entry is the head of its list. A list head's Prev
/// pointer points at the tail of the list, allowing for efficient access to
/// the list tail. D must be a valid entry node.
bool isHead(const SMSNode &D) const {
assert(D.isValid() && "Invalid node for head");
return Dense[D.Prev].isTail();
}
/// Whether the given entry is a singleton entry, i.e. the only entry with
/// that key.
bool isSingleton(const SMSNode &N) const {
assert(N.isValid() && "Invalid node for singleton");
// Is N its own predecessor?
return &Dense[N.Prev] == &N;
}
/// Add a new node with value V and the given list links, reusing a slot from
/// our freelist if available. Returns the index of the added node.
unsigned addValue(const ValueT& V, unsigned Prev, unsigned Next) {
if (NumFree == 0) {
Dense.push_back(SMSNode(V, Prev, Next));
return Dense.size() - 1;
}
// Peel off a free slot
unsigned Idx = FreelistIdx;
unsigned NextFree = Dense[Idx].Next;
assert(Dense[Idx].isTombstone() && "Non-tombstone free?");
Dense[Idx] = SMSNode(V, Prev, Next);
FreelistIdx = NextFree;
--NumFree;
return Idx;
}
/// Make the current index a new tombstone. Pushes it onto the freelist.
void makeTombstone(unsigned Idx) {
Dense[Idx].Prev = SMSNode::INVALID;
Dense[Idx].Next = FreelistIdx;
FreelistIdx = Idx;
++NumFree;
}
public:
typedef ValueT value_type;
typedef ValueT &reference;
typedef const ValueT &const_reference;
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
SparseMultiSet()
: Sparse(0), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) { }
~SparseMultiSet() { free(Sparse); }
/// Set the universe size which determines the largest key the set can hold.
/// The universe must be sized before any elements can be added.
///
/// @param U Universe size. All object keys must be less than U.
///
void setUniverse(unsigned U) {
// It's not hard to resize the universe on a non-empty set, but it doesn't
// seem like a likely use case, so we can add that code when we need it.
assert(empty() && "Can only resize universe on an empty set");
// Hysteresis prevents needless reallocations.
if (U >= Universe/4 && U <= Universe)
return;
free(Sparse);
// The Sparse array doesn't actually need to be initialized, so malloc
// would be enough here, but that will cause tools like valgrind to
// complain about branching on uninitialized data.
Sparse = reinterpret_cast<SparseT*>(calloc(U, sizeof(SparseT)));
Universe = U;
}
/// Our iterators are iterators over the collection of objects that share a
/// key.
template<typename SMSPtrTy>
class iterator_base : public std::iterator<std::bidirectional_iterator_tag,
ValueT> {
friend class SparseMultiSet;
SMSPtrTy SMS;
unsigned Idx;
unsigned SparseIdx;
iterator_base(SMSPtrTy P, unsigned I, unsigned SI)
: SMS(P), Idx(I), SparseIdx(SI) { }
/// Whether our iterator has fallen outside our dense vector.
bool isEnd() const {
if (Idx == SMSNode::INVALID)
return true;
assert(Idx < SMS->Dense.size() && "Out of range, non-INVALID Idx?");
return false;
}
/// Whether our iterator is properly keyed, i.e. the SparseIdx is valid.
bool isKeyed() const { return SparseIdx < SMS->Universe; }
unsigned Prev() const { return SMS->Dense[Idx].Prev; }
unsigned Next() const { return SMS->Dense[Idx].Next; }
void setPrev(unsigned P) { SMS->Dense[Idx].Prev = P; }
void setNext(unsigned N) { SMS->Dense[Idx].Next = N; }
public:
typedef std::iterator<std::bidirectional_iterator_tag, ValueT> super;
typedef typename super::value_type value_type;
typedef typename super::difference_type difference_type;
typedef typename super::pointer pointer;
typedef typename super::reference reference;
iterator_base(const iterator_base &RHS)
: SMS(RHS.SMS), Idx(RHS.Idx), SparseIdx(RHS.SparseIdx) { }
const iterator_base &operator=(const iterator_base &RHS) {
SMS = RHS.SMS;
Idx = RHS.Idx;
SparseIdx = RHS.SparseIdx;
return *this;
}
reference operator*() const {
assert(isKeyed() && SMS->sparseIndex(SMS->Dense[Idx].Data) == SparseIdx &&
"Dereferencing iterator of invalid key or index");
return SMS->Dense[Idx].Data;
}
pointer operator->() const { return &operator*(); }
/// Comparison operators
bool operator==(const iterator_base &RHS) const {
// end compares equal
if (SMS == RHS.SMS && Idx == RHS.Idx) {
assert((isEnd() || SparseIdx == RHS.SparseIdx) &&
"Same dense entry, but different keys?");
return true;
}
return false;
}
bool operator!=(const iterator_base &RHS) const {
return !operator==(RHS);
}
/// Increment and decrement operators
iterator_base &operator--() { // predecrement - Back up
assert(isKeyed() && "Decrementing an invalid iterator");
assert((isEnd() || !SMS->isHead(SMS->Dense[Idx])) &&
"Decrementing head of list");
// If we're at the end, then issue a new find()
if (isEnd())
Idx = SMS->findIndex(SparseIdx).Prev();
else
Idx = Prev();
return *this;
}
iterator_base &operator++() { // preincrement - Advance
assert(!isEnd() && isKeyed() && "Incrementing an invalid/end iterator");
Idx = Next();
return *this;
}
iterator_base operator--(int) { // postdecrement
iterator_base I(*this);
--*this;
return I;
}
iterator_base operator++(int) { // postincrement
iterator_base I(*this);
++*this;
return I;
}
};
typedef iterator_base<SparseMultiSet *> iterator;
typedef iterator_base<const SparseMultiSet *> const_iterator;
// Convenience types
typedef std::pair<iterator, iterator> RangePair;
/// Returns an iterator past the end of the container. Note that such an
/// iterator cannot be decremented, but it will compare equal to other end
/// iterators.
iterator end() { return iterator(this, SMSNode::INVALID, SMSNode::INVALID); }
const_iterator end() const {
return const_iterator(this, SMSNode::INVALID, SMSNode::INVALID);
}
/// Returns true if the set is empty.
///
/// This is not the same as BitVector::empty().
///
bool empty() const { return size() == 0; }
/// Returns the number of elements in the set.
///
/// This is not the same as BitVector::size() which returns the size of the
/// universe.
///
unsigned size() const {
assert(NumFree <= Dense.size() && "Out-of-bounds free entries");
return Dense.size() - NumFree;
}
/// Clears the set. This is a very fast constant time operation.
///
void clear() {
// Sparse does not need to be cleared, see find().
Dense.clear();
NumFree = 0;
FreelistIdx = SMSNode::INVALID;
}
/// Find an element by its index.
///
/// @param Idx A valid index to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator findIndex(unsigned Idx) {
assert(Idx < Universe && "Key out of range");
assert(std::numeric_limits<SparseT>::is_integer &&
!std::numeric_limits<SparseT>::is_signed &&
"SparseT must be an unsigned integer type");
const unsigned Stride = std::numeric_limits<SparseT>::max() + 1u;
for (unsigned i = Sparse[Idx], e = Dense.size(); i < e; i += Stride) {
const unsigned FoundIdx = sparseIndex(Dense[i]);
// Check that we're pointing at the correct entry and that it is the head
// of a valid list.
if (Idx == FoundIdx && Dense[i].isValid() && isHead(Dense[i]))
return iterator(this, i, Idx);
// Stride is 0 when SparseT is at least as wide as unsigned. We don't need
// to loop.
if (!Stride)
break;
}
return end();
}
/// Find an element by its key.
///
/// @param Key A valid key to find.
/// @returns An iterator to the element identified by key, or end().
///
iterator find(const KeyT &Key) {
return findIndex(KeyIndexOf(Key));
}
const_iterator find(const KeyT &Key) const {
iterator I = const_cast<SparseMultiSet*>(this)->findIndex(KeyIndexOf(Key));
return const_iterator(I.SMS, I.Idx, KeyIndexOf(Key));
}
/// Returns the number of elements identified by Key. This will be linear in
/// the number of elements of that key.
unsigned count(const KeyT &Key) const {
unsigned Ret = 0;
for (const_iterator It = find(Key); It != end(); ++It)
++Ret;
return Ret;
}
/// Returns true if this set contains an element identified by Key.
bool contains(const KeyT &Key) const {
return find(Key) != end();
}
/// Return an iterator to the head (getHead) or tail (getTail) of the list of
/// elements sharing Key, or end() if there are no such elements.
iterator getHead(const KeyT &Key) { return find(Key); }
iterator getTail(const KeyT &Key) {
iterator I = find(Key);
if (I != end())
I = iterator(this, I.Prev(), KeyIndexOf(Key));
return I;
}
/// Returns the bounds of the range of items sharing Key K. The first member
/// is the head of the list, and the second member is a decrementable end
/// iterator for that key.
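/// Typical use (sketch; visit() is a placeholder):
///
///   std::pair<iterator, iterator> P = Set.equal_range(K);
///   for (iterator I = P.first; I != P.second; ++I)
///     visit(*I);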
RangePair equal_range(const KeyT &K) {
iterator B = find(K);
iterator E = iterator(this, SMSNode::INVALID, B.SparseIdx);
return std::make_pair(B, E);
}
/// Insert a new element at the tail of the subset list. Returns an iterator
/// to the newly added entry.
iterator insert(const ValueT &Val) {
unsigned Idx = sparseIndex(Val);
iterator I = findIndex(Idx);
unsigned NodeIdx = addValue(Val, SMSNode::INVALID, SMSNode::INVALID);
if (I == end()) {
// Make a singleton list
Sparse[Idx] = NodeIdx;
Dense[NodeIdx].Prev = NodeIdx;
return iterator(this, NodeIdx, Idx);
}
// Stick it at the end.
unsigned HeadIdx = I.Idx;
unsigned TailIdx = I.Prev();
Dense[TailIdx].Next = NodeIdx;
Dense[HeadIdx].Prev = NodeIdx;
Dense[NodeIdx].Prev = TailIdx;
return iterator(this, NodeIdx, Idx);
}
/// Erases an existing element identified by a valid iterator.
///
/// This invalidates iterators pointing at the same entry, but erase() returns
/// an iterator pointing to the next element in the subset's list. This makes
/// it possible to erase selected elements while iterating over the subset:
///
/// tie(I, E) = Set.equal_range(Key);
/// while (I != E)
/// if (test(*I))
/// I = Set.erase(I);
/// else
/// ++I;
///
/// Note that if the last element in the subset list is erased, this will
/// return an end iterator which can be decremented to get the new tail (if it
/// exists):
///
/// tie(B, I) = Set.equal_range(Key);
/// for (bool isBegin = B == I; !isBegin; /* empty */) {
/// isBegin = (--I) == B;
/// if (test(*I))
/// break;
/// I = Set.erase(I);
/// }
iterator erase(iterator I) {
assert(I.isKeyed() && !I.isEnd() && !Dense[I.Idx].isTombstone() &&
"erasing invalid/end/tombstone iterator");
// First, unlink the node from its list. Then turn its slot into a tombstone
// so the freelist can recycle it.
iterator NextI = unlink(Dense[I.Idx]);
// Put in a tombstone.
makeTombstone(I.Idx);
return NextI;
}
/// Erase all elements with the given key. This invalidates all
/// iterators of that key.
void eraseAll(const KeyT &K) {
for (iterator I = find(K); I != end(); /* empty */)
I = erase(I);
}
private:
/// Unlink the node from its list. Returns the next node in the list.
iterator unlink(const SMSNode &N) {
if (isSingleton(N)) {
// Singleton is already unlinked
assert(N.Next == SMSNode::INVALID && "Singleton has next?");
return iterator(this, SMSNode::INVALID, ValIndexOf(N.Data));
}
if (isHead(N)) {
// If we're the head, then update the sparse array and our next.
Sparse[sparseIndex(N)] = N.Next;
Dense[N.Next].Prev = N.Prev;
return iterator(this, N.Next, ValIndexOf(N.Data));
}
if (N.isTail()) {
// If we're the tail, then update our head and our previous.
findIndex(sparseIndex(N)).setPrev(N.Prev);
Dense[N.Prev].Next = N.Next;
// Give back an end iterator that can be decremented
iterator I(this, N.Prev, ValIndexOf(N.Data));
return ++I;
}
// Otherwise, just drop us
Dense[N.Next].Prev = N.Prev;
Dense[N.Prev].Next = N.Next;
return iterator(this, N.Next, ValIndexOf(N.Data));
}
};
} // end namespace llvm
#endif

include/llvm/CodeGen/ScheduleDAGInstrs.h

@@ -17,6 +17,7 @@
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SparseSet.h"
#include "llvm/ADT/SparseMultiSet.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/ScheduleDAG.h"
@@ -48,55 +49,17 @@ namespace llvm {
struct PhysRegSUOper {
SUnit *SU;
int OpIdx;
unsigned Reg;
PhysRegSUOper(SUnit *su, int op): SU(su), OpIdx(op) {}
PhysRegSUOper(SUnit *su, int op, unsigned R): SU(su), OpIdx(op), Reg(R) {}
unsigned getSparseSetIndex() const { return Reg; }
};
/// Combine a SparseSet with a 1x1 vector to track physical registers.
/// The SparseSet allows iterating over the (few) live registers for quickly
/// comparing against a regmask or clearing the set.
///
/// Storage for the map is allocated once for the pass. The map can be
/// cleared between scheduling regions without freeing unused entries.
class Reg2SUnitsMap {
SparseSet<unsigned> PhysRegSet;
std::vector<std::vector<PhysRegSUOper> > SUnits;
public:
typedef SparseSet<unsigned>::const_iterator const_iterator;
// Allow iteration over register numbers (keys) in the map. If needed, we
// can provide an iterator over SUnits (values) as well.
const_iterator reg_begin() const { return PhysRegSet.begin(); }
const_iterator reg_end() const { return PhysRegSet.end(); }
/// Initialize the map with the number of registers.
/// If the map is already large enough, no allocation occurs.
/// For simplicity we expect the map to be empty().
void setRegLimit(unsigned Limit);
/// Returns true if the map is empty.
bool empty() const { return PhysRegSet.empty(); }
/// Clear the map without deallocating storage.
void clear();
bool contains(unsigned Reg) const { return PhysRegSet.count(Reg); }
/// If this register is mapped, return its existing SUnits vector.
/// Otherwise map the register and return an empty SUnits vector.
std::vector<PhysRegSUOper> &operator[](unsigned Reg) {
bool New = PhysRegSet.insert(Reg).second;
assert((!New || SUnits[Reg].empty()) && "stale SUnits vector");
(void)New;
return SUnits[Reg];
}
/// Erase an existing element without freeing memory.
void erase(unsigned Reg) {
PhysRegSet.erase(Reg);
SUnits[Reg].clear();
}
};
/// Use a SparseMultiSet to track physical registers. Storage is only
/// allocated once for the pass. It can be cleared in constant time and reused
/// without any frees.
typedef SparseMultiSet<PhysRegSUOper, llvm::identity<unsigned>, uint16_t> Reg2SUnitsMap;
/// Use SparseSet as a SparseMap by relying on the fact that it never
/// compares ValueT's, only unsigned keys. This allows the set to be cleared
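
To illustrate how the replacement map is used (a sketch reconstructed from
the scheduler changes below; addDep() stands in for the dependence-adding
logic):

  Reg2SUnitsMap Uses;
  Uses.setUniverse(TRI->getNumRegs());           // once per pass
  Uses.insert(PhysRegSUOper(SU, OperIdx, Reg));  // record a use of Reg
  // Walk every recorded operand for Reg.
  for (Reg2SUnitsMap::iterator I = Uses.find(Reg); I != Uses.end(); ++I)
    addDep(I->SU, I->OpIdx);
  Uses.eraseAll(Reg);                            // drop one register's list
  Uses.clear();                                  // constant time between regions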

lib/CodeGen/ScheduleDAGInstrs.cpp

@@ -168,20 +168,6 @@ void ScheduleDAGInstrs::finishBlock() {
BB = 0;
}
/// Initialize the map with the number of registers.
void Reg2SUnitsMap::setRegLimit(unsigned Limit) {
PhysRegSet.setUniverse(Limit);
SUnits.resize(Limit);
}
/// Clear the map without deallocating storage.
void Reg2SUnitsMap::clear() {
for (const_iterator I = reg_begin(), E = reg_end(); I != E; ++I) {
SUnits[*I].clear();
}
PhysRegSet.clear();
}
/// Initialize the DAG and common scheduler state for the current scheduling
/// region. This does not actually create the DAG, only clears it. The
/// scheduling driver may call BuildSchedGraph multiple times per scheduling
@@ -228,7 +214,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
if (Reg == 0) continue;
if (TRI->isPhysicalRegister(Reg))
Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
else {
assert(!IsPostRA && "Virtual register encountered after regalloc.");
if (MO.readsReg()) // ignore undef operands
@@ -245,7 +231,7 @@ void ScheduleDAGInstrs::addSchedBarrierDeps() {
E = (*SI)->livein_end(); I != E; ++I) {
unsigned Reg = *I;
if (!Uses.contains(Reg))
Uses[Reg].push_back(PhysRegSUOper(&ExitSU, -1));
Uses.insert(PhysRegSUOper(&ExitSU, -1, Reg));
}
}
}
@@ -263,15 +249,14 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) {
Alias.isValid(); ++Alias) {
if (!Uses.contains(*Alias))
continue;
std::vector<PhysRegSUOper> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i].SU;
for (Reg2SUnitsMap::iterator I = Uses.find(*Alias); I != Uses.end(); ++I) {
SUnit *UseSU = I->SU;
if (UseSU == SU)
continue;
// Adjust the dependence latency using operand def/use information,
// then allow the target to perform its own adjustments.
int UseOp = UseList[i].OpIdx;
int UseOp = I->OpIdx;
MachineInstr *RegUse = 0;
SDep Dep;
if (UseOp < 0)
@@ -311,9 +296,8 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
Alias.isValid(); ++Alias) {
if (!Defs.contains(*Alias))
continue;
std::vector<PhysRegSUOper> &DefList = Defs[*Alias];
for (unsigned i = 0, e = DefList.size(); i != e; ++i) {
SUnit *DefSU = DefList[i].SU;
for (Reg2SUnitsMap::iterator I = Defs.find(*Alias); I != Defs.end(); ++I) {
SUnit *DefSU = I->SU;
if (DefSU == &ExitSU)
continue;
if (DefSU != SU &&
@@ -337,33 +321,37 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) {
// Either insert a new Reg2SUnits entry with an empty SUnits list, or
// retrieve the existing SUnits list for this register's uses.
// Push this SUnit on the use list.
Uses[MO.getReg()].push_back(PhysRegSUOper(SU, OperIdx));
Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg()));
}
else {
addPhysRegDataDeps(SU, OperIdx);
// Either insert a new Reg2SUnits entry with an empty SUnits list, or
// retrieve the existing SUnits list for this register's defs.
std::vector<PhysRegSUOper> &DefList = Defs[MO.getReg()];
unsigned Reg = MO.getReg();
// clear this register's use list
if (Uses.contains(MO.getReg()))
Uses[MO.getReg()].clear();
if (!MO.isDead())
DefList.clear();
if (Uses.contains(Reg))
Uses.eraseAll(Reg);
if (!MO.isDead()) {
Defs.eraseAll(Reg);
} else if (SU->isCall) {
// Calls will not be reordered because of chain dependencies (see
// below). Since call operands are dead, calls may continue to be added
// to the DefList making dependence checking quadratic in the size of
// the block. Instead, we leave only one call at the back of the
// DefList.
if (SU->isCall) {
while (!DefList.empty() && DefList.back().SU->isCall)
DefList.pop_back();
Reg2SUnitsMap::RangePair P = Defs.equal_range(Reg);
Reg2SUnitsMap::iterator B = P.first;
Reg2SUnitsMap::iterator I = P.second;
for (bool isBegin = I == B; !isBegin; /* empty */) {
isBegin = (--I) == B;
if (!I->SU->isCall)
break;
I = Defs.erase(I);
}
}
// Defs are pushed in the order they are visited and never reordered.
DefList.push_back(PhysRegSUOper(SU, OperIdx));
Defs.insert(PhysRegSUOper(SU, OperIdx, Reg));
}
}
@@ -726,8 +714,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA,
assert(Defs.empty() && Uses.empty() &&
"Only BuildGraph should update Defs/Uses");
Defs.setRegLimit(TRI->getNumRegs());
Uses.setRegLimit(TRI->getNumRegs());
Defs.setUniverse(TRI->getNumRegs());
Uses.setUniverse(TRI->getNumRegs());
assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs");
// FIXME: Allow SparseSet to reserve space for the creation of virtual

unittests/ADT/CMakeLists.txt

@@ -24,6 +24,7 @@ set(ADTSources
SmallStringTest.cpp
SmallVectorTest.cpp
SparseBitVectorTest.cpp
SparseMultiSetTest.cpp
SparseSetTest.cpp
StringMapTest.cpp
StringRefTest.cpp

unittests/ADT/SparseMultiSetTest.cpp

@@ -0,0 +1,235 @@
//===------ ADT/SparseMultiSetTest.cpp - SparseMultiSet unit tests --*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/SparseMultiSet.h"
#include "gtest/gtest.h"
using namespace llvm;
namespace {
typedef SparseMultiSet<unsigned> USet;
// Empty set tests.
TEST(SparseMultiSetTest, EmptySet) {
USet Set;
EXPECT_TRUE(Set.empty());
EXPECT_EQ(0u, Set.size());
Set.setUniverse(10);
// Lookups on empty set.
EXPECT_TRUE(Set.find(0) == Set.end());
EXPECT_TRUE(Set.find(9) == Set.end());
// Same thing on a const reference.
const USet &CSet = Set;
EXPECT_TRUE(CSet.empty());
EXPECT_EQ(0u, CSet.size());
EXPECT_TRUE(CSet.find(0) == CSet.end());
USet::const_iterator I = CSet.find(5);
EXPECT_TRUE(I == CSet.end());
}
// Single entry set tests.
TEST(SparseMultiSetTest, SingleEntrySet) {
USet Set;
Set.setUniverse(10);
USet::iterator I = Set.insert(5);
EXPECT_TRUE(I != Set.end());
EXPECT_TRUE(*I == 5);
EXPECT_FALSE(Set.empty());
EXPECT_EQ(1u, Set.size());
EXPECT_TRUE(Set.find(0) == Set.end());
EXPECT_TRUE(Set.find(9) == Set.end());
EXPECT_FALSE(Set.contains(0));
EXPECT_TRUE(Set.contains(5));
// Extra insert.
I = Set.insert(5);
EXPECT_TRUE(I != Set.end());
EXPECT_TRUE(I == ++Set.find(5));
I--;
EXPECT_TRUE(I == Set.find(5));
// Erase non-existent element.
I = Set.find(1);
EXPECT_TRUE(I == Set.end());
EXPECT_EQ(2u, Set.size());
EXPECT_EQ(5u, *Set.find(5));
// Erase iterator.
I = Set.find(5);
EXPECT_TRUE(I != Set.end());
I = Set.erase(I);
EXPECT_TRUE(I != Set.end());
I = Set.erase(I);
EXPECT_TRUE(I == Set.end());
EXPECT_TRUE(Set.empty());
}
// Multiple entry set tests.
TEST(SparseMultiSetTest, MultipleEntrySet) {
USet Set;
Set.setUniverse(10);
Set.insert(5);
Set.insert(5);
Set.insert(5);
Set.insert(3);
Set.insert(2);
Set.insert(1);
Set.insert(4);
EXPECT_EQ(7u, Set.size());
// Erase last element by key.
EXPECT_TRUE(Set.erase(Set.find(4)) == Set.end());
EXPECT_EQ(6u, Set.size());
EXPECT_FALSE(Set.contains(4));
EXPECT_TRUE(Set.find(4) == Set.end());
// Erase first element by key.
EXPECT_EQ(3u, Set.count(5));
EXPECT_TRUE(Set.find(5) != Set.end());
EXPECT_TRUE(Set.erase(Set.find(5)) != Set.end());
EXPECT_EQ(5u, Set.size());
EXPECT_EQ(2u, Set.count(5));
Set.insert(6);
Set.insert(7);
EXPECT_EQ(7u, Set.size());
// Erase tail by iterator.
EXPECT_TRUE(Set.getTail(6) == Set.getHead(6));
USet::iterator I = Set.erase(Set.find(6));
EXPECT_TRUE(I == Set.end());
EXPECT_EQ(6u, Set.size());
// Erase tails by iterator.
EXPECT_EQ(2u, Set.count(5));
I = Set.getTail(5);
I = Set.erase(I);
EXPECT_TRUE(I == Set.end());
--I;
EXPECT_EQ(1u, Set.count(5));
EXPECT_EQ(5u, *I);
I = Set.erase(I);
EXPECT_TRUE(I == Set.end());
EXPECT_EQ(0u, Set.count(5));
Set.insert(8);
Set.insert(8);
Set.insert(8);
Set.insert(8);
Set.insert(8);
// Erase all the 8s
EXPECT_EQ(5u, std::distance(Set.getHead(8), Set.end()));
Set.eraseAll(8);
EXPECT_EQ(0u, std::distance(Set.getHead(8), Set.end()));
// Clear and resize the universe.
Set.clear();
EXPECT_EQ(0u, Set.size());
EXPECT_FALSE(Set.contains(3));
Set.setUniverse(1000);
// Add more than 256 elements.
for (unsigned i = 100; i != 800; ++i)
Set.insert(i);
for (unsigned i = 0; i != 10; ++i)
Set.eraseAll(i);
for (unsigned i = 100; i != 800; ++i)
EXPECT_EQ(1u, Set.count(i));
EXPECT_FALSE(Set.contains(99));
EXPECT_FALSE(Set.contains(800));
EXPECT_EQ(700u, Set.size());
}
// Test out iterators
TEST(SparseMultiSetTest, Iterators) {
USet Set;
Set.setUniverse(100);
Set.insert(0);
Set.insert(1);
Set.insert(2);
Set.insert(0);
Set.insert(1);
Set.insert(0);
USet::RangePair RangePair = Set.equal_range(0);
USet::iterator B = RangePair.first;
USet::iterator E = RangePair.second;
// Move the iterators around, going to end and coming back.
EXPECT_EQ(3u, std::distance(B, E));
EXPECT_EQ(B, --(--(--E)));
EXPECT_EQ(++(++(++E)), Set.end());
EXPECT_EQ(B, --(--(--E)));
EXPECT_EQ(++(++(++E)), Set.end());
// Insert into the tail, and move around again
Set.insert(0);
EXPECT_EQ(B, --(--(--(--E))));
EXPECT_EQ(++(++(++(++E))), Set.end());
EXPECT_EQ(B, --(--(--(--E))));
EXPECT_EQ(++(++(++(++E))), Set.end());
// Erase a tail, and move around again
USet::iterator Erased = Set.erase(Set.getTail(0));
EXPECT_EQ(Erased, E);
EXPECT_EQ(B, --(--(--E)));
USet Set2;
Set2.setUniverse(11);
Set2.insert(3);
EXPECT_TRUE(!Set2.contains(0));
EXPECT_TRUE(!Set.contains(3));
EXPECT_EQ(Set2.getHead(3), Set2.getTail(3));
EXPECT_EQ(Set2.getHead(0), Set2.getTail(0));
B = Set2.find(3);
EXPECT_EQ(Set2.find(3), --(++B));
}
struct Alt {
unsigned Value;
explicit Alt(unsigned x) : Value(x) {}
unsigned getSparseSetIndex() const { return Value - 1000; }
};
TEST(SparseMultiSetTest, AltStructSet) {
typedef SparseMultiSet<Alt> ASet;
ASet Set;
Set.setUniverse(10);
Set.insert(Alt(1005));
ASet::iterator I = Set.find(5);
ASSERT_TRUE(I != Set.end());
EXPECT_EQ(1005u, I->Value);
Set.insert(Alt(1006));
Set.insert(Alt(1006));
I = Set.erase(Set.find(6));
ASSERT_TRUE(I != Set.end());
EXPECT_EQ(1006u, I->Value);
I = Set.erase(Set.find(6));
ASSERT_TRUE(I == Set.end());
EXPECT_TRUE(Set.contains(5));
EXPECT_FALSE(Set.contains(6));
}
} // namespace