Mirror of https://github.com/RPCSX/llvm.git, synced 2024-11-24 04:09:45 +00:00.
Add the ability to "intern" FoldingSetNodeID data into a BumpPtrAllocator-allocated
region to allow it to be stored in a more compact form and to avoid the need for a
non-trivial destructor call.

Use this new mechanism in ScalarEvolution instead of FastFoldingSetNode to avoid
leaking memory in the case where a FoldingSetNodeID uses heap storage, and to
reduce overall memory usage.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@98829 91177308-0d34-0410-b5e6-96231b3b80d8
Parent: 7c4a121110
Commit: c93b4cff89
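To make the new API concrete, here is a minimal, hypothetical sketch (not part of the patch; the function name and the values added to the ID are invented) of the interning round-trip this commit enables: a FoldingSetNodeID is built as usual, its bits are copied into a BumpPtrAllocator via Intern(), and the resulting FoldingSetNodeIDRef can later be turned back into a full FoldingSetNodeID when needed.

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/Support/Allocator.h"
    #include <cassert>

    using namespace llvm;

    // Hypothetical illustration of the interning round-trip.
    void InternExample() {
      BumpPtrAllocator Allocator;

      // Gather the unique bits of a node as usual.
      FoldingSetNodeID ID;
      ID.AddInteger(42u);
      ID.AddInteger(7u);

      // Copy the bits into allocator-owned memory. The returned ref is just a
      // pointer + size, so it has no non-trivial destructor and stays valid
      // for as long as the allocator does.
      FoldingSetNodeIDRef Ref = ID.Intern(Allocator);

      // A full FoldingSetNodeID can be rebuilt from the ref when needed.
      FoldingSetNodeID Rebuilt(Ref);
      assert(Rebuilt == ID && "interned data should round-trip");
    }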
include/llvm/ADT/FoldingSet.h

@@ -23,6 +23,7 @@
 namespace llvm {
   class APFloat;
   class APInt;
+  class BumpPtrAllocator;

 /// This folding set used for two purposes:
 ///   1. Given information about a node we want to create, look up the unique
@@ -197,6 +198,23 @@ template<typename T> struct FoldingSetTrait {
   static inline void Profile(T& X, FoldingSetNodeID& ID) { X.Profile(ID); }
 };

+//===--------------------------------------------------------------------===//
+/// FoldingSetNodeIDRef - This class describes a reference to an interned
+/// FoldingSetNodeID, which can be a useful to store node id data rather
+/// than using plain FoldingSetNodeIDs, since the 32-element SmallVector
+/// is often much larger than necessary, and the possibility of heap
+/// allocation means it requires a non-trivial destructor call.
+class FoldingSetNodeIDRef {
+  unsigned* Data;
+  size_t Size;
+public:
+  FoldingSetNodeIDRef() : Data(0), Size(0) {}
+  FoldingSetNodeIDRef(unsigned *D, size_t S) : Data(D), Size(S) {}
+
+  unsigned *getData() const { return Data; }
+  size_t getSize() const { return Size; }
+};
+
 //===--------------------------------------------------------------------===//
 /// FoldingSetNodeID - This class is used to gather all the unique data bits of
 /// a node. When all the bits are gathered this class is used to produce a
@@ -210,11 +228,8 @@ class FoldingSetNodeID {
 public:
   FoldingSetNodeID() {}

-  /// getRawData - Return the ith entry in the Bits data.
-  ///
-  unsigned getRawData(unsigned i) const {
-    return Bits[i];
-  }
+  FoldingSetNodeID(FoldingSetNodeIDRef Ref)
+    : Bits(Ref.getData(), Ref.getData() + Ref.getSize()) {}

   /// Add* - Add various data types to Bit data.
   ///
@@ -242,6 +257,11 @@ public:
   /// operator== - Used to compare two nodes to each other.
   ///
   bool operator==(const FoldingSetNodeID &RHS) const;
+
+  /// Intern - Copy this node's data to a memory region allocated from the
+  /// given allocator and return a FoldingSetNodeIDRef describing the
+  /// interned data.
+  FoldingSetNodeIDRef Intern(BumpPtrAllocator &Allocator) const;
 };

 // Convenience type to hide the implementation of the folding set.
include/llvm/Analysis/ScalarEvolution.h

@@ -49,7 +49,11 @@ namespace llvm {
   /// are opaque objects that the client is not allowed to do much with
   /// directly.
   ///
-  class SCEV : public FastFoldingSetNode {
+  class SCEV : public FoldingSetNode {
+    /// FastID - A reference to an Interned FoldingSetNodeID for this node.
+    /// The ScalarEvolution's BumpPtrAllocator holds the data.
+    FoldingSetNodeIDRef FastID;
+
     // The SCEV baseclass this node corresponds to
     const unsigned short SCEVType;

@@ -64,11 +68,14 @@ namespace llvm {
   protected:
     virtual ~SCEV();
   public:
-    explicit SCEV(const FoldingSetNodeID &ID, unsigned SCEVTy) :
-      FastFoldingSetNode(ID), SCEVType(SCEVTy), SubclassData(0) {}
+    explicit SCEV(const FoldingSetNodeIDRef ID, unsigned SCEVTy) :
+      FastID(ID), SCEVType(SCEVTy), SubclassData(0) {}

     unsigned getSCEVType() const { return SCEVType; }

+    /// Profile - FoldingSet support.
+    void Profile(FoldingSetNodeID& ID) { ID = FastID; }
+
     /// isLoopInvariant - Return true if the value of this SCEV is unchanging in
     /// the specified loop.
     virtual bool isLoopInvariant(const Loop *L) const = 0;
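The pattern shown above, a FoldingSetNode subclass that stores only an interned FoldingSetNodeIDRef and reconstitutes a full ID on demand in Profile(), can be sketched for an arbitrary node type roughly as follows. MyNode is a hypothetical class used purely for illustration; it is not part of the patch.

    #include "llvm/ADT/FoldingSet.h"

    using namespace llvm;

    // Hypothetical node with the same layout as SCEV after this patch: the
    // allocator that produced FastID owns the underlying bits, so the node
    // itself needs no destructor for its ID.
    class MyNode : public FoldingSetNode {
      FoldingSetNodeIDRef FastID;
      unsigned Value;
    public:
      MyNode(FoldingSetNodeIDRef ID, unsigned V) : FastID(ID), Value(V) {}

      // Rebuild the full ID from the interned reference; this relies on the
      // new FoldingSetNodeID(FoldingSetNodeIDRef) constructor.
      void Profile(FoldingSetNodeID &ID) { ID = FastID; }

      unsigned getValue() const { return Value; }
    };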
include/llvm/Analysis/ScalarEvolutionExpressions.h

@@ -37,7 +37,7 @@ namespace llvm {
     friend class ScalarEvolution;

     ConstantInt *V;
-    SCEVConstant(const FoldingSetNodeID &ID, ConstantInt *v) :
+    SCEVConstant(const FoldingSetNodeIDRef ID, ConstantInt *v) :
       SCEV(ID, scConstant), V(v) {}
   public:
     ConstantInt *getValue() const { return V; }
@@ -81,7 +81,7 @@ namespace llvm {
     const SCEV *Op;
     const Type *Ty;

-    SCEVCastExpr(const FoldingSetNodeID &ID,
+    SCEVCastExpr(const FoldingSetNodeIDRef ID,
                  unsigned SCEVTy, const SCEV *op, const Type *ty);

   public:
@@ -120,7 +120,7 @@ namespace llvm {
   class SCEVTruncateExpr : public SCEVCastExpr {
     friend class ScalarEvolution;

-    SCEVTruncateExpr(const FoldingSetNodeID &ID,
+    SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                      const SCEV *op, const Type *ty);

   public:
@@ -140,7 +140,7 @@ namespace llvm {
   class SCEVZeroExtendExpr : public SCEVCastExpr {
     friend class ScalarEvolution;

-    SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+    SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                        const SCEV *op, const Type *ty);

   public:
@@ -160,7 +160,7 @@ namespace llvm {
   class SCEVSignExtendExpr : public SCEVCastExpr {
     friend class ScalarEvolution;

-    SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+    SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                        const SCEV *op, const Type *ty);

   public:
@@ -187,7 +187,7 @@ namespace llvm {
     const SCEV *const *Operands;
     size_t NumOperands;

-    SCEVNAryExpr(const FoldingSetNodeID &ID,
+    SCEVNAryExpr(const FoldingSetNodeIDRef ID,
                  enum SCEVTypes T, const SCEV *const *O, size_t N)
       : SCEV(ID, T), Operands(O), NumOperands(N) {}

@@ -262,9 +262,8 @@ namespace llvm {
   ///
   class SCEVCommutativeExpr : public SCEVNAryExpr {
   protected:
-    SCEVCommutativeExpr(const FoldingSetNodeID &ID,
-                        enum SCEVTypes T,
-                        const SCEV *const *O, size_t N)
+    SCEVCommutativeExpr(const FoldingSetNodeIDRef ID,
+                        enum SCEVTypes T, const SCEV *const *O, size_t N)
       : SCEVNAryExpr(ID, T, O, N) {}

   public:
@@ -289,7 +288,7 @@ namespace llvm {
   class SCEVAddExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    SCEVAddExpr(const FoldingSetNodeID &ID,
+    SCEVAddExpr(const FoldingSetNodeIDRef ID,
                 const SCEV *const *O, size_t N)
       : SCEVCommutativeExpr(ID, scAddExpr, O, N) {
     }
@@ -317,7 +316,7 @@ namespace llvm {
   class SCEVMulExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    SCEVMulExpr(const FoldingSetNodeID &ID,
+    SCEVMulExpr(const FoldingSetNodeIDRef ID,
                 const SCEV *const *O, size_t N)
       : SCEVCommutativeExpr(ID, scMulExpr, O, N) {
     }
@@ -341,7 +340,7 @@ namespace llvm {

     const SCEV *LHS;
     const SCEV *RHS;
-    SCEVUDivExpr(const FoldingSetNodeID &ID, const SCEV *lhs, const SCEV *rhs)
+    SCEVUDivExpr(const FoldingSetNodeIDRef ID, const SCEV *lhs, const SCEV *rhs)
       : SCEV(ID, scUDivExpr), LHS(lhs), RHS(rhs) {}

   public:
@@ -391,7 +390,7 @@ namespace llvm {

     const Loop *L;

-    SCEVAddRecExpr(const FoldingSetNodeID &ID,
+    SCEVAddRecExpr(const FoldingSetNodeIDRef ID,
                    const SCEV *const *O, size_t N, const Loop *l)
       : SCEVNAryExpr(ID, scAddRecExpr, O, N), L(l) {
       for (size_t i = 0, e = NumOperands; i != e; ++i)
@@ -473,7 +472,7 @@ namespace llvm {
   class SCEVSMaxExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    SCEVSMaxExpr(const FoldingSetNodeID &ID,
+    SCEVSMaxExpr(const FoldingSetNodeIDRef ID,
                  const SCEV *const *O, size_t N)
       : SCEVCommutativeExpr(ID, scSMaxExpr, O, N) {
       // Max never overflows.
@@ -498,7 +497,7 @@ namespace llvm {
   class SCEVUMaxExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    SCEVUMaxExpr(const FoldingSetNodeID &ID,
+    SCEVUMaxExpr(const FoldingSetNodeIDRef ID,
                  const SCEV *const *O, size_t N)
       : SCEVCommutativeExpr(ID, scUMaxExpr, O, N) {
       // Max never overflows.
@@ -525,7 +524,7 @@ namespace llvm {
     friend class ScalarEvolution;

     Value *V;
-    SCEVUnknown(const FoldingSetNodeID &ID, Value *v) :
+    SCEVUnknown(const FoldingSetNodeIDRef ID, Value *v) :
       SCEV(ID, scUnknown), V(v) {}

   public:
lib/Analysis/ScalarEvolution.cpp

@@ -141,7 +141,7 @@ bool SCEV::isAllOnesValue() const {
 }

 SCEVCouldNotCompute::SCEVCouldNotCompute() :
-  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}
+  SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}

 bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
   llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
@@ -178,7 +178,7 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
-  new (S) SCEVConstant(ID, V);
+  new (S) SCEVConstant(ID.Intern(SCEVAllocator), V);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -199,7 +199,7 @@ void SCEVConstant::print(raw_ostream &OS) const {
   WriteAsOperand(OS, V, false);
 }

-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
                            unsigned SCEVTy, const SCEV *op, const Type *ty)
   : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

@@ -211,7 +211,7 @@ bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
   return Op->properlyDominates(BB, DT);
 }

-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
                                    const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scTruncate, op, ty) {
   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -223,7 +223,7 @@ void SCEVTruncateExpr::print(raw_ostream &OS) const {
   OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }

-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
                                        const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scZeroExtend, op, ty) {
   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -235,7 +235,7 @@ void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
   OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
 }

-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
                                        const SCEV *op, const Type *ty)
   : SCEVCastExpr(ID, scSignExtend, op, ty) {
   assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
@@ -847,7 +847,7 @@ const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
-  new (S) SCEVTruncateExpr(ID, Op, Ty);
+  new (S) SCEVTruncateExpr(ID.Intern(SCEVAllocator), Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -982,7 +982,7 @@ const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
-  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
+  new (S) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator), Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -1117,7 +1117,7 @@ const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
   // Recompute the insert position, as it may have been invalidated.
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
-  new (S) SCEVSignExtendExpr(ID, Op, Ty);
+  new (S) SCEVSignExtendExpr(ID.Intern(SCEVAllocator), Op, Ty);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -1615,7 +1615,7 @@ const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
     S = SCEVAllocator.Allocate<SCEVAddExpr>();
     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-    new (S) SCEVAddExpr(ID, O, Ops.size());
+    new (S) SCEVAddExpr(ID.Intern(SCEVAllocator), O, Ops.size());
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1825,7 +1825,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
     S = SCEVAllocator.Allocate<SCEVMulExpr>();
     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
     std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-    new (S) SCEVMulExpr(ID, O, Ops.size());
+    new (S) SCEVMulExpr(ID.Intern(SCEVAllocator), O, Ops.size());
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -1925,7 +1925,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
-  new (S) SCEVUDivExpr(ID, LHS, RHS);
+  new (S) SCEVUDivExpr(ID.Intern(SCEVAllocator), LHS, RHS);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2036,7 +2036,7 @@ ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
     S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
     const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
     std::uninitialized_copy(Operands.begin(), Operands.end(), O);
-    new (S) SCEVAddRecExpr(ID, O, Operands.size(), L);
+    new (S) SCEVAddRecExpr(ID.Intern(SCEVAllocator), O, Operands.size(), L);
     UniqueSCEVs.InsertNode(S, IP);
   }
   if (HasNUW) S->setHasNoUnsignedWrap(true);
@@ -2138,7 +2138,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-  new (S) SCEVSMaxExpr(ID, O, Ops.size());
+  new (S) SCEVSMaxExpr(ID.Intern(SCEVAllocator), O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2237,7 +2237,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
   const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
   std::uninitialized_copy(Ops.begin(), Ops.end(), O);
-  new (S) SCEVUMaxExpr(ID, O, Ops.size());
+  new (S) SCEVUMaxExpr(ID.Intern(SCEVAllocator), O, Ops.size());
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
@@ -2300,7 +2300,7 @@ const SCEV *ScalarEvolution::getUnknown(Value *V) {
   void *IP = 0;
   if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
   SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
-  new (S) SCEVUnknown(ID, V);
+  new (S) SCEVUnknown(ID.Intern(SCEVAllocator), V);
   UniqueSCEVs.InsertNode(S, IP);
   return S;
 }
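Each of the getters above follows the same lookup-or-create idiom; the sketch below shows that idiom in isolation. Everything in it (LeafNode, getLeaf, UniqueNodes, NodeAllocator) is hypothetical and only mirrors the pattern of getConstant() and friends; it is not code from the patch.

    #include "llvm/ADT/FoldingSet.h"
    #include "llvm/Support/Allocator.h"
    #include <new>

    using namespace llvm;

    // Hypothetical uniqued node, analogous to SCEVConstant above.
    struct LeafNode : FoldingSetNode {
      FoldingSetNodeIDRef FastID;
      unsigned Val;
      LeafNode(FoldingSetNodeIDRef ID, unsigned V) : FastID(ID), Val(V) {}
      void Profile(FoldingSetNodeID &ID) { ID = FastID; }
    };

    static FoldingSet<LeafNode> UniqueNodes;
    static BumpPtrAllocator NodeAllocator;

    LeafNode *getLeaf(unsigned V) {
      // Profile the prospective node and look for an existing copy.
      FoldingSetNodeID ID;
      ID.AddInteger(V);
      void *IP = 0;
      if (LeafNode *N = UniqueNodes.FindNodeOrInsertPos(ID, IP))
        return N;                          // already uniqued
      // Raw storage from the bump allocator, then placement-new the node with
      // an interned copy of the ID; neither needs a destructor later.
      LeafNode *N = NodeAllocator.Allocate<LeafNode>();
      new (N) LeafNode(ID.Intern(NodeAllocator), V);
      UniqueNodes.InsertNode(N, IP);
      return N;
    }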
lib/Support/FoldingSet.cpp

@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//

 #include "llvm/ADT/FoldingSet.h"
+#include "llvm/Support/Allocator.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
 #include <cassert>
@@ -130,6 +131,15 @@ bool FoldingSetNodeID::operator==(const FoldingSetNodeID &RHS)const{
   return memcmp(&Bits[0], &RHS.Bits[0], Bits.size()*sizeof(Bits[0])) == 0;
 }

+/// Intern - Copy this node's data to a memory region allocated from the
+/// given allocator and return a FoldingSetNodeIDRef describing the
+/// interned data.
+FoldingSetNodeIDRef
+FoldingSetNodeID::Intern(BumpPtrAllocator &Allocator) const {
+  unsigned *New = Allocator.Allocate<unsigned>(Bits.size());
+  std::uninitialized_copy(Bits.begin(), Bits.end(), New);
+  return FoldingSetNodeIDRef(New, Bits.size());
+}
+
 //===----------------------------------------------------------------------===//
 /// Helper functions for FoldingSetImpl.