[C++11] More 'nullptr' conversion. In some cases just using a boolean check instead of comparing to nullptr.

llvm-svn: 206142
Craig Topper 2014-04-14 00:51:57 +00:00
parent c4a623f8d4
commit 30281a67fb
169 changed files with 1547 additions and 1489 deletions
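
A quick sketch of the two patterns applied throughout this commit (not commit code; the take() and find() helpers below are hypothetical). The literal 0 is an int first, so it can silently pick a non-pointer overload; nullptr cannot. The boolean-check form replaces explicit comparisons against null.

#include <cstdio>

static void take(int)   { std::puts("int overload"); }
static void take(int *) { std::puts("pointer overload"); }

// A defaulted pointer parameter, written the post-commit way.
static int *find(int *Fallback = nullptr) { return Fallback; } // was "= 0"

int main() {
  take(0);       // resolves to take(int), even if a null pointer was meant
  take(nullptr); // unambiguously take(int *)

  int *P = nullptr;         // was: int *P = 0;
  if (!P)                   // was: if (P == 0)
    std::puts("P is null");
  return find() ? 1 : 0;    // omitted argument defaults to nullptr
}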


@ -72,7 +72,7 @@ public:
/// BitVector default ctor - Creates an empty bitvector.
BitVector() : Size(0), Capacity(0) {
Bits = 0;
Bits = nullptr;
}
/// BitVector ctor - Creates a bitvector of specified number of bits. All
@ -88,7 +88,7 @@ public:
/// BitVector copy ctor.
BitVector(const BitVector &RHS) : Size(RHS.size()) {
if (Size == 0) {
Bits = 0;
Bits = nullptr;
Capacity = 0;
return;
}
@ -100,7 +100,7 @@ public:
BitVector(BitVector &&RHS)
: Bits(RHS.Bits), Size(RHS.Size), Capacity(RHS.Capacity) {
RHS.Bits = 0;
RHS.Bits = nullptr;
}
~BitVector() {
@ -467,7 +467,7 @@ public:
Size = RHS.Size;
Capacity = RHS.Capacity;
RHS.Bits = 0;
RHS.Bits = nullptr;
return *this;
}


@ -1177,7 +1177,7 @@ branchRoot(unsigned Position) {
if (Nodes == 1)
size[0] = rootSize;
else
NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, size,
NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, size,
Position, true);
// Allocate new nodes.
@ -1218,7 +1218,7 @@ splitRoot(unsigned Position) {
if (Nodes == 1)
Size[0] = rootSize;
else
NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, NULL, Size,
NewOffset = distribute(Nodes, rootSize, Leaf::Capacity, nullptr, Size,
Position, true);
// Allocate new nodes.
@ -1346,7 +1346,7 @@ protected:
public:
/// const_iterator - Create an iterator that isn't pointing anywhere.
const_iterator() : map(0) {}
const_iterator() : map(nullptr) {}
/// setMap - Change the map iterated over. This call must be followed by a
/// call to goToBegin(), goToEnd(), or find()


@ -146,7 +146,7 @@ public:
T *get() const { return Ptr; }
LLVM_EXPLICIT operator bool() const { return Ptr != 0; }
bool operator!() const { return Ptr == 0; }
bool operator!() const { return Ptr == nullptr; }
void swap(OwningArrayPtr &RHS) {
T *Tmp = RHS.Ptr;


@ -111,7 +111,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
}
inline po_iterator(NodeType *BB) {
this->insertEdge((NodeType*)0, BB);
this->insertEdge((NodeType*)nullptr, BB);
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
@ -119,7 +119,7 @@ class po_iterator : public std::iterator<std::forward_iterator_tag,
inline po_iterator(NodeType *BB, SetType &S) :
po_iterator_storage<SetType, ExtStorage>(S) {
if (this->insertEdge((NodeType*)0, BB)) {
if (this->insertEdge((NodeType*)nullptr, BB)) {
VisitStack.push_back(std::make_pair(BB, GT::child_begin(BB)));
traverseChild();
}
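
The (NodeType*) casts above survive the conversion because nullptr has its own type, std::nullptr_t: a template parameter written as T* cannot be deduced from a bare nullptr. A standalone sketch of that rule (the record() helper is hypothetical, not from this header):

template <typename T> void record(T * /*From*/, T * /*To*/) {}

struct Node {};

void demo(Node *BB) {
  record((Node *)nullptr, BB); // OK: both arguments agree on T = Node
  // record(nullptr, BB);      // error: T cannot be deduced from std::nullptr_t
}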


@ -159,10 +159,10 @@ private:
void operator=(const ScopedHashTable&); // NOT YET IMPLEMENTED
friend class ScopedHashTableScope<K, V, KInfo, AllocatorTy>;
public:
ScopedHashTable() : CurScope(0) {}
ScopedHashTable() : CurScope(nullptr) {}
ScopedHashTable(AllocatorTy A) : CurScope(0), Allocator(A) {}
~ScopedHashTable() {
assert(CurScope == 0 && TopLevelMap.empty() && "Scope imbalance!");
assert(!CurScope && TopLevelMap.empty() && "Scope imbalance!");
}
@ -222,7 +222,7 @@ ScopedHashTableScope<K, V, KInfo, Allocator>::
ScopedHashTableScope(ScopedHashTable<K, V, KInfo, Allocator> &ht) : HT(ht) {
PrevScope = HT.CurScope;
HT.CurScope = this;
LastValInScope = 0;
LastValInScope = nullptr;
}
template <typename K, typename V, typename KInfo, typename Allocator>
@ -233,7 +233,7 @@ ScopedHashTableScope<K, V, KInfo, Allocator>::~ScopedHashTableScope() {
// Pop and delete all values corresponding to this scope.
while (ScopedHashTableVal<K, V> *ThisEntry = LastValInScope) {
// Pop this value out of the TopLevelMap.
if (ThisEntry->getNextForKey() == 0) {
if (!ThisEntry->getNextForKey()) {
assert(HT.TopLevelMap[ThisEntry->getKey()] == ThisEntry &&
"Scope imbalance!");
HT.TopLevelMap.erase(ThisEntry->getKey());


@ -187,7 +187,7 @@ public:
typedef const ValueT *const_pointer;
SparseMultiSet()
: Sparse(0), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) { }
: Sparse(nullptr), Universe(0), FreelistIdx(SMSNode::INVALID), NumFree(0) {}
~SparseMultiSet() { free(Sparse); }


@ -142,7 +142,7 @@ public:
typedef ValueT *pointer;
typedef const ValueT *const_pointer;
SparseSet() : Sparse(0), Universe(0) {}
SparseSet() : Sparse(nullptr), Universe(0) {}
~SparseSet() { free(Sparse); }
/// setUniverse - Set the universe size which determines the largest key the


@ -69,7 +69,7 @@ public:
}
TinyPtrVector(TinyPtrVector &&RHS) : Val(RHS.Val) {
RHS.Val = (EltTy)0;
RHS.Val = (EltTy)nullptr;
}
TinyPtrVector &operator=(TinyPtrVector &&RHS) {
if (this == &RHS)
@ -92,7 +92,7 @@ public:
}
Val = RHS.Val;
RHS.Val = (EltTy)0;
RHS.Val = nullptr;
return *this;
}
@ -174,7 +174,7 @@ public:
}
void push_back(EltTy NewVal) {
assert(NewVal != 0 && "Can't add a null value");
assert(NewVal && "Can't add a null value");
// If we have nothing, add something.
if (Val.isNull()) {
@ -195,7 +195,7 @@ public:
void pop_back() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>())
Val = (EltTy)0;
Val = nullptr;
else if (VecTy *Vec = Val.template get<VecTy*>())
Vec->pop_back();
}
@ -203,7 +203,7 @@ public:
void clear() {
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
Val = (EltTy)0;
Val = (EltTy)nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// If we have a vector form, just clear it.
Vec->clear();
@ -218,7 +218,7 @@ public:
// If we have a single value, convert to empty.
if (Val.template is<EltTy>()) {
if (I == begin())
Val = (EltTy)0;
Val = nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
// multiple items in a vector; just do the erase, there is no
// benefit to collapsing back to a pointer
@ -234,7 +234,7 @@ public:
if (Val.template is<EltTy>()) {
if (S == begin() && S != E)
Val = (EltTy)0;
Val = nullptr;
} else if (VecTy *Vec = Val.template dyn_cast<VecTy*>()) {
return Vec->erase(S, E);
}


@ -75,7 +75,7 @@ protected:
public:
static char ID; // Class identification, replacement for typeinfo
AliasAnalysis() : DL(0), TLI(0), AA(0) {}
AliasAnalysis() : DL(nullptr), TLI(nullptr), AA(nullptr) {}
virtual ~AliasAnalysis(); // We want to be subclassed
/// UnknownSize - This is a special value which can be used with the
@ -116,8 +116,8 @@ public:
/// the location, or null if there is no known unique tag.
const MDNode *TBAATag;
explicit Location(const Value *P = 0, uint64_t S = UnknownSize,
const MDNode *N = 0)
explicit Location(const Value *P = nullptr, uint64_t S = UnknownSize,
const MDNode *N = nullptr)
: Ptr(P), Size(S), TBAATag(N) {}
Location getWithNewPtr(const Value *NewPtr) const {
@ -134,7 +134,7 @@ public:
Location getWithoutTBAATag() const {
Location Copy(*this);
Copy.TBAATag = 0;
Copy.TBAATag = nullptr;
return Copy;
}
};
@ -560,12 +560,12 @@ struct DenseMapInfo<AliasAnalysis::Location> {
static inline AliasAnalysis::Location getEmptyKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getEmptyKey(),
0, 0);
0, nullptr);
}
static inline AliasAnalysis::Location getTombstoneKey() {
return
AliasAnalysis::Location(DenseMapInfo<const Value *>::getTombstoneKey(),
0, 0);
0, nullptr);
}
static unsigned getHashValue(const AliasAnalysis::Location &Val) {
return DenseMapInfo<const Value *>::getHashValue(Val.Ptr) ^


@ -157,13 +157,13 @@ class BlockFrequencyInfoImpl {
PE = GraphTraits< Inverse<BlockT *> >::child_end(BB);
if (PI == PE)
return 0;
return nullptr;
BlockT *Pred = *PI;
++PI;
if (PI != PE)
return 0;
return nullptr;
return Pred;
}
@ -313,7 +313,7 @@ class BlockFrequencyInfoImpl {
// Travel over all blocks in postorder.
for (pot_iterator I = pot_begin(), E = pot_end(); I != E; ++I) {
BlockT *BB = *I;
BlockT *LastTail = 0;
BlockT *LastTail = nullptr;
DEBUG(dbgs() << "POT: " << getBlockName(BB) << "\n");
for (typename GT::ChildIteratorType


@ -36,15 +36,16 @@ namespace llvm {
/// Note that this fails if not all of the operands are constant. Otherwise,
/// this function can only fail when attempting to fold instructions like loads
/// and stores, which have no constant expression form.
Constant *ConstantFoldInstruction(Instruction *I, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
Constant *ConstantFoldInstruction(Instruction *I,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified DataLayout. If successful, the constant result is
/// returned; if not, null is returned.
Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
/// specified operands. If successful, the constant result is returned, if not,
@ -54,8 +55,8 @@ Constant *ConstantFoldConstantExpression(const ConstantExpr *CE,
///
Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
ArrayRef<Constant *> Ops,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
/// instruction (icmp/fcmp) with the specified operands. If it fails, it
@ -63,8 +64,8 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, Type *DestTy,
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *LHS, Constant *RHS,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
/// ConstantFoldInsertValueInstruction - Attempt to constant fold an insertvalue
/// instruction with the specified operands and indices. The constant result is
@ -75,7 +76,8 @@ Constant *ConstantFoldInsertValueInstruction(Constant *Agg, Constant *Val,
/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
/// produce if it is constant and determinable. If this is not determinable,
/// return null.
Constant *ConstantFoldLoadFromConstPtr(Constant *C, const DataLayout *TD = 0);
Constant *ConstantFoldLoadFromConstPtr(Constant *C,
const DataLayout *TD = nullptr);
/// ConstantFoldLoadThroughGEPConstantExpr - Given a constant and a
/// getelementptr constantexpr, return the constant value being addressed by the
@ -96,7 +98,7 @@ bool canConstantFoldCallTo(const Function *F);
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *ConstantFoldCall(Function *F, ArrayRef<Constant *> Operands,
const TargetLibraryInfo *TLI = 0);
const TargetLibraryInfo *TLI = nullptr);
}
#endif


@ -48,160 +48,166 @@ namespace llvm {
/// SimplifyAddInst - Given operands for an Add, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAddInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifySubInst - Given operands for a Sub, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySubInst(Value *LHS, Value *RHS, bool isNSW, bool isNUW,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFAddInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFSubInst(Value *LHS, Value *RHS, FastMathFlags FMF,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// Given operands for an FMul, see if we can fold the result. If not, this
/// returns null.
Value *SimplifyFMulInst(Value *LHS, Value *RHS,
FastMathFlags FMF,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyMulInst - Given operands for a Mul, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyMulInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifySDivInst - Given operands for an SDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifySDivInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyUDivInst - Given operands for a UDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyUDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyUDivInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyFDivInst - Given operands for an FDiv, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFDivInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyFDivInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifySRemInst - Given operands for an SRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifySRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifySRemInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyURemInst - Given operands for a URem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyURemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyURemInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyFRemInst - Given operands for an FRem, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFRemInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyFRemInst(Value *LHS, Value *RHS,
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyShlInst - Given operands for a Shl, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyLShrInst - Given operands for an LShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyAShrInst - Given operands for an AShr, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyAndInst - Given operands for an And, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyAndInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyOrInst - Given operands for an Or, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyOrInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyXorInst - Given operands for a Xor, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyXorInst(Value *LHS, Value *RHS, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyICmpInst - Given operands for an ICmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyFCmpInst - Given operands for an FCmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifySelectInst - Given operands for a SelectInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyGEPInst - Given operands for a GetElementPtrInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyGEPInst(ArrayRef<Value *> Ops, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyInsertValueInst - Given operands for an InsertValueInst, see if we
/// can fold the result. If not, this returns null.
Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
ArrayRef<unsigned> Idxs,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyTruncInst - Given operands for a TruncInst, see if we can fold
/// the result. If not, this returns null.
Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyTruncInst(Value *Op, Type *Ty, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
//=== Helper functions for higher up the class hierarchy.
@ -209,40 +215,40 @@ namespace llvm {
/// SimplifyCmpInst - Given operands for a CmpInst, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyBinOp - Given operands for a BinaryOperator, see if we can
/// fold the result. If not, this returns null.
Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// \brief Given a function and iterators over arguments, see if we can fold
/// the result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, User::op_iterator ArgBegin,
User::op_iterator ArgEnd, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
User::op_iterator ArgEnd, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// \brief Given a function and set of arguments, see if we can fold the
/// result.
///
/// If this call could not be simplified returns null.
Value *SimplifyCall(Value *V, ArrayRef<Value *> Args,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// SimplifyInstruction - See if we can compute a simplified version of this
/// instruction. If not, this returns null.
Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
Value *SimplifyInstruction(Instruction *I, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// \brief Replace all uses of 'I' with 'SimpleV' and simplify the uses
@ -254,9 +260,9 @@ namespace llvm {
///
/// The function returns true if any simplifications were performed.
bool replaceAndRecursivelySimplify(Instruction *I, Value *SimpleV,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
/// \brief Recursively attempt to simplify an instruction.
///
@ -265,9 +271,9 @@ namespace llvm {
/// of the users impacted. It returns true if any simplifications were
/// performed.
bool recursivelySimplifyInstruction(Instruction *I,
const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0,
const DominatorTree *DT = 0);
const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr,
const DominatorTree *DT = nullptr);
} // end namespace llvm
#endif
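
Every declaration above defaults its analysis pointers to nullptr, so a caller that has no DataLayout, TargetLibraryInfo, or DominatorTree simply omits them. A hedged usage sketch against the 3.5-era API shown here:

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Value.h"

// Returns the simplified value, or null if no simplification was found.
llvm::Value *trySimplify(llvm::Instruction *I) {
  // Equivalent to SimplifyInstruction(I, nullptr, nullptr, nullptr).
  return llvm::SimplifyInstruction(I);
}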


@ -79,7 +79,7 @@ class LoopBase {
operator=(const LoopBase<BlockT, LoopT> &) LLVM_DELETED_FUNCTION;
public:
/// Loop ctor - This creates an empty loop.
LoopBase() : ParentLoop(0) {}
LoopBase() : ParentLoop(nullptr) {}
~LoopBase() {
for (size_t i = 0, e = SubLoops.size(); i != e; ++i)
delete SubLoops[i];
@ -106,7 +106,7 @@ public:
///
bool contains(const LoopT *L) const {
if (L == this) return true;
if (L == 0) return false;
if (!L) return false;
return contains(L->getParentLoop());
}
@ -265,7 +265,7 @@ public:
/// updates the loop depth of the new child.
///
void addChildLoop(LoopT *NewChild) {
assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
assert(!NewChild->ParentLoop && "NewChild already has a parent!");
NewChild->ParentLoop = static_cast<LoopT *>(this);
SubLoops.push_back(NewChild);
}
@ -278,7 +278,7 @@ public:
LoopT *Child = *I;
assert(Child->ParentLoop == this && "Child is not a child of this loop!");
SubLoops.erase(SubLoops.begin()+(I-begin()));
Child->ParentLoop = 0;
Child->ParentLoop = nullptr;
return Child;
}
@ -333,7 +333,7 @@ public:
protected:
friend class LoopInfoBase<BlockT, LoopT>;
explicit LoopBase(BlockT *BB) : ParentLoop(0) {
explicit LoopBase(BlockT *BB) : ParentLoop(nullptr) {
Blocks.push_back(BB);
DenseBlockSet.insert(BB);
}
@ -372,7 +372,7 @@ public:
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Value *V, bool &Changed,
Instruction *InsertPt = 0) const;
Instruction *InsertPt = nullptr) const;
/// makeLoopInvariant - If the given instruction is inside of the
/// loop and it can be hoisted, do so to make it trivially loop-invariant.
@ -384,7 +384,7 @@ public:
/// If null, the terminator of the loop preheader is used.
///
bool makeLoopInvariant(Instruction *I, bool &Changed,
Instruction *InsertPt = 0) const;
Instruction *InsertPt = nullptr) const;
/// getCanonicalInductionVariable - Check to see if the loop has a canonical
/// induction variable: an integer recurrence that starts at 0 and increments
@ -531,7 +531,7 @@ public:
LoopT *removeLoop(iterator I) {
assert(I != end() && "Cannot remove end iterator!");
LoopT *L = *I;
assert(L->getParentLoop() == 0 && "Not a top-level loop!");
assert(!L->getParentLoop() && "Not a top-level loop!");
TopLevelLoops.erase(TopLevelLoops.begin() + (I-begin()));
return L;
}
@ -555,14 +555,14 @@ public:
std::find(TopLevelLoops.begin(), TopLevelLoops.end(), OldLoop);
assert(I != TopLevelLoops.end() && "Old loop not at top level!");
*I = NewLoop;
assert(NewLoop->ParentLoop == 0 && OldLoop->ParentLoop == 0 &&
assert(!NewLoop->ParentLoop && !OldLoop->ParentLoop &&
"Loops already embedded into a subloop!");
}
/// addTopLevelLoop - This adds the specified loop to the collection of
/// top-level loops.
void addTopLevelLoop(LoopT *New) {
assert(New->getParentLoop() == 0 && "Loop already in subloop!");
assert(!New->getParentLoop() && "Loop already in subloop!");
TopLevelLoops.push_back(New);
}
@ -583,7 +583,7 @@ public:
static bool isNotAlreadyContainedIn(const LoopT *SubLoop,
const LoopT *ParentLoop) {
if (SubLoop == 0) return true;
if (!SubLoop) return true;
if (SubLoop == ParentLoop) return false;
return isNotAlreadyContainedIn(SubLoop->getParentLoop(), ParentLoop);
}
@ -660,7 +660,7 @@ public:
void releaseMemory() override { LI.releaseMemory(); }
void print(raw_ostream &O, const Module* M = 0) const override;
void print(raw_ostream &O, const Module* M = nullptr) const override;
void getAnalysisUsage(AnalysisUsage &AU) const override;


@ -53,7 +53,7 @@ BlockT *LoopBase<BlockT, LoopT>::getExitingBlock() const {
getExitingBlocks(ExitingBlocks);
if (ExitingBlocks.size() == 1)
return ExitingBlocks[0];
return 0;
return nullptr;
}
/// getExitBlocks - Return all of the successor blocks of this loop. These
@ -80,7 +80,7 @@ BlockT *LoopBase<BlockT, LoopT>::getExitBlock() const {
getExitBlocks(ExitBlocks);
if (ExitBlocks.size() == 1)
return ExitBlocks[0];
return 0;
return nullptr;
}
/// getExitEdges - Return all pairs of (_inside_block_,_outside_block_).
@ -108,14 +108,14 @@ template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = getLoopPredecessor();
if (!Out) return 0;
if (!Out) return nullptr;
// Make sure there is only one exit out of the preheader.
typedef GraphTraits<BlockT*> BlockTraits;
typename BlockTraits::ChildIteratorType SI = BlockTraits::child_begin(Out);
++SI;
if (SI != BlockTraits::child_end(Out))
return 0; // Multiple exits from the block, must not be a preheader.
return nullptr; // Multiple exits from the block, must not be a preheader.
// The predecessor has exactly one successor, so it is a preheader.
return Out;
@ -129,7 +129,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPreheader() const {
template<class BlockT, class LoopT>
BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
// Keep track of nodes outside the loop branching to the header...
BlockT *Out = 0;
BlockT *Out = nullptr;
// Loop over the predecessors of the header node...
BlockT *Header = getHeader();
@ -140,7 +140,7 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopPredecessor() const {
typename InvBlockTraits::NodeType *N = *PI;
if (!contains(N)) { // If the block is not in the loop...
if (Out && Out != N)
return 0; // Multiple predecessors outside the loop
return nullptr; // Multiple predecessors outside the loop
Out = N;
}
}
@ -160,11 +160,11 @@ BlockT *LoopBase<BlockT, LoopT>::getLoopLatch() const {
InvBlockTraits::child_begin(Header);
typename InvBlockTraits::ChildIteratorType PE =
InvBlockTraits::child_end(Header);
BlockT *Latch = 0;
BlockT *Latch = nullptr;
for (; PI != PE; ++PI) {
typename InvBlockTraits::NodeType *N = *PI;
if (contains(N)) {
if (Latch) return 0;
if (Latch) return nullptr;
Latch = N;
}
}
@ -188,7 +188,7 @@ addBasicBlockToLoop(BlockT *NewBB, LoopInfoBase<BlockT, LoopT> &LIB) {
assert((Blocks.empty() || LIB[getHeader()] == this) &&
"Incorrect LI specified for this loop!");
assert(NewBB && "Cannot add a null basic block to the loop!");
assert(LIB[NewBB] == 0 && "BasicBlock already in the loop!");
assert(!LIB[NewBB] && "BasicBlock already in the loop!");
LoopT *L = static_cast<LoopT *>(this);
@ -210,12 +210,12 @@ template<class BlockT, class LoopT>
void LoopBase<BlockT, LoopT>::
replaceChildLoopWith(LoopT *OldChild, LoopT *NewChild) {
assert(OldChild->ParentLoop == this && "This loop is already broken!");
assert(NewChild->ParentLoop == 0 && "NewChild already has a parent!");
assert(!NewChild->ParentLoop && "NewChild already has a parent!");
typename std::vector<LoopT *>::iterator I =
std::find(SubLoops.begin(), SubLoops.end(), OldChild);
assert(I != SubLoops.end() && "OldChild not in loop!");
*I = NewChild;
OldChild->ParentLoop = 0;
OldChild->ParentLoop = nullptr;
NewChild->ParentLoop = static_cast<LoopT *>(this);
}


@ -105,7 +105,7 @@ public:
/// The returned cost is defined in terms of \c TargetCostConstants, see its
/// comments for a detailed explanation of the cost values.
virtual unsigned getOperationCost(unsigned Opcode, Type *Ty,
Type *OpTy = 0) const;
Type *OpTy = nullptr) const;
/// \brief Estimate the cost of a GEP operation when lowered.
///
@ -356,7 +356,7 @@ public:
/// The index and subtype parameters are used by the subvector insertion and
/// extraction shuffle kinds.
virtual unsigned getShuffleCost(ShuffleKind Kind, Type *Tp, int Index = 0,
Type *SubTp = 0) const;
Type *SubTp = nullptr) const;
/// \return The expected cost of cast instructions, such as bitcast, trunc,
/// zext, etc.
@ -369,7 +369,7 @@ public:
/// \returns The expected cost of compare and select instructions.
virtual unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
Type *CondTy = 0) const;
Type *CondTy = nullptr) const;
/// \return The expected cost of vector Insert and Extract.
/// Use -1 to indicate that there is no information on the index value.


@ -54,7 +54,7 @@ inline unsigned ComputeLinearIndex(Type *Ty,
///
void ComputeValueVTs(const TargetLowering &TLI, Type *Ty,
SmallVectorImpl<EVT> &ValueVTs,
SmallVectorImpl<uint64_t> *Offsets = 0,
SmallVectorImpl<uint64_t> *Offsets = nullptr,
uint64_t StartingOffset = 0);
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.


@ -247,7 +247,7 @@ namespace llvm {
/// an explicit alignment requested, it will override the alignment request
/// if required for correctness.
///
void EmitAlignment(unsigned NumBits, const GlobalValue *GV = 0) const;
void EmitAlignment(unsigned NumBits, const GlobalValue *GV = nullptr) const;
/// EmitBasicBlockStart - This method prints the label for the specified
/// MachineBasicBlock, an alignment (if present) and a comment describing
@ -395,10 +395,10 @@ namespace llvm {
//===------------------------------------------------------------------===//
/// EmitSLEB128 - emit the specified signed leb128 value.
void EmitSLEB128(int64_t Value, const char *Desc = 0) const;
void EmitSLEB128(int64_t Value, const char *Desc = nullptr) const;
/// EmitULEB128 - emit the specified unsigned leb128 value.
void EmitULEB128(uint64_t Value, const char *Desc = 0,
void EmitULEB128(uint64_t Value, const char *Desc = nullptr,
unsigned PadTo = 0) const;
/// EmitCFAByte - Emit a .byte 42 directive for a DW_CFA_xxx value.
@ -408,7 +408,7 @@ namespace llvm {
/// encoding. If verbose assembly output is enabled, we output comments
/// describing the encoding. Desc is a string saying what the encoding is
/// specifying (e.g. "LSDA").
void EmitEncodingByte(unsigned Val, const char *Desc = 0) const;
void EmitEncodingByte(unsigned Val, const char *Desc = nullptr) const;
/// GetSizeOfEncodedValue - Return the size of the encoding in bytes.
unsigned GetSizeOfEncodedValue(unsigned Encoding) const;
@ -491,7 +491,7 @@ namespace llvm {
mutable unsigned SetCounter;
/// EmitInlineAsm - Emit a blob of inline asm to the output streamer.
void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = 0,
void EmitInlineAsm(StringRef Str, const MDNode *LocMDNode = nullptr,
InlineAsm::AsmDialect AsmDialect =
InlineAsm::AD_ATT) const;


@ -260,7 +260,7 @@ public:
// Check for buffer overflow.
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
CurBufferPtr = BufferEnd;
Result = 0;
Result = nullptr;
} else {
// Allocate the space.
Result = CurBufferPtr;
@ -334,7 +334,9 @@ public:
/// getLabelLocations - Return the label locations map of the label IDs to
/// their address.
virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() { return 0; }
virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
return nullptr;
}
};
} // End llvm namespace


@ -62,7 +62,7 @@ namespace llvm {
}
void releaseState() override {
SUnits = 0;
SUnits = nullptr;
}
unsigned getLatency(unsigned NodeNum) const {


@ -44,7 +44,7 @@ typedef std::pair<const MachineInstr *, const MachineInstr *> InsnRange;
///
class LexicalScopes {
public:
LexicalScopes() : MF(NULL), CurrentFnLexicalScope(NULL) {}
LexicalScopes() : MF(nullptr), CurrentFnLexicalScope(nullptr) {}
~LexicalScopes();
/// initialize - Scan machine function and construct lexical scope nest, resets
@ -55,7 +55,7 @@ public:
void reset();
/// empty - Return true if there is no lexical scope information available.
bool empty() { return CurrentFnLexicalScope == NULL; }
bool empty() { return CurrentFnLexicalScope == nullptr; }
/// isCurrentFunctionScope - Return true if the given lexical scope represents
/// the current function.
@ -160,8 +160,8 @@ class LexicalScope {
public:
LexicalScope(LexicalScope *P, const MDNode *D, const MDNode *I, bool A)
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A), LastInsn(0),
FirstInsn(0), DFSIn(0), DFSOut(0) {
: Parent(P), Desc(D), InlinedAtLocation(I), AbstractScope(A),
LastInsn(nullptr), FirstInsn(nullptr), DFSIn(0), DFSOut(0) {
if (Parent)
Parent->addChild(this);
}
@ -199,11 +199,11 @@ public:
/// closeInsnRange - Create a range based on FirstInsn and LastInsn collected
/// until now. This is used when a new scope is encountered while walking
/// machine instructions.
void closeInsnRange(LexicalScope *NewScope = NULL) {
void closeInsnRange(LexicalScope *NewScope = nullptr) {
assert(LastInsn && "Last insn missing!");
Ranges.push_back(InsnRange(FirstInsn, LastInsn));
FirstInsn = NULL;
LastInsn = NULL;
FirstInsn = nullptr;
LastInsn = nullptr;
// If Parent dominates NewScope then do not close Parent's instruction
// range.
if (Parent && (!NewScope || !Parent->dominates(NewScope)))


@ -116,13 +116,13 @@ namespace llvm {
/// Return the value leaving the instruction, if any. This can be a
/// live-through value, or a live def. A dead def returns NULL.
VNInfo *valueOut() const {
return isDeadDef() ? 0 : LateVal;
return isDeadDef() ? nullptr : LateVal;
}
/// Return the value defined by this instruction, if any. This includes
/// dead defs, it is the value created by the instruction's def operands.
VNInfo *valueDefined() const {
return EarlyVal == LateVal ? 0 : LateVal;
return EarlyVal == LateVal ? nullptr : LateVal;
}
/// Return the end point of the last live range segment to interact with
@ -154,7 +154,7 @@ namespace llvm {
SlotIndex end; // End point of the interval (exclusive)
VNInfo *valno; // identifier for the value contained in this segment.
Segment() : valno(0) {}
Segment() : valno(nullptr) {}
Segment(SlotIndex S, SlotIndex E, VNInfo *V)
: start(S), end(E), valno(V) {
@ -336,20 +336,20 @@ namespace llvm {
/// is none.
const Segment *getSegmentContaining(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
return I == end() ? 0 : &*I;
return I == end() ? nullptr : &*I;
}
/// Return the live segment that contains the specified index, or null if
/// there is none.
Segment *getSegmentContaining(SlotIndex Idx) {
iterator I = FindSegmentContaining(Idx);
return I == end() ? 0 : &*I;
return I == end() ? nullptr : &*I;
}
/// getVNInfoAt - Return the VNInfo that is live at Idx, or NULL.
VNInfo *getVNInfoAt(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx);
return I == end() ? 0 : I->valno;
return I == end() ? nullptr : I->valno;
}
/// getVNInfoBefore - Return the VNInfo that is live up to but not
@ -357,7 +357,7 @@ namespace llvm {
/// used by an instruction at this SlotIndex position.
VNInfo *getVNInfoBefore(SlotIndex Idx) const {
const_iterator I = FindSegmentContaining(Idx.getPrevSlot());
return I == end() ? 0 : I->valno;
return I == end() ? nullptr : I->valno;
}
/// Return an iterator to the segment that contains the specified index, or
@ -443,13 +443,13 @@ namespace llvm {
const_iterator I = find(Idx.getBaseIndex());
const_iterator E = end();
if (I == E)
return LiveQueryResult(0, 0, SlotIndex(), false);
return LiveQueryResult(nullptr, nullptr, SlotIndex(), false);
// Is this an instruction live-in segment?
// If Idx is the start index of a basic block, include live-in segments
// that start at Idx.getBaseIndex().
VNInfo *EarlyVal = 0;
VNInfo *LateVal = 0;
VNInfo *EarlyVal = nullptr;
VNInfo *LateVal = nullptr;
SlotIndex EndPoint;
bool Kill = false;
if (I->start <= Idx.getBaseIndex()) {
@ -466,7 +466,7 @@ namespace llvm {
// predecessor.
// Such a value is not live-in.
if (EarlyVal->def == Idx.getBaseIndex())
EarlyVal = 0;
EarlyVal = nullptr;
}
// I now points to the segment that may be live-through, or defined by
// this instr. Ignore segments starting after the current instr.
@ -597,7 +597,7 @@ namespace llvm {
public:
/// Create a LiveRangeUpdater for adding segments to LR.
/// LR will temporarily be in an invalid state until flush() is called.
LiveRangeUpdater(LiveRange *lr = 0) : LR(lr) {}
LiveRangeUpdater(LiveRange *lr = nullptr) : LR(lr) {}
~LiveRangeUpdater() { flush(); }


@ -137,7 +137,7 @@ namespace llvm {
// Interval removal.
void removeInterval(unsigned Reg) {
delete VirtRegIntervals[Reg];
VirtRegIntervals[Reg] = 0;
VirtRegIntervals[Reg] = nullptr;
}
/// Given a register and an instruction, adds a live segment from that
@ -153,7 +153,7 @@ namespace llvm {
/// Return true if the interval may have been separated into multiple
/// connected components.
bool shrinkToUses(LiveInterval *li,
SmallVectorImpl<MachineInstr*> *dead = 0);
SmallVectorImpl<MachineInstr*> *dead = nullptr);
/// extendToIndices - Extend the live range of LI to reach all points in
/// Indices. The points in the Indices array must be jointly dominated by
@ -262,7 +262,7 @@ namespace llvm {
bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
void print(raw_ostream &O, const Module* = 0) const override;
void print(raw_ostream &O, const Module* = nullptr) const override;
/// intervalIsInOneMBB - If LI is confined to a single basic block, return
/// a pointer to that block. If LI is live in to or out of any block,


@ -122,8 +122,8 @@ public:
{}
void clear() {
LiveUnion = NULL;
VirtReg = NULL;
LiveUnion = nullptr;
VirtReg = nullptr;
InterferingVRegs.clear();
CheckedFirstInterference = false;
SeenAllInterferences = false;
@ -182,7 +182,7 @@ public:
unsigned Size;
LiveIntervalUnion *LIUs;
public:
Array() : Size(0), LIUs(0) {}
Array() : Size(0), LIUs(nullptr) {}
~Array() { clear(); }
// Initialize the array to have Size entries.


@ -48,7 +48,7 @@ class LivePhysRegs {
LivePhysRegs &operator=(const LivePhysRegs&) LLVM_DELETED_FUNCTION;
public:
/// \brief Constructs a new empty LivePhysRegs set.
LivePhysRegs() : TRI(0), LiveRegs() {}
LivePhysRegs() : TRI(nullptr), LiveRegs() {}
/// \brief Constructs and initializes an empty LivePhysRegs set.
LivePhysRegs(const TargetRegisterInfo *TRI) : TRI(TRI) {


@ -116,7 +116,7 @@ public:
MachineFunction &MF,
LiveIntervals &lis,
VirtRegMap *vrm,
Delegate *delegate = 0)
Delegate *delegate = nullptr)
: Parent(parent), NewRegs(newRegs),
MRI(MF.getRegInfo()), LIS(lis), VRM(vrm),
TII(*MF.getTarget().getInstrInfo()),
@ -174,7 +174,7 @@ public:
struct Remat {
VNInfo *ParentVNI; // parent_'s value at the remat location.
MachineInstr *OrigMI; // Instruction defining ParentVNI.
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(0) {}
explicit Remat(VNInfo *ParentVNI) : ParentVNI(ParentVNI), OrigMI(nullptr) {}
};
/// canRematerializeAt - Determine if ParentVNI can be rematerialized at


@ -92,7 +92,7 @@ namespace llvm {
bool runOnMachineFunction(MachineFunction&) override;
/// print - Implement the dump method.
void print(raw_ostream &O, const Module* = 0) const override;
void print(raw_ostream &O, const Module* = nullptr) const override;
};
}


@ -160,7 +160,7 @@ public:
template<class OtherTy, class OtherIterTy>
bundle_iterator(const bundle_iterator<OtherTy, OtherIterTy> &I)
: MII(I.getInstrIterator()) {}
bundle_iterator() : MII(0) {}
bundle_iterator() : MII(nullptr) {}
Ty &operator*() const { return *MII; }
Ty *operator->() const { return &operator*(); }
@ -621,7 +621,7 @@ public:
// Debugging methods.
void dump() const;
void print(raw_ostream &OS, SlotIndexes* = 0) const;
void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
// Printing method used by LoopInfo.
void printAsOperand(raw_ostream &OS, bool PrintType = true);


@ -262,7 +262,7 @@ public:
// Check for buffer overflow.
if (Size >= (uintptr_t)(BufferEnd-CurBufferPtr)) {
CurBufferPtr = BufferEnd;
Result = 0;
Result = nullptr;
} else {
// Allocate the space.
Result = CurBufferPtr;


@ -519,7 +519,7 @@ public:
/// a nonnegative identifier to represent it.
///
int CreateStackObject(uint64_t Size, unsigned Alignment, bool isSS,
const AllocaInst *Alloca = 0);
const AllocaInst *Alloca = nullptr);
/// CreateSpillStackObject - Create a new statically sized stack object that
/// represents a spill slot, returning a nonnegative identifier to represent


@ -271,12 +271,12 @@ public:
/// dense, and match the ordering of the blocks within the function. If a
/// specific MachineBasicBlock is specified, only that block and those after
/// it are renumbered.
void RenumberBlocks(MachineBasicBlock *MBBFrom = 0);
void RenumberBlocks(MachineBasicBlock *MBBFrom = nullptr);
/// print - Print out the MachineFunction in a format suitable for debugging
/// to the specified stream.
///
void print(raw_ostream &OS, SlotIndexes* = 0) const;
void print(raw_ostream &OS, SlotIndexes* = nullptr) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
@ -299,7 +299,7 @@ public:
/// verify - Run the current MachineFunction through the machine code
/// verifier, useful for debugger use.
void verify(Pass *p = NULL, const char *Banner = NULL) const;
void verify(Pass *p = nullptr, const char *Banner = nullptr) const;
// Provide accessors for the MachineBasicBlock list...
typedef BasicBlockListType::iterator iterator;
@ -367,7 +367,7 @@ public:
/// implementation.
void removeFromMBBNumbering(unsigned N) {
assert(N < MBBNumbering.size() && "Illegal basic block #");
MBBNumbering[N] = 0;
MBBNumbering[N] = nullptr;
}
/// CreateMachineInstr - Allocate a new MachineInstr. Use this instead
@ -392,7 +392,7 @@ public:
/// CreateMachineBasicBlock - Allocate a new MachineBasicBlock. Use this
/// instead of `new MachineBasicBlock'.
///
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = 0);
MachineBasicBlock *CreateMachineBasicBlock(const BasicBlock *bb = nullptr);
/// DeleteMachineBasicBlock - Delete the given MachineBasicBlock.
///
@ -404,8 +404,8 @@ public:
MachineMemOperand *getMachineMemOperand(MachinePointerInfo PtrInfo,
unsigned f, uint64_t s,
unsigned base_alignment,
const MDNode *TBAAInfo = 0,
const MDNode *Ranges = 0);
const MDNode *TBAAInfo = nullptr,
const MDNode *Ranges = nullptr);
/// getMachineMemOperand - Allocate a new MachineMemOperand by copying
/// an existing one, adjusting by an offset and using the given size.


@ -760,7 +760,8 @@ public:
/// is a read of a super-register.
/// This does not count partial redefines of virtual registers as reads:
/// %reg1024:6 = OP.
bool readsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
bool readsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, false, TRI) != -1;
}
@ -776,12 +777,13 @@ public:
/// partial defines.
/// If Ops is not null, all operand indices for Reg are added.
std::pair<bool,bool> readsWritesVirtualRegister(unsigned Reg,
SmallVectorImpl<unsigned> *Ops = 0) const;
SmallVectorImpl<unsigned> *Ops = nullptr) const;
/// killsRegister - Return true if the MachineInstr kills the specified
/// register. If TargetRegisterInfo is passed, then it also checks if there is
/// a kill of a super-register.
bool killsRegister(unsigned Reg, const TargetRegisterInfo *TRI = NULL) const {
bool killsRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterUseOperandIdx(Reg, true, TRI) != -1;
}
@ -789,7 +791,8 @@ public:
/// specified register. If TargetRegisterInfo is passed, then it also checks
/// if there is a def of a super-register.
/// NOTE: It's ignoring subreg indices on virtual registers.
bool definesRegister(unsigned Reg, const TargetRegisterInfo *TRI=NULL) const {
bool definesRegister(unsigned Reg,
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, false, false, TRI) != -1;
}
@ -804,7 +807,7 @@ public:
/// instruction. If TargetRegisterInfo is passed, then it also checks
/// if there is a dead def of a super-register.
bool registerDefIsDead(unsigned Reg,
const TargetRegisterInfo *TRI = NULL) const {
const TargetRegisterInfo *TRI = nullptr) const {
return findRegisterDefOperandIdx(Reg, true, false, TRI) != -1;
}
@ -812,14 +815,14 @@ public:
/// the specific register or -1 if it is not found. It further tightens
/// the search criteria to a use that kills the register if isKill is true.
int findRegisterUseOperandIdx(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = NULL) const;
const TargetRegisterInfo *TRI = nullptr) const;
/// findRegisterUseOperand - Wrapper for findRegisterUseOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterUseOperand(unsigned Reg, bool isKill = false,
const TargetRegisterInfo *TRI = NULL) {
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterUseOperandIdx(Reg, isKill, TRI);
return (Idx == -1) ? NULL : &getOperand(Idx);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
/// findRegisterDefOperandIdx() - Returns the operand index that is a def of
@ -830,14 +833,14 @@ public:
/// This may also return a register mask operand when Overlap is true.
int findRegisterDefOperandIdx(unsigned Reg,
bool isDead = false, bool Overlap = false,
const TargetRegisterInfo *TRI = NULL) const;
const TargetRegisterInfo *TRI = nullptr) const;
/// findRegisterDefOperand - Wrapper for findRegisterDefOperandIdx, it returns
/// a pointer to the MachineOperand rather than an index.
MachineOperand *findRegisterDefOperand(unsigned Reg, bool isDead = false,
const TargetRegisterInfo *TRI = NULL) {
const TargetRegisterInfo *TRI = nullptr) {
int Idx = findRegisterDefOperandIdx(Reg, isDead, false, TRI);
return (Idx == -1) ? NULL : &getOperand(Idx);
return (Idx == -1) ? nullptr : &getOperand(Idx);
}
/// findFirstPredOperandIdx() - Find the index of the first operand in the
@ -855,7 +858,7 @@ public:
/// The flag operand is an immediate that can be decoded with methods like
/// InlineAsm::hasRegClassConstraint().
///
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = 0) const;
int findInlineAsmFlagIdx(unsigned OpIdx, unsigned *GroupNo = nullptr) const;
/// getRegClassConstraint - Compute the static register class constraint for
/// operand OpIdx. For normal instructions, this is derived from the
@ -917,7 +920,8 @@ public:
/// check if the register def is tied to a source operand, due to either
/// two-address elimination or inline assembly constraints. Returns the
/// first tied use operand index by reference if UseOpIdx is not null.
bool isRegTiedToUseOperand(unsigned DefOpIdx, unsigned *UseOpIdx = 0) const {
bool isRegTiedToUseOperand(unsigned DefOpIdx,
unsigned *UseOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(DefOpIdx);
if (!MO.isReg() || !MO.isDef() || !MO.isTied())
return false;
@ -929,7 +933,8 @@ public:
/// isRegTiedToDefOperand - Return true if the use operand of the specified
/// index is tied to a def operand. It also returns the def operand index by
/// reference if DefOpIdx is not null.
bool isRegTiedToDefOperand(unsigned UseOpIdx, unsigned *DefOpIdx = 0) const {
bool isRegTiedToDefOperand(unsigned UseOpIdx,
unsigned *DefOpIdx = nullptr) const {
const MachineOperand &MO = getOperand(UseOpIdx);
if (!MO.isReg() || !MO.isUse() || !MO.isTied())
return false;
@ -968,7 +973,8 @@ public:
/// addRegisterDefined - We have determined MI defines a register. Make sure
/// there is an operand defining Reg.
void addRegisterDefined(unsigned Reg, const TargetRegisterInfo *RegInfo = 0);
void addRegisterDefined(unsigned Reg,
const TargetRegisterInfo *RegInfo = nullptr);
/// setPhysRegsDeadExcept - Mark every physreg used by this instruction as
/// dead except those in the UsedRegs list.
@ -1022,7 +1028,7 @@ public:
//
// Debugging support
//
void print(raw_ostream &OS, const TargetMachine *TM = 0,
void print(raw_ostream &OS, const TargetMachine *TM = nullptr,
bool SkipOpers = false) const;
void dump() const;
@ -1123,7 +1129,7 @@ private:
/// useful for CSE, etc.
struct MachineInstrExpressionTrait : DenseMapInfo<MachineInstr*> {
static inline MachineInstr *getEmptyKey() {
return 0;
return nullptr;
}
static inline MachineInstr *getTombstoneKey() {


@ -46,7 +46,7 @@ class MachineInstrBuilder {
MachineFunction *MF;
MachineInstr *MI;
public:
MachineInstrBuilder() : MF(0), MI(0) {}
MachineInstrBuilder() : MF(nullptr), MI(nullptr) {}
/// Create a MachineInstrBuilder for manipulating an existing instruction.
/// F must be the machine function that was used to allocate I.


@ -196,7 +196,7 @@ public:
/// each operand referring to Reg.
/// @returns A filled-in RegInfo struct.
VirtRegInfo analyzeVirtReg(unsigned Reg,
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = 0);
SmallVectorImpl<std::pair<MachineInstr*, unsigned> > *Ops = nullptr);
/// analyzePhysReg - Analyze how the current instruction or bundle uses a
/// physical register. This function should not be called after operator++(),


@ -38,11 +38,11 @@ struct MachinePointerInfo {
/// Offset - This is an offset from the base Value*.
int64_t Offset;
explicit MachinePointerInfo(const Value *v = 0, int64_t offset = 0)
explicit MachinePointerInfo(const Value *v = nullptr, int64_t offset = 0)
: V(v), Offset(offset) {}
MachinePointerInfo getWithOffset(int64_t O) const {
if (V == 0) return MachinePointerInfo(0, 0);
if (V == nullptr) return MachinePointerInfo(nullptr, 0);
return MachinePointerInfo(V, Offset+O);
}
@ -109,8 +109,8 @@ public:
/// MachineMemOperand - Construct a MachineMemOperand object with the
/// specified PtrInfo, flags, size, and base alignment.
MachineMemOperand(MachinePointerInfo PtrInfo, unsigned flags, uint64_t s,
unsigned base_alignment, const MDNode *TBAAInfo = 0,
const MDNode *Ranges = 0);
unsigned base_alignment, const MDNode *TBAAInfo = nullptr,
const MDNode *Ranges = nullptr);
const MachinePointerInfo &getPointerInfo() const { return PtrInfo; }


@ -71,7 +71,7 @@ struct LandingPadInfo {
std::vector<int> TypeIds; // List of type ids (filters negative)
explicit LandingPadInfo(MachineBasicBlock *MBB)
: LandingPadBlock(MBB), LandingPadLabel(0), Personality(0) {}
: LandingPadBlock(MBB), LandingPadLabel(nullptr), Personality(nullptr) {}
};
//===----------------------------------------------------------------------===//
@ -201,7 +201,7 @@ public:
///
template<typename Ty>
Ty &getObjFileInfo() {
if (ObjFileMMI == 0)
if (ObjFileMMI == nullptr)
ObjFileMMI = new Ty(*this);
return *static_cast<Ty*>(ObjFileMMI);
}
@ -334,7 +334,7 @@ public:
/// TidyLandingPads - Remap landing pad labels and remove any deleted landing
/// pads.
void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = 0);
void TidyLandingPads(DenseMap<MCSymbol*, uintptr_t> *LPMap = nullptr);
/// getLandingPads - Return a reference to the landing pad info for the
/// current function.


@ -181,7 +181,7 @@ private:
} Contents;
explicit MachineOperand(MachineOperandType K)
: OpKind(K), SubReg_TargetFlags(0), ParentMI(0) {}
: OpKind(K), SubReg_TargetFlags(0), ParentMI(nullptr) {}
public:
/// getType - Returns the MachineOperandType for this operand.
///
@ -215,9 +215,9 @@ public:
///
/// Never call clearParent() on an operand in a MachineInstr.
///
void clearParent() { ParentMI = 0; }
void clearParent() { ParentMI = nullptr; }
void print(raw_ostream &os, const TargetMachine *TM = 0) const;
void print(raw_ostream &os, const TargetMachine *TM = nullptr) const;
//===--------------------------------------------------------------------===//
// Accessors that tell you what kind of MachineOperand you're looking at.
@ -593,8 +593,8 @@ public:
Op.TiedTo = 0;
Op.IsDebug = isDebug;
Op.SmallContents.RegNo = Reg;
Op.Contents.Reg.Prev = 0;
Op.Contents.Reg.Next = 0;
Op.Contents.Reg.Prev = nullptr;
Op.Contents.Reg.Next = nullptr;
Op.setSubReg(SubReg);
return Op;
}
@ -711,12 +711,12 @@ private:
/// part of a machine instruction.
bool isOnRegUseList() const {
assert(isReg() && "Can only add reg operand to use lists");
return Contents.Reg.Prev != 0;
return Contents.Reg.Prev != nullptr;
}
};
inline raw_ostream &operator<<(raw_ostream &OS, const MachineOperand& MO) {
MO.print(OS, 0);
MO.print(OS, nullptr);
return OS;
}


@ -59,7 +59,7 @@ private:
public:
MachinePassRegistryNode(const char *N, const char *D, MachinePassCtor C)
: Next(NULL)
: Next(nullptr)
, Name(N)
, Description(D)
, Ctor(C)
@ -123,7 +123,7 @@ class RegisterPassParser : public MachinePassRegistryListener,
public cl::parser<typename RegistryClass::FunctionPassCtor> {
public:
RegisterPassParser() {}
~RegisterPassParser() { RegistryClass::setListener(NULL); }
~RegisterPassParser() { RegistryClass::setListener(nullptr); }
void initialize(cl::Option &O) {
cl::parser<typename RegistryClass::FunctionPassCtor>::initialize(O);


@ -79,7 +79,7 @@ public:
bool runOnMachineFunction(MachineFunction &MF) override;
void getAnalysisUsage(AnalysisUsage &AU) const override;
void print(llvm::raw_ostream &OS, const Module *M = 0) const override;
void print(llvm::raw_ostream &OS, const Module *M = nullptr) const override;
};
} //end of namespace llvm


@ -135,7 +135,7 @@ public:
// notifications, we will need to change to using a list.
assert(TheDelegate == delegate &&
"Only the current delegate can perform reset!");
TheDelegate = 0;
TheDelegate = nullptr;
}
void setDelegate(Delegate *delegate) {
@ -223,7 +223,7 @@ public:
reg_iterator reg_begin(unsigned RegNo) const {
return reg_iterator(getRegUseDefListHead(RegNo));
}
static reg_iterator reg_end() { return reg_iterator(0); }
static reg_iterator reg_end() { return reg_iterator(nullptr); }
inline iterator_range<reg_iterator> reg_operands(unsigned Reg) const {
return iterator_range<reg_iterator>(reg_begin(Reg), reg_end());
@ -236,7 +236,9 @@ public:
reg_instr_iterator reg_instr_begin(unsigned RegNo) const {
return reg_instr_iterator(getRegUseDefListHead(RegNo));
}
static reg_instr_iterator reg_instr_end() { return reg_instr_iterator(0); }
static reg_instr_iterator reg_instr_end() {
return reg_instr_iterator(nullptr);
}
inline iterator_range<reg_instr_iterator>
reg_instructions(unsigned Reg) const {
@@ -251,7 +253,9 @@ public:
reg_bundle_iterator reg_bundle_begin(unsigned RegNo) const {
return reg_bundle_iterator(getRegUseDefListHead(RegNo));
}
static reg_bundle_iterator reg_bundle_end() { return reg_bundle_iterator(0); }
static reg_bundle_iterator reg_bundle_end() {
return reg_bundle_iterator(nullptr);
}
inline iterator_range<reg_bundle_iterator> reg_bundles(unsigned Reg) const {
return iterator_range<reg_bundle_iterator>(reg_bundle_begin(Reg),
@@ -269,7 +273,9 @@ public:
reg_nodbg_iterator reg_nodbg_begin(unsigned RegNo) const {
return reg_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_nodbg_iterator reg_nodbg_end() { return reg_nodbg_iterator(0); }
static reg_nodbg_iterator reg_nodbg_end() {
return reg_nodbg_iterator(nullptr);
}
inline iterator_range<reg_nodbg_iterator>
reg_nodbg_operands(unsigned Reg) const {
@@ -286,7 +292,7 @@ public:
return reg_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_instr_nodbg_iterator reg_instr_nodbg_end() {
return reg_instr_nodbg_iterator(0);
return reg_instr_nodbg_iterator(nullptr);
}
inline iterator_range<reg_instr_nodbg_iterator>
@@ -304,7 +310,7 @@ public:
return reg_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static reg_bundle_nodbg_iterator reg_bundle_nodbg_end() {
return reg_bundle_nodbg_iterator(0);
return reg_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<reg_bundle_nodbg_iterator>
@@ -325,7 +331,7 @@ public:
def_iterator def_begin(unsigned RegNo) const {
return def_iterator(getRegUseDefListHead(RegNo));
}
static def_iterator def_end() { return def_iterator(0); }
static def_iterator def_end() { return def_iterator(nullptr); }
inline iterator_range<def_iterator> def_operands(unsigned Reg) const {
return iterator_range<def_iterator>(def_begin(Reg), def_end());
@@ -338,7 +344,9 @@ public:
def_instr_iterator def_instr_begin(unsigned RegNo) const {
return def_instr_iterator(getRegUseDefListHead(RegNo));
}
static def_instr_iterator def_instr_end() { return def_instr_iterator(0); }
static def_instr_iterator def_instr_end() {
return def_instr_iterator(nullptr);
}
inline iterator_range<def_instr_iterator>
def_instructions(unsigned Reg) const {
@@ -353,7 +361,9 @@ public:
def_bundle_iterator def_bundle_begin(unsigned RegNo) const {
return def_bundle_iterator(getRegUseDefListHead(RegNo));
}
static def_bundle_iterator def_bundle_end() { return def_bundle_iterator(0); }
static def_bundle_iterator def_bundle_end() {
return def_bundle_iterator(nullptr);
}
inline iterator_range<def_bundle_iterator> def_bundles(unsigned Reg) const {
return iterator_range<def_bundle_iterator>(def_bundle_begin(Reg),
@@ -379,7 +389,7 @@ public:
use_iterator use_begin(unsigned RegNo) const {
return use_iterator(getRegUseDefListHead(RegNo));
}
static use_iterator use_end() { return use_iterator(0); }
static use_iterator use_end() { return use_iterator(nullptr); }
inline iterator_range<use_iterator> use_operands(unsigned Reg) const {
return iterator_range<use_iterator>(use_begin(Reg), use_end());
@@ -392,7 +402,9 @@ public:
use_instr_iterator use_instr_begin(unsigned RegNo) const {
return use_instr_iterator(getRegUseDefListHead(RegNo));
}
static use_instr_iterator use_instr_end() { return use_instr_iterator(0); }
static use_instr_iterator use_instr_end() {
return use_instr_iterator(nullptr);
}
inline iterator_range<use_instr_iterator>
use_instructions(unsigned Reg) const {
@@ -407,7 +419,9 @@ public:
use_bundle_iterator use_bundle_begin(unsigned RegNo) const {
return use_bundle_iterator(getRegUseDefListHead(RegNo));
}
static use_bundle_iterator use_bundle_end() { return use_bundle_iterator(0); }
static use_bundle_iterator use_bundle_end() {
return use_bundle_iterator(nullptr);
}
inline iterator_range<use_bundle_iterator> use_bundles(unsigned Reg) const {
return iterator_range<use_bundle_iterator>(use_bundle_begin(Reg),
@@ -434,7 +448,9 @@ public:
use_nodbg_iterator use_nodbg_begin(unsigned RegNo) const {
return use_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_nodbg_iterator use_nodbg_end() { return use_nodbg_iterator(0); }
static use_nodbg_iterator use_nodbg_end() {
return use_nodbg_iterator(nullptr);
}
inline iterator_range<use_nodbg_iterator>
use_nodbg_operands(unsigned Reg) const {
@@ -451,7 +467,7 @@ public:
return use_instr_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_instr_nodbg_iterator use_instr_nodbg_end() {
return use_instr_nodbg_iterator(0);
return use_instr_nodbg_iterator(nullptr);
}
inline iterator_range<use_instr_nodbg_iterator>
@@ -469,7 +485,7 @@ public:
return use_bundle_nodbg_iterator(getRegUseDefListHead(RegNo));
}
static use_bundle_nodbg_iterator use_bundle_nodbg_end() {
return use_bundle_nodbg_iterator(0);
return use_bundle_nodbg_iterator(nullptr);
}
inline iterator_range<use_bundle_nodbg_iterator>
@@ -779,7 +795,7 @@ public:
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
Op = 0;
Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
@@ -797,7 +813,7 @@ public:
MachineInstr, ptrdiff_t>::pointer pointer;
defusechain_iterator(const defusechain_iterator &I) : Op(I.Op) {}
defusechain_iterator() : Op(0) {}
defusechain_iterator() : Op(nullptr) {}
bool operator==(const defusechain_iterator &x) const {
return Op == x.Op;
@@ -807,7 +823,7 @@ public:
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
bool atEnd() const { return Op == 0; }
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_iterator &operator++() { // Preincrement
@@ -882,7 +898,7 @@ public:
if (!ReturnUses) {
if (Op) {
if (Op->isUse())
Op = 0;
Op = nullptr;
else
assert(!Op->isDebug() && "Can't have debug defs");
}
@@ -900,7 +916,7 @@ public:
MachineInstr, ptrdiff_t>::pointer pointer;
defusechain_instr_iterator(const defusechain_instr_iterator &I) : Op(I.Op){}
defusechain_instr_iterator() : Op(0) {}
defusechain_instr_iterator() : Op(nullptr) {}
bool operator==(const defusechain_instr_iterator &x) const {
return Op == x.Op;
@@ -910,7 +926,7 @@ public:
}
/// atEnd - return true if this iterator is equal to reg_end() on the value.
bool atEnd() const { return Op == 0; }
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only
defusechain_instr_iterator &operator++() { // Preincrement
@@ -957,7 +973,7 @@ class PSetIterator {
const int *PSet;
unsigned Weight;
public:
PSetIterator(): PSet(0), Weight(0) {}
PSetIterator(): PSet(nullptr), Weight(0) {}
PSetIterator(unsigned RegUnit, const MachineRegisterInfo *MRI) {
const TargetRegisterInfo *TRI = MRI->getTargetRegisterInfo();
if (TargetRegisterInfo::isVirtualRegister(RegUnit)) {
@@ -970,7 +986,7 @@ public:
Weight = TRI->getRegUnitWeight(RegUnit);
}
if (*PSet == -1)
PSet = 0;
PSet = nullptr;
}
bool isValid() const { return PSet; }
@@ -982,7 +998,7 @@ public:
assert(isValid() && "Invalid PSetIterator.");
++PSet;
if (*PSet == -1)
PSet = 0;
PSet = nullptr;
}
};
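
Aside: every reg/def/use iterator above shares one idea: the operand chain is a null-terminated intrusive list, so end() is simply an iterator holding nullptr and atEnd() is a null test. A reduced sketch of that pattern, with invented types:

#include <cassert>

struct Node { int val; Node *next; };

// Minimal forward iterator over an intrusive, null-terminated chain.
struct chain_iterator {
  Node *cur;
  explicit chain_iterator(Node *n) : cur(n) {}
  bool operator!=(const chain_iterator &o) const { return cur != o.cur; }
  chain_iterator &operator++() { cur = cur->next; return *this; }
  int &operator*() const { return cur->val; }
  bool atEnd() const { return cur == nullptr; } // mirrors the accessors above
};

static chain_iterator chain_end() { return chain_iterator(nullptr); }

int main() {
  Node c{3, nullptr}, b{2, &c}, a{1, &b};
  int sum = 0;
  for (chain_iterator it(&a); it != chain_end(); ++it)
    sum += *it;
  assert(sum == 6);
  return 0;
}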

View File

@@ -57,7 +57,7 @@ public:
/// MachineSSAUpdater constructor. If InsertedPHIs is specified, it will be
/// filled in with all PHI Nodes created by rewriting.
explicit MachineSSAUpdater(MachineFunction &MF,
SmallVectorImpl<MachineInstr*> *InsertedPHIs = 0);
SmallVectorImpl<MachineInstr*> *InsertedPHIs = nullptr);
~MachineSSAUpdater();
/// Initialize - Reset this object to get ready for a new set of SSA

View File

@@ -250,7 +250,7 @@ public:
ScheduleDAGInstrs(*C->MF, *C->MLI, *C->MDT, IsPostRA,
/*RemoveKillFlags=*/IsPostRA, C->LIS),
AA(C->AA), SchedImpl(S), Topo(SUnits, &ExitSU), CurrentTop(),
CurrentBottom(), NextClusterPred(NULL), NextClusterSucc(NULL) {
CurrentBottom(), NextClusterPred(nullptr), NextClusterSucc(nullptr) {
#ifndef NDEBUG
NumInstrsScheduled = 0;
#endif
@@ -377,7 +377,7 @@ protected:
public:
ScheduleDAGMILive(MachineSchedContext *C, MachineSchedStrategy *S):
ScheduleDAGMI(C, S, /*IsPostRA=*/false), RegClassInfo(C->RegClassInfo),
DFSResult(0), ShouldTrackPressure(false), RPTracker(RegPressure),
DFSResult(nullptr), ShouldTrackPressure(false), RPTracker(RegPressure),
TopRPTracker(TopPressure), BotRPTracker(BotPressure)
{}
@@ -628,9 +628,9 @@ public:
/// Pending queues extend the ready queues with the same ID and the
/// PendingFlag set.
SchedBoundary(unsigned ID, const Twine &Name):
DAG(0), SchedModel(0), Rem(0), Available(ID, Name+".A"),
DAG(nullptr), SchedModel(nullptr), Rem(nullptr), Available(ID, Name+".A"),
Pending(ID << LogMaxQID, Name+".P"),
HazardRec(0) {
HazardRec(nullptr) {
reset();
}

View File

@@ -154,7 +154,7 @@ public:
unsigned InstrHeight;
TraceBlockInfo() :
Pred(0), Succ(0),
Pred(nullptr), Succ(nullptr),
InstrDepth(~0u), InstrHeight(~0u),
HasValidInstrDepths(false), HasValidInstrHeights(false) {}

View File

@@ -54,7 +54,7 @@ public:
entry->incRef();
}
PoolRef& operator=(const PoolRef &r) {
assert(entry != 0 && "entry should not be null.");
assert(entry != nullptr && "entry should not be null.");
PoolEntry *temp = r.entry;
temp->incRef();
entry->decRef();

View File

@@ -86,7 +86,7 @@ namespace PBQP {
ConservativelyAllocatable,
NotProvablyAllocatable } ReductionState;
NodeMetadata() : RS(Unprocessed), DeniedOpts(0), OptUnsafeEdges(0) {}
NodeMetadata() : RS(Unprocessed), DeniedOpts(0), OptUnsafeEdges(nullptr){}
~NodeMetadata() { delete[] OptUnsafeEdges; }
void setup(const Vector& Costs) {

View File

@@ -59,7 +59,7 @@ class IdentifyingPassPtr {
};
bool IsInstance;
public:
IdentifyingPassPtr() : P(0), IsInstance(false) {}
IdentifyingPassPtr() : P(nullptr), IsInstance(false) {}
IdentifyingPassPtr(AnalysisID IDPtr) : ID(IDPtr), IsInstance(false) {}
IdentifyingPassPtr(Pass *InstancePtr) : P(InstancePtr), IsInstance(true) {}
@@ -151,7 +151,7 @@ public:
void setStartStopPasses(AnalysisID Start, AnalysisID Stop) {
StartAfter = Start;
StopAfter = Stop;
Started = (StartAfter == 0);
Started = (StartAfter == nullptr);
}
void setDisableVerify(bool Disable) { setOpt(DisableVerify, Disable); }
@@ -218,14 +218,14 @@ public:
/// Return NULL to select the default (generic) machine scheduler.
virtual ScheduleDAGInstrs *
createMachineScheduler(MachineSchedContext *C) const {
return 0;
return nullptr;
}
/// Similar to createMachineScheduler but used when postRA machine scheduling
/// is enabled.
virtual ScheduleDAGInstrs *
createPostMachineScheduler(MachineSchedContext *C) const {
return 0;
return nullptr;
}
protected:
@@ -372,7 +372,7 @@ namespace llvm {
/// createCodeGenPreparePass - Transform the code to expose more pattern
/// matching during instruction selection.
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = 0);
FunctionPass *createCodeGenPreparePass(const TargetMachine *TM = nullptr);
/// MachineLoopInfo - This pass is a loop analysis pass.
extern char &MachineLoopInfoID;
@@ -547,7 +547,7 @@ namespace llvm {
/// createMachineVerifierPass - This pass verifies generated machine code
/// instructions for correctness.
///
FunctionPass *createMachineVerifierPass(const char *Banner = 0);
FunctionPass *createMachineVerifierPass(const char *Banner = nullptr);
/// createDwarfEHPass - This pass mulches exception handling code into a form
/// adapted to code generation. Required if using dwarf exception handling.

View File

@@ -159,7 +159,7 @@ namespace llvm {
FunctionPass *
createPBQPRegisterAllocator(std::unique_ptr<PBQPBuilder> &builder,
char *customPassID = 0);
char *customPassID = nullptr);
}
#endif /* LLVM_CODEGEN_REGALLOCPBQP_H */

View File

@@ -158,7 +158,7 @@ class PressureDiffs {
unsigned Size;
unsigned Max;
public:
PressureDiffs(): PDiffArray(0), Size(0), Max(0) {}
PressureDiffs(): PDiffArray(nullptr), Size(0), Max(0) {}
~PressureDiffs() { free(PDiffArray); }
void clear() { Size = 0; }
@@ -285,12 +285,12 @@ class RegPressureTracker {
public:
RegPressureTracker(IntervalPressure &rp) :
MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(true),
TrackUntiedDefs(false) {}
MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
RequireIntervals(true), TrackUntiedDefs(false) {}
RegPressureTracker(RegionPressure &rp) :
MF(0), TRI(0), RCI(0), LIS(0), MBB(0), P(rp), RequireIntervals(false),
TrackUntiedDefs(false) {}
MF(nullptr), TRI(nullptr), RCI(nullptr), LIS(nullptr), MBB(nullptr), P(rp),
RequireIntervals(false), TrackUntiedDefs(false) {}
void reset();
@@ -318,7 +318,8 @@ public:
SlotIndex getCurrSlot() const;
/// Recede across the previous instruction.
bool recede(SmallVectorImpl<unsigned> *LiveUses = 0, PressureDiff *PDiff = 0);
bool recede(SmallVectorImpl<unsigned> *LiveUses = nullptr,
PressureDiff *PDiff = nullptr);
/// Advance across the current instruction.
bool advance();
@@ -393,7 +394,7 @@ public:
MaxPressureLimit);
assert(isBottomClosed() && "Uninitialized pressure tracker");
return getMaxUpwardPressureDelta(MI, 0, Delta, CriticalPSets,
return getMaxUpwardPressureDelta(MI, nullptr, Delta, CriticalPSets,
MaxPressureLimit);
}

View File

@@ -42,7 +42,7 @@ class RegScavenger {
/// Information on scavenged registers (held in a spill slot).
struct ScavengedInfo {
ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(NULL) {}
ScavengedInfo(int FI = -1) : FrameIndex(FI), Reg(0), Restore(nullptr) {}
/// A spill slot used for scavenging a register post register allocation.
int FrameIndex;
@@ -73,7 +73,7 @@ class RegScavenger {
public:
RegScavenger()
: MBB(NULL), NumPhysRegs(0), Tracking(false) {}
: MBB(nullptr), NumPhysRegs(0), Tracking(false) {}
/// enterBasicBlock - Start tracking liveness from the begin of the specific
/// basic block.
@@ -104,7 +104,7 @@ public:
/// skipTo - Move the internal MBB iterator but do not update register states.
void skipTo(MachineBasicBlock::iterator I) {
if (I == MachineBasicBlock::iterator(NULL))
if (I == MachineBasicBlock::iterator(nullptr))
Tracking = false;
MBBI = I;
}

View File

@@ -95,7 +95,7 @@ namespace llvm {
/// SDep - Construct a null SDep. This is only for use by container
/// classes which require default constructors. SUnits may not
/// have null SDep edges.
SDep() : Dep(0, Data) {}
SDep() : Dep(nullptr, Data) {}
/// SDep - Construct an SDep with the specified values.
SDep(SUnit *S, Kind kind, unsigned Reg)
@@ -317,46 +317,49 @@ namespace llvm {
/// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
/// an SDNode and any nodes flagged to it.
SUnit(SDNode *node, unsigned nodenum)
: Node(node), Instr(0), OrigNode(0), SchedClass(0), NodeNum(nodenum),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
isAvailable(false), isScheduled(false), isScheduleHigh(false),
isScheduleLow(false), isCloned(false), isUnbuffered(false),
hasReservedResource(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
: Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
isCallOp(false), isTwoAddress(false), isCommutable(false),
hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
isUnbuffered(false), hasReservedResource(false),
SchedulingPref(Sched::None), isDepthCurrent(false),
isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// SUnit - Construct an SUnit for post-regalloc scheduling to represent
/// a MachineInstr.
SUnit(MachineInstr *instr, unsigned nodenum)
: Node(0), Instr(instr), OrigNode(0), SchedClass(0), NodeNum(nodenum),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
isAvailable(false), isScheduled(false), isScheduleHigh(false),
isScheduleLow(false), isCloned(false), isUnbuffered(false),
hasReservedResource(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
: Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
isCallOp(false), isTwoAddress(false), isCommutable(false),
hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
isUnbuffered(false), hasReservedResource(false),
SchedulingPref(Sched::None), isDepthCurrent(false),
isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// SUnit - Construct a placeholder SUnit.
SUnit()
: Node(0), Instr(0), OrigNode(0), SchedClass(0), NodeNum(BoundaryID),
NodeQueueId(0), NumPreds(0), NumSuccs(0), NumPredsLeft(0),
NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0), NumRegDefsLeft(0),
Latency(0), isVRegCycle(false), isCall(false), isCallOp(false),
isTwoAddress(false), isCommutable(false), hasPhysRegUses(false),
hasPhysRegDefs(false), hasPhysRegClobbers(false), isPending(false),
isAvailable(false), isScheduled(false), isScheduleHigh(false),
isScheduleLow(false), isCloned(false), isUnbuffered(false),
hasReservedResource(false), SchedulingPref(Sched::None),
isDepthCurrent(false), isHeightCurrent(false), Depth(0), Height(0),
TopReadyCycle(0), BotReadyCycle(0), CopyDstRC(NULL), CopySrcRC(NULL) {}
: Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
isCallOp(false), isTwoAddress(false), isCommutable(false),
hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
isPending(false), isAvailable(false), isScheduled(false),
isScheduleHigh(false), isScheduleLow(false), isCloned(false),
isUnbuffered(false), hasReservedResource(false),
SchedulingPref(Sched::None), isDepthCurrent(false),
isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
/// \brief Boundary nodes are placeholders for the boundary of the
/// scheduling region.
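
Aside: these three SUnit constructors repeat an almost identical initializer list. Since the codebase is adopting C++11 anyway, one way such lists can shrink is non-static data member initializers, where the defaults live next to the members and each constructor states only the differences. A sketch of that alternative (illustrative only; it is not what this commit does):

struct UnitFlags {
  bool isCall = false;              // defaults live with the members...
  bool isScheduled = false;
  unsigned Depth = 0;
  const void *CopyDstRC = nullptr;

  UnitFlags() = default;            // ...so constructors stay small
  explicit UnitFlags(unsigned D) : Depth(D) {}
};

int main() { UnitFlags U(3); return U.isCall ? 1 : 0; }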

View File

@@ -158,7 +158,7 @@ namespace llvm {
const MachineDominatorTree &mdt,
bool IsPostRAFlag,
bool RemoveKillFlags = false,
LiveIntervals *LIS = 0);
LiveIntervals *LIS = nullptr);
virtual ~ScheduleDAGInstrs() {}
@@ -206,8 +206,9 @@ namespace llvm {
/// buildSchedGraph - Build SUnits from the MachineBasicBlock that we are
/// input.
void buildSchedGraph(AliasAnalysis *AA, RegPressureTracker *RPTracker = 0,
PressureDiffs *PDiffs = 0);
void buildSchedGraph(AliasAnalysis *AA,
RegPressureTracker *RPTracker = nullptr,
PressureDiffs *PDiffs = nullptr);
/// addSchedBarrierDeps - Add dependencies from instructions in the current
/// list of instructions being scheduled to scheduling barrier. We want to
@@ -259,10 +260,10 @@ namespace llvm {
/// newSUnit - Creates a new SUnit and return a ptr to it.
inline SUnit *ScheduleDAGInstrs::newSUnit(MachineInstr *MI) {
#ifndef NDEBUG
const SUnit *Addr = SUnits.empty() ? 0 : &SUnits[0];
const SUnit *Addr = SUnits.empty() ? nullptr : &SUnits[0];
#endif
SUnits.push_back(SUnit(MI, (unsigned)SUnits.size()));
assert((Addr == 0 || Addr == &SUnits[0]) &&
assert((Addr == nullptr || Addr == &SUnits[0]) &&
"SUnits std::vector reallocated on the fly!");
SUnits.back().OrigNode = &SUnits.back();
return &SUnits.back();
@@ -272,7 +273,7 @@ namespace llvm {
inline SUnit *ScheduleDAGInstrs::getSUnit(MachineInstr *MI) const {
DenseMap<MachineInstr*, SUnit*>::const_iterator I = MISUnitMap.find(MI);
if (I == MISUnitMap.end())
return 0;
return nullptr;
return I->second;
}
} // namespace llvm
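
Aside: the NDEBUG-only Addr snapshot in newSUnit guards a real invariant: the scheduler holds raw SUnit* into this vector, so push_back must never reallocate while the graph is being built. A standalone model of the same check, with invented types:

#include <cassert>
#include <vector>

struct SUnitModel { int id; };

int main() {
  std::vector<SUnitModel> units;
  units.reserve(8);                   // capacity fixed up front
  units.push_back({0});
  const SUnitModel *addr = &units[0]; // raw pointer into the vector
  units.push_back({1});               // must not reallocate...
  assert(addr == &units[0] &&         // ...or addr would now dangle
         "SUnits std::vector reallocated on the fly!");
  return 0;
}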

View File

@@ -47,7 +47,7 @@ class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
// Indices into the Scoreboard that represent the current cycle.
size_t Head;
public:
Scoreboard():Data(NULL), Depth(0), Head(0) { }
Scoreboard():Data(nullptr), Depth(0), Head(0) { }
~Scoreboard() {
delete[] Data;
}
@@ -62,7 +62,7 @@ class ScoreboardHazardRecognizer : public ScheduleHazardRecognizer {
}
void reset(size_t d = 1) {
if (Data == NULL) {
if (!Data) {
Depth = d;
Data = new unsigned[Depth];
}
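
Aside: the reset() hunk above shows the other half of what the commit message describes: comparing against NULL becomes a plain boolean check, which is equivalent because pointers convert contextually to bool. A tiny standalone demonstration:

#include <cstdlib>

int main() {
  unsigned *data = nullptr;
  if (!data)                            // same truth value as data == nullptr
    data = static_cast<unsigned *>(std::calloc(4, sizeof(unsigned)));
  bool have = (data != nullptr);        // the explicit spelling of the same test
  std::free(data);
  return have ? 0 : 1;
}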

View File

@@ -100,7 +100,7 @@ class SDValue {
SDNode *Node; // The node defining the value we are using.
unsigned ResNo; // Which return value of the node we are using.
public:
SDValue() : Node(0), ResNo(0) {}
SDValue() : Node(nullptr), ResNo(0) {}
SDValue(SDNode *node, unsigned resno) : Node(node), ResNo(resno) {}
/// get the index which selects a specific result in the SDNode
@@ -234,7 +234,7 @@ class SDUse {
void operator=(const SDUse &U) LLVM_DELETED_FUNCTION;
public:
SDUse() : Val(), User(NULL), Prev(NULL), Next(NULL) {}
SDUse() : Val(), User(nullptr), Prev(nullptr), Next(nullptr) {}
/// Normally SDUse will just implicitly convert to an SDValue that it holds.
operator const SDValue&() const { return Val; }
@@ -408,7 +408,7 @@ public:
/// use_empty - Return true if there are no uses of this node.
///
bool use_empty() const { return UseList == NULL; }
bool use_empty() const { return UseList == nullptr; }
/// hasOneUse - Return true if there is exactly one use of this node.
///
@@ -458,7 +458,7 @@ public:
SDUse, ptrdiff_t>::pointer pointer;
use_iterator(const use_iterator &I) : Op(I.Op) {}
use_iterator() : Op(0) {}
use_iterator() : Op(nullptr) {}
bool operator==(const use_iterator &x) const {
return Op == x.Op;
@@ -468,7 +468,7 @@ public:
}
/// atEnd - return true if this iterator is at the end of uses list.
bool atEnd() const { return Op == 0; }
bool atEnd() const { return Op == nullptr; }
// Iterator traversal: forward iteration only.
use_iterator &operator++() { // Preincrement
@@ -506,7 +506,7 @@ public:
return use_iterator(UseList);
}
static use_iterator use_end() { return use_iterator(0); }
static use_iterator use_end() { return use_iterator(nullptr); }
inline iterator_range<use_iterator> uses() {
return iterator_range<use_iterator>(use_begin(), use_end());
@@ -586,7 +586,7 @@ public:
if (getNumOperands() != 0 &&
getOperand(getNumOperands()-1).getValueType() == MVT::Glue)
return getOperand(getNumOperands()-1).getNode();
return 0;
return nullptr;
}
// If this is a pseudo op, like copyfromreg, look to see if there is a
@@ -611,7 +611,7 @@ public:
for (use_iterator UI = use_begin(), UE = use_end(); UI != UE; ++UI)
if (UI.getUse().get().getValueType() == MVT::Glue)
return *UI;
return 0;
return nullptr;
}
/// getNumValues - Return the number of values defined/returned by this
@@ -644,12 +644,12 @@ public:
/// getOperationName - Return the opcode of this operation for printing.
///
std::string getOperationName(const SelectionDAG *G = 0) const;
std::string getOperationName(const SelectionDAG *G = nullptr) const;
static const char* getIndexedModeName(ISD::MemIndexedMode AM);
void print_types(raw_ostream &OS, const SelectionDAG *G) const;
void print_details(raw_ostream &OS, const SelectionDAG *G) const;
void print(raw_ostream &OS, const SelectionDAG *G = 0) const;
void printr(raw_ostream &OS, const SelectionDAG *G = 0) const;
void print(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
void printr(raw_ostream &OS, const SelectionDAG *G = nullptr) const;
/// printrFull - Print a SelectionDAG node and all children down to
/// the leaves. The given SelectionDAG allows target-specific nodes
@@ -657,7 +657,7 @@ public:
/// print the whole DAG, including children that appear multiple
/// times.
///
void printrFull(raw_ostream &O, const SelectionDAG *G = 0) const;
void printrFull(raw_ostream &O, const SelectionDAG *G = nullptr) const;
/// printrWithDepth - Print a SelectionDAG node and children up to
/// depth "depth." The given SelectionDAG allows target-specific
@@ -665,7 +665,7 @@ public:
/// will print children that appear multiple times wherever they are
/// used.
///
void printrWithDepth(raw_ostream &O, const SelectionDAG *G = 0,
void printrWithDepth(raw_ostream &O, const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
@@ -690,14 +690,15 @@ public:
/// Unlike dumpr, this will print the whole DAG, including children
/// that appear multiple times.
///
void dumprFull(const SelectionDAG *G = 0) const;
void dumprFull(const SelectionDAG *G = nullptr) const;
/// dumprWithDepth - printrWithDepth to dbgs(). The given
/// SelectionDAG allows target-specific nodes to be printed in
/// human-readable form. Unlike dumpr, this will print children
/// that appear multiple times wherever they are used.
///
void dumprWithDepth(const SelectionDAG *G = 0, unsigned depth = 100) const;
void dumprWithDepth(const SelectionDAG *G = nullptr,
unsigned depth = 100) const;
/// Profile - Gather unique data for the node.
///
@@ -717,8 +718,8 @@ protected:
const SDValue *Ops, unsigned NumOps)
: NodeType(Opc), OperandsNeedDelete(true), HasDebugValue(false),
SubclassData(0), NodeId(-1),
OperandList(NumOps ? new SDUse[NumOps] : 0),
ValueList(VTs.VTs), UseList(NULL),
OperandList(NumOps ? new SDUse[NumOps] : nullptr),
ValueList(VTs.VTs), UseList(nullptr),
NumOperands(NumOps), NumValues(VTs.NumVTs),
debugLoc(dl), IROrder(Order) {
for (unsigned i = 0; i != NumOps; ++i) {
@@ -732,9 +733,9 @@ protected:
/// set later with InitOperands.
SDNode(unsigned Opc, unsigned Order, const DebugLoc dl, SDVTList VTs)
: NodeType(Opc), OperandsNeedDelete(false), HasDebugValue(false),
SubclassData(0), NodeId(-1), OperandList(0),
ValueList(VTs.VTs), UseList(NULL), NumOperands(0), NumValues(VTs.NumVTs),
debugLoc(dl), IROrder(Order) {}
SubclassData(0), NodeId(-1), OperandList(nullptr), ValueList(VTs.VTs),
UseList(nullptr), NumOperands(0), NumValues(VTs.NumVTs), debugLoc(dl),
IROrder(Order) {}
/// InitOperands - Initialize the operands list of this with 1 operand.
void InitOperands(SDUse *Ops, const SDValue &Op0) {
@@ -819,7 +820,7 @@ private:
int IROrder;
public:
SDLoc() : Ptr(NULL), IROrder(0) {}
SDLoc() : Ptr(nullptr), IROrder(0) {}
SDLoc(const SDNode *N) : Ptr(N), IROrder(-1) {
assert(N && "null SDNode");
}
@@ -830,14 +831,14 @@ public:
assert(Order >= 0 && "bad IROrder");
}
unsigned getIROrder() {
if (IROrder >= 0 || Ptr == NULL) {
if (IROrder >= 0 || Ptr == nullptr) {
return (unsigned)IROrder;
}
const SDNode *N = (const SDNode*)(Ptr);
return N->getIROrder();
}
DebugLoc getDebugLoc() {
if (Ptr == NULL) {
if (!Ptr) {
return DebugLoc();
}
if (IROrder >= 0) {
@@ -1834,7 +1835,7 @@ public:
private:
friend class SelectionDAG;
MachineSDNode(unsigned Opc, unsigned Order, const DebugLoc DL, SDVTList VTs)
: SDNode(Opc, Order, DL, VTs), MemRefs(0), MemRefsEnd(0) {}
: SDNode(Opc, Order, DL, VTs), MemRefs(nullptr), MemRefsEnd(nullptr) {}
/// LocalOperands - Operands for this instruction, if they fit here. If
/// they don't, this field is unused.
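
Aside: the SDUse members being nulled above (User, Prev, Next) implement an intrusive use list: each use of a value is a node threaded onto a list hanging off that value, with nullptr marking both ends. A reduced model of the insertion logic (simplified relative to the real SDUse, which also tracks the using node):

#include <cassert>

struct Use {
  Use *Next = nullptr;
  Use **Prev = nullptr; // address of whichever pointer currently points at us

  void addToList(Use **List) {
    Next = *List;
    if (Next) Next->Prev = &Next;
    Prev = List;
    *List = this;
  }
};

int main() {
  Use *UseList = nullptr; // empty list: use_empty() is just UseList == nullptr
  Use A, B;
  A.addToList(&UseList);
  B.addToList(&UseList);
  assert(UseList == &B && B.Next == &A && A.Next == nullptr);
  return 0;
}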

View File

@@ -147,11 +147,11 @@ namespace llvm {
};
/// Construct an invalid index.
SlotIndex() : lie(0, 0) {}
SlotIndex() : lie(nullptr, 0) {}
// Construct a new slot index from the given one, and set the slot.
SlotIndex(const SlotIndex &li, Slot s) : lie(li.listEntry(), unsigned(s)) {
assert(lie.getPointer() != 0 &&
assert(lie.getPointer() != nullptr &&
"Attempt to construct index with 0 pointer.");
}
@@ -421,7 +421,7 @@ namespace llvm {
/// Returns the instruction for the given index, or null if the given
/// index has no instruction associated with it.
MachineInstr* getInstructionFromIndex(SlotIndex index) const {
return index.isValid() ? index.listEntry()->getInstr() : 0;
return index.isValid() ? index.listEntry()->getInstr() : nullptr;
}
/// Returns the next non-null index, if one exists.
@@ -551,14 +551,14 @@ namespace llvm {
// Check that we don't cross the boundary into this block.
if (itr->first < end)
return 0;
return nullptr;
itr = std::prev(itr);
if (itr->first <= start)
return itr->second;
return 0;
return nullptr;
}
/// Insert the given machine instruction into the mapping. Returns the
@@ -574,7 +574,7 @@ namespace llvm {
// affected by debug information.
assert(!mi->isDebugValue() && "Cannot number DBG_VALUE instructions.");
assert(mi->getParent() != 0 && "Instr must be added to function.");
assert(mi->getParent() != nullptr && "Instr must be added to function.");
// Get the entries where mi should be inserted.
IndexList::iterator prevItr, nextItr;
@@ -615,7 +615,7 @@ namespace llvm {
IndexListEntry *miEntry(mi2iItr->second.listEntry());
assert(miEntry->getInstr() == mi && "Instruction indexes broken.");
// FIXME: Eventually we want to actually delete these indexes.
miEntry->setInstr(0);
miEntry->setInstr(nullptr);
mi2iMap.erase(mi2iItr);
}
}
@@ -640,15 +640,15 @@ namespace llvm {
MachineFunction::iterator nextMBB =
std::next(MachineFunction::iterator(mbb));
IndexListEntry *startEntry = 0;
IndexListEntry *endEntry = 0;
IndexListEntry *startEntry = nullptr;
IndexListEntry *endEntry = nullptr;
IndexList::iterator newItr;
if (nextMBB == mbb->getParent()->end()) {
startEntry = &indexList.back();
endEntry = createEntry(0, 0);
endEntry = createEntry(nullptr, 0);
newItr = indexList.insertAfter(startEntry, endEntry);
} else {
startEntry = createEntry(0, 0);
startEntry = createEntry(nullptr, 0);
endEntry = getMBBStartIdx(nextMBB).listEntry();
newItr = indexList.insert(endEntry, startEntry);
}
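
Aside: a SlotIndex is essentially a (list-entry pointer, slot number) pair, and the invalid index constructed above is the pair (nullptr, 0). A rough standalone stand-in for that encoding, assuming the pointee is at least 4-byte aligned so the slot fits in the pointer's low bits (the real class delegates this to LLVM's PointerIntPair):

#include <cassert>
#include <cstdint>

struct Entry { int instr; }; // assumed >= 4-byte aligned

struct TinyIndex {
  std::uintptr_t bits = 0; // nullptr with slot 0: the invalid index
  TinyIndex() = default;
  TinyIndex(Entry *e, unsigned slot)
      : bits(reinterpret_cast<std::uintptr_t>(e) | (slot & 3u)) {
    assert(e != nullptr && "Attempt to construct index with 0 pointer.");
  }
  bool isValid() const { return (bits & ~std::uintptr_t(3)) != 0; }
  unsigned slot() const { return unsigned(bits & 3u); }
};

int main() {
  TinyIndex invalid;
  Entry e{0};
  TinyIndex idx(&e, 2);
  assert(!invalid.isValid() && idx.isValid() && idx.slot() == 2);
  return 0;
}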

View File

@@ -140,7 +140,7 @@ private:
uint64_t ID;
LocationVec Locations;
LiveOutVec LiveOuts;
CallsiteInfo() : CSOffsetExpr(0), ID(0) {}
CallsiteInfo() : CSOffsetExpr(nullptr), ID(0) {}
CallsiteInfo(const MCExpr *CSOffsetExpr, uint64_t ID,
LocationVec &Locations, LiveOutVec &LiveOuts)
: CSOffsetExpr(CSOffsetExpr), ID(ID), Locations(Locations),

View File

@@ -105,11 +105,12 @@ private:
public:
static char ID; // Pass identification, replacement for typeid.
StackProtector() : FunctionPass(ID), TM(0), TLI(0), SSPBufferSize(0) {
StackProtector()
: FunctionPass(ID), TM(nullptr), TLI(nullptr), SSPBufferSize(0) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}
StackProtector(const TargetMachine *TM)
: FunctionPass(ID), TM(TM), TLI(0), Trip(TM->getTargetTriple()),
: FunctionPass(ID), TM(TM), TLI(nullptr), Trip(TM->getTargetTriple()),
SSPBufferSize(8) {
initializeStackProtectorPass(*PassRegistry::getPassRegistry());
}

View File

@@ -41,7 +41,7 @@ class TargetSchedModel {
unsigned MicroOpFactor; // Multiply to normalize microops to resource units.
unsigned ResourceLCM; // Resource units per cycle. Latency normalization factor.
public:
TargetSchedModel(): STI(0), TII(0) {}
TargetSchedModel(): STI(nullptr), TII(nullptr) {}
/// \brief Initialize the machine model for instruction scheduling.
///
@@ -75,7 +75,7 @@ public:
const InstrItineraryData *getInstrItineraries() const {
if (hasInstrItineraries())
return &InstrItins;
return 0;
return nullptr;
}
/// \brief Identify the processor corresponding to the current subtarget.
@@ -86,7 +86,7 @@ public:
/// \brief Return the number of issue slots required for this MI.
unsigned getNumMicroOps(const MachineInstr *MI,
const MCSchedClassDesc *SC = 0) const;
const MCSchedClassDesc *SC = nullptr) const;
/// \brief Get the number of kinds of resources for this target.
unsigned getNumProcResourceKinds() const {

View File

@@ -177,7 +177,7 @@ namespace llvm {
/// the specified stack slot
void assignVirt2StackSlot(unsigned virtReg, int frameIndex);
void print(raw_ostream &OS, const Module* M = 0) const override;
void print(raw_ostream &OS, const Module* M = nullptr) const override;
void dump() const;
};

View File

@@ -67,7 +67,7 @@ struct ValueMapConfig {
/// and onDelete) and not inside other ValueMap methods. NULL means that no
/// mutex is necessary.
template<typename ExtraDataT>
static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return NULL; }
static sys::Mutex *getMutex(const ExtraDataT &/*Data*/) { return nullptr; }
};
/// See the file comment.
@@ -253,10 +253,10 @@ struct DenseMapInfo<ValueMapCallbackVH<KeyT, ValueT, Config> > {
typedef DenseMapInfo<KeyT> PointerInfo;
static inline VH getEmptyKey() {
return VH(PointerInfo::getEmptyKey(), NULL);
return VH(PointerInfo::getEmptyKey(), nullptr);
}
static inline VH getTombstoneKey() {
return VH(PointerInfo::getTombstoneKey(), NULL);
return VH(PointerInfo::getTombstoneKey(), nullptr);
}
static unsigned getHashValue(const VH &Val) {
return PointerInfo::getHashValue(Val.Unwrap());
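
Aside: ValueMapConfig is a traits class: the map asks it for policy through static hooks, and returning nullptr from getMutex means "no locking required". A reduced model of a hook whose nullptr return is checked by the consumer (invented names):

#include <cstdio>

struct Mutex { void lock() {} void unlock() {} };

struct DefaultConfig {
  template <typename ExtraDataT>
  static Mutex *getMutex(const ExtraDataT &) { return nullptr; } // no mutex
};

template <typename Config, typename ExtraDataT>
void onUpdate(const ExtraDataT &Data) {
  if (Mutex *M = Config::getMutex(Data)) { // lock only when one exists
    M->lock();
    std::puts("locked update");
    M->unlock();
  } else {
    std::puts("lock-free update");
  }
}

int main() {
  int extra = 0;
  onUpdate<DefaultConfig>(extra);
  return 0;
}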

View File

@@ -182,7 +182,7 @@ AnalysisType *Pass::getAnalysisIfAvailable() const {
const void *PI = &AnalysisType::ID;
Pass *ResultPass = Resolver->getAnalysisIfAvailable(PI, true);
if (!ResultPass) return 0;
if (!ResultPass) return nullptr;
// Because the AnalysisType may not be a subclass of pass (for
// AnalysisGroups), we use getAdjustedAnalysisPointer here to potentially

View File

@@ -44,10 +44,10 @@ class ArrayRecycler {
// Return NULL if no entries are available.
T *pop(unsigned Idx) {
if (Idx >= Bucket.size())
return 0;
return nullptr;
FreeList *Entry = Bucket[Idx];
if (!Entry)
return 0;
return nullptr;
Bucket[Idx] = Entry->Next;
return reinterpret_cast<T*>(Entry);
}
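
Aside: pop() above returns nullptr in two distinct cases (the size class was never seen, or its free list is empty), and the caller falls back to a fresh allocation. A reduced standalone model of the bucketed free list:

#include <cassert>
#include <vector>

struct FreeNode { FreeNode *Next; };

struct TinyRecycler {
  std::vector<FreeNode *> Bucket;

  FreeNode *pop(unsigned Idx) {
    if (Idx >= Bucket.size())
      return nullptr;          // size class never recycled
    FreeNode *Entry = Bucket[Idx];
    if (!Entry)
      return nullptr;          // size class currently empty
    Bucket[Idx] = Entry->Next;
    return Entry;
  }

  void push(unsigned Idx, FreeNode *N) {
    if (Idx >= Bucket.size())
      Bucket.resize(Idx + 1, nullptr);
    N->Next = Bucket[Idx];
    Bucket[Idx] = N;
  }
};

int main() {
  TinyRecycler R;
  assert(R.pop(2) == nullptr); // nothing recycled yet
  FreeNode N{nullptr};
  R.push(2, &N);
  assert(R.pop(2) == &N && R.pop(2) == nullptr);
  return 0;
}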

View File

@@ -1063,7 +1063,7 @@ class opt_storage {
OptionValue<DataType> Default;
void check_location() const {
assert(Location != 0 && "cl::location(...) not specified for a command "
assert(Location && "cl::location(...) not specified for a command "
"line option with external storage, "
"or cl::init specified before cl::location()!!");
}

View File

@@ -489,7 +489,7 @@ public:
/// creates a new node as a child of DomBB dominator node,linking it into
/// the children list of the immediate dominator.
DomTreeNodeBase<NodeT> *addNewBlock(NodeT *BB, NodeT *DomBB) {
assert(getNode(BB) == 0 && "Block already in dominator tree!");
assert(getNode(BB) == nullptr && "Block already in dominator tree!");
DomTreeNodeBase<NodeT> *IDomNode = getNode(DomBB);
assert(IDomNode && "Not immediate dominator specified for block!");
DFSInfoValid = false;
@@ -636,7 +636,7 @@ protected:
// immediate dominator.
NodeT *IDom = getIDom(BB);
assert(IDom || this->DomTreeNodes[NULL]);
assert(IDom || this->DomTreeNodes[nullptr]);
DomTreeNodeBase<NodeT> *IDomNode = getNodeForBlock(IDom);
// Add a new tree node for this NodeT, and link it as a child of

View File

@@ -263,7 +263,7 @@ void Calculate(DominatorTreeBase<typename GraphTraits<NodeT>::NodeType>& DT,
typename GraphT::NodeType* ImmDom = DT.getIDom(W);
assert(ImmDom || DT.DomTreeNodes[NULL]);
assert(ImmDom || DT.DomTreeNodes[nullptr]);
// Get or calculate the node for the immediate dominator
DomTreeNodeBase<typename GraphT::NodeType> *IDomNode =

View File

@@ -88,7 +88,7 @@ namespace llvm {
const entry& Val;
public:
node(const entry& V) : Next(0), Val(V) {
node(const entry& V) : Next(nullptr), Val(V) {
if (Tail)
Tail->Next = this;
else
@@ -116,7 +116,7 @@ namespace llvm {
};
static iterator begin() { return iterator(Head); }
static iterator end() { return iterator(0); }
static iterator end() { return iterator(nullptr); }
/// Abstract base class for registry listeners, which are informed when new

View File

@@ -88,10 +88,10 @@ public:
explicit Timer(StringRef N) : TG(nullptr) { init(N); }
Timer(StringRef N, TimerGroup &tg) : TG(nullptr) { init(N, tg); }
Timer(const Timer &RHS) : TG(nullptr) {
assert(RHS.TG == 0 && "Can only copy uninitialized timers");
assert(!RHS.TG && "Can only copy uninitialized timers");
}
const Timer &operator=(const Timer &T) {
assert(TG == 0 && T.TG == 0 && "Can only assign uninit timers");
assert(!TG && !T.TG && "Can only assign uninit timers");
return *this;
}
~Timer();

View File

@@ -105,7 +105,7 @@ public:
virtual const SpillSlot *
getCalleeSavedSpillSlots(unsigned &NumEntries) const {
NumEntries = 0;
return 0;
return nullptr;
}
/// targetHandlesStackFrameRounding - Returns true if the target is
@@ -190,7 +190,7 @@ public:
/// before PrologEpilogInserter scans the physical registers used to determine
/// what callee saved registers should be spilled. This method is optional.
virtual void processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
RegScavenger *RS = NULL) const {
RegScavenger *RS = nullptr) const {
}
@@ -200,7 +200,7 @@ public:
/// replaced with direct constants. This method is optional.
///
virtual void processFunctionBeforeFrameFinalized(MachineFunction &MF,
RegScavenger *RS = NULL) const {
RegScavenger *RS = nullptr) const {
}
/// eliminateCallFramePseudoInstr - This method is called during prolog/epilog

View File

@@ -66,7 +66,7 @@ public:
/// rematerializable, meaning it has no side effects and requires no operands
/// that aren't always available.
bool isTriviallyReMaterializable(const MachineInstr *MI,
AliasAnalysis *AA = 0) const {
AliasAnalysis *AA = nullptr) const {
return MI->getOpcode() == TargetOpcode::IMPLICIT_DEF ||
(MI->getDesc().isRematerializable() &&
(isReallyTriviallyReMaterializable(MI, AA) ||
@@ -230,7 +230,7 @@ public:
virtual MachineInstr *
convertToThreeAddress(MachineFunction::iterator &MFI,
MachineBasicBlock::iterator &MBBI, LiveVariables *LV) const {
return 0;
return nullptr;
}
/// commuteInstruction - If a target has any instructions that are
@@ -257,7 +257,7 @@ public:
/// aggressive checks.
virtual bool produceSameValue(const MachineInstr *MI0,
const MachineInstr *MI1,
const MachineRegisterInfo *MRI = 0) const;
const MachineRegisterInfo *MRI = nullptr) const;
/// AnalyzeBranch - Analyze the branching code at the end of MBB, returning
/// true if it cannot be understood (e.g. it's a switch dispatch or isn't
@@ -555,7 +555,7 @@ protected:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
int FrameIndex) const {
return 0;
return nullptr;
}
/// foldMemoryOperandImpl - Target-dependent implementation for
@@ -565,7 +565,7 @@ protected:
MachineInstr* MI,
const SmallVectorImpl<unsigned> &Ops,
MachineInstr* LoadMI) const {
return 0;
return nullptr;
}
public:
@@ -597,7 +597,7 @@ public:
/// value.
virtual unsigned getOpcodeAfterMemoryUnfold(unsigned Opc,
bool UnfoldLoad, bool UnfoldStore,
unsigned *LoadRegIndex = 0) const {
unsigned *LoadRegIndex = nullptr) const {
return 0;
}
@@ -780,7 +780,7 @@ public:
const MachineRegisterInfo *MRI,
unsigned &FoldAsLoadDefReg,
MachineInstr *&DefMI) const {
return 0;
return nullptr;
}
/// FoldImmediate - 'Reg' is known to be defined by a move immediate
@@ -838,7 +838,7 @@ public:
/// PredCost.
virtual unsigned getInstrLatency(const InstrItineraryData *ItinData,
const MachineInstr *MI,
unsigned *PredCost = 0) const;
unsigned *PredCost = nullptr) const;
virtual unsigned getPredicationCost(const MachineInstr *MI) const;
@@ -1003,7 +1003,7 @@ public:
/// Create machine specific model for scheduling.
virtual DFAPacketizer*
CreateTargetScheduleState(const TargetMachine*, const ScheduleDAG*) const {
return NULL;
return nullptr;
}
private:
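
Aside: the hooks in this file use nullptr as the "not handled" answer: the base implementation declines, targets override when they can do better, and the caller null-checks the result. A reduced model of that contract (the target name is invented):

#include <cstdio>

struct Instr { const char *name; };

struct BaseTII {
  virtual ~BaseTII() {}
  virtual Instr *foldImpl(Instr &) const { return nullptr; } // default: can't fold
};

struct SomeTargetTII : BaseTII {
  Instr *foldImpl(Instr &) const override {
    static Instr Folded{"folded"};
    return &Folded;                                          // this target can
  }
};

void tryFold(const BaseTII &TII, Instr &MI) {
  if (Instr *NewMI = TII.foldImpl(MI)) // nullptr means keep MI unchanged
    std::printf("replaced %s with %s\n", MI.name, NewMI->name);
  else
    std::printf("kept %s\n", MI.name);
}

int main() {
  Instr MI{"load+add"};
  BaseTII B;
  SomeTargetTII T;
  tryFold(B, MI);
  tryFold(T, MI);
  return 0;
}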

View File

@@ -323,7 +323,7 @@ public:
bool isTypeLegal(EVT VT) const {
assert(!VT.isSimple() ||
(unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
}
class ValueTypeActionImpl {
@@ -755,7 +755,7 @@ public:
/// alignment error (trap) on the target machine.
virtual bool allowsUnalignedMemoryAccesses(EVT,
unsigned AddrSpace = 0,
bool * /*Fast*/ = 0) const {
bool * /*Fast*/ = nullptr) const {
return false;
}
@@ -1179,7 +1179,7 @@ public:
int64_t BaseOffs;
bool HasBaseReg;
int64_t Scale;
AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
/// Return true if the addressing mode represented by AM is legal for this
@@ -2089,7 +2089,7 @@ public:
IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
Args(args), DAG(dag), DL(dl), CS(NULL) {}
Args(args), DAG(dag), DL(dl), CS(nullptr) {}
};
/// This function lowers an abstract call to a function into an actual call.
@@ -2173,7 +2173,7 @@ public:
/// Returns a 0 terminated array of registers that can be safely used as
/// scratch registers.
virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
return NULL;
return nullptr;
}
/// This callback is used to prepare for a volatile or atomic load.
@@ -2234,7 +2234,7 @@ public:
/// target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &,
const TargetLibraryInfo *) const {
return 0;
return nullptr;
}
@@ -2308,7 +2308,7 @@ public:
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
CallOperandVal(0), ConstraintVT(MVT::Other) {
CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
}
};
@@ -2336,7 +2336,7 @@ public:
/// Op, otherwise an empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
SelectionDAG *DAG = 0) const;
SelectionDAG *DAG = nullptr) const;
/// Given a constraint, return the type of constraint it is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;

View File

@@ -44,7 +44,7 @@ class TargetLoweringObjectFile : public MCObjectFileInfo {
public:
MCContext &getContext() const { return *Ctx; }
TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(0), DL(0) {}
TargetLoweringObjectFile() : MCObjectFileInfo(), Ctx(nullptr), DL(nullptr) {}
virtual ~TargetLoweringObjectFile();
@@ -104,7 +104,7 @@ public:
virtual const MCSection *getSpecialCasedSectionGlobals(const GlobalValue *GV,
SectionKind Kind,
Mangler &Mang) const {
return 0;
return nullptr;
}
/// Return an MCExpr to use for a reference to the specified global variable
@@ -148,7 +148,7 @@ public:
virtual const MCExpr *
getExecutableRelativeSymbol(const ConstantExpr *CE, Mangler &Mang,
const TargetMachine &TM) const {
return 0;
return nullptr;
}
/// \brief True if the section is atomized using the symbols in it.

View File

@@ -102,7 +102,9 @@ public:
/// getSubtargetImpl - virtual method implemented by subclasses that returns
/// a reference to that target's TargetSubtargetInfo-derived member variable.
virtual const TargetSubtargetInfo *getSubtargetImpl() const { return 0; }
virtual const TargetSubtargetInfo *getSubtargetImpl() const {
return nullptr;
}
mutable TargetOptions Options;
@@ -118,11 +120,15 @@ public:
//
// N.B. These objects may change during compilation. It's not safe to cache
// them between functions.
virtual const TargetInstrInfo *getInstrInfo() const { return 0; }
virtual const TargetFrameLowering *getFrameLowering() const { return 0; }
virtual const TargetLowering *getTargetLowering() const { return 0; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const{ return 0; }
virtual const DataLayout *getDataLayout() const { return 0; }
virtual const TargetInstrInfo *getInstrInfo() const { return nullptr; }
virtual const TargetFrameLowering *getFrameLowering() const {
return nullptr;
}
virtual const TargetLowering *getTargetLowering() const { return nullptr; }
virtual const TargetSelectionDAGInfo *getSelectionDAGInfo() const {
return nullptr;
}
virtual const DataLayout *getDataLayout() const { return nullptr; }
/// getMCAsmInfo - Return target specific asm information.
///
@@ -139,23 +145,23 @@ public:
/// not, return null. This is kept separate from RegInfo until RegInfo has
/// details of graph coloring register allocation removed from it.
///
virtual const TargetRegisterInfo *getRegisterInfo() const { return 0; }
virtual const TargetRegisterInfo *getRegisterInfo() const { return nullptr; }
/// getIntrinsicInfo - If intrinsic information is available, return it. If
/// not, return null.
///
virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return 0; }
virtual const TargetIntrinsicInfo *getIntrinsicInfo() const { return nullptr;}
/// getJITInfo - If this target supports a JIT, return information for it,
/// otherwise return null.
///
virtual TargetJITInfo *getJITInfo() { return 0; }
virtual TargetJITInfo *getJITInfo() { return nullptr; }
/// getInstrItineraryData - Returns instruction itinerary data for the target
/// or specific subtarget.
///
virtual const InstrItineraryData *getInstrItineraryData() const {
return 0;
return nullptr;
}
bool requiresStructuredCFG() const { return RequireStructuredCFG; }
@@ -263,8 +269,8 @@ public:
formatted_raw_ostream &,
CodeGenFileType,
bool /*DisableVerify*/ = true,
AnalysisID /*StartAfter*/ = 0,
AnalysisID /*StopAfter*/ = 0) {
AnalysisID /*StartAfter*/ = nullptr,
AnalysisID /*StopAfter*/ = nullptr) {
return true;
}
@@ -323,8 +329,8 @@ public:
/// generation.
bool addPassesToEmitFile(PassManagerBase &PM, formatted_raw_ostream &Out,
CodeGenFileType FileType, bool DisableVerify = true,
AnalysisID StartAfter = 0,
AnalysisID StopAfter = 0) override;
AnalysisID StartAfter = nullptr,
AnalysisID StopAfter = nullptr) override;
/// addPassesToEmitMachineCode - Add passes to the specified pass manager to
/// get machine code emitted. This uses a JITCodeEmitter object to handle

View File

@@ -174,7 +174,7 @@ public:
/// isASubClass - return true if this TargetRegisterClass is a subset
/// class of at least one other TargetRegisterClass.
bool isASubClass() const {
return SuperClasses[0] != 0;
return SuperClasses[0] != nullptr;
}
/// getRawAllocationOrder - Returns the preferred order for allocating
@@ -317,7 +317,7 @@ public:
/// indicating if a register is allocatable or not. If a register class is
/// specified, returns the subset for the class.
BitVector getAllocatableSet(const MachineFunction &MF,
const TargetRegisterClass *RC = NULL) const;
const TargetRegisterClass *RC = nullptr) const;
/// getCostPerUse - Return the additional cost of using this register instead
/// of other registers in its class.
@@ -420,8 +420,8 @@ public:
/// order of desired callee-save stack frame offset. The first register is
/// closest to the incoming stack pointer if stack grows down, and vice versa.
///
virtual const MCPhysReg* getCalleeSavedRegs(const MachineFunction *MF = 0)
const = 0;
virtual const MCPhysReg*
getCalleeSavedRegs(const MachineFunction *MF = nullptr) const = 0;
/// getCallPreservedMask - Return a mask of call-preserved registers for the
/// given calling convention on the current sub-target. The mask should
@@ -443,7 +443,7 @@ public:
///
virtual const uint32_t *getCallPreservedMask(CallingConv::ID) const {
// The default mask clobbers everything. All targets should override.
return 0;
return nullptr;
}
/// getReservedRegs - Returns a bitset indexed by physical register number
@@ -651,7 +651,7 @@ public:
ArrayRef<MCPhysReg> Order,
SmallVectorImpl<MCPhysReg> &Hints,
const MachineFunction &MF,
const VirtRegMap *VRM = 0) const;
const VirtRegMap *VRM = nullptr) const;
/// avoidWriteAfterWrite - Return true if the register allocator should avoid
/// writing a register from RC in two consecutive instructions.
@@ -805,7 +805,7 @@ public:
/// instruction. FIOperandNum is the FI operand number.
virtual void eliminateFrameIndex(MachineBasicBlock::iterator MI,
int SPAdj, unsigned FIOperandNum,
RegScavenger *RS = NULL) const = 0;
RegScavenger *RS = nullptr) const = 0;
//===--------------------------------------------------------------------===//
/// Debug information queries.
@ -874,7 +874,7 @@ public:
Mask += RCMaskWords;
SubReg = *Idx++;
if (!SubReg)
Idx = 0;
Idx = nullptr;
}
};
@@ -902,7 +902,7 @@ class PrintReg {
unsigned Reg;
unsigned SubIdx;
public:
explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = 0,
explicit PrintReg(unsigned reg, const TargetRegisterInfo *tri = nullptr,
unsigned subidx = 0)
: TRI(tri), Reg(reg), SubIdx(subidx) {}
void print(raw_ostream&) const;
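
Aside: PrintReg is a small print adaptor: a value type carrying what to print plus optional context, where a nullptr TargetRegisterInfo selects a raw fallback spelling. A reduced model using std::ostream in place of raw_ostream (names invented):

#include <iostream>

struct TRIModel { const char *name(unsigned) const { return "rax"; } };

struct PrintRegModel {
  const TRIModel *TRI;
  unsigned Reg;
  explicit PrintRegModel(unsigned reg, const TRIModel *tri = nullptr)
      : TRI(tri), Reg(reg) {}
};

std::ostream &operator<<(std::ostream &OS, const PrintRegModel &P) {
  if (P.TRI)                    // context available: symbolic name
    return OS << '%' << P.TRI->name(P.Reg);
  return OS << "%reg" << P.Reg; // no context: raw number
}

int main() {
  TRIModel TRI;
  std::cout << PrintRegModel(5) << ' ' << PrintRegModel(5, &TRI) << '\n';
  return 0;
}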

View File

@@ -122,7 +122,7 @@ Pass *createLICMPass();
//
Pass *createLoopStrengthReducePass();
Pass *createGlobalMergePass(const TargetMachine *TM = 0);
Pass *createGlobalMergePass(const TargetMachine *TM = nullptr);
//===----------------------------------------------------------------------===//
//

View File

@@ -39,18 +39,18 @@ void DeleteDeadBlock(BasicBlock *BB);
/// any single-entry PHI nodes in it, fold them away. This handles the case
/// when all entries to the PHI nodes in a block are guaranteed equal, such as
/// when the block has exactly one predecessor.
void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0);
void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = nullptr);
/// DeleteDeadPHIs - Examine each PHI in the given block and delete it if it
/// is dead. Also recursively delete any operands that become dead as
/// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. Return true
/// if any PHIs were deleted.
bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0);
bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = nullptr);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure.
bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = 0);
bool MergeBlockIntoPredecessor(BasicBlock *BB, Pass *P = nullptr);
// ReplaceInstWithValue - Replace all uses of an instruction (specified by BI)
// with a value, then remove and delete the original instruction.
@@ -89,12 +89,13 @@ void ReplaceInstWithInst(Instruction *From, Instruction *To);
/// to.
///
BasicBlock *SplitCriticalEdge(TerminatorInst *TI, unsigned SuccNum,
Pass *P = 0, bool MergeIdenticalEdges = false,
Pass *P = nullptr,
bool MergeIdenticalEdges = false,
bool DontDeleteUselessPHIs = false,
bool SplitLandingPads = false);
inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
Pass *P = 0) {
Pass *P = nullptr) {
return SplitCriticalEdge(BB->getTerminator(), SI.getSuccessorIndex(), P);
}
@@ -103,7 +104,8 @@ inline BasicBlock *SplitCriticalEdge(BasicBlock *BB, succ_iterator SI,
/// This updates all of the same analyses as the other SplitCriticalEdge
/// function. If P is specified, it updates the analyses
/// described above.
inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI, Pass *P = 0) {
inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI,
Pass *P = nullptr) {
bool MadeChange = false;
TerminatorInst *TI = (*PI)->getTerminator();
for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i)
@@ -117,7 +119,7 @@ inline bool SplitCriticalEdge(BasicBlock *Succ, pred_iterator PI, Pass *P = 0) {
/// an edge between the two blocks. If P is specified, it updates the analyses
/// described above.
inline BasicBlock *SplitCriticalEdge(BasicBlock *Src, BasicBlock *Dst,
Pass *P = 0,
Pass *P = nullptr,
bool MergeIdenticalEdges = false,
bool DontDeleteUselessPHIs = false) {
TerminatorInst *TI = Src->getTerminator();
@@ -155,7 +157,7 @@ BasicBlock *SplitBlock(BasicBlock *Old, Instruction *SplitPt, Pass *P);
/// is an exit of a loop with other exits).
///
BasicBlock *SplitBlockPredecessors(BasicBlock *BB, ArrayRef<BasicBlock*> Preds,
const char *Suffix, Pass *P = 0);
const char *Suffix, Pass *P = nullptr);
/// SplitLandingPadPredecessors - This method transforms the landing pad,
/// OrigBB, by introducing two new basic blocks into the function. One of those
@ -203,7 +205,7 @@ ReturnInst *FoldReturnIntoUncondBranch(ReturnInst *RI, BasicBlock *BB,
/// Returns the NewBasicBlock's terminator.
TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
bool Unreachable,
MDNode *BranchWeights = 0);
MDNode *BranchWeights = nullptr);
/// SplitBlockAndInsertIfThenElse is similar to SplitBlockAndInsertIfThen,
@ -223,7 +225,7 @@ TerminatorInst *SplitBlockAndInsertIfThen(Value *Cond, Instruction *SplitBefore,
void SplitBlockAndInsertIfThenElse(Value *Cond, Instruction *SplitBefore,
TerminatorInst **ThenTerm,
TerminatorInst **ElseTerm,
MDNode *BranchWeights = 0);
MDNode *BranchWeights = nullptr);
///
/// GetIfCondition - Check whether BB is the merge point of a if-region.
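
Aside: the Pass *P = nullptr defaults throughout this header follow one convention: nullptr means "no pass context, so no analyses to keep up to date", and the utility simply skips that bookkeeping. A sketch of the consuming side (invented names):

#include <cstdio>

struct PassModel {
  void updateAnalyses() { std::puts("analyses updated"); }
};

void splitEdgeModel(PassModel *P = nullptr) {
  std::puts("edge split");
  if (P)                  // only touch analyses when a pass was supplied
    P->updateAnalyses();
}

int main() {
  splitEdgeModel();       // standalone use: no analyses to preserve
  PassModel P;
  splitEdgeModel(&P);     // inside a pass pipeline
  return 0;
}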

View File

@@ -55,7 +55,7 @@ template<typename T> class SmallVectorImpl;
/// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
const TargetLibraryInfo *TLI = 0);
const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Local dead code elimination.
@@ -64,21 +64,23 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
/// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects.
///
bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0);
bool isInstructionTriviallyDead(Instruction *I,
const TargetLibraryInfo *TLI = nullptr);
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
const TargetLibraryInfo *TLI=0);
const TargetLibraryInfo *TLI = nullptr);
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made.
bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
bool RecursivelyDeleteDeadPHINode(PHINode *PN,
const TargetLibraryInfo *TLI = nullptr);
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to
@@ -86,8 +88,8 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
///
/// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
const TargetLibraryInfo *TLI = 0);
bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = nullptr,
const TargetLibraryInfo *TLI = nullptr);
//===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring.
@@ -105,7 +107,7 @@ bool SimplifyInstructionsInBlock(BasicBlock *BB, const DataLayout *TD = 0,
/// .. and delete the predecessor corresponding to the '1', this will attempt to
/// recursively fold the 'and' to 0.
void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
DataLayout *TD = 0);
DataLayout *TD = nullptr);
/// MergeBasicBlockIntoOnlyPred - BB is a block with one predecessor and its
@ -113,7 +115,7 @@ void RemovePredecessorAndSimplify(BasicBlock *BB, BasicBlock *Pred,
/// between them, moving the instructions in the predecessor into BB. This
/// deletes the predecessor block.
///
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = 0);
void MergeBasicBlockIntoOnlyPred(BasicBlock *BB, Pass *P = nullptr);
/// TryToSimplifyUncondBranchFromEmptyBlock - BB is known to contain an
@@ -137,13 +139,13 @@ bool EliminateDuplicatePHINodes(BasicBlock *BB);
/// the basic block that was pointed to.
///
bool SimplifyCFG(BasicBlock *BB, const TargetTransformInfo &TTI,
const DataLayout *TD = 0);
const DataLayout *TD = nullptr);
/// FlattenCFG - This function is used to flatten a CFG. For
/// example, it uses parallel-and and parallel-or mode to collapse
/// if-conditions and merge if-regions with identical statements.
///
bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = 0);
bool FlattenCFG(BasicBlock *BB, AliasAnalysis *AA = nullptr);
/// FoldBranchToCommonDest - If this basic block is ONLY a setcc and a branch,
/// and if a predecessor branches to us and one of our successors, fold the
@@ -159,22 +161,23 @@ bool FoldBranchToCommonDest(BranchInst *BI);
///
AllocaInst *DemoteRegToStack(Instruction &X,
bool VolatileLoads = false,
Instruction *AllocaPoint = 0);
Instruction *AllocaPoint = nullptr);
/// DemotePHIToStack - This function takes a virtual register computed by a phi
/// node and replaces it with a slot in the stack frame, allocated via alloca.
/// The phi node is deleted and it returns the pointer to the alloca inserted.
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = 0);
AllocaInst *DemotePHIToStack(PHINode *P, Instruction *AllocaPoint = nullptr);
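// The Reg2Mem pass is the classic client of these two; a condensed sketch
// of that shape (worklist construction assumed):
//
//   for (std::list<Instruction *>::iterator I = WorkList.begin(),
//        E = WorkList.end(); I != E; ++I)
//     if (PHINode *PN = dyn_cast<PHINode>(*I))
//       DemotePHIToStack(PN);        // AllocaPoint defaults to nullptr
//     else
//       DemoteRegToStack(**I);       // likewise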
/// getOrEnforceKnownAlignment - If the specified pointer has an alignment that
/// we can determine, return it, otherwise return 0. If PrefAlign is specified,
/// and it is more than the alignment of the ultimate object, see if we can
/// increase the alignment of the ultimate object, making this check succeed.
unsigned getOrEnforceKnownAlignment(Value *V, unsigned PrefAlign,
const DataLayout *TD = 0);
const DataLayout *TD = nullptr);
/// getKnownAlignment - Try to infer an alignment for the specified pointer.
static inline unsigned getKnownAlignment(Value *V, const DataLayout *TD = 0) {
static inline unsigned getKnownAlignment(Value *V,
const DataLayout *TD = nullptr) {
return getOrEnforceKnownAlignment(V, 0, TD);
}
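// Relationship between the two helpers (sketch; `Ptr` and `TD` assumed):
// getKnownAlignment is the pure query (PrefAlign == 0), while a caller
// wanting 16-byte accesses can try to enforce them:
//
//   if (getOrEnforceKnownAlignment(Ptr, 16, TD) >= 16) {
//     // Ptr is known (or was just made) 16-byte aligned here.
//   }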

View File

@ -56,7 +56,7 @@ private:
public:
/// If InsertedPHIs is specified, it will be filled
/// in with all PHI Nodes created by rewriting.
explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = 0);
explicit SSAUpdater(SmallVectorImpl<PHINode*> *InsertedPHIs = nullptr);
~SSAUpdater();
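// A hedged caller-side sketch (`BB`, `NewVal`, and the Use `U` are
// assumed):
//
//   SmallVector<PHINode *, 8> NewPHIs;
//   SSAUpdater SSA(&NewPHIs);            // or just: SSAUpdater SSA;
//   SSA.Initialize(NewVal->getType(), "newval");
//   SSA.AddAvailableValue(BB, NewVal);
//   SSA.RewriteUse(U);                   // rewrites U to the reaching def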
/// \brief Reset this object to get ready for a new set of SSA updates with

View File

@ -52,8 +52,8 @@ private:
PhiT *PHITag; // Marker for existing PHIs that match.
BBInfo(BlkT *ThisBB, ValT V)
: BB(ThisBB), AvailableVal(V), DefBB(V ? this : 0), BlkNum(0), IDom(0),
NumPreds(0), Preds(0), PHITag(0) { }
: BB(ThisBB), AvailableVal(V), DefBB(V ? this : nullptr), BlkNum(0),
IDom(nullptr), NumPreds(0), Preds(nullptr), PHITag(nullptr) {}
};
typedef DenseMap<BlkT*, ValT> AvailableValsTy;
@ -115,7 +115,7 @@ public:
Traits::FindPredecessorBlocks(Info->BB, &Preds);
Info->NumPreds = Preds.size();
if (Info->NumPreds == 0)
Info->Preds = 0;
Info->Preds = nullptr;
else
Info->Preds = static_cast<BBInfo**>
(Allocator.Allocate(Info->NumPreds * sizeof(BBInfo*),
@ -148,7 +148,7 @@ public:
// Now that we know what blocks are backwards-reachable from the starting
// block, do a forward depth-first traversal to assign postorder numbers
// to those blocks.
BBInfo *PseudoEntry = new (Allocator) BBInfo(0, 0);
BBInfo *PseudoEntry = new (Allocator) BBInfo(nullptr, 0);
unsigned BlkNum = 1;
// Initialize the worklist with the roots from the backward traversal.
@ -231,7 +231,7 @@ public:
for (typename BlockListTy::reverse_iterator I = BlockList->rbegin(),
E = BlockList->rend(); I != E; ++I) {
BBInfo *Info = *I;
BBInfo *NewIDom = 0;
BBInfo *NewIDom = nullptr;
// Iterate through the block's predecessors.
for (unsigned p = 0; p != Info->NumPreds; ++p) {
@ -386,7 +386,7 @@ public:
// Match failed: clear all the PHITag values.
for (typename BlockListTy::iterator I = BlockList->begin(),
E = BlockList->end(); I != E; ++I)
(*I)->PHITag = 0;
(*I)->PHITag = nullptr;
}
}

View File

@ -121,7 +121,7 @@ AggressiveAntiDepBreaker(MachineFunction& MFi,
TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
RegClassInfo(RCI),
State(NULL) {
State(nullptr) {
/* Collect a bitset of all registers that are only broken if they
are on the critical path. */
for (unsigned i = 0, e = CriticalPathRCs.size(); i < e; ++i) {
@ -144,7 +144,7 @@ AggressiveAntiDepBreaker::~AggressiveAntiDepBreaker() {
}
void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
assert(State == NULL);
assert(!State);
State = new AggressiveAntiDepState(TRI->getNumRegs(), BB);
bool IsReturnBlock = (!BB->empty() && BB->back().isReturn());
@ -183,7 +183,7 @@ void AggressiveAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
void AggressiveAntiDepBreaker::FinishBlock() {
delete State;
State = NULL;
State = nullptr;
}
void AggressiveAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
@ -230,13 +230,13 @@ bool AggressiveAntiDepBreaker::IsImplicitDefUse(MachineInstr *MI,
if (Reg == 0)
return false;
MachineOperand *Op = NULL;
MachineOperand *Op = nullptr;
if (MO.isDef())
Op = MI->findRegisterUseOperand(Reg, true);
else
Op = MI->findRegisterDefOperand(Reg);
return((Op != NULL) && Op->isImplicit());
return(Op && Op->isImplicit());
}
void AggressiveAntiDepBreaker::GetPassthruRegs(MachineInstr *MI,
@ -273,10 +273,10 @@ static void AntiDepEdges(const SUnit *SU, std::vector<const SDep*>& Edges) {
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SUnit *CriticalPathStep(const SUnit *SU) {
const SDep *Next = 0;
const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
if (SU != 0) {
if (SU) {
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
P != PE; ++P) {
const SUnit *PredSU = P->getSUnit();
@ -292,7 +292,7 @@ static const SUnit *CriticalPathStep(const SUnit *SU) {
}
}
return (Next) ? Next->getSUnit() : 0;
return (Next) ? Next->getSUnit() : nullptr;
}
void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
@ -309,8 +309,8 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[Reg] = ~0u;
RegRefs.erase(Reg);
State->LeaveGroup(Reg);
DEBUG(if (header != NULL) {
dbgs() << header << TRI->getName(Reg); header = NULL; });
DEBUG(if (header) {
dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << "->g" << State->GetGroup(Reg) << tag);
}
// Repeat for subregisters.
@ -321,14 +321,14 @@ void AggressiveAntiDepBreaker::HandleLastUse(unsigned Reg, unsigned KillIdx,
DefIndices[SubregReg] = ~0u;
RegRefs.erase(SubregReg);
State->LeaveGroup(SubregReg);
DEBUG(if (header != NULL) {
dbgs() << header << TRI->getName(Reg); header = NULL; });
DEBUG(if (header) {
dbgs() << header << TRI->getName(Reg); header = nullptr; });
DEBUG(dbgs() << " " << TRI->getName(SubregReg) << "->g" <<
State->GetGroup(SubregReg) << tag);
}
}
DEBUG(if ((header == NULL) && (footer != NULL)) dbgs() << footer);
DEBUG(if (!header && footer) dbgs() << footer);
}
void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
@ -382,7 +382,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI,
}
// Note register reference...
const TargetRegisterClass *RC = NULL;
const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
@ -466,7 +466,7 @@ void AggressiveAntiDepBreaker::ScanInstruction(MachineInstr *MI,
}
// Note register reference...
const TargetRegisterClass *RC = NULL;
const TargetRegisterClass *RC = nullptr;
if (i < MI->getDesc().getNumOperands())
RC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
AggressiveAntiDepState::RegisterReference RR = { &MO, RC };
@ -516,7 +516,7 @@ BitVector AggressiveAntiDepBreaker::GetRenameRegisters(unsigned Reg) {
AggressiveAntiDepState::RegisterReference>::iterator Q = Range.first,
QE = Range.second; Q != QE; ++Q) {
const TargetRegisterClass *RC = Q->second.RC;
if (RC == NULL) continue;
if (!RC) continue;
BitVector RCBV = TRI->getAllocatableSet(MF, RC);
if (first) {
@ -734,8 +734,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Track progress along the critical path through the SUnit graph as
// we walk the instructions. This is needed for regclasses that only
// break critical-path anti-dependencies.
const SUnit *CriticalPathSU = 0;
MachineInstr *CriticalPathMI = 0;
const SUnit *CriticalPathSU = nullptr;
MachineInstr *CriticalPathMI = nullptr;
if (CriticalPathSet.any()) {
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
@ -788,10 +788,10 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// If MI is not on the critical path, then we don't rename
// registers in the CriticalPathSet.
BitVector *ExcludeRegs = NULL;
BitVector *ExcludeRegs = nullptr;
if (MI == CriticalPathMI) {
CriticalPathSU = CriticalPathStep(CriticalPathSU);
CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : 0;
CriticalPathMI = (CriticalPathSU) ? CriticalPathSU->getInstr() : nullptr;
} else if (CriticalPathSet.any()) {
ExcludeRegs = &CriticalPathSet;
}
@ -815,7 +815,7 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
// Don't break anti-dependencies on non-allocatable registers.
DEBUG(dbgs() << " (non-allocatable)\n");
continue;
} else if ((ExcludeRegs != NULL) && ExcludeRegs->test(AntiDepReg)) {
} else if (ExcludeRegs && ExcludeRegs->test(AntiDepReg)) {
// Don't break anti-dependencies for critical path registers
// if not on the critical path
DEBUG(dbgs() << " (not critical-path)\n");
@ -829,9 +829,8 @@ unsigned AggressiveAntiDepBreaker::BreakAntiDependencies(
} else {
// No anti-dep breaking for implicit deps
MachineOperand *AntiDepOp = MI->findRegisterDefOperand(AntiDepReg);
assert(AntiDepOp != NULL &&
"Can't find index for defined register operand");
if ((AntiDepOp == NULL) || AntiDepOp->isImplicit()) {
assert(AntiDepOp && "Can't find index for defined register operand");
if (!AntiDepOp || AntiDepOp->isImplicit()) {
DEBUG(dbgs() << " (implicit)\n");
continue;
}

View File

@ -46,7 +46,7 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
EI != EE; ++EI) {
if (Indices && *Indices == unsigned(EI - EB))
return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
CurIndex = ComputeLinearIndex(*EI, 0, 0, CurIndex);
CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
}
return CurIndex;
}
@ -56,7 +56,7 @@ unsigned llvm::ComputeLinearIndex(Type *Ty,
for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i) {
if (Indices && *Indices == i)
return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
CurIndex = ComputeLinearIndex(EltTy, 0, 0, CurIndex);
CurIndex = ComputeLinearIndex(EltTy, nullptr, nullptr, CurIndex);
}
return CurIndex;
}
@ -228,7 +228,7 @@ static const Value *getNoopInput(const Value *V,
// through.
const Instruction *I = dyn_cast<Instruction>(V);
if (!I || I->getNumOperands() == 0) return V;
const Value *NoopInput = 0;
const Value *NoopInput = nullptr;
Value *Op = I->getOperand(0);
if (isa<BitCastInst>(I)) {

View File

@ -34,7 +34,7 @@ class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }
public:
BasicTTI() : ImmutablePass(ID), TM(0) {
BasicTTI() : ImmutablePass(ID), TM(nullptr) {
llvm_unreachable("This pass cannot be directly constructed");
}

View File

@ -189,7 +189,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
TII = tii;
TRI = tri;
MMI = mmi;
RS = NULL;
RS = nullptr;
// Use a RegScavenger to help update liveness when required.
MachineRegisterInfo &MRI = MF.getRegInfo();
@ -201,7 +201,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
// Fix CFG. The later algorithms expect it to be right.
bool MadeChange = false;
for (MachineFunction::iterator I = MF.begin(), E = MF.end(); I != E; I++) {
MachineBasicBlock *MBB = I, *TBB = 0, *FBB = 0;
MachineBasicBlock *MBB = I, *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true))
MadeChange |= MBB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
@ -220,7 +220,7 @@ bool BranchFolder::OptimizeFunction(MachineFunction &MF,
// See if any jump tables have become dead as the code generator
// did its thing.
MachineJumpTableInfo *JTI = MF.getJumpTableInfo();
if (JTI == 0) {
if (!JTI) {
delete RS;
return MadeChange;
}
@ -416,7 +416,7 @@ MachineBasicBlock *BranchFolder::SplitMBBAt(MachineBasicBlock &CurMBB,
MachineBasicBlock::iterator BBI1,
const BasicBlock *BB) {
if (!TII->isLegalToSplitMBBAt(CurMBB, BBI1))
return 0;
return nullptr;
MachineFunction &MF = *CurMBB.getParent();
@ -466,7 +466,7 @@ static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
const TargetInstrInfo *TII) {
MachineFunction *MF = CurMBB->getParent();
MachineFunction::iterator I = std::next(MachineFunction::iterator(CurMBB));
MachineBasicBlock *TBB = 0, *FBB = 0;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
DebugLoc dl; // FIXME: this is nowhere
if (I != MF->end() &&
@ -475,12 +475,12 @@ static void FixTail(MachineBasicBlock *CurMBB, MachineBasicBlock *SuccBB,
if (TBB == NextBB && !Cond.empty() && !FBB) {
if (!TII->ReverseBranchCondition(Cond)) {
TII->RemoveBranch(*CurMBB);
TII->InsertBranch(*CurMBB, SuccBB, NULL, Cond, dl);
TII->InsertBranch(*CurMBB, SuccBB, nullptr, Cond, dl);
return;
}
}
}
TII->InsertBranch(*CurMBB, SuccBB, NULL,
TII->InsertBranch(*CurMBB, SuccBB, nullptr,
SmallVector<MachineOperand, 0>(), dl);
}
@ -849,7 +849,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// See if we can do any tail merging on those.
if (MergePotentials.size() >= 2)
MadeChange |= TryTailMergeBlocks(NULL, NULL);
MadeChange |= TryTailMergeBlocks(nullptr, nullptr);
// Look at blocks (IBB) with multiple predecessors (PBB).
// We change each predecessor to a canonical form, by
@ -896,7 +896,7 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
if (PBB->getLandingPadSuccessor())
continue;
MachineBasicBlock *TBB = 0, *FBB = 0;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*PBB, TBB, FBB, Cond, true)) {
// Failing case: IBB is the target of a cbr, and we cannot reverse the
@ -915,10 +915,10 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
// a bit in the edge so we didn't have to do all this.
if (IBB->isLandingPad()) {
MachineFunction::iterator IP = PBB; IP++;
MachineBasicBlock *PredNextBB = NULL;
MachineBasicBlock *PredNextBB = nullptr;
if (IP != MF.end())
PredNextBB = IP;
if (TBB == NULL) {
if (!TBB) {
if (IBB != PredNextBB) // fallthrough
continue;
} else if (FBB) {
@ -939,7 +939,8 @@ bool BranchFolder::TailMergeBlocks(MachineFunction &MF) {
TII->RemoveBranch(*PBB);
if (!Cond.empty())
// reinsert conditional branch only, for now
TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, 0, NewCond, dl);
TII->InsertBranch(*PBB, (TBB == IBB) ? FBB : TBB, nullptr,
NewCond, dl);
}
MergePotentials.push_back(MergePotentialsElt(HashEndOfMBB(PBB), *P));
@ -1099,7 +1100,7 @@ ReoptimizeBlock:
// one.
MachineBasicBlock &PrevBB = *std::prev(MachineFunction::iterator(MBB));
MachineBasicBlock *PriorTBB = 0, *PriorFBB = 0;
MachineBasicBlock *PriorTBB = nullptr, *PriorFBB = nullptr;
SmallVector<MachineOperand, 4> PriorCond;
bool PriorUnAnalyzable =
TII->AnalyzeBranch(PrevBB, PriorTBB, PriorFBB, PriorCond, true);
@ -1116,7 +1117,7 @@ ReoptimizeBlock:
TII->RemoveBranch(PrevBB);
PriorCond.clear();
if (PriorTBB != MBB)
TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@ -1160,7 +1161,7 @@ ReoptimizeBlock:
// If the previous branch *only* branches to *this* block (conditional or
// not) remove the branch.
if (PriorTBB == MBB && PriorFBB == 0) {
if (PriorTBB == MBB && !PriorFBB) {
TII->RemoveBranch(PrevBB);
MadeChange = true;
++NumBranchOpts;
@ -1172,7 +1173,7 @@ ReoptimizeBlock:
if (PriorFBB == MBB) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorTBB, 0, PriorCond, dl);
TII->InsertBranch(PrevBB, PriorTBB, nullptr, PriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@ -1186,7 +1187,7 @@ ReoptimizeBlock:
if (!TII->ReverseBranchCondition(NewPriorCond)) {
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, PriorFBB, 0, NewPriorCond, dl);
TII->InsertBranch(PrevBB, PriorFBB, nullptr, NewPriorCond, dl);
MadeChange = true;
++NumBranchOpts;
goto ReoptimizeBlock;
@ -1201,7 +1202,7 @@ ReoptimizeBlock:
// We consider it more likely that execution will stay in the function (e.g.
// due to loops) than it is to exit it. This asserts in loops etc, moving
// the assert condition out of the loop body.
if (MBB->succ_empty() && !PriorCond.empty() && PriorFBB == 0 &&
if (MBB->succ_empty() && !PriorCond.empty() && !PriorFBB &&
MachineFunction::iterator(PriorTBB) == FallThrough &&
!MBB->canFallThrough()) {
bool DoTransform = true;
@ -1224,7 +1225,7 @@ ReoptimizeBlock:
DebugLoc dl = getBranchDebugLoc(PrevBB);
TII->RemoveBranch(PrevBB);
TII->InsertBranch(PrevBB, MBB, 0, NewPriorCond, dl);
TII->InsertBranch(PrevBB, MBB, nullptr, NewPriorCond, dl);
// Move this block to the end of the function.
MBB->moveAfter(--MF.end());
@ -1237,7 +1238,7 @@ ReoptimizeBlock:
}
// Analyze the branch in the current block.
MachineBasicBlock *CurTBB = 0, *CurFBB = 0;
MachineBasicBlock *CurTBB = nullptr, *CurFBB = nullptr;
SmallVector<MachineOperand, 4> CurCond;
bool CurUnAnalyzable= TII->AnalyzeBranch(*MBB, CurTBB, CurFBB, CurCond, true);
if (!CurUnAnalyzable) {
@ -1263,7 +1264,7 @@ ReoptimizeBlock:
// If this branch is the only thing in its block, see if we can forward
// other blocks across it.
if (CurTBB && CurCond.empty() && CurFBB == 0 &&
if (CurTBB && CurCond.empty() && !CurFBB &&
IsBranchOnlyBlock(MBB) && CurTBB != MBB &&
!MBB->hasAddressTaken()) {
DebugLoc dl = getBranchDebugLoc(*MBB);
@ -1301,12 +1302,12 @@ ReoptimizeBlock:
// explicit branch to us to make updates simpler.
if (!PredHasNoFallThrough && PrevBB.isSuccessor(MBB) &&
PriorTBB != MBB && PriorFBB != MBB) {
if (PriorTBB == 0) {
assert(PriorCond.empty() && PriorFBB == 0 &&
if (!PriorTBB) {
assert(PriorCond.empty() && !PriorFBB &&
"Bad branch analysis");
PriorTBB = MBB;
} else {
assert(PriorFBB == 0 && "Machine CFG out of date!");
assert(!PriorFBB && "Machine CFG out of date!");
PriorFBB = MBB;
}
DebugLoc pdl = getBranchDebugLoc(PrevBB);
@ -1330,7 +1331,7 @@ ReoptimizeBlock:
// If this change resulted in PMBB ending in a conditional
// branch where both conditions go to the same destination,
// change this to an unconditional branch (and fix the CFG).
MachineBasicBlock *NewCurTBB = 0, *NewCurFBB = 0;
MachineBasicBlock *NewCurTBB = nullptr, *NewCurFBB = nullptr;
SmallVector<MachineOperand, 4> NewCurCond;
bool NewCurUnAnalyzable = TII->AnalyzeBranch(*PMBB, NewCurTBB,
NewCurFBB, NewCurCond, true);
@ -1338,10 +1339,10 @@ ReoptimizeBlock:
DebugLoc pdl = getBranchDebugLoc(*PMBB);
TII->RemoveBranch(*PMBB);
NewCurCond.clear();
TII->InsertBranch(*PMBB, NewCurTBB, 0, NewCurCond, pdl);
TII->InsertBranch(*PMBB, NewCurTBB, nullptr, NewCurCond, pdl);
MadeChange = true;
++NumBranchOpts;
PMBB->CorrectExtraCFGEdges(NewCurTBB, 0, false);
PMBB->CorrectExtraCFGEdges(NewCurTBB, nullptr, false);
}
}
}
@ -1358,7 +1359,7 @@ ReoptimizeBlock:
}
// Add the branch back if the block is more than just an uncond branch.
TII->InsertBranch(*MBB, CurTBB, 0, CurCond, dl);
TII->InsertBranch(*MBB, CurTBB, nullptr, CurCond, dl);
}
}
@ -1379,7 +1380,7 @@ ReoptimizeBlock:
// Analyze the branch at the end of the pred.
MachineBasicBlock *PredBB = *PI;
MachineFunction::iterator PredFallthrough = PredBB; ++PredFallthrough;
MachineBasicBlock *PredTBB = 0, *PredFBB = 0;
MachineBasicBlock *PredTBB = nullptr, *PredFBB = nullptr;
SmallVector<MachineOperand, 4> PredCond;
if (PredBB != MBB && !PredBB->canFallThrough() &&
!TII->AnalyzeBranch(*PredBB, PredTBB, PredFBB, PredCond, true)
@ -1399,7 +1400,7 @@ ReoptimizeBlock:
MachineBasicBlock *NextBB =
std::next(MachineFunction::iterator(MBB));
CurCond.clear();
TII->InsertBranch(*MBB, NextBB, 0, CurCond, DebugLoc());
TII->InsertBranch(*MBB, NextBB, nullptr, CurCond, DebugLoc());
}
MBB->moveAfter(PredBB);
MadeChange = true;
@ -1432,7 +1433,7 @@ ReoptimizeBlock:
// Okay, there is no really great place to put this block. If, however,
// the block before this one would be a fall-through if this block were
// removed, move this block to the end of the function.
MachineBasicBlock *PrevTBB = 0, *PrevFBB = 0;
MachineBasicBlock *PrevTBB = nullptr, *PrevFBB = nullptr;
SmallVector<MachineOperand, 4> PrevCond;
if (FallThrough != MF.end() &&
!TII->AnalyzeBranch(PrevBB, PrevTBB, PrevFBB, PrevCond, true) &&
@ -1473,7 +1474,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
if (SuccBB != TrueBB)
return SuccBB;
}
return NULL;
return nullptr;
}
/// findHoistingInsertPosAndDeps - Find the location to move common instructions
@ -1547,7 +1548,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
// Also avoid moving code above a predicated instruction, since it's hard to
// reason about register liveness with predicated instructions.
bool DontMoveAcrossStore = true;
if (!PI->isSafeToMove(TII, 0, DontMoveAcrossStore) ||
if (!PI->isSafeToMove(TII, nullptr, DontMoveAcrossStore) ||
TII->isPredicated(PI))
return MBB->end();
@ -1581,7 +1582,7 @@ MachineBasicBlock::iterator findHoistingInsertPosAndDeps(MachineBasicBlock *MBB,
/// sequence at the start of the function, move the instructions before MBB
/// terminator if it's legal.
bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
MachineBasicBlock *TBB = 0, *FBB = 0;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (TII->AnalyzeBranch(*MBB, TBB, FBB, Cond, true) || !TBB || Cond.empty())
return false;
@ -1686,7 +1687,7 @@ bool BranchFolder::HoistCommonCodeInSuccs(MachineBasicBlock *MBB) {
break;
bool DontMoveAcrossStore = true;
if (!TIB->isSafeToMove(TII, 0, DontMoveAcrossStore))
if (!TIB->isSafeToMove(TII, nullptr, DontMoveAcrossStore))
break;
// Remove kills from LocalDefsSet, these registers had short live ranges.

View File

@ -96,8 +96,8 @@ void
VirtRegAuxInfo::calculateSpillWeightAndHint(LiveInterval &li) {
MachineRegisterInfo &mri = MF.getRegInfo();
const TargetRegisterInfo &tri = *MF.getTarget().getRegisterInfo();
MachineBasicBlock *mbb = 0;
MachineLoop *loop = 0;
MachineBasicBlock *mbb = nullptr;
MachineLoop *loop = nullptr;
bool isExiting = false;
float totalWeight = 0;
SmallPtrSet<MachineInstr*, 8> visited;

View File

@ -76,7 +76,7 @@ CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
dbgs() << "Formal argument #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
}
@ -108,7 +108,7 @@ void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
dbgs() << "Return operand #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
}
@ -126,7 +126,7 @@ void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
}
@ -145,7 +145,7 @@ void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
dbgs() << "Call operand #" << i << " has unhandled type "
<< EVT(ArgVT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
}
@ -162,7 +162,7 @@ void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
dbgs() << "Call result #" << i << " has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
}
@ -175,6 +175,6 @@ void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
dbgs() << "Call result has unhandled type "
<< EVT(VT).getEVTString() << '\n';
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}

View File

@ -116,8 +116,8 @@ typedef DenseMap<Instruction *, Type *> InstrToOrigTy;
public:
static char ID; // Pass identification, replacement for typeid
explicit CodeGenPrepare(const TargetMachine *TM = 0)
: FunctionPass(ID), TM(TM), TLI(0) {
explicit CodeGenPrepare(const TargetMachine *TM = nullptr)
: FunctionPass(ID), TM(TM), TLI(nullptr) {
initializeCodeGenPreparePass(*PassRegistry::getPassRegistry());
}
bool runOnFunction(Function &F) override;
@ -182,7 +182,7 @@ bool CodeGenPrepare::runOnFunction(Function &F) {
TLInfo = &getAnalysis<TargetLibraryInfo>();
DominatorTreeWrapperPass *DTWP =
getAnalysisIfAvailable<DominatorTreeWrapperPass>();
DT = DTWP ? &DTWP->getDomTree() : 0;
DT = DTWP ? &DTWP->getDomTree() : nullptr;
OptSize = F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
Attribute::OptimizeForSize);
@ -676,8 +676,9 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
// happens.
WeakVH IterHandle(CurInstIterator);
replaceAndRecursivelySimplify(CI, RetVal, TLI ? TLI->getDataLayout() : 0,
TLInfo, ModifiedDT ? 0 : DT);
replaceAndRecursivelySimplify(CI, RetVal,
TLI ? TLI->getDataLayout() : nullptr,
TLInfo, ModifiedDT ? nullptr : DT);
// If the iterator instruction was recursively deleted, start over at the
// start of the block.
@ -698,10 +699,10 @@ bool CodeGenPrepare::OptimizeCallInst(CallInst *CI) {
}
// From here on out we're working with named functions.
if (CI->getCalledFunction() == 0) return false;
if (!CI->getCalledFunction()) return false;
// We'll need DataLayout from here on out.
const DataLayout *TD = TLI ? TLI->getDataLayout() : 0;
const DataLayout *TD = TLI ? TLI->getDataLayout() : nullptr;
if (!TD) return false;
// Lower all default uses of _chk calls. This is very similar
@ -751,8 +752,8 @@ bool CodeGenPrepare::DupRetToEnableTailCallOpts(BasicBlock *BB) {
if (!RI)
return false;
PHINode *PN = 0;
BitCastInst *BCI = 0;
PHINode *PN = nullptr;
BitCastInst *BCI = nullptr;
Value *V = RI->getReturnValue();
if (V) {
BCI = dyn_cast<BitCastInst>(V);
@ -867,7 +868,7 @@ namespace {
struct ExtAddrMode : public TargetLowering::AddrMode {
Value *BaseReg;
Value *ScaledReg;
ExtAddrMode() : BaseReg(0), ScaledReg(0) {}
ExtAddrMode() : BaseReg(nullptr), ScaledReg(nullptr) {}
void print(raw_ostream &OS) const;
void dump() const;
@ -1194,10 +1195,10 @@ class TypePromotionTransaction {
public:
/// \brief Remove all references to \p Inst and optionally replace all its
/// uses with New.
/// \pre If !Inst->use_empty(), then New != NULL
InstructionRemover(Instruction *Inst, Value *New = NULL)
/// \pre If !Inst->use_empty(), then New != nullptr
InstructionRemover(Instruction *Inst, Value *New = nullptr)
: TypePromotionAction(Inst), Inserter(Inst), Hider(Inst),
Replacer(NULL) {
Replacer(nullptr) {
if (New)
Replacer = new UsesReplacer(Inst, New);
DEBUG(dbgs() << "Do: InstructionRemover: " << *Inst << "\n");
@ -1237,7 +1238,7 @@ public:
/// Same as Instruction::setOperand.
void setOperand(Instruction *Inst, unsigned Idx, Value *NewVal);
/// Same as Instruction::eraseFromParent.
void eraseInstruction(Instruction *Inst, Value *NewVal = NULL);
void eraseInstruction(Instruction *Inst, Value *NewVal = nullptr);
/// Same as Value::replaceAllUsesWith.
void replaceAllUsesWith(Instruction *Inst, Value *New);
/// Same as Value::mutateType.
@ -1301,7 +1302,7 @@ void TypePromotionTransaction::moveBefore(Instruction *Inst,
TypePromotionTransaction::ConstRestorationPt
TypePromotionTransaction::getRestorationPoint() const {
return Actions.rbegin() != Actions.rend() ? *Actions.rbegin() : NULL;
return Actions.rbegin() != Actions.rend() ? *Actions.rbegin() : nullptr;
}
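// Intended client pattern (sketch; TryPromotion is hypothetical): take a
// snapshot, attempt a change, roll back on failure. A nullptr restoration
// point simply means "undo every recorded action".
//
//   TypePromotionTransaction::ConstRestorationPt LastKnownGood =
//       TPT.getRestorationPoint();
//   if (!TryPromotion(TPT))
//     TPT.rollback(LastKnownGood);
//   else
//     TPT.commit();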
void TypePromotionTransaction::commit() {
@ -1395,7 +1396,7 @@ private:
bool MatchScaledValue(Value *ScaleReg, int64_t Scale, unsigned Depth);
bool MatchAddr(Value *V, unsigned Depth);
bool MatchOperationAddr(User *Operation, unsigned Opcode, unsigned Depth,
bool *MovedAway = NULL);
bool *MovedAway = nullptr);
bool IsProfitableToFoldIntoAddressingMode(Instruction *I,
ExtAddrMode &AMBefore,
ExtAddrMode &AMAfter);
@ -1440,7 +1441,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
// Okay, we decided that we can add ScaleReg+Scale to AddrMode. Check now
// to see if ScaleReg is actually X+C. If so, we can turn this into adding
// X*Scale + C*Scale to addr mode.
ConstantInt *CI = 0; Value *AddLHS = 0;
ConstantInt *CI = nullptr; Value *AddLHS = nullptr;
if (isa<Instruction>(ScaleReg) && // not a constant expr.
match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)))) {
TestAddrMode.ScaledReg = AddLHS;
@ -1617,13 +1618,13 @@ TypePromotionHelper::Action TypePromotionHelper::getAction(
// get through.
// If so, check that we can get through.
if (!SExtOpnd || !canGetThrough(SExtOpnd, SExtTy, PromotedInsts))
return NULL;
return nullptr;
// Do not promote if the operand has been added by codegenprepare.
// Otherwise, it means we are undoing an optimization that is likely to be
// redone, thus causing a potential infinite loop.
if (isa<TruncInst>(SExtOpnd) && InsertedTruncs.count(SExtOpnd))
return NULL;
return nullptr;
// SExt or Trunc instructions.
// Return the related handler.
@ -1634,7 +1635,7 @@ TypePromotionHelper::Action TypePromotionHelper::getAction(
// Abort early if we will have to insert non-free instructions.
if (!SExtOpnd->hasOneUse() &&
!TLI.isTruncateFree(SExtTy, SExtOpnd->getType()))
return NULL;
return nullptr;
return promoteOperandForOther;
}
@ -1745,7 +1746,7 @@ TypePromotionHelper::promoteOperandForOther(Instruction *SExt,
TPT.moveBefore(SExtForOpnd, SExtOpnd);
TPT.setOperand(SExtOpnd, OpIdx, SExtForOpnd);
// If more sext are required, new instructions will have to be created.
SExtForOpnd = NULL;
SExtForOpnd = nullptr;
}
if (SExtForOpnd == SExt) {
DEBUG(dbgs() << "Sign extension is useless now\n");
@ -2027,11 +2028,11 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
AddrMode.BaseOffs -= CI->getSExtValue();
} else if (GlobalValue *GV = dyn_cast<GlobalValue>(Addr)) {
// If this is a global variable, try to fold it into the addressing mode.
if (AddrMode.BaseGV == 0) {
if (!AddrMode.BaseGV) {
AddrMode.BaseGV = GV;
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
AddrMode.BaseGV = 0;
AddrMode.BaseGV = nullptr;
}
} else if (Instruction *I = dyn_cast<Instruction>(Addr)) {
ExtAddrMode BackupAddrMode = AddrMode;
@ -2076,7 +2077,7 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
AddrMode.HasBaseReg = false;
AddrMode.BaseReg = 0;
AddrMode.BaseReg = nullptr;
}
// If the base register is already taken, see if we can do [r+r].
@ -2086,7 +2087,7 @@ bool AddressingModeMatcher::MatchAddr(Value *Addr, unsigned Depth) {
if (TLI.isLegalAddressingMode(AddrMode, AccessTy))
return true;
AddrMode.Scale = 0;
AddrMode.ScaledReg = 0;
AddrMode.ScaledReg = nullptr;
}
// Couldn't match.
TPT.rollback(LastKnownGood);
@ -2171,7 +2172,7 @@ static bool FindAllMemoryUses(Instruction *I,
bool AddressingModeMatcher::ValueAlreadyLiveAtInst(Value *Val,Value *KnownLive1,
Value *KnownLive2) {
// If Val is either of the known-live values, we know it is live!
if (Val == 0 || Val == KnownLive1 || Val == KnownLive2)
if (Val == nullptr || Val == KnownLive1 || Val == KnownLive2)
return true;
// All values other than instructions and arguments (e.g. constants) are live.
@ -2230,13 +2231,13 @@ IsProfitableToFoldIntoAddressingMode(Instruction *I, ExtAddrMode &AMBefore,
// If the BaseReg or ScaledReg was referenced by the previous addrmode, their
// lifetime wasn't extended by adding this instruction.
if (ValueAlreadyLiveAtInst(BaseReg, AMBefore.BaseReg, AMBefore.ScaledReg))
BaseReg = 0;
BaseReg = nullptr;
if (ValueAlreadyLiveAtInst(ScaledReg, AMBefore.BaseReg, AMBefore.ScaledReg))
ScaledReg = 0;
ScaledReg = nullptr;
// If folding this instruction (and it's subexprs) didn't extend any live
// ranges, we're ok with it.
if (BaseReg == 0 && ScaledReg == 0)
if (!BaseReg && !ScaledReg)
return true;
// If all uses of this instruction are ultimately load/store/inlineasm's,
@ -2325,7 +2326,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Use a worklist to iteratively look through PHI nodes, and ensure that
// the addressing modes obtained from the non-PHI roots of the graph
// are equivalent.
Value *Consensus = 0;
Value *Consensus = nullptr;
unsigned NumUsesConsensus = 0;
bool IsNumUsesConsensusValid = false;
SmallVector<Instruction*, 16> AddrModeInsts;
@ -2339,7 +2340,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
// Break use-def graph loops.
if (!Visited.insert(V)) {
Consensus = 0;
Consensus = nullptr;
break;
}
@ -2385,7 +2386,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
continue;
}
Consensus = 0;
Consensus = nullptr;
break;
}
@ -2435,12 +2436,12 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
Value *ResultPtr = 0, *ResultIndex = 0;
Value *ResultPtr = nullptr, *ResultIndex = nullptr;
// First, find the pointer.
if (AddrMode.BaseReg && AddrMode.BaseReg->getType()->isPointerTy()) {
ResultPtr = AddrMode.BaseReg;
AddrMode.BaseReg = 0;
AddrMode.BaseReg = nullptr;
}
if (AddrMode.Scale && AddrMode.ScaledReg->getType()->isPointerTy()) {
@ -2466,7 +2467,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
if (!ResultPtr && AddrMode.BaseReg) {
ResultPtr =
Builder.CreateIntToPtr(AddrMode.BaseReg, Addr->getType(), "sunkaddr");
AddrMode.BaseReg = 0;
AddrMode.BaseReg = nullptr;
} else if (!ResultPtr && AddrMode.Scale == 1) {
ResultPtr =
Builder.CreateIntToPtr(AddrMode.ScaledReg, Addr->getType(), "sunkaddr");
@ -2553,7 +2554,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
DEBUG(dbgs() << "CGP: SINKING nonlocal addrmode: " << AddrMode << " for "
<< *MemoryInst);
Type *IntPtrTy = TLI->getDataLayout()->getIntPtrType(Addr->getType());
Value *Result = 0;
Value *Result = nullptr;
// Start with the base register. Do this first so that subsequent address
// matching finds it last, which will prevent it from trying to match it
@ -2617,7 +2618,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
Result = V;
}
if (Result == 0)
if (!Result)
SunkAddr = Constant::getNullValue(Addr->getType());
else
SunkAddr = Builder.CreateIntToPtr(Result, Addr->getType(), "sunkaddr");
@ -2942,7 +2943,7 @@ bool CodeGenPrepare::OptimizeInst(Instruction *I) {
// It is possible for very late stage optimizations (such as SimplifyCFG)
// to introduce PHI nodes too late to be cleaned up. If we detect such a
// trivial PHI, go ahead and zap it here.
if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : 0,
if (Value *V = SimplifyInstruction(P, TLI ? TLI->getDataLayout() : nullptr,
TLInfo, DT)) {
P->replaceAllUsesWith(V);
P->eraseFromParent();
@ -3045,7 +3046,7 @@ bool CodeGenPrepare::OptimizeBlock(BasicBlock &BB) {
bool CodeGenPrepare::PlaceDbgValues(Function &F) {
bool MadeChange = false;
for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
Instruction *PrevNonDbgInst = NULL;
Instruction *PrevNonDbgInst = nullptr;
for (BasicBlock::iterator BI = I->begin(), BE = I->end(); BI != BE;) {
Instruction *Insn = BI; ++BI;
DbgValueInst *DVI = dyn_cast<DbgValueInst>(Insn);

View File

@ -33,7 +33,7 @@ CriticalAntiDepBreaker(MachineFunction& MFi, const RegisterClassInfo &RCI) :
TII(MF.getTarget().getInstrInfo()),
TRI(MF.getTarget().getRegisterInfo()),
RegClassInfo(RCI),
Classes(TRI->getNumRegs(), static_cast<const TargetRegisterClass *>(0)),
Classes(TRI->getNumRegs(), nullptr),
KillIndices(TRI->getNumRegs(), 0),
DefIndices(TRI->getNumRegs(), 0),
KeepRegs(TRI->getNumRegs(), false) {}
@ -45,7 +45,7 @@ void CriticalAntiDepBreaker::StartBlock(MachineBasicBlock *BB) {
const unsigned BBSize = BB->size();
for (unsigned i = 0, e = TRI->getNumRegs(); i != e; ++i) {
// Clear out the register class data.
Classes[i] = static_cast<const TargetRegisterClass *>(0);
Classes[i] = nullptr;
// Initialize the indices to indicate that no registers are live.
KillIndices[i] = ~0u;
@ -124,7 +124,7 @@ void CriticalAntiDepBreaker::Observe(MachineInstr *MI, unsigned Count,
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static const SDep *CriticalPathStep(const SUnit *SU) {
const SDep *Next = 0;
const SDep *Next = nullptr;
unsigned NextDepth = 0;
// Find the predecessor edge with the greatest depth.
for (SUnit::const_pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
@ -171,7 +171,7 @@ void CriticalAntiDepBreaker::PrescanInstruction(MachineInstr *MI) {
if (!MO.isReg()) continue;
unsigned Reg = MO.getReg();
if (Reg == 0) continue;
const TargetRegisterClass *NewRC = 0;
const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
@ -227,7 +227,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
DefIndices[i] = Count;
KillIndices[i] = ~0u;
KeepRegs.reset(i);
Classes[i] = 0;
Classes[i] = nullptr;
RegRefs.erase(i);
}
@ -244,7 +244,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
(DefIndices[Reg] == ~0u)) &&
"Kill and Def maps aren't consistent for Reg!");
KeepRegs.reset(Reg);
Classes[Reg] = 0;
Classes[Reg] = nullptr;
RegRefs.erase(Reg);
// Repeat, for all subregs.
for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
@ -252,7 +252,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
DefIndices[SubregReg] = Count;
KillIndices[SubregReg] = ~0u;
KeepRegs.reset(SubregReg);
Classes[SubregReg] = 0;
Classes[SubregReg] = nullptr;
RegRefs.erase(SubregReg);
}
// Conservatively mark super-registers as unusable.
@ -267,7 +267,7 @@ void CriticalAntiDepBreaker::ScanInstruction(MachineInstr *MI,
if (Reg == 0) continue;
if (!MO.isUse()) continue;
const TargetRegisterClass *NewRC = 0;
const TargetRegisterClass *NewRC = nullptr;
if (i < MI->getDesc().getNumOperands())
NewRC = TII->getRegClass(MI->getDesc(), i, TRI, MF);
@ -419,7 +419,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
DenseMap<MachineInstr*,const SUnit*> MISUnitMap;
// Find the node at the bottom of the critical path.
const SUnit *Max = 0;
const SUnit *Max = nullptr;
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
const SUnit *SU = &SUnits[i];
MISUnitMap[SU->getInstr()] = SU;
@ -551,8 +551,8 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
CriticalPathMI = CriticalPathSU->getInstr();
} else {
// We've reached the end of the critical path.
CriticalPathSU = 0;
CriticalPathMI = 0;
CriticalPathSU = nullptr;
CriticalPathMI = nullptr;
}
}
@ -589,8 +589,9 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
// Determine AntiDepReg's register class, if it is live and is
// consistently used within a single class.
const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
assert((AntiDepReg == 0 || RC != NULL) &&
const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg]
: nullptr;
assert((AntiDepReg == 0 || RC != nullptr) &&
"Register should be live if it's causing an anti-dependence!");
if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
AntiDepReg = 0;
@ -638,7 +639,7 @@ BreakAntiDependencies(const std::vector<SUnit>& SUnits,
(DefIndices[NewReg] == ~0u)) &&
"Kill and Def maps aren't consistent for NewReg!");
Classes[AntiDepReg] = 0;
Classes[AntiDepReg] = nullptr;
DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
KillIndices[AntiDepReg] = ~0u;
assert(((KillIndices[AntiDepReg] == ~0u) !=

View File

@ -121,7 +121,7 @@ DefaultVLIWScheduler::DefaultVLIWScheduler(
void DefaultVLIWScheduler::schedule() {
// Build the scheduling graph.
buildSchedGraph(0);
buildSchedGraph(nullptr);
}
// VLIWPacketizerList Ctor
@ -129,7 +129,7 @@ VLIWPacketizerList::VLIWPacketizerList(
MachineFunction &MF, MachineLoopInfo &MLI, MachineDominatorTree &MDT,
bool IsPostRA) : TM(MF.getTarget()), MF(MF) {
TII = TM.getInstrInfo();
ResourceTracker = TII->CreateTargetScheduleState(&TM, 0);
ResourceTracker = TII->CreateTargetScheduleState(&TM, nullptr);
VLIWScheduler = new DefaultVLIWScheduler(MF, MLI, MDT, IsPostRA);
}

View File

@ -59,7 +59,7 @@ bool DeadMachineInstructionElim::isDead(const MachineInstr *MI) const {
// Don't delete instructions with side effects.
bool SawStore = false;
if (!MI->isSafeToMove(TII, 0, SawStore) && !MI->isPHI())
if (!MI->isSafeToMove(TII, nullptr, SawStore) && !MI->isPHI())
return false;
// Examine each operand.

View File

@ -43,7 +43,7 @@ namespace {
public:
static char ID; // Pass identification, replacement for typeid.
DwarfEHPrepare(const TargetMachine *TM)
: FunctionPass(ID), TM(TM), RewindFunction(0) {
: FunctionPass(ID), TM(TM), RewindFunction(nullptr) {
initializeDominatorTreeWrapperPassPass(*PassRegistry::getPassRegistry());
}
@ -68,10 +68,10 @@ FunctionPass *llvm::createDwarfEHPass(const TargetMachine *TM) {
/// instructions, including the 'resume' instruction.
Value *DwarfEHPrepare::GetExceptionObject(ResumeInst *RI) {
Value *V = RI->getOperand(0);
Value *ExnObj = 0;
Value *ExnObj = nullptr;
InsertValueInst *SelIVI = dyn_cast<InsertValueInst>(V);
LoadInst *SelLoad = 0;
InsertValueInst *ExcIVI = 0;
LoadInst *SelLoad = nullptr;
InsertValueInst *ExcIVI = nullptr;
bool EraseIVIs = false;
if (SelIVI) {

View File

@ -219,7 +219,7 @@ bool SSAIfConv::canSpeculateInstrs(MachineBasicBlock *MBB) {
// We never speculate stores, so an AA pointer isn't necessary.
bool DontMoveAcrossStore = true;
if (!I->isSafeToMove(TII, 0, DontMoveAcrossStore)) {
if (!I->isSafeToMove(TII, nullptr, DontMoveAcrossStore)) {
DEBUG(dbgs() << "Can't speculate: " << *I);
return false;
}
@ -338,7 +338,7 @@ bool SSAIfConv::findInsertionPoint() {
///
bool SSAIfConv::canConvertIf(MachineBasicBlock *MBB) {
Head = MBB;
TBB = FBB = Tail = 0;
TBB = FBB = Tail = nullptr;
if (Head->succ_size() != 2)
return false;
@ -463,7 +463,7 @@ void SSAIfConv::replacePHIInstrs() {
TII->insertSelect(*Head, FirstTerm, HeadDL, DstReg, Cond, PI.TReg, PI.FReg);
DEBUG(dbgs() << " --> " << *std::prev(FirstTerm));
PI.PHI->eraseFromParent();
PI.PHI = 0;
PI.PHI = nullptr;
}
}
@ -564,7 +564,7 @@ void SSAIfConv::convertIf(SmallVectorImpl<MachineBasicBlock*> &RemovedBlocks) {
// We need a branch to Tail, let code placement work it out later.
DEBUG(dbgs() << "Converting to unconditional branch.\n");
SmallVector<MachineOperand, 0> EmptyCond;
TII->InsertBranch(*Head, Tail, 0, EmptyCond, HeadDL);
TII->InsertBranch(*Head, Tail, nullptr, EmptyCond, HeadDL);
Head->addSuccessor(Tail);
}
DEBUG(dbgs() << *Head);
@ -783,7 +783,7 @@ bool EarlyIfConverter::runOnMachineFunction(MachineFunction &MF) {
DomTree = &getAnalysis<MachineDominatorTree>();
Loops = getAnalysisIfAvailable<MachineLoopInfo>();
Traces = &getAnalysis<MachineTraceMetrics>();
MinInstr = 0;
MinInstr = nullptr;
bool Changed = false;
IfConv.runOnMachineFunction(MF);

View File

@ -100,7 +100,7 @@ struct DomainValue {
// Clear this DomainValue and point to next which has all its data.
void clear() {
AvailableDomains = 0;
Next = 0;
Next = nullptr;
Instrs.clear();
}
};
@ -275,7 +275,7 @@ void ExeDepsFix::kill(int rx) {
return;
release(LiveRegs[rx].Value);
LiveRegs[rx].Value = 0;
LiveRegs[rx].Value = nullptr;
}
/// Force register rx into domain.
@ -360,7 +360,7 @@ void ExeDepsFix::enterBasicBlock(MachineBasicBlock *MBB) {
// Default values are 'nothing happened a long time ago'.
for (unsigned rx = 0; rx != NumRegs; ++rx) {
LiveRegs[rx].Value = 0;
LiveRegs[rx].Value = nullptr;
LiveRegs[rx].Def = -(1 << 20);
}
@ -440,7 +440,7 @@ void ExeDepsFix::leaveBasicBlock(MachineBasicBlock *MBB) {
release(LiveRegs[i].Value);
delete[] LiveRegs;
}
LiveRegs = 0;
LiveRegs = nullptr;
}
void ExeDepsFix::visitInstr(MachineInstr *MI) {
@ -664,7 +664,7 @@ void ExeDepsFix::visitSoftInstr(MachineInstr *mi, unsigned mask) {
// doms are now sorted in order of appearance. Try to merge them all, giving
// priority to the latest ones.
DomainValue *dv = 0;
DomainValue *dv = nullptr;
while (!Regs.empty()) {
if (!dv) {
dv = Regs.pop_back_val().Value;
@ -714,7 +714,7 @@ bool ExeDepsFix::runOnMachineFunction(MachineFunction &mf) {
MF = &mf;
TII = MF->getTarget().getInstrInfo();
TRI = MF->getTarget().getRegisterInfo();
LiveRegs = 0;
LiveRegs = nullptr;
assert(NumRegs == RC->getNumRegs() && "Bad regclass");
DEBUG(dbgs() << "********** FIX EXECUTION DEPENDENCIES: "

View File

@ -84,7 +84,7 @@ GCStrategy *GCModuleInfo::getOrCreateStrategy(const Module *M,
}
dbgs() << "unsupported GC: " << Name << "\n";
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
GCFunctionInfo &GCModuleInfo::getFunctionInfo(const Function &F) {

View File

@ -118,7 +118,7 @@ bool GCStrategy::performCustomLowering(Function &F) {
bool GCStrategy::findCustomSafePoints(GCFunctionInfo& FI, MachineFunction &F) {
dbgs() << "gc " << getName() << " must override findCustomSafePoints.\n";
llvm_unreachable(0);
llvm_unreachable(nullptr);
}

View File

@ -127,7 +127,8 @@ namespace {
IsAnalyzed(false), IsEnqueued(false), IsBrAnalyzable(false),
HasFallThrough(false), IsUnpredicable(false),
CannotBeCopied(false), ClobbersPred(false), NonPredSize(0),
ExtraCost(0), ExtraCost2(0), BB(0), TrueBB(0), FalseBB(0) {}
ExtraCost(0), ExtraCost2(0), BB(nullptr), TrueBB(nullptr),
FalseBB(nullptr) {}
};
/// IfcvtToken - Record information about pending if-conversions to attempt:
@ -205,7 +206,7 @@ namespace {
void PredicateBlock(BBInfo &BBI,
MachineBasicBlock::iterator E,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> *LaterRedefs = 0);
SmallSet<unsigned, 4> *LaterRedefs = nullptr);
void CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
SmallVectorImpl<MachineOperand> &Cond,
bool IgnoreBr = false);
@ -230,7 +231,7 @@ namespace {
// blockAlwaysFallThrough - Block ends without a terminator.
bool blockAlwaysFallThrough(BBInfo &BBI) const {
return BBI.IsBrAnalyzable && BBI.TrueBB == NULL;
return BBI.IsBrAnalyzable && BBI.TrueBB == nullptr;
}
// IfcvtTokenCmp - Used to sort if-conversion candidates.
@ -438,7 +439,7 @@ static MachineBasicBlock *findFalseBlock(MachineBasicBlock *BB,
if (SuccBB != TrueBB)
return SuccBB;
}
return NULL;
return nullptr;
}
/// ReverseBranchCondition - Reverse the condition of the end of the block
@ -460,7 +461,7 @@ static inline MachineBasicBlock *getNextBlock(MachineBasicBlock *BB) {
MachineFunction::iterator I = BB;
MachineFunction::iterator E = BB->getParent()->end();
if (++I == E)
return NULL;
return nullptr;
return I;
}
@ -551,7 +552,7 @@ bool IfConverter::ValidDiamond(BBInfo &TrueBBI, BBInfo &FalseBBI,
FT = getNextBlock(FalseBBI.BB);
if (TT != FT)
return false;
if (TT == NULL && (TrueBBI.IsBrAnalyzable || FalseBBI.IsBrAnalyzable))
if (!TT && (TrueBBI.IsBrAnalyzable || FalseBBI.IsBrAnalyzable))
return false;
if (TrueBBI.BB->pred_size() > 1 || FalseBBI.BB->pred_size() > 1)
return false;
@ -641,11 +642,11 @@ void IfConverter::ScanInstructions(BBInfo &BBI) {
bool AlreadyPredicated = !BBI.Predicate.empty();
// First analyze the end of BB branches.
BBI.TrueBB = BBI.FalseBB = NULL;
BBI.TrueBB = BBI.FalseBB = nullptr;
BBI.BrCond.clear();
BBI.IsBrAnalyzable =
!TII->AnalyzeBranch(*BBI.BB, BBI.TrueBB, BBI.FalseBB, BBI.BrCond);
BBI.HasFallThrough = BBI.IsBrAnalyzable && BBI.FalseBB == NULL;
BBI.HasFallThrough = BBI.IsBrAnalyzable && BBI.FalseBB == nullptr;
if (BBI.BrCond.size()) {
// No false branch. This BB must end with a conditional branch and a
@ -954,13 +955,13 @@ static void InsertUncondBranch(MachineBasicBlock *BB, MachineBasicBlock *ToBB,
const TargetInstrInfo *TII) {
DebugLoc dl; // FIXME: this is nowhere
SmallVector<MachineOperand, 0> NoCond;
TII->InsertBranch(*BB, ToBB, NULL, NoCond, dl);
TII->InsertBranch(*BB, ToBB, nullptr, NoCond, dl);
}
/// RemoveExtraEdges - Remove true / false edges if either / both are no longer
/// successors.
void IfConverter::RemoveExtraEdges(BBInfo &BBI) {
MachineBasicBlock *TBB = NULL, *FBB = NULL;
MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
SmallVector<MachineOperand, 4> Cond;
if (!TII->AnalyzeBranch(*BBI.BB, TBB, FBB, Cond))
BBI.BB->CorrectExtraCFGEdges(TBB, FBB, !Cond.empty());
@ -1179,7 +1180,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
DontKill.clear();
bool HasEarlyExit = CvtBBI->FalseBB != NULL;
bool HasEarlyExit = CvtBBI->FalseBB != nullptr;
uint64_t CvtNext = 0, CvtFalse = 0, BBNext = 0, BBCvt = 0, SumWeight = 0;
uint32_t WeightScale = 0;
if (HasEarlyExit) {
@ -1215,7 +1216,7 @@ bool IfConverter::IfConvertTriangle(BBInfo &BBI, IfcvtKind Kind) {
CvtBBI->BrCond.end());
if (TII->ReverseBranchCondition(RevCond))
llvm_unreachable("Unable to reverse branch condition!");
TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, NULL, RevCond, dl);
TII->InsertBranch(*BBI.BB, CvtBBI->FalseBB, nullptr, RevCond, dl);
BBI.BB->addSuccessor(CvtBBI->FalseBB);
// Update the edge weight for both CvtBBI->FalseBB and NextBBI.
// New_Weight(BBI.BB, NextBBI->BB) =
@ -1453,8 +1454,8 @@ bool IfConverter::IfConvertDiamond(BBInfo &BBI, IfcvtKind Kind,
PredicateBlock(*BBI2, DI2, *Cond2);
// Merge the true block into the entry of the diamond.
MergeBlocks(BBI, *BBI1, TailBB == 0);
MergeBlocks(BBI, *BBI2, TailBB == 0);
MergeBlocks(BBI, *BBI1, TailBB == nullptr);
MergeBlocks(BBI, *BBI2, TailBB == nullptr);
// If the if-converted block falls through or unconditionally branches into
// the tail block, and the tail block does not have other predecessors, then
@ -1503,7 +1504,7 @@ static bool MaySpeculate(const MachineInstr *MI,
SmallSet<unsigned, 4> &LaterRedefs,
const TargetInstrInfo *TII) {
bool SawStore = true;
if (!MI->isSafeToMove(TII, 0, SawStore))
if (!MI->isSafeToMove(TII, nullptr, SawStore))
return false;
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
@ -1527,7 +1528,7 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
SmallVectorImpl<MachineOperand> &Cond,
SmallSet<unsigned, 4> *LaterRedefs) {
bool AnyUnpred = false;
bool MaySpec = LaterRedefs != 0;
bool MaySpec = LaterRedefs != nullptr;
for (MachineBasicBlock::iterator I = BBI.BB->begin(); I != E; ++I) {
if (I->isDebugValue() || TII->isPredicated(I))
continue;
@ -1545,7 +1546,7 @@ void IfConverter::PredicateBlock(BBInfo &BBI,
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
// If the predicated instruction now redefines a register as the result of
@ -1590,7 +1591,7 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
#ifndef NDEBUG
dbgs() << "Unable to predicate " << *I << "!\n";
#endif
llvm_unreachable(0);
llvm_unreachable(nullptr);
}
}
@ -1607,7 +1608,7 @@ void IfConverter::CopyAndPredicateBlock(BBInfo &ToBBI, BBInfo &FromBBI,
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];
@ -1643,7 +1644,7 @@ void IfConverter::MergeBlocks(BBInfo &ToBBI, BBInfo &FromBBI, bool AddEdges) {
std::vector<MachineBasicBlock *> Succs(FromBBI.BB->succ_begin(),
FromBBI.BB->succ_end());
MachineBasicBlock *NBB = getNextBlock(FromBBI.BB);
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : NULL;
MachineBasicBlock *FallThrough = FromBBI.HasFallThrough ? NBB : nullptr;
for (unsigned i = 0, e = Succs.size(); i != e; ++i) {
MachineBasicBlock *Succ = Succs[i];

View File

@ -121,7 +121,7 @@ public:
SibValueInfo(unsigned Reg, VNInfo *VNI)
: AllDefsAreReloads(true), DefByOrigPHI(false), KillsSource(false),
SpillReg(Reg), SpillVNI(VNI), SpillMBB(0), DefMI(0) {}
SpillReg(Reg), SpillVNI(VNI), SpillMBB(nullptr), DefMI(nullptr) {}
// Returns true when a def has been found.
bool hasDef() const { return DefByOrigPHI || DefMI; }
@ -167,7 +167,7 @@ private:
bool isSibling(unsigned Reg);
MachineInstr *traceSiblingValue(unsigned, VNInfo*, VNInfo*);
void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = 0);
void propagateSiblingValue(SibValueMap::iterator, VNInfo *VNI = nullptr);
void analyzeSiblingValues();
bool hoistSpill(LiveInterval &SpillLI, MachineInstr *CopyMI);
@ -179,7 +179,7 @@ private:
bool coalesceStackAccess(MachineInstr *MI, unsigned Reg);
bool foldMemoryOperand(ArrayRef<std::pair<MachineInstr*, unsigned> >,
MachineInstr *LoadMI = 0);
MachineInstr *LoadMI = nullptr);
void insertReload(unsigned VReg, SlotIndex, MachineBasicBlock::iterator MI);
void insertSpill(unsigned VReg, bool isKill, MachineBasicBlock::iterator MI);
@ -236,7 +236,7 @@ bool InlineSpiller::isSnippet(const LiveInterval &SnipLI) {
if (SnipLI.getNumValNums() > 2 || !LIS.intervalIsInOneMBB(SnipLI))
return false;
MachineInstr *UseMI = 0;
MachineInstr *UseMI = nullptr;
// Check that all uses satisfy our criteria.
for (MachineRegisterInfo::reg_instr_nodbg_iterator
@ -367,7 +367,7 @@ void InlineSpiller::propagateSiblingValue(SibValueMap::iterator SVIIter,
do {
SVI = WorkList.pop_back_val();
TinyPtrVector<VNInfo*> *Deps = VNI ? &FirstDeps : &SVI->second.Deps;
VNI = 0;
VNI = nullptr;
SibValueInfo &SV = SVI->second;
if (!SV.SpillMBB)
@ -659,7 +659,7 @@ void InlineSpiller::analyzeSiblingValues() {
VNInfo *VNI = *VI;
if (VNI->isUnused())
continue;
MachineInstr *DefMI = 0;
MachineInstr *DefMI = nullptr;
if (!VNI->isPHIDef()) {
DefMI = LIS.getInstructionFromIndex(VNI->def);
assert(DefMI && "No defining instruction");
@ -1359,7 +1359,7 @@ void InlineSpiller::spill(LiveRangeEdit &edit) {
// Share a stack slot among all descendants of Original.
Original = VRM.getOriginal(edit.getReg());
StackSlot = VRM.getStackSlot(Original);
StackInt = 0;
StackInt = nullptr;
DEBUG(dbgs() << "Inline spilling "
<< MRI.getRegClass(edit.getReg())->getName()

View File

@ -115,21 +115,21 @@ void IntrinsicLowering::AddPrototypes(Module &M) {
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context), (Type *)0);
DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memmove:
M.getOrInsertFunction("memmove",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
DL.getIntPtrType(Context), (Type *)0);
DL.getIntPtrType(Context), nullptr);
break;
case Intrinsic::memset:
M.getOrInsertFunction("memset",
Type::getInt8PtrTy(Context),
Type::getInt8PtrTy(Context),
Type::getInt32Ty(M.getContext()),
DL.getIntPtrType(Context), (Type *)0);
DL.getIntPtrType(Context), nullptr);
break;
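// Why the casts were needed (sketch; type names below are assumed):
// getOrInsertFunction is C-variadic and terminates its parameter-type list
// with a null Type*. A plain 0 would go through the ellipsis as an int,
// hence the old (Type *)0 casts; nullptr carries pointer-ness on its own,
// so the casts can go. Shape of a call:
//
//   M.getOrInsertFunction("memcpy", I8Ptr, I8Ptr, I8Ptr, IntPtrTy, nullptr);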
case Intrinsic::sqrt:
EnsureFPIntrinsicsExist(M, I, "sqrtf", "sqrt", "sqrtl");

View File

@ -138,7 +138,7 @@ static MCContext *addPassesToGenerateCode(LLVMTargetMachine *TM,
// Ask the target for an isel.
if (PassConfig->addInstSelector())
return NULL;
return nullptr;
PassConfig->addMachinePasses();
@ -185,7 +185,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
MII, MRI, STI);
// Create a code emitter if asked to show the encoding.
MCCodeEmitter *MCE = 0;
MCCodeEmitter *MCE = nullptr;
if (ShowMCEncoding)
MCE = getTarget().createMCCodeEmitter(MII, MRI, STI, *Context);
@ -208,7 +208,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
*Context);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
if (MCE == 0 || MAB == 0)
if (!MCE || !MAB)
return true;
AsmStreamer.reset(getTarget().createMCObjectStreamer(
@ -225,7 +225,7 @@ bool LLVMTargetMachine::addPassesToEmitFile(PassManagerBase &PM,
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
if (Printer == 0)
if (!Printer)
return true;
// If successful, createAsmPrinter took ownership of AsmStreamer.
@ -246,7 +246,8 @@ bool LLVMTargetMachine::addPassesToEmitMachineCode(PassManagerBase &PM,
JITCodeEmitter &JCE,
bool DisableVerify) {
// Add common CodeGen passes.
MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
MCContext *Context = addPassesToGenerateCode(this, PM, DisableVerify, nullptr,
nullptr);
if (!Context)
return true;
@ -265,7 +266,7 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
raw_ostream &Out,
bool DisableVerify) {
// Add common CodeGen passes.
Ctx = addPassesToGenerateCode(this, PM, DisableVerify, 0, 0);
Ctx = addPassesToGenerateCode(this, PM, DisableVerify, nullptr, nullptr);
if (!Ctx)
return true;
@ -280,7 +281,7 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
STI, *Ctx);
MCAsmBackend *MAB = getTarget().createMCAsmBackend(MRI, getTargetTriple(),
TargetCPU);
if (MCE == 0 || MAB == 0)
if (!MCE || !MAB)
return true;
std::unique_ptr<MCStreamer> AsmStreamer;
@ -290,7 +291,7 @@ bool LLVMTargetMachine::addPassesToEmitMC(PassManagerBase &PM,
// Create the AsmPrinter, which takes ownership of AsmStreamer if successful.
FunctionPass *Printer = getTarget().createAsmPrinter(*this, *AsmStreamer);
if (Printer == 0)
if (!Printer)
return true;
// If successful, createAsmPrinter took ownership of AsmStreamer.

View File

@ -53,7 +53,7 @@ bool latency_sort::operator()(const SUnit *LHS, const SUnit *RHS) const {
/// getSingleUnscheduledPred - If there is exactly one unscheduled predecessor
/// of SU, return it, otherwise return null.
SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
SUnit *OnlyAvailablePred = 0;
SUnit *OnlyAvailablePred = nullptr;
for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
I != E; ++I) {
SUnit &Pred = *I->getSUnit();
@ -61,7 +61,7 @@ SUnit *LatencyPriorityQueue::getSingleUnscheduledPred(SUnit *SU) {
// We found an available, but not scheduled, predecessor. If it's the
// only one we have found, keep track of it... otherwise give up.
if (OnlyAvailablePred && OnlyAvailablePred != &Pred)
return 0;
return nullptr;
OnlyAvailablePred = &Pred;
}
}
@ -105,7 +105,7 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
if (SU->isAvailable) return; // All preds scheduled.
SUnit *OnlyAvailablePred = getSingleUnscheduledPred(SU);
if (OnlyAvailablePred == 0 || !OnlyAvailablePred->isAvailable) return;
if (!OnlyAvailablePred || !OnlyAvailablePred->isAvailable) return;
// Okay, we found a single predecessor that is available, but not scheduled.
// Since it is available, it must be in the priority queue. First remove it.
@ -117,7 +117,7 @@ void LatencyPriorityQueue::AdjustPriorityOfUnscheduledPreds(SUnit *SU) {
}
SUnit *LatencyPriorityQueue::pop() {
if (empty()) return NULL;
if (empty()) return nullptr;
std::vector<SUnit *>::iterator Best = Queue.begin();
for (std::vector<SUnit *>::iterator I = std::next(Queue.begin()),
E = Queue.end(); I != E; ++I)
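
getSingleUnscheduledPred above is an instance of a generic "exactly one match, else null" scan. A hedged sketch of that shape, detached from SUnit and the scheduler:

#include <vector>

// Return the unique element satisfying P, or nullptr when there are
// zero matches or more than one - the same contract as
// getSingleUnscheduledPred, minus the scheduling details.
template <typename T, typename Pred>
static T *findUnique(std::vector<T> &Range, Pred P) {
  T *Only = nullptr;
  for (T &Elt : Range) {
    if (!P(Elt))
      continue;
    if (Only) // A second match destroys uniqueness; give up early.
      return nullptr;
    Only = &Elt;
  }
  return Only; // Still nullptr if nothing matched.
}
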


@ -30,8 +30,8 @@ LexicalScopes::~LexicalScopes() { reset(); }
/// reset - Reset the instance so that it's prepared for another function.
void LexicalScopes::reset() {
MF = NULL;
CurrentFnLexicalScope = NULL;
MF = nullptr;
CurrentFnLexicalScope = nullptr;
DeleteContainerSeconds(LexicalScopeMap);
DeleteContainerSeconds(AbstractScopeMap);
InlinedLexicalScopeMap.clear();
@ -60,8 +60,8 @@ void LexicalScopes::extractLexicalScopes(
// Scan each instruction and create scopes. First build working set of scopes.
for (MachineFunction::const_iterator I = MF->begin(), E = MF->end(); I != E;
++I) {
const MachineInstr *RangeBeginMI = NULL;
const MachineInstr *PrevMI = NULL;
const MachineInstr *RangeBeginMI = nullptr;
const MachineInstr *PrevMI = nullptr;
DebugLoc PrevDL;
for (MachineBasicBlock::const_iterator II = I->begin(), IE = I->end();
II != IE; ++II) {
@ -113,11 +113,11 @@ void LexicalScopes::extractLexicalScopes(
/// findLexicalScope - Find lexical scope, either regular or inlined, for the
/// given DebugLoc. Return NULL if not found.
LexicalScope *LexicalScopes::findLexicalScope(DebugLoc DL) {
MDNode *Scope = NULL;
MDNode *IA = NULL;
MDNode *Scope = nullptr;
MDNode *IA = nullptr;
DL.getScopeAndInlinedAt(Scope, IA, MF->getFunction()->getContext());
if (!Scope)
return NULL;
return nullptr;
// The scope that we were created with could have an extra file - which
// isn't what we care about in this case.
@ -133,8 +133,8 @@ LexicalScope *LexicalScopes::findLexicalScope(DebugLoc DL) {
/// getOrCreateLexicalScope - Find lexical scope for the given DebugLoc. If
/// not available then create new lexical scope.
LexicalScope *LexicalScopes::getOrCreateLexicalScope(DebugLoc DL) {
MDNode *Scope = NULL;
MDNode *InlinedAt = NULL;
MDNode *Scope = nullptr;
MDNode *InlinedAt = nullptr;
DL.getScopeAndInlinedAt(Scope, InlinedAt, MF->getFunction()->getContext());
if (InlinedAt) {
@ -159,10 +159,10 @@ LexicalScope *LexicalScopes::getOrCreateRegularScope(MDNode *Scope) {
if (WScope)
return WScope;
LexicalScope *Parent = NULL;
LexicalScope *Parent = nullptr;
if (D.isLexicalBlock())
Parent = getOrCreateLexicalScope(DebugLoc::getFromDILexicalBlock(Scope));
WScope = new LexicalScope(Parent, DIDescriptor(Scope), NULL, false);
WScope = new LexicalScope(Parent, DIDescriptor(Scope), nullptr, false);
LexicalScopeMap.insert(std::make_pair(Scope, WScope));
if (!Parent && DIDescriptor(Scope).isSubprogram() &&
DISubprogram(Scope).describes(MF->getFunction()))
@ -197,13 +197,13 @@ LexicalScope *LexicalScopes::getOrCreateAbstractScope(const MDNode *N) {
if (AScope)
return AScope;
LexicalScope *Parent = NULL;
LexicalScope *Parent = nullptr;
if (Scope.isLexicalBlock()) {
DILexicalBlock DB(N);
DIDescriptor ParentDesc = DB.getContext();
Parent = getOrCreateAbstractScope(ParentDesc);
}
AScope = new LexicalScope(Parent, DIDescriptor(N), NULL, true);
AScope = new LexicalScope(Parent, DIDescriptor(N), nullptr, true);
AbstractScopeMap[N] = AScope;
if (DIDescriptor(N).isSubprogram())
AbstractScopesList.push_back(AScope);
@ -244,7 +244,7 @@ void LexicalScopes::assignInstructionRanges(
SmallVectorImpl<InsnRange> &MIRanges,
DenseMap<const MachineInstr *, LexicalScope *> &MI2ScopeMap) {
LexicalScope *PrevLexicalScope = NULL;
LexicalScope *PrevLexicalScope = nullptr;
for (SmallVectorImpl<InsnRange>::const_iterator RI = MIRanges.begin(),
RE = MIRanges.end();
RI != RE; ++RI) {
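
getOrCreateRegularScope and getOrCreateAbstractScope above share one discipline: probe a map first, allocate only on a miss (recursing to build the parent), and cache the result so each MDNode owns exactly one LexicalScope. A reduced sketch, with a string key standing in for the MDNode* and unique_ptr standing in for the raw pointers that DeleteContainerSeconds later frees:

#include <map>
#include <memory>
#include <string>

struct Scope {
  Scope *Parent;
  explicit Scope(Scope *P) : Parent(P) {}
};

// Cache-then-allocate, as in getOrCreateRegularScope: each key maps
// to exactly one Scope, created lazily on first request.
static Scope *getOrCreateScope(
    std::map<std::string, std::unique_ptr<Scope>> &Cache,
    const std::string &Key, Scope *Parent) {
  std::unique_ptr<Scope> &Slot = Cache[Key]; // Null on a miss.
  if (!Slot)
    Slot.reset(new Scope(Parent));
  return Slot.get();
}
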


@ -64,7 +64,7 @@ void LiveDebugVariables::getAnalysisUsage(AnalysisUsage &AU) const {
MachineFunctionPass::getAnalysisUsage(AU);
}
LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(0) {
LiveDebugVariables::LiveDebugVariables() : MachineFunctionPass(ID), pImpl(nullptr) {
initializeLiveDebugVariablesPass(*PassRegistry::getPassRegistry());
}
@ -139,7 +139,7 @@ public:
UserValue(const MDNode *var, unsigned o, bool i, DebugLoc L,
LocMap::Allocator &alloc)
: variable(var), offset(o), IsIndirect(i), dl(L), leader(this),
next(0), locInts(alloc)
next(nullptr), locInts(alloc)
{}
/// getLeader - Get the leader of this value's equivalence class.
@ -444,7 +444,7 @@ void LDVImpl::mapVirtReg(unsigned VirtReg, UserValue *EC) {
UserValue *LDVImpl::lookupVirtReg(unsigned VirtReg) {
if (UserValue *UV = virtRegToEqClass.lookup(VirtReg))
return UV->getLeader();
return 0;
return nullptr;
}
bool LDVImpl::handleDebugValue(MachineInstr *MI, SlotIndex Idx) {
@ -646,14 +646,14 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
const MachineOperand &Loc = locations[LocNo];
if (!Loc.isReg()) {
extendDef(Idx, LocNo, 0, 0, 0, LIS, MDT, UVS);
extendDef(Idx, LocNo, nullptr, nullptr, nullptr, LIS, MDT, UVS);
continue;
}
// Register locations are constrained to where the register value is live.
if (TargetRegisterInfo::isVirtualRegister(Loc.getReg())) {
LiveInterval *LI = 0;
const VNInfo *VNI = 0;
LiveInterval *LI = nullptr;
const VNInfo *VNI = nullptr;
if (LIS.hasInterval(Loc.getReg())) {
LI = &LIS.getInterval(Loc.getReg());
VNI = LI->getVNInfoAt(Idx);
@ -670,7 +670,7 @@ UserValue::computeIntervals(MachineRegisterInfo &MRI,
LiveRange *LR = &LIS.getRegUnit(Unit);
const VNInfo *VNI = LR->getVNInfoAt(Idx);
// Don't track copies from physregs, it is too expensive.
extendDef(Idx, LocNo, LR, VNI, 0, LIS, MDT, UVS);
extendDef(Idx, LocNo, LR, VNI, nullptr, LIS, MDT, UVS);
}
// Finally, erase all the undefs.
@ -733,7 +733,7 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
LiveIntervals& LIS) {
DEBUG({
dbgs() << "Splitting Loc" << OldLocNo << '\t';
print(dbgs(), 0);
print(dbgs(), nullptr);
});
bool DidChange = false;
LocMap::iterator LocMapI;
@ -823,7 +823,7 @@ UserValue::splitLocation(unsigned OldLocNo, ArrayRef<unsigned> NewRegs,
}
}
DEBUG({dbgs() << "Split result: \t"; print(dbgs(), 0);});
DEBUG({dbgs() << "Split result: \t"; print(dbgs(), nullptr);});
return DidChange;
}
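
The UserValue constructor in the first hunk seeds an intrusive equivalence class: a fresh value is its own leader with next(nullptr), and values merged later share a leader reachable by chasing leader pointers. An illustrative reduction, not the real UserValue:

// Intrusive equivalence-class node in the style of UserValue: a
// fresh node leads itself and has no successor in its class chain.
struct EqNode {
  EqNode *leader;
  EqNode *next;
  EqNode() : leader(this), next(nullptr) {}

  // Follow leader pointers to the representative and cache the
  // result, much like UserValue::getLeader.
  EqNode *getLeader() {
    EqNode *L = this;
    while (L != L->leader)
      L = L->leader;
    return leader = L;
  }
};
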


@ -331,13 +331,13 @@ LiveRange::iterator LiveRange::addSegmentFrom(Segment S, iterator From) {
/// the value. If there is no live range before Kill, return NULL.
VNInfo *LiveRange::extendInBlock(SlotIndex StartIdx, SlotIndex Kill) {
if (empty())
return 0;
return nullptr;
iterator I = std::upper_bound(begin(), end(), Kill.getPrevSlot());
if (I == begin())
return 0;
return nullptr;
--I;
if (I->end <= StartIdx)
return 0;
return nullptr;
if (I->end < Kill)
extendSegmentEndTo(I, Kill);
return I->valno;
@ -435,7 +435,7 @@ void LiveRange::join(LiveRange &Other,
OutIt->valno = NewVNInfo[LHSValNoAssignments[OutIt->valno->id]];
for (iterator I = std::next(OutIt), E = end(); I != E; ++I) {
VNInfo* nextValNo = NewVNInfo[LHSValNoAssignments[I->valno->id]];
assert(nextValNo != 0 && "Huh?");
assert(nextValNo && "Huh?");
// If this live range has the same value # as its immediate predecessor,
// and if they are neighbors, remove one Segment. This happens when we
@ -638,7 +638,7 @@ void LiveRange::verify() const {
assert(I->start.isValid());
assert(I->end.isValid());
assert(I->start < I->end);
assert(I->valno != 0);
assert(I->valno != nullptr);
assert(I->valno->id < valnos.size());
assert(I->valno == valnos[I->valno->id]);
if (std::next(I) != E) {
@ -857,7 +857,7 @@ unsigned ConnectedVNInfoEqClasses::Classify(const LiveInterval *LI) {
EqClass.clear();
EqClass.grow(LI->getNumValNums());
const VNInfo *used = 0, *unused = 0;
const VNInfo *used = nullptr, *unused = nullptr;
// Determine connections.
for (LiveInterval::const_vni_iterator I = LI->vni_begin(), E = LI->vni_end();
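
extendInBlock folds three failure cases into one null result: an empty range, no segment starting before Kill, and a candidate segment that ended at or before StartIdx. The search itself is an ordinary upper_bound over segments sorted by start; a freestanding sketch with unsigned indices standing in for SlotIndex:

#include <algorithm>
#include <vector>

struct Segment {
  unsigned start, end; // Half-open [start, end), as in LiveRange.
};

static bool operator<(const Segment &L, const Segment &R) {
  return L.start < R.start;
}

// Locate the segment live immediately before Kill, or nullptr when
// the value is dead there - the lookup inside extendInBlock.
static Segment *findSegmentBefore(std::vector<Segment> &Segs,
                                  unsigned StartIdx, unsigned Kill) {
  if (Segs.empty())
    return nullptr;
  Segment Probe = {Kill - 1, 0}; // Stand-in for Kill.getPrevSlot(); assumes Kill > 0.
  std::vector<Segment>::iterator I =
      std::upper_bound(Segs.begin(), Segs.end(), Probe);
  if (I == Segs.begin())
    return nullptr; // Every segment starts after the probe point.
  --I;
  if (I->end <= StartIdx)
    return nullptr; // The candidate died before the block of interest.
  return &*I;
}
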


@ -79,7 +79,7 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
}
LiveIntervals::LiveIntervals() : MachineFunctionPass(ID),
DomTree(0), LRCalc(0) {
DomTree(nullptr), LRCalc(nullptr) {
initializeLiveIntervalsPass(*PassRegistry::getPassRegistry());
}
@ -572,9 +572,9 @@ void LiveIntervals::addKillFlags(const VirtRegMap *VRM) {
break;
}
if (CancelKill)
MI->clearRegisterKills(Reg, NULL);
MI->clearRegisterKills(Reg, nullptr);
else
MI->addRegisterKilled(Reg, NULL);
MI->addRegisterKilled(Reg, nullptr);
}
}
}
@ -590,17 +590,17 @@ LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const {
SlotIndex Start = LI.beginIndex();
if (Start.isBlock())
return NULL;
return nullptr;
SlotIndex Stop = LI.endIndex();
if (Stop.isBlock())
return NULL;
return nullptr;
// getMBBFromIndex doesn't need to search the MBB table when both indexes
// belong to proper instructions.
MachineBasicBlock *MBB1 = Indexes->getMBBFromIndex(Start);
MachineBasicBlock *MBB2 = Indexes->getMBBFromIndex(Stop);
return MBB1 == MBB2 ? MBB1 : NULL;
return MBB1 == MBB2 ? MBB1 : nullptr;
}
bool
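
intervalIsInOneMBB shows the conversion at its most legible: the boundary early-outs and the final ternary now all answer "no single block" with nullptr. Its shape, reduced to essentials (Block and the boundary flags are stand-ins for MachineBasicBlock and SlotIndex::isBlock):

struct Block { int Number; };

// Reduced intervalIsInOneMBB: an interval is local to a block only
// when neither endpoint lies on a block boundary and both endpoints
// map to the same block.
static Block *localBlock(Block *StartBB, Block *StopBB,
                         bool StartOnBoundary, bool StopOnBoundary) {
  if (StartOnBoundary || StopOnBoundary)
    return nullptr;
  return StartBB == StopBB ? StartBB : nullptr;
}
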

Some files were not shown because too many files have changed in this diff.