Mirror of https://github.com/RPCS3/llvm.git (synced 2025-01-19 00:14:20 +00:00)
[C++11] More 'nullptr' conversion. In some cases just using a boolean check instead of comparing to nullptr.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@206356 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent 73a7844c65
commit 695aa80f07
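For illustration only (this is not code from the patch; the Cache/Node names below are invented), a minimal, self-contained C++ sketch of the two idioms the commit message describes: replacing literal 0/NULL with nullptr in initializers and default arguments, and using a boolean check instead of an explicit comparison against nullptr.

#include <cassert>

struct Node { int Value; };

class Cache {
  Node *Head; // raw pointer member, null when the cache is empty

public:
  // Before: Cache() : Head(0) {}            (literal 0 used as a null pointer)
  Cache() : Head(nullptr) {}                 // after: nullptr is explicit and type-safe

  // Before: void reset(Node *N = NULL);     (NULL macro as the default argument)
  void reset(Node *N = nullptr) { Head = N; }

  // Before: bool empty() const { return Head == NULL; }
  bool empty() const { return !Head; }       // after: boolean check instead of comparing to nullptr
};

int main() {
  Cache C;
  assert(C.empty());
  Node N{42};
  C.reset(&N);
  assert(!C.empty());
  return 0;
}

The affected hunks of the commit follow.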
@@ -176,7 +176,8 @@ namespace llvm {
       return FMF;
     }

-    bool ParseOptionalToken(lltok::Kind T, bool &Present, LocTy *Loc = 0) {
+    bool ParseOptionalToken(lltok::Kind T, bool &Present,
+                            LocTy *Loc = nullptr) {
       if (Lex.getKind() != T) {
         Present = false;
       } else {
@@ -348,7 +349,7 @@ namespace llvm {
                            PerFunctionState &PFS);

     // Constant Parsing.
-    bool ParseValID(ValID &ID, PerFunctionState *PFS = NULL);
+    bool ParseValID(ValID &ID, PerFunctionState *PFS = nullptr);
     bool ParseGlobalValue(Type *Ty, Constant *&V);
     bool ParseGlobalTypeAndValue(Constant *&V);
     bool ParseGlobalValueVector(SmallVectorImpl<Constant*> &Elts);
@@ -170,7 +170,8 @@ class RegisterClassInfo;
   void GetPassthruRegs(MachineInstr *MI, std::set<unsigned>& PassthruRegs);

   void HandleLastUse(unsigned Reg, unsigned KillIdx, const char *tag,
-                     const char *header =NULL, const char *footer =NULL);
+                     const char *header = nullptr,
+                     const char *footer = nullptr);

   void PrescanInstruction(MachineInstr *MI, unsigned Count,
                           std::set<unsigned>& PassthruRegs);
@@ -77,7 +77,8 @@ class InterferenceCache {
     /// Iterator pointing into the fixed RegUnit interference.
     LiveInterval::iterator FixedI;

-    RegUnitInfo(LiveIntervalUnion &LIU) : VirtTag(LIU.getTag()), Fixed(0) {
+    RegUnitInfo(LiveIntervalUnion &LIU)
+        : VirtTag(LIU.getTag()), Fixed(nullptr) {
       VirtI.setMap(LIU.getMap());
     }
   };
@@ -93,7 +94,7 @@ class InterferenceCache {
     void update(unsigned MBBNum);

   public:
-    Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(0), LIS(0) {}
+    Entry() : PhysReg(0), Tag(0), RefCount(0), Indexes(nullptr), LIS(nullptr) {}

     void clear(MachineFunction *mf, SlotIndexes *indexes, LiveIntervals *lis) {
       assert(!hasRefs() && "Cannot clear cache entry with references");
@@ -148,8 +149,9 @@ class InterferenceCache {
   Entry *get(unsigned PhysReg);

 public:
-  InterferenceCache() : TRI(0), LIUArray(0), MF(0), PhysRegEntries(NULL),
-    PhysRegEntriesCount(0), RoundRobin(0) {}
+  InterferenceCache()
+    : TRI(nullptr), LIUArray(nullptr), MF(nullptr), PhysRegEntries(nullptr),
+      PhysRegEntriesCount(0), RoundRobin(0) {}

   ~InterferenceCache() {
     free(PhysRegEntries);
@@ -172,7 +174,7 @@ public:
     static BlockInterference NoInterference;

     void setEntry(Entry *E) {
-      Current = 0;
+      Current = nullptr;
       // Update reference counts. Nothing happens when RefCount reaches 0, so
       // we don't have to check for E == CacheEntry etc.
       if (CacheEntry)
@@ -184,10 +186,10 @@ public:

   public:
     /// Cursor - Create a dangling cursor.
-    Cursor() : CacheEntry(0), Current(0) {}
-    ~Cursor() { setEntry(0); }
+    Cursor() : CacheEntry(nullptr), Current(nullptr) {}
+    ~Cursor() { setEntry(nullptr); }

-    Cursor(const Cursor &O) : CacheEntry(0), Current(0) {
+    Cursor(const Cursor &O) : CacheEntry(nullptr), Current(nullptr) {
       setEntry(O.CacheEntry);
     }

@@ -200,7 +202,7 @@ public:
     void setPhysReg(InterferenceCache &Cache, unsigned PhysReg) {
       // Release reference before getting a new one. That guarantees we can
       // actually have CacheEntries live cursors.
-      setEntry(0);
+      setEntry(nullptr);
       if (PhysReg)
         setEntry(Cache.get(PhysReg));
     }
@@ -92,7 +92,7 @@ class LiveRangeCalc {
     VNInfo *Value;

     LiveInBlock(LiveRange &LR, MachineDomTreeNode *node, SlotIndex kill)
-      : LR(LR), DomNode(node), Kill(kill), Value(0) {}
+      : LR(LR), DomNode(node), Kill(kill), Value(nullptr) {}
   };

   /// LiveIn - Work list of blocks where the live-in value has yet to be
@@ -125,7 +125,8 @@ class LiveRangeCalc {
   void updateLiveIns();

 public:
-  LiveRangeCalc() : MF(0), MRI(0), Indexes(0), DomTree(0), Alloc(0) {}
+  LiveRangeCalc() : MF(nullptr), MRI(nullptr), Indexes(nullptr),
+                    DomTree(nullptr), Alloc(nullptr) {}

   //===--------------------------------------------------------------------===//
   // High-level interface.
@@ -203,7 +204,7 @@ public:
   /// addLiveInBlock().
   void setLiveOutValue(MachineBasicBlock *MBB, VNInfo *VNI) {
     Seen.set(MBB->getNumber());
-    LiveOut[MBB] = LiveOutPair(VNI, (MachineDomTreeNode *)0);
+    LiveOut[MBB] = LiveOutPair(VNI, nullptr);
   }

   /// addLiveInBlock - Add a block with an unknown live-in value. This
@@ -65,7 +65,8 @@ protected:
   LiveRegMatrix *Matrix;
   RegisterClassInfo RegClassInfo;

-  RegAllocBase(): TRI(0), MRI(0), VRM(0), LIS(0), Matrix(0) {}
+  RegAllocBase()
+    : TRI(nullptr), MRI(nullptr), VRM(nullptr), LIS(nullptr), Matrix(nullptr) {}

   virtual ~RegAllocBase() {}

@@ -61,14 +61,14 @@ namespace llvm {
   public:
     CoalescerPair(const TargetRegisterInfo &tri)
       : TRI(tri), DstReg(0), SrcReg(0), DstIdx(0), SrcIdx(0),
-        Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+        Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}

     /// Create a CoalescerPair representing a virtreg-to-physreg copy.
     /// No need to call setRegisters().
     CoalescerPair(unsigned VirtReg, unsigned PhysReg,
                   const TargetRegisterInfo &tri)
       : TRI(tri), DstReg(PhysReg), SrcReg(VirtReg), DstIdx(0), SrcIdx(0),
-        Partial(false), CrossClass(false), Flipped(false), NewRC(0) {}
+        Partial(false), CrossClass(false), Flipped(false), NewRC(nullptr) {}

     /// setRegisters - set registers to match the copy instruction MI. Return
     /// false if MI is not a coalescable copy instruction.
@@ -139,7 +139,7 @@ namespace llvm {
   public:
     RegDefIter(const SUnit *SU, const ScheduleDAGSDNodes *SD);

-    bool IsValid() const { return Node != NULL; }
+    bool IsValid() const { return Node != nullptr; }

     MVT GetValue() const {
       assert(IsValid() && "bad iterator");
@@ -96,7 +96,7 @@ class SelectionDAGBuilder {
     DebugLoc dl;
     unsigned SDNodeOrder;
   public:
-    DanglingDebugInfo() : DI(0), dl(DebugLoc()), SDNodeOrder(0) { }
+    DanglingDebugInfo() : DI(nullptr), dl(DebugLoc()), SDNodeOrder(0) { }
     DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO) :
       DI(di), dl(DL), SDNodeOrder(SDNO) { }
     const DbgValueInst* getDI() { return DI; }
@@ -135,7 +135,7 @@ private:
     MachineBasicBlock* BB;
     uint32_t ExtraWeight;

-    Case() : Low(0), High(0), BB(0), ExtraWeight(0) { }
+    Case() : Low(nullptr), High(nullptr), BB(nullptr), ExtraWeight(0) { }
     Case(const Constant *low, const Constant *high, MachineBasicBlock *bb,
          uint32_t extraweight) : Low(low), High(high), BB(bb),
          ExtraWeight(extraweight) { }
@@ -396,8 +396,8 @@ private:
   /// the same function, use the same failure basic block).
   class StackProtectorDescriptor {
   public:
-    StackProtectorDescriptor() : ParentMBB(0), SuccessMBB(0), FailureMBB(0),
-                                 Guard(0) { }
+    StackProtectorDescriptor() : ParentMBB(nullptr), SuccessMBB(nullptr),
+                                 FailureMBB(nullptr), Guard(nullptr) { }
     ~StackProtectorDescriptor() { }

     /// Returns true if all fields of the stack protector descriptor are
@@ -432,8 +432,8 @@ private:
     /// parent mbb after we create the stack protector check (SuccessMBB). This
     /// BB is visited only on stack protector check success.
     void resetPerBBState() {
-      ParentMBB = 0;
-      SuccessMBB = 0;
+      ParentMBB = nullptr;
+      SuccessMBB = nullptr;
     }

     /// Reset state that only changes when we switch functions.
@@ -446,8 +446,8 @@ private:
     /// 2.The guard variable since the guard variable we are checking against is
     /// always the same.
     void resetPerFunctionState() {
-      FailureMBB = 0;
-      Guard = 0;
+      FailureMBB = nullptr;
+      Guard = nullptr;
     }

     MachineBasicBlock *getParentMBB() { return ParentMBB; }
@@ -482,7 +482,7 @@ private:
     /// block will be created.
     MachineBasicBlock *AddSuccessorMBB(const BasicBlock *BB,
                                        MachineBasicBlock *ParentMBB,
-                                       MachineBasicBlock *SuccMBB = 0);
+                                       MachineBasicBlock *SuccMBB = nullptr);
   };

 private:
@@ -538,7 +538,7 @@ public:

   SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                       CodeGenOpt::Level ol)
-    : CurInst(NULL), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
+    : CurInst(nullptr), SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()),
       DAG(dag), FuncInfo(funcinfo), OptLevel(ol),
       HasTailCall(false) {
   }
@@ -600,13 +600,13 @@ public:

   void setValue(const Value *V, SDValue NewN) {
     SDValue &N = NodeMap[V];
-    assert(N.getNode() == 0 && "Already set a value for this node!");
+    assert(!N.getNode() && "Already set a value for this node!");
     N = NewN;
   }

   void setUnusedArgValue(const Value *V, SDValue NewN) {
     SDValue &N = UnusedArgNodeMap[V];
-    assert(N.getNode() == 0 && "Already set a value for this node!");
+    assert(!N.getNode() && "Already set a value for this node!");
     N = NewN;
   }

@@ -624,7 +624,7 @@ public:
   void CopyToExportRegsIfNeeded(const Value *V);
   void ExportFromCurrentBlock(const Value *V);
   void LowerCallTo(ImmutableCallSite CS, SDValue Callee, bool IsTailCall,
-                   MachineBasicBlock *LandingPad = NULL);
+                   MachineBasicBlock *LandingPad = nullptr);

   std::pair<SDValue, SDValue> LowerCallOperands(const CallInst &CI,
                                                 unsigned ArgIdx,
@@ -65,7 +65,7 @@ class SpillPlacement : public MachineFunctionPass {
 public:
   static char ID; // Pass identification, replacement for typeid.

-  SpillPlacement() : MachineFunctionPass(ID), nodes(0) {}
+  SpillPlacement() : MachineFunctionPass(ID), nodes(nullptr) {}
   ~SpillPlacement() { releaseMemory(); }

   /// BorderConstraint - A basic block has separate constraints for entry and
@@ -417,7 +417,7 @@ public:
   /// @param LRMap When not null, this vector will map each live range in Edit
   /// back to the indices returned by openIntv.
   /// There may be extra indices created by dead code elimination.
-  void finish(SmallVectorImpl<unsigned> *LRMap = 0);
+  void finish(SmallVectorImpl<unsigned> *LRMap = nullptr);

   /// dump - print the current interval maping to dbgs().
   void dump() const;
@@ -148,8 +148,8 @@ namespace EEVT {
     /// valid on completely unknown type sets. If Pred is non-null, only MVTs
     /// that pass the predicate are added.
     bool FillWithPossibleTypes(TreePattern &TP,
-                               bool (*Pred)(MVT::SimpleValueType) = 0,
-                               const char *PredicateName = 0);
+                               bool (*Pred)(MVT::SimpleValueType) = nullptr,
+                               const char *PredicateName = nullptr);
   };
 }

@@ -329,11 +329,11 @@ class TreePatternNode {
 public:
   TreePatternNode(Record *Op, const std::vector<TreePatternNode*> &Ch,
                   unsigned NumResults)
-    : Operator(Op), Val(0), TransformFn(0), Children(Ch) {
+    : Operator(Op), Val(nullptr), TransformFn(nullptr), Children(Ch) {
     Types.resize(NumResults);
   }
   TreePatternNode(Init *val, unsigned NumResults) // leaf ctor
-    : Operator(0), Val(val), TransformFn(0) {
+    : Operator(nullptr), Val(val), TransformFn(nullptr) {
     Types.resize(NumResults);
   }
   ~TreePatternNode();
@@ -342,7 +342,7 @@ public:
   const std::string &getName() const { return Name; }
   void setName(StringRef N) { Name.assign(N.begin(), N.end()); }

-  bool isLeaf() const { return Val != 0; }
+  bool isLeaf() const { return Val != nullptr; }

   // Type accessors.
   unsigned getNumTypes() const { return Types.size(); }
@@ -580,7 +580,7 @@ public:
   /// patterns as possible. Return true if all types are inferred, false
   /// otherwise. Bail out if a type contradiction is found.
   bool InferAllTypes(const StringMap<SmallVector<TreePatternNode*,1> >
-                          *NamedTypes=0);
+                          *NamedTypes=nullptr);

   /// error - If this is the first error in the current resolution step,
   /// print it and set the error flag. Otherwise, continue silently.
@@ -619,7 +619,7 @@ public:
                  const std::vector<Record*> &operands,
                  const std::vector<Record*> &impresults)
     : Pattern(TP), Results(results), Operands(operands),
-      ImpResults(impresults), ResultPattern(0) {}
+      ImpResults(impresults), ResultPattern(nullptr) {}

   TreePattern *getPattern() const { return Pattern; }
   unsigned getNumResults() const { return Results.size(); }
@@ -768,7 +768,7 @@ public:
     return PatternFragments.find(R)->second;
   }
   TreePattern *getPatternFragmentIfRead(Record *R) const {
-    if (!PatternFragments.count(R)) return 0;
+    if (!PatternFragments.count(R)) return nullptr;
     return PatternFragments.find(R)->second;
   }

@@ -71,7 +71,7 @@ namespace llvm {
     // Returns NULL if this and Idx don't compose.
     CodeGenSubRegIndex *compose(CodeGenSubRegIndex *Idx) const {
       CompMap::const_iterator I = Composed.find(Idx);
-      return I == Composed.end() ? 0 : I->second;
+      return I == Composed.end() ? nullptr : I->second;
     }

     // Add a composite subreg index: this+A = B.
@@ -90,7 +90,8 @@ namespace llvm {
         B->Offset = Offset + A->Offset;
         B->Size = A->Size;
       }
-      return (Ins.second || Ins.first->second == B) ? 0 : Ins.first->second;
+      return (Ins.second || Ins.first->second == B) ? nullptr
+                                                    : Ins.first->second;
     }

     // Update the composite maps of components specified in 'ComposedOf'.
@@ -414,7 +415,9 @@ namespace llvm {
     // contain this unit.
     unsigned RegClassUnitSetsIdx;

-    RegUnit() : Weight(0), RegClassUnitSetsIdx(0) { Roots[0] = Roots[1] = 0; }
+    RegUnit() : Weight(0), RegClassUnitSetsIdx(0) {
+      Roots[0] = Roots[1] = nullptr;
+    }

     ArrayRef<const CodeGenRegister*> getRoots() const {
       assert(!(Roots[1] && !Roots[0]) && "Invalid roots array");
@@ -572,7 +575,7 @@ namespace llvm {

     // Create a native register unit that is associated with one or two root
     // registers.
-    unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = 0) {
+    unsigned newRegUnit(CodeGenRegister *R0, CodeGenRegister *R1 = nullptr) {
       RegUnits.resize(RegUnits.size() + 1);
       RegUnits.back().Roots[0] = R0;
       RegUnits.back().Roots[1] = R1;
@@ -56,7 +56,7 @@ struct CodeGenSchedRW {
   RecVec Aliases;

   CodeGenSchedRW()
-    : Index(0), TheDef(0), IsRead(false), IsAlias(false),
+    : Index(0), TheDef(nullptr), IsRead(false), IsAlias(false),
       HasVariants(false), IsVariadic(false), IsSequence(false) {}
   CodeGenSchedRW(unsigned Idx, Record *Def)
     : Index(Idx), TheDef(Def), IsAlias(false), IsVariadic(false) {
@@ -74,7 +74,7 @@ struct CodeGenSchedRW {

   CodeGenSchedRW(unsigned Idx, bool Read, const IdxVec &Seq,
                  const std::string &Name)
-    : Index(Idx), Name(Name), TheDef(0), IsRead(Read), IsAlias(false),
+    : Index(Idx), Name(Name), TheDef(nullptr), IsRead(Read), IsAlias(false),
       HasVariants(false), IsVariadic(false), IsSequence(true), Sequence(Seq) {
     assert(Sequence.size() > 1 && "implied sequence needs >1 RWs");
   }
@@ -142,7 +142,7 @@ struct CodeGenSchedClass {
   // off to join another inferred class.
   RecVec InstRWs;

-  CodeGenSchedClass(): Index(0), ItinClassDef(0) {}
+  CodeGenSchedClass(): Index(0), ItinClassDef(nullptr) {}

   bool isKeyEqual(Record *IC, const IdxVec &W, const IdxVec &R) {
     return ItinClassDef == IC && Writes == W && Reads == R;
@@ -207,7 +207,7 @@ public:

   Matcher *takeChild(unsigned i) {
     Matcher *Res = Children[i];
-    Children[i] = 0;
+    Children[i] = nullptr;
     return Res;
   }
