Analysis: Remove implicit ilist iterator conversions

Remove implicit ilist iterator conversions from LLVMAnalysis.

I came across something really scary in `llvm::isKnownNotFullPoison()`
which relied on `Instruction::getNextNode()` being completely broken
(not surprising, but scary nevertheless).  This function is documented
(and coded) to return `nullptr` when it gets to the sentinel, but with
an `ilist_half_node` as a sentinel, the sentinel check looks into some
other memory and we don't recognize we've hit the end.

Rooting out these scary cases is the reason I'm removing the implicit
conversions before doing anything else with `ilist`; I'm not at all
surprised that clients rely on badness.
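
To make the failure concrete, here is the relevant before/after from the
`isKnownNotFullPoison()` hunk at the bottom of this diff (simplified; see
the full hunk for context).  The old loop converts `BB->end()` to
`Instruction*` and steps with `getNextNode()`, both of which misread an
`ilist_half_node` sentinel; the new loop never treats the sentinel as a
real `Instruction`:

    // Before: the implicit conversion of end() and getNextNode()'s
    // sentinel check both poke at memory that isn't an Instruction.
    for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
         I = I->getNextNode()) {
      // ...
    }

    // After: compare iterators against end(); use &*I wherever an
    // Instruction* is needed.
    for (BasicBlock::const_iterator I = PoisonI->getIterator(),
                                    E = BB->end();
         I != E; ++I) {
      // ...
    }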

I found another scary case -- this time, not relying on badness, just
bad (but I guess getting lucky so far) -- in
`ObjectSizeOffsetEvaluator::compute_()`.  Here, we save out the
insertion point, do some things, and then restore it.  Previously, we
let the iterator auto-convert to `Instruction*`, and then set it back
using the `Instruction*` version:

    Instruction *PrevInsertPoint = Builder.GetInsertPoint();

    /* Logic that may change insert point */

    if (PrevInsertPoint)
      Builder.SetInsertPoint(PrevInsertPoint);

The check for `PrevInsertPoint` doesn't protect correctly against bad
accesses.  If the insertion point has been set to the end of a basic
block (i.e., `SetInsertPoint(SomeBB)`), then `GetInsertPoint()` returns
an iterator pointing at the list sentinel.  The version of
`SetInsertPoint()` that's getting called will then call
`PrevInsertPoint->getParent()`, which explodes horribly.  The only
reason this hasn't blown up is that it's fairly unlikely the builder is
adding to the end of the block; usually, we're adding instructions
somewhere before the terminator.
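
The fix (in the `ObjectSizeOffsetEvaluator::compute_()` hunk below) is to
stop round-tripping the insertion point through `Instruction*` entirely
and let an RAII guard save and restore the full iterator state, which
handles the end-of-block case safely.  Roughly:

    BuilderTy::InsertPointGuard Guard(Builder); // saves block + iterator

    /* Logic that may change insert point */

    // Guard's destructor restores the saved insert point on scope exit,
    // even when it was the end of a basic block.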

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@249925 91177308-0d34-0410-b5e6-96231b3b80d8
Duncan P. N. Exon Smith, 2015-10-10 00:53:03 +00:00
parent f2e1e0eaf4, commit d3a5adc5ba
22 changed files with 99 additions and 99 deletions
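
Most of the diff below is mechanical.  As a rough guide (illustrative
fragments, not lines from any one file), the recurring rewrites are:

    // Pointer to iterator: make the conversion explicit.
    BasicBlock::iterator It = I->getIterator(); // was: ... It = I;

    // Iterator to pointer: dereference, then take the address.
    foo(&*It);                                  // was: foo(It);

    // First instruction of a block as a pointer.
    Instruction *First = &BB->front();          // was: ... = BB->begin();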

@@ -351,12 +351,12 @@ bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                           const ModRefInfo Mode) {
   assert(I1.getParent() == I2.getParent() &&
          "Instructions not in same basic block!");
-  BasicBlock::const_iterator I = &I1;
-  BasicBlock::const_iterator E = &I2;
+  BasicBlock::const_iterator I = I1.getIterator();
+  BasicBlock::const_iterator E = I2.getIterator();
   ++E; // Convert from inclusive to exclusive range.
 
   for (; I != E; ++I) // Check every instruction in range
-    if (getModRefInfo(I, Loc) & Mode)
+    if (getModRefInfo(&*I, Loc) & Mode)
       return true;
   return false;
 }

@@ -149,9 +149,9 @@ bool AAEval::runOnFunction(Function &F) {
   SetVector<Value *> Loads;
   SetVector<Value *> Stores;
 
-  for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
-    if (I->getType()->isPointerTy()) // Add all pointer arguments.
-      Pointers.insert(I);
+  for (auto &I : F.args())
+    if (I.getType()->isPointerTy()) // Add all pointer arguments.
+      Pointers.insert(&I);
 
   for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
     if (I->getType()->isPointerTy()) // Add all pointer instructions.

@@ -221,7 +221,7 @@ AliasSet *AliasSetTracker::findAliasSetForPointer(const Value *Ptr,
     if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
 
     if (!FoundSet) {      // If this is the first alias set ptr can go into.
-      FoundSet = Cur;     // Remember it.
+      FoundSet = &*Cur;   // Remember it.
     } else {              // Otherwise, we must merge the sets.
       FoundSet->mergeSetIn(*Cur, *this);     // Merge in contents.
     }
@@ -255,7 +255,7 @@ AliasSet *AliasSetTracker::findAliasSetForUnknownInst(Instruction *Inst) {
     if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
       continue;
     if (!FoundSet)            // If this is the first alias set ptr can go into.
-      FoundSet = Cur;         // Remember it.
+      FoundSet = &*Cur;       // Remember it.
     else if (!Cur->Forward)   // Otherwise, we must merge the sets.
       FoundSet->mergeSetIn(*Cur, *this);     // Merge in contents.
   }
@@ -372,8 +372,8 @@ bool AliasSetTracker::add(Instruction *I) {
 }
 
 void AliasSetTracker::add(BasicBlock &BB) {
-  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
-    add(I);
+  for (auto &I : BB)
+    add(&I);
 }
 
 void AliasSetTracker::add(const AliasSetTracker &AST) {

@@ -1418,7 +1418,7 @@ bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
   // the Values cannot come from different iterations of a potential cycle the
   // phi nodes could be involved in.
   for (auto *P : VisitedPhiBBs)
-    if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
+    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
       return false;
 
   return true;

@@ -55,7 +55,7 @@ struct GraphTraits<BlockFrequencyInfo *> {
   typedef Function::const_iterator nodes_iterator;
 
   static inline const NodeType *getEntryNode(const BlockFrequencyInfo *G) {
-    return G->getFunction()->begin();
+    return &G->getFunction()->front();
   }
   static ChildIteratorType child_begin(const NodeType *N) {
     return succ_begin(N);

@@ -514,11 +514,10 @@ void BranchProbabilityInfo::print(raw_ostream &OS) const {
   // We print the probabilities from the last function the analysis ran over,
   // or the function it is currently running over.
   assert(LastF && "Cannot print prior to running over a function");
-  for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
-       BI != BE; ++BI) {
-    for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
-         SI != SE; ++SI) {
-      printEdgeProbability(OS << " ", BI, *SI);
+  for (const auto &BI : *LastF) {
+    for (succ_const_iterator SI = succ_begin(&BI), SE = succ_end(&BI); SI != SE;
+         ++SI) {
+      printEdgeProbability(OS << " ", &BI, *SI);
     }
   }
 }

@@ -203,7 +203,8 @@ bool llvm::isPotentiallyReachable(const Instruction *A, const Instruction *B,
       return true;
 
     // Linear scan, start at 'A', see whether we hit 'B' or the end first.
-    for (BasicBlock::const_iterator I = A, E = BB->end(); I != E; ++I) {
+    for (BasicBlock::const_iterator I = A->getIterator(), E = BB->end(); I != E;
+         ++I) {
       if (&*I == B)
         return true;
     }

@@ -116,7 +116,7 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
   for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
        II != E; ++II) {
     // Skip ephemeral values.
-    if (EphValues.count(II))
+    if (EphValues.count(&*II))
       continue;
 
     // Special handling for calls.

@@ -523,7 +523,7 @@ void CostModelAnalysis::print(raw_ostream &OS, const Module*) const {
   for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
     for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
-      Instruction *Inst = it;
+      Instruction *Inst = &*it;
       unsigned Cost = getInstructionCost(Inst);
       if (Cost != (unsigned)-1)
         OS << "Cost Model: Found an estimated cost of " << Cost;

@@ -147,8 +147,8 @@ void DivergencePropagator::exploreSyncDependency(TerminatorInst *TI) {
   for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
     // A PHINode is uniform if it returns the same value no matter which path is
     // taken.
-    if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(I).second)
-      Worklist.push_back(I);
+    if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(&*I).second)
+      Worklist.push_back(&*I);
   }
 
   // Propagation rule 2: if a value defined in a loop is used outside, the user

@@ -276,7 +276,7 @@ bool IVUsers::runOnLoop(Loop *l, LPPassManager &LPM) {
   // them by stride. Start by finding all of the PHI nodes in the header for
   // this loop. If they are induction variables, inspect their uses.
   for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
-    (void)AddUsersIfInteresting(I);
+    (void)AddUsersIfInteresting(&*I);
 
   return false;
 }

@@ -960,7 +960,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
       continue;
 
     // Skip ephemeral values.
-    if (EphValues.count(I))
+    if (EphValues.count(&*I))
       continue;
 
     ++NumInstructions;
@@ -992,7 +992,7 @@ bool CallAnalyzer::analyzeBlock(BasicBlock *BB,
     // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
     // cost should count against inlining.
-    if (Base::visit(I))
+    if (Base::visit(&*I))
       ++NumInstructionsSimplified;
     else
       Cost += InlineConstants::InstrCost;
@@ -1172,15 +1172,15 @@ bool CallAnalyzer::analyzeCall(CallSite CS) {
        FAI != FAE; ++FAI, ++CAI) {
     assert(CAI != CS.arg_end());
     if (Constant *C = dyn_cast<Constant>(CAI))
-      SimplifiedValues[FAI] = C;
+      SimplifiedValues[&*FAI] = C;
 
     Value *PtrArg = *CAI;
     if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
-      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());
+      ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
 
       // We can SROA any pointer arguments derived from alloca instructions.
       if (isa<AllocaInst>(PtrArg)) {
-        SROAArgValues[FAI] = PtrArg;
+        SROAArgValues[&*FAI] = PtrArg;
         SROAArgCosts[PtrArg] = 0;
       }
     }
@@ -1423,9 +1423,8 @@ bool InlineCostAnalysis::isInlineViable(Function &F) {
     if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
       return false;
 
-    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
-         ++II) {
-      CallSite CS(II);
+    for (auto &II : *BI) {
+      CallSite CS(&II);
       if (!CS)
         continue;

@@ -234,7 +234,7 @@ void Lint::visitCallSite(CallSite CS) {
     for (; AI != AE; ++AI) {
       Value *Actual = *AI;
       if (PI != PE) {
-        Argument *Formal = PI++;
+        Argument *Formal = &*PI++;
         Assert(Formal->getType() == Actual->getType(),
                "Undefined behavior: Call argument type mismatches "
                "callee parameter type",
@@ -602,8 +602,8 @@ void Lint::visitInsertElementInst(InsertElementInst &I) {
 
 void Lint::visitUnreachableInst(UnreachableInst &I) {
   // This isn't undefined behavior, it's merely suspicious.
-  Assert(&I == I.getParent()->begin() ||
-             std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+  Assert(&I == &I.getParent()->front() ||
+             std::prev(I.getIterator())->mayHaveSideEffects(),
          "Unusual: unreachable immediately preceded by instruction without "
          "side effects",
          &I);
@@ -635,7 +635,7 @@ Value *Lint::findValueImpl(Value *V, bool OffsetOk,
   // TODO: Look through vector insert/extract/shuffle.
   V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
   if (LoadInst *L = dyn_cast<LoadInst>(V)) {
-    BasicBlock::iterator BBI = L;
+    BasicBlock::iterator BBI = L->getIterator();
     BasicBlock *BB = L->getParent();
     SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
     for (;;) {

@@ -118,7 +118,8 @@ bool llvm::isSafeToLoadUnconditionally(Value *V, Instruction *ScanFrom,
   // from/to. If so, the previous load or store would have already trapped,
   // so there is no harm doing an extra load (also, CSE will later eliminate
   // the load entirely).
-  BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
+  BasicBlock::iterator BBI = ScanFrom->getIterator(),
+                       E = ScanFrom->getParent()->begin();
 
   // We can at least always strip pointer casts even though we can't use the
   // base here.
@@ -211,7 +212,7 @@ Value *llvm::FindAvailableLoadedValue(Value *Ptr, BasicBlock *ScanBB,
   while (ScanFrom != ScanBB->begin()) {
     // We must ignore debug info directives when counting (otherwise they
     // would affect codegen).
-    Instruction *Inst = --ScanFrom;
+    Instruction *Inst = &*--ScanFrom;
     if (isa<DbgInfoIntrinsic>(Inst))
       continue;

@@ -1397,7 +1397,7 @@ void LoopAccessInfo::analyzeLoop(const ValueToValueMap &Strides) {
       if (it->mayWriteToMemory()) {
         StoreInst *St = dyn_cast<StoreInst>(it);
         if (!St) {
-          emitAnalysis(LoopAccessReport(it) <<
+          emitAnalysis(LoopAccessReport(&*it) <<
                        "instruction cannot be vectorized");
           CanVecMem = false;
           return;

@@ -621,7 +621,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
   // always generate code immediately before the instruction being
   // processed, so that the generated code dominates the same BBs
-  Instruction *PrevInsertPoint = Builder.GetInsertPoint();
+  BuilderTy::InsertPointGuard Guard(Builder);
   if (Instruction *I = dyn_cast<Instruction>(V))
     Builder.SetInsertPoint(I);
@@ -650,9 +650,6 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
     Result = unknown();
   }
 
-  if (PrevInsertPoint)
-    Builder.SetInsertPoint(PrevInsertPoint);
-
   // Don't reuse CacheIt since it may be invalid at this point.
   CacheMap[V] = Result;
   return Result;
@@ -742,7 +739,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitPHINode(PHINode &PHI) {
   // compute offset/size for each PHI incoming pointer
   for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
-    Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt());
+    Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
     SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
     if (!bothKnown(EdgeData)) {

@@ -216,7 +216,7 @@ getCallSiteDependencyFrom(CallSite CS, bool isReadOnlyCall,
     if (!Limit)
       return MemDepResult::getUnknown();
 
-    Instruction *Inst = --ScanIt;
+    Instruction *Inst = &*--ScanIt;
 
     // If this inst is a memory op, get the pointer it accessed
     MemoryLocation Loc;
@@ -502,7 +502,7 @@ MemDepResult MemoryDependenceAnalysis::getSimplePointerDependencyFrom(
   // Walk backwards through the basic block, looking for dependencies.
   while (ScanIt != BB->begin()) {
-    Instruction *Inst = --ScanIt;
+    Instruction *Inst = &*--ScanIt;
 
     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
       // Debug intrinsics don't (and can't) cause dependencies.
@@ -767,13 +767,13 @@ MemDepResult MemoryDependenceAnalysis::getDependency(Instruction *QueryInst) {
       if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
         isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
 
-      LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
-                                            QueryParent, QueryInst);
+      LocalCache = getPointerDependencyFrom(
+          MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
     } else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
       CallSite QueryCS(QueryInst);
       bool isReadOnly = AA->onlyReadsMemory(QueryCS);
-      LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
-                                             QueryParent);
+      LocalCache = getCallSiteDependencyFrom(
+          QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
     } else
       // Non-memory instruction.
       LocalCache = MemDepResult::getUnknown();
@@ -896,7 +896,7 @@ MemoryDependenceAnalysis::getNonLocalCallDependency(CallSite QueryCS) {
     BasicBlock::iterator ScanPos = DirtyBB->end();
     if (ExistingResult) {
       if (Instruction *Inst = ExistingResult->getResult().getInst()) {
-        ScanPos = Inst;
+        ScanPos = Inst->getIterator();
 
         // We're removing QueryInst's use of Inst.
         RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
                              QueryCS.getInstruction());
@@ -1035,11 +1035,11 @@ MemDepResult MemoryDependenceAnalysis::GetNonLocalInfoForBlock(
     assert(ExistingResult->getResult().getInst()->getParent() == BB &&
            "Instruction invalidated?");
     ++NumCacheDirtyNonLocalPtr;
-    ScanPos = ExistingResult->getResult().getInst();
+    ScanPos = ExistingResult->getResult().getInst()->getIterator();
 
     // Eliminating the dirty entry from 'Cache', so update the reverse info.
     ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
-    RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
+    RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
   } else {
     ++NumUncacheNonLocalPtr;
   }
@@ -1590,7 +1590,7 @@ void MemoryDependenceAnalysis::removeInstruction(Instruction *RemInst) {
   // the entire block to get to this point.
   MemDepResult NewDirtyVal;
   if (!RemInst->isTerminator())
-    NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
+    NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
 
   ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
   if (ReverseDepIt != ReverseLocalDeps.end()) {

@@ -93,7 +93,7 @@ ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
           .Default(ARCInstKind::CallOrUser);
 
   // One argument.
-  const Argument *A0 = AI++;
+  const Argument *A0 = &*AI++;
   if (AI == AE)
     // Argument is a pointer.
     if (PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
@@ -131,7 +131,7 @@ ARCInstKind llvm::objcarc::GetFunctionClass(const Function *F) {
   }
 
   // Two arguments, first is i8**.
-  const Argument *A1 = AI++;
+  const Argument *A1 = &*AI++;
   if (AI == AE)
     if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
       if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))

@@ -63,7 +63,7 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
       // Create a new cast, and leave the old cast in place in case
       // it is being used as an insert point. Clear its operand
      // so that it doesn't hold anything live.
-      Ret = CastInst::Create(Op, V, Ty, "", IP);
+      Ret = CastInst::Create(Op, V, Ty, "", &*IP);
       Ret->takeName(CI);
       CI->replaceAllUsesWith(Ret);
       CI->setOperand(0, UndefValue::get(V->getType()));
@@ -75,12 +75,12 @@ Value *SCEVExpander::ReuseOrCreateCast(Value *V, Type *Ty,
 
   // Create a new cast.
   if (!Ret)
-    Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
+    Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);
 
   // We assert at the end of the function since IP might point to an
   // instruction with different dominance properties than a cast
   // (an invoke for example) and not dominate BIP (but the cast does).
-  assert(SE.DT.dominates(Ret, BIP));
+  assert(SE.DT.dominates(Ret, &*BIP));
 
   rememberInstruction(Ret);
   return Ret;
@@ -143,7 +143,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, Type *Ty) {
   // Cast the instruction immediately after the instruction.
   Instruction *I = cast<Instruction>(V);
-  BasicBlock::iterator IP = I; ++IP;
+  BasicBlock::iterator IP = ++I->getIterator();
   if (InvokeInst *II = dyn_cast<InvokeInst>(I))
     IP = II->getNormalDest()->begin();
   if (CatchPadInst *CPI = dyn_cast<CatchPadInst>(I))
@@ -176,7 +176,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
         ScanLimit++;
       if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
           IP->getOperand(1) == RHS)
-        return IP;
+        return &*IP;
       if (IP == BlockBegin) break;
     }
   }
@@ -192,7 +192,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
     if (!Preheader) break;
 
     // Ok, move up a level.
-    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+    Builder.SetInsertPoint(Preheader->getTerminator());
   }
 
   // If we haven't found this binop, insert it.
@@ -485,7 +485,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
             Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
 
     assert(!isa<Instruction>(V) ||
-           SE.DT.dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+           SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
 
     // Expand the operands for a plain byte offset.
     Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
@@ -510,7 +510,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
           ScanLimit++;
         if (IP->getOpcode() == Instruction::GetElementPtr &&
             IP->getOperand(0) == V && IP->getOperand(1) == Idx)
-          return IP;
+          return &*IP;
         if (IP == BlockBegin) break;
       }
     }
@@ -525,7 +525,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
       if (!Preheader) break;
 
      // Ok, move up a level.
-      Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+      Builder.SetInsertPoint(Preheader->getTerminator());
     }
 
   // Emit a GEP.
@@ -556,7 +556,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     if (!Preheader) break;
 
     // Ok, move up a level.
-    Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+    Builder.SetInsertPoint(Preheader->getTerminator());
   }
 
   // Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
@@ -1168,8 +1168,8 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
   PostIncLoops.clear();
 
   // Expand code for the start value.
-  Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
-                                L->getHeader()->begin());
+  Value *StartV =
+      expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());
 
   // StartV must be hoisted into L's preheader to dominate the new phi.
   assert(!isa<Instruction>(StartV) ||
@@ -1186,7 +1186,7 @@ SCEVExpander::getAddRecExprPHILiterally(const SCEVAddRecExpr *Normalized,
   if (useSubtract)
     Step = SE.getNegativeSCEV(Step);
   // Expand the step somewhere that dominates the loop header.
-  Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+  Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
 
   // The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
   // we actually do emit an addition. It does not apply if we emit a
@@ -1302,7 +1302,8 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
   // expandCodeFor with an InsertPoint that is either outside the PostIncLoop
   // or dominated by IVIncInsertPos.
   if (isa<Instruction>(Result) &&
-      !SE.DT.dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
+      !SE.DT.dominates(cast<Instruction>(Result),
+                       &*Builder.GetInsertPoint())) {
     // The induction variable's postinc expansion does not dominate this use.
     // IVUsers tries to prevent this case, so it is rare. However, it can
    // happen when an IVUser outside the loop is not dominated by the latch
@@ -1320,7 +1321,7 @@ Value *SCEVExpander::expandAddRecExprLiterally(const SCEVAddRecExpr *S) {
     {
       // Expand the step somewhere that dominates the loop header.
      BuilderType::InsertPointGuard Guard(Builder);
-      StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+      StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
     }
     Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
   }
@@ -1400,7 +1401,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
            isa<LandingPadInst>(NewInsertPt))
       ++NewInsertPt;
     V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
-                      NewInsertPt);
+                      &*NewInsertPt);
     return V;
   }
@@ -1441,7 +1442,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
     BasicBlock *Header = L->getHeader();
     pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
     CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
-                                  Header->begin());
+                                  &Header->front());
     rememberInstruction(CanonicalIV);
 
     SmallSet<BasicBlock *, 4> PredSeen;
@@ -1586,7 +1587,8 @@ Value *SCEVExpander::visitUMaxExpr(const SCEVUMaxExpr *S) {
 
 Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
                                    Instruction *IP) {
-  Builder.SetInsertPoint(IP->getParent(), IP);
+  assert(IP);
+  Builder.SetInsertPoint(IP);
   return expandCodeFor(SH, Ty);
 }
@@ -1604,7 +1606,7 @@ Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty) {
 Value *SCEVExpander::expand(const SCEV *S) {
   // Compute an insertion point for this SCEV object. Hoist the instructions
   // as far out in the loop nest as possible.
-  Instruction *InsertPt = Builder.GetInsertPoint();
+  Instruction *InsertPt = &*Builder.GetInsertPoint();
   for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
        L = L->getParentLoop())
     if (SE.isLoopInvariant(S, L)) {
@@ -1615,18 +1617,18 @@ Value *SCEVExpander::expand(const SCEV *S) {
         // LSR sets the insertion point for AddRec start/step values to the
         // block start to simplify value reuse, even though it's an invalid
         // position. SCEVExpander must correct for this in all cases.
-        InsertPt = L->getHeader()->getFirstInsertionPt();
+        InsertPt = &*L->getHeader()->getFirstInsertionPt();
       }
     } else {
       // If the SCEV is computable at this level, insert it into the header
       // after the PHIs (and after any other instructions that we've inserted
       // there) so that it is guaranteed to dominate any user inside the loop.
       if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
-        InsertPt = L->getHeader()->getFirstInsertionPt();
+        InsertPt = &*L->getHeader()->getFirstInsertionPt();
       while (InsertPt != Builder.GetInsertPoint()
              && (isInsertedInstruction(InsertPt)
                  || isa<DbgInfoIntrinsic>(InsertPt))) {
-        InsertPt = std::next(BasicBlock::iterator(InsertPt));
+        InsertPt = &*std::next(InsertPt->getIterator());
       }
       break;
     }
@@ -1638,7 +1640,7 @@ Value *SCEVExpander::expand(const SCEV *S) {
     return I->second;
 
   BuilderType::InsertPointGuard Guard(Builder);
-  Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+  Builder.SetInsertPoint(InsertPt);
 
   // Expand the expression into instructions.
   Value *V = visit(S);
@@ -1676,8 +1678,8 @@ SCEVExpander::getOrInsertCanonicalInductionVariable(const Loop *L,
 
   // Emit code for it.
   BuilderType::InsertPointGuard Guard(Builder);
-  PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
-                                           L->getHeader()->begin()));
+  PHINode *V =
+      cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
 
   return V;
 }
@@ -1783,7 +1785,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
         if (OrigInc->getType() != IsomorphicInc->getType()) {
           Instruction *IP = nullptr;
           if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
-            IP = PN->getParent()->getFirstInsertionPt();
+            IP = &*PN->getParent()->getFirstInsertionPt();
           else
             IP = OrigInc->getNextNode();
@@ -1801,7 +1803,7 @@ unsigned SCEVExpander::replaceCongruentIVs(Loop *L, const DominatorTree *DT,
       ++NumElim;
       Value *NewIV = OrigPhiRef;
       if (OrigPhiRef->getType() != Phi->getType()) {
-        IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+        IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
         Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
         NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
       }

@@ -109,7 +109,7 @@ TransformImpl(const SCEV *S, Instruction *User, Value *OperandValToReplace) {
     SmallVector<const SCEV *, 8> Operands;
     const Loop *L = AR->getLoop();
     // The addrec conceptually uses its operands at loop entry.
-    Instruction *LUser = L->getHeader()->begin();
+    Instruction *LUser = &L->getHeader()->front();
     // Transform each operand.
     for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
          I != E; ++I) {

@@ -328,17 +328,17 @@ void SparseSolver::Solve(Function &F) {
 void SparseSolver::Print(Function &F, raw_ostream &OS) const {
   OS << "\nFUNCTION: " << F.getName() << "\n";
-  for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
-    if (!BBExecutable.count(BB))
+  for (auto &BB : F) {
+    if (!BBExecutable.count(&BB))
       OS << "INFEASIBLE: ";
     OS << "\t";
-    if (BB->hasName())
-      OS << BB->getName() << ":\n";
+    if (BB.hasName())
+      OS << BB.getName() << ":\n";
     else
       OS << "; anon bb\n";
-    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
-      LatticeFunc->PrintValue(getLatticeState(I), OS);
-      OS << *I << "\n";
+    for (auto &I : BB) {
+      LatticeFunc->PrintValue(getLatticeState(&I), OS);
+      OS << I << "\n";
     }
 
     OS << "\n";

@@ -455,7 +455,7 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
       for (BasicBlock::const_iterator I =
              std::next(BasicBlock::const_iterator(Q.CxtI)),
                                       IE(Inv); I != IE; ++I)
-        if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+        if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
           return false;
 
       return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -472,14 +472,14 @@ static bool isValidAssumeForContext(Value *V, const Query &Q) {
     // of the block); the common case is that the assume will come first.
     for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
          IE = Inv->getParent()->end(); I != IE; ++I)
-      if (I == Q.CxtI)
+      if (&*I == Q.CxtI)
         return true;
 
     // The context must come first...
     for (BasicBlock::const_iterator I =
            std::next(BasicBlock::const_iterator(Q.CxtI)),
                                     IE(Inv); I != IE; ++I)
-      if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+      if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
         return false;
 
     return !isEphemeralValueOf(Inv, Q.CxtI);
@@ -3635,16 +3635,17 @@ bool llvm::isKnownNotFullPoison(const Instruction *PoisonI) {
   SmallSet<const Value *, 16> YieldsPoison;
   YieldsPoison.insert(PoisonI);
 
-  for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
-       I = I->getNextNode()) {
-    if (I != PoisonI) {
-      const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
+  for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
+       I != E; ++I) {
+    if (&*I != PoisonI) {
+      const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
       if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
-      if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
+      if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+        return false;
     }
 
     // Mark poison that propagates from I through uses of I.
-    if (YieldsPoison.count(I)) {
+    if (YieldsPoison.count(&*I)) {
       for (const User *User : I->users()) {
         const Instruction *UserI = cast<Instruction>(User);
         if (UserI->getParent() == BB && propagatesFullPoison(UserI))