diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index e0ac56c65e7..2da8e6fbd63 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -4658,7 +4658,8 @@ ScalarEvolution::ComputeLoadConstantCompareExitLimit(
 /// specified type, assuming that all operands were constants.
 static bool CanConstantFold(const Instruction *I) {
   if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
-      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
+      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I) ||
+      isa<LoadInst>(I))
     return true;
 
   if (const CallInst *CI = dyn_cast<CallInst>(I))
@@ -4751,13 +4752,19 @@ static Constant *EvaluateExpression(Value *V, const Loop *L,
                                     const TargetData *TD) {
   // Convenient constant check, but redundant for recursive calls.
   if (Constant *C = dyn_cast<Constant>(V)) return C;
+  Instruction *I = dyn_cast<Instruction>(V);
+  if (!I) return 0;
 
-  Instruction *I = cast<Instruction>(V);
   if (Constant *C = Vals.lookup(I)) return C;
 
-  assert(!isa<PHINode>(I) && "loop header phis should be mapped to constant");
-  assert(canConstantEvolve(I, L) && "cannot evaluate expression in this loop");
-  (void)L;
+  // An instruction inside the loop depends on a value outside the loop that we
+  // weren't given a mapping for, or a value such as a call inside the loop.
+  if (!canConstantEvolve(I, L)) return 0;
+
+  // An unmapped PHI can be due to a branch or another loop inside this loop,
+  // or due to this not being the initial iteration through a loop where we
+  // couldn't compute the evolution of this particular PHI last time.
+  if (isa<PHINode>(I)) return 0;
 
   std::vector<Constant*> Operands(I->getNumOperands());
 
@@ -4774,9 +4781,13 @@
     Operands[i] = C;
   }
 
-  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
+  if (CmpInst *CI = dyn_cast<CmpInst>(I))
     return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
                                            Operands[1], TD);
+  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
+    if (!LI->isVolatile())
+      return ConstantFoldLoadFromConstPtr(Operands[0], TD);
+  }
   return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD);
 }
 
@@ -4798,23 +4809,26 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
 
   Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
 
-  // FIXME: Nick's fix for PR11034 will seed constants for multiple header phis.
   DenseMap<Instruction *, Constant *> CurrentIterVals;
+  BasicBlock *Header = L->getHeader();
+  assert(PN->getParent() == Header && "Can't evaluate PHI not in loop header!");
 
   // Since the loop is canonicalized, the PHI node must have two entries.  One
   // entry must be a constant (coming in from outside of the loop), and the
   // second must be derived from the same PHI.
   bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
-  Constant *StartCST =
-    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
-  if (StartCST == 0)
-    return RetVal = 0;  // Must be a constant.
-  CurrentIterVals[PN] = StartCST;
+  PHINode *PHI = 0;
+  for (BasicBlock::iterator I = Header->begin();
+       (PHI = dyn_cast<PHINode>(I)); ++I) {
+    Constant *StartCST =
+      dyn_cast<Constant>(PHI->getIncomingValue(!SecondIsBackedge));
+    if (StartCST == 0) continue;
+    CurrentIterVals[PHI] = StartCST;
+  }
+  if (!CurrentIterVals.count(PN))
+    return RetVal = 0;
 
   Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
-  if (getConstantEvolvingPHI(BEValue, L) != PN &&
-      !isa<Constant>(BEValue))
-    return RetVal = 0;  // Not derived from same PHI.
 
   // Execute the loop symbolically to determine the exit value.
   if (BEs.getActiveBits() >= 32)
@@ -4826,15 +4840,29 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
     if (IterationNum == NumIterations)
       return RetVal = CurrentIterVals[PN];  // Got exit value!
 
-    // Compute the value of the PHI node for the next iteration.
+    // Compute the value of the PHIs for the next iteration.
     // EvaluateExpression adds non-phi values to the CurrentIterVals map.
+    DenseMap<Instruction *, Constant *> NextIterVals;
     Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
     if (NextPHI == CurrentIterVals[PN])
      return RetVal = NextPHI;  // Stopped evolving!
     if (NextPHI == 0)
       return 0;        // Couldn't evaluate!
-    DenseMap<Instruction *, Constant *> NextIterVals;
     NextIterVals[PN] = NextPHI;
+
+    // Also evaluate the other PHI nodes.  However, we don't get to stop if we
+    // cease to be able to evaluate one of them or if they stop evolving,
+    // because that doesn't necessarily prevent us from computing PN.
+    for (DenseMap<Instruction *, Constant *>::const_iterator
+           I = CurrentIterVals.begin(), E = CurrentIterVals.end(); I != E; ++I){
+      PHINode *PHI = dyn_cast<PHINode>(I->first);
+      if (!PHI || PHI == PN) continue;
+      Constant *&NextPHI = NextIterVals[PHI];
+      if (NextPHI) continue;    // Already computed!
+
+      Value *BEValue = PHI->getIncomingValue(SecondIsBackedge);
+      NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
+    }
     CurrentIterVals.swap(NextIterVals);
   }
 }
@@ -4844,9 +4872,9 @@ ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
 /// try to evaluate a few iterations of the loop until we get the exit
 /// condition gets a value of ExitWhen (true or false).  If we cannot
 /// evaluate the trip count of the loop, return getCouldNotCompute().
-const SCEV * ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
-                                                           Value *Cond,
-                                                           bool ExitWhen) {
+const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
+                                                          Value *Cond,
+                                                          bool ExitWhen) {
   PHINode *PN = getConstantEvolvingPHI(Cond, L);
   if (PN == 0) return getCouldNotCompute();
 
@@ -4921,6 +4949,98 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
   return C;
 }
 
+/// This builds up a Constant using the ConstantExpr interface.  That way, we
+/// will return Constants for objects which aren't represented by a
+/// SCEVConstant, because SCEVConstant is restricted to ConstantInt.
+/// Returns NULL if the SCEV isn't representable as a Constant.
+static Constant *BuildConstantFromSCEV(const SCEV *V) {
+  switch (V->getSCEVType()) {
+    default:  // TODO: smax, umax.
+    case scCouldNotCompute:
+    case scAddRecExpr:
+      break;
+    case scConstant:
+      return cast<SCEVConstant>(V)->getValue();
+    case scUnknown:
+      return dyn_cast<Constant>(cast<SCEVUnknown>(V)->getValue());
+    case scSignExtend: {
+      const SCEVSignExtendExpr *SS = cast<SCEVSignExtendExpr>(V);
+      if (Constant *CastOp = BuildConstantFromSCEV(SS->getOperand()))
+        return ConstantExpr::getSExt(CastOp, SS->getType());
+      break;
+    }
+    case scZeroExtend: {
+      const SCEVZeroExtendExpr *SZ = cast<SCEVZeroExtendExpr>(V);
+      if (Constant *CastOp = BuildConstantFromSCEV(SZ->getOperand()))
+        return ConstantExpr::getZExt(CastOp, SZ->getType());
+      break;
+    }
+    case scTruncate: {
+      const SCEVTruncateExpr *ST = cast<SCEVTruncateExpr>(V);
+      if (Constant *CastOp = BuildConstantFromSCEV(ST->getOperand()))
+        return ConstantExpr::getTrunc(CastOp, ST->getType());
+      break;
+    }
+    case scAddExpr: {
+      const SCEVAddExpr *SA = cast<SCEVAddExpr>(V);
+      if (Constant *C = BuildConstantFromSCEV(SA->getOperand(0))) {
+        if (C->getType()->isPointerTy())
+          C = ConstantExpr::getBitCast(C, Type::getInt8PtrTy(C->getContext()));
+        for (unsigned i = 1, e = SA->getNumOperands(); i != e; ++i) {
+          Constant *C2 = BuildConstantFromSCEV(SA->getOperand(i));
+          if (!C2) return 0;
+
+          // First pointer!
+          if (!C->getType()->isPointerTy() && C2->getType()->isPointerTy()) {
+            std::swap(C, C2);
+            // The offsets have been converted to bytes.  We can add bytes to an
+            // i8* by GEP with the byte count in the first index.
+            C = ConstantExpr::getBitCast(C,Type::getInt8PtrTy(C->getContext()));
+          }
+
+          // Don't bother trying to sum two pointers. We probably can't
+          // statically compute a load that results from it anyway.
+          if (C2->getType()->isPointerTy())
+            return 0;
+
+          if (C->getType()->isPointerTy()) {
+            if (cast<PointerType>(C->getType())->getElementType()->isStructTy())
+              C2 = ConstantExpr::getIntegerCast(
+                  C2, Type::getInt32Ty(C->getContext()), true);
+            C = ConstantExpr::getGetElementPtr(C, C2);
+          } else
+            C = ConstantExpr::getAdd(C, C2);
+        }
+        return C;
+      }
+      break;
+    }
+    case scMulExpr: {
+      const SCEVMulExpr *SM = cast<SCEVMulExpr>(V);
+      if (Constant *C = BuildConstantFromSCEV(SM->getOperand(0))) {
+        // Don't bother with pointers at all.
+        if (C->getType()->isPointerTy()) return 0;
+        for (unsigned i = 1, e = SM->getNumOperands(); i != e; ++i) {
+          Constant *C2 = BuildConstantFromSCEV(SM->getOperand(i));
+          if (!C2 || C2->getType()->isPointerTy()) return 0;
+          C = ConstantExpr::getMul(C, C2);
+        }
+        return C;
+      }
+      break;
+    }
+    case scUDivExpr: {
+      const SCEVUDivExpr *SU = cast<SCEVUDivExpr>(V);
+      if (Constant *LHS = BuildConstantFromSCEV(SU->getLHS()))
+        if (Constant *RHS = BuildConstantFromSCEV(SU->getRHS()))
+          if (LHS->getType() == RHS->getType())
+            return ConstantExpr::getUDiv(LHS, RHS);
+      break;
+    }
+  }
+  return 0;
+}
+
 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
   if (isa<SCEVConstant>(V)) return V;
 
@@ -4973,11 +5093,7 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
           const SCEV *OpV = getSCEVAtScope(OrigV, L);
           MadeImprovement |= OrigV != OpV;
 
-          Constant *C = 0;
-          if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
-            C = SC->getValue();
-          if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
-            C = dyn_cast<Constant>(SU->getValue());
+          Constant *C = BuildConstantFromSCEV(OpV);
           if (!C) return V;
           if (C->getType() != Op->getType())
             C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
@@ -4993,7 +5109,10 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
         if (const CmpInst *CI = dyn_cast<CmpInst>(I))
           C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                               Operands[0], Operands[1], TD);
-        else
+        else if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
+          if (!LI->isVolatile())
+            C = ConstantFoldLoadFromConstPtr(Operands[0], TD);
+        } else
           C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                        Operands, TD);
         if (!C) return V;
diff --git a/test/Analysis/ScalarEvolution/load.ll b/test/Analysis/ScalarEvolution/load.ll
new file mode 100644
index 00000000000..203515285be
--- /dev/null
+++ b/test/Analysis/ScalarEvolution/load.ll
@@ -0,0 +1,33 @@
+; RUN: opt -analyze -scalar-evolution < %s 2>&1 | FileCheck %s
+; PR11034
+
+target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:32:32-n8:16:32"
+target triple = "i386-pc-linux-gnu"
+
+@arr1 = internal unnamed_addr constant [50 x i32] [i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50], align 4
+@arr2 = internal unnamed_addr constant [50 x i32] [i32 49, i32 48, i32 47, i32 46, i32 45, i32 44, i32 43, i32 42, i32 41, i32 40, i32 39, i32 38, i32 37, i32 36, i32 35, i32 34, i32 33, i32 32, i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0], align 4
+
+define i32 @test1() nounwind readnone {
+; CHECK: test1
+entry:
+  br label %for.body
+
+for.body:                                         ; preds = %entry, %for.body
+  %sum.04 = phi i32 [ 0, %entry ], [ %add2, %for.body ]
+; CHECK: --> %sum.04{{ *}}Exits: 2450
+  %i.03 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
+  %arrayidx = getelementptr inbounds [50 x i32]* @arr1, i32 0, i32 %i.03
+  %0 = load i32* %arrayidx, align 4
+; CHECK: --> %0{{ *}}Exits: 50
+  %arrayidx1 = getelementptr inbounds [50 x i32]* @arr2, i32 0, i32 %i.03
+  %1 = load i32* %arrayidx1, align 4
+; CHECK: --> %1{{ *}}Exits: 0
+  %add = add i32 %0, %sum.04
+  %add2 = add i32 %add, %1
+  %inc = add nsw i32 %i.03, 1
+  %cmp = icmp eq i32 %inc, 50
+  br i1 %cmp, label %for.end, label %for.body
+
+for.end:                                          ; preds = %for.body
+  ret i32 %add2
+}