Teach ScalarEvolution about pointer address spaces
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@190425 91177308-0d34-0410-b5e6-96231b3b80d8
parent be5faa8a1e
commit 14807bd8c8
include/llvm/Analysis/ScalarEvolution.h

@@ -636,21 +636,24 @@ namespace llvm {
     const SCEV *getUnknown(Value *V);
     const SCEV *getCouldNotCompute();
 
-    /// getSizeOfExpr - Return an expression for sizeof on the given type.
+    /// getSizeOfExpr - Return an expression for sizeof AllocTy that is type
+    /// IntTy
     ///
-    const SCEV *getSizeOfExpr(Type *AllocTy);
+    const SCEV *getSizeOfExpr(Type *IntTy, Type *AllocTy);
 
-    /// getAlignOfExpr - Return an expression for alignof on the given type.
+    /// getAlignOfExpr - Return an expression for alignof AllocTy
     ///
     const SCEV *getAlignOfExpr(Type *AllocTy);
 
-    /// getOffsetOfExpr - Return an expression for offsetof on the given field.
+    /// getOffsetOfExpr - Return an expression for offsetof on the given field
+    /// with type IntTy
     ///
-    const SCEV *getOffsetOfExpr(StructType *STy, unsigned FieldNo);
+    const SCEV *getOffsetOfExpr(Type *IntTy, StructType *STy, unsigned FieldNo);
 
-    /// getOffsetOfExpr - Return an expression for offsetof on the given field.
+    /// getOffsetOfExpr - Return an expression for offsetof on the given field
+    /// that is type IntTy
     ///
-    const SCEV *getOffsetOfExpr(Type *CTy, Constant *FieldNo);
+    const SCEV *getOffsetOfExpr(Type *IntTy, Type *CTy, Constant *FieldNo);
 
     /// getNegativeSCEV - Return the SCEV object corresponding to -V.
     ///
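Note: the practical effect of threading IntTy through these signatures is
that the caller now chooses the integer width for size and offset
expressions, which lets it pick the pointer width of the address space it
is working in. A minimal sketch of the new calling convention (the helper
is hypothetical, not part of this patch; LLVM 3.4-era API assumed):

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // Compute sizeof(element) in the pointer width of PTy's address space.
    // Before this patch, getSizeOfExpr always used the addrspace(0) width
    // obtained from TD->getIntPtrType(getContext()).
    static const SCEV *elementSizeFor(ScalarEvolution &SE,
                                      const DataLayout *TD, PointerType *PTy) {
      Type *IntPtrTy = TD ? TD->getIntPtrType(PTy) // e.g. i16 for p1:16:16:16
                          : Type::getInt64Ty(PTy->getContext()); // fallback
      return SE.getSizeOfExpr(IntPtrTy, PTy->getElementType());
    }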
lib/Analysis/ScalarEvolution.cpp

@@ -2590,19 +2590,19 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
 }
 
-const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
+const SCEV *ScalarEvolution::getSizeOfExpr(Type *IntTy, Type *AllocTy) {
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
   if (TD)
-    return getConstant(TD->getIntPtrType(getContext()),
-                       TD->getTypeAllocSize(AllocTy));
+    return getConstant(IntTy, TD->getTypeAllocSize(AllocTy));
 
   Constant *C = ConstantExpr::getSizeOf(AllocTy);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
     if (Constant *Folded = ConstantFoldConstantExpression(CE, TD, TLI))
       C = Folded;
   Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  assert(Ty == IntTy && "Effective SCEV type doesn't match");
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
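Note: when no DataLayout is available, the body still falls back to
ConstantExpr::getSizeOf, which encodes the classic target-independent
"sizeof trick". A hedged sketch of roughly what that builds (illustrative
only, 3.4-era pre-opaque-pointer API; getSizeOf does this internally):

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // sizeof(AllocTy) as a target-independent constant expression:
    //   ptrtoint (AllocTy* getelementptr (AllocTy* null, i32 1) to IntTy)
    // The address of element 1 of a null AllocTy* equals the alloc size.
    static Constant *sizeOfTrick(Type *AllocTy, Type *IntTy) {
      Constant *Null = Constant::getNullValue(PointerType::getUnqual(AllocTy));
      Constant *One =
          ConstantInt::get(Type::getInt32Ty(AllocTy->getContext()), 1);
      Constant *Gep = ConstantExpr::getGetElementPtr(Null, One);
      return ConstantExpr::getPtrToInt(Gep, IntTy);
    }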
@@ -2615,14 +2615,16 @@ const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
-const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
+                                             StructType *STy,
                                              unsigned FieldNo) {
   // If we have DataLayout, we can bypass creating a target-independent
   // constant expression and then folding it back into a ConstantInt.
   // This is just a compile-time optimization.
-  if (TD)
-    return getConstant(TD->getIntPtrType(getContext()),
+  if (TD) {
+    return getConstant(IntTy,
                        TD->getStructLayout(STy)->getElementOffset(FieldNo));
+  }
 
   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))

@@ -2632,7 +2634,8 @@ const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
-const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
+const SCEV *ScalarEvolution::getOffsetOfExpr(Type *IntTy,
+                                             Type *CTy,
                                              Constant *FieldNo) {
   Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))

@@ -2703,12 +2706,15 @@ uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
   assert(isSCEVable(Ty) && "Type is not SCEVable!");
 
-  if (Ty->isIntegerTy())
+  if (Ty->isIntegerTy()) {
     return Ty;
+  }
 
   // The only other support type is pointer.
   assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
-  if (TD) return TD->getIntPtrType(getContext());
+
+  if (TD)
+    return TD->getIntPtrType(Ty);
 
   // Without DataLayout, conservatively assume pointers are 64-bit.
   return Type::getInt64Ty(getContext());
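Note: the getEffectiveSCEVType change is the heart of the patch:
TD->getIntPtrType(Ty) consults the address space of Ty, where the old
TD->getIntPtrType(getContext()) always returned the addrspace(0) pointer
width. A hedged illustration (not patch code, 3.4-era API) using the
datalayout from the new test below:

    #include "llvm/IR/DataLayout.h"
    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // Under "e-p:32:32:32-p1:16:16:16-...", the effective SCEV type of an
    // addrspace(1) pointer is now i16; getIntPtrType(Ctx) would give i32.
    static Type *scevTypeForAS1(const DataLayout &TD, LLVMContext &Ctx) {
      PointerType *P1 = PointerType::get(Type::getInt32Ty(Ctx),
                                         /*AddressSpace=*/1);
      return TD.getIntPtrType(P1);
    }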
@@ -3199,13 +3205,13 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
     if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
       // For a struct, add the member offset.
       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
-      const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
+      const SCEV *FieldOffset = getOffsetOfExpr(IntPtrTy, STy, FieldNo);
 
       // Add the field offset to the running total offset.
       TotalOffset = getAddExpr(TotalOffset, FieldOffset);
     } else {
       // For an array, add the element offset, explicitly scaled.
-      const SCEV *ElementSize = getSizeOfExpr(*GTI);
+      const SCEV *ElementSize = getSizeOfExpr(IntPtrTy, *GTI);
       const SCEV *IndexS = getSCEV(Index);
       // Getelementptr indices are signed.
       IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
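Note: createNodeForGEP decomposes a GEP into base plus field offsets plus
scaled array indices, and with this patch every term is built in the
IntPtrTy of the GEP's address space. A hedged sketch of that decomposition
for a simple two-level GEP (the helper is hypothetical):

    #include "llvm/Analysis/ScalarEvolution.h"
    using namespace llvm;

    // SCEV(gep P, I, FieldNo) = P + I*sizeof(ElemTy) + offsetof(STy, FieldNo)
    // Assumes Index has already been extended/truncated to IntPtrTy, as the
    // getTruncateOrSignExtend call above does.
    static const SCEV *gepAsSCEV(ScalarEvolution &SE, Type *IntPtrTy,
                                 const SCEV *Base, const SCEV *Index,
                                 Type *ElemTy, StructType *STy,
                                 unsigned FieldNo) {
      const SCEV *Scaled =
          SE.getMulExpr(Index, SE.getSizeOfExpr(IntPtrTy, ElemTy));
      const SCEV *Field = SE.getOffsetOfExpr(IntPtrTy, STy, FieldNo);
      return SE.getAddExpr(SE.getAddExpr(Base, Scaled), Field);
    }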
lib/Analysis/ScalarEvolutionExpander.cpp

@@ -407,6 +407,10 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // without the other.
   SplitAddRecs(Ops, Ty, SE);
 
+  Type *IntPtrTy = SE.TD
+                 ? SE.TD->getIntPtrType(PTy)
+                 : Type::getInt64Ty(PTy->getContext());
+
   // Descend down the pointer's type and attempt to convert the other
   // operands into GEP indices, at each level. The first index in a GEP
   // indexes into the array implied by the pointer operand; the rest of

@@ -417,7 +421,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // array indexing.
   SmallVector<const SCEV *, 8> ScaledOps;
   if (ElTy->isSized()) {
-    const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
+    const SCEV *ElSize = SE.getSizeOfExpr(IntPtrTy, ElTy);
     if (!ElSize->isZero()) {
       SmallVector<const SCEV *, 8> NewOps;
       for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
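Note: the expander now derives its index type from the GEP's pointer type
when DataLayout is present, and falls back to i64 otherwise, matching
getEffectiveSCEVType's fallback above. A hedged usage sketch (driver and
names are hypothetical, 3.4-era API):

    #include "llvm/Analysis/ScalarEvolutionExpander.h"
    using namespace llvm;

    // Expand "Ptr + Offset" back to IR; with this patch, index scaling for
    // an addrspace(1) pointer happens in that space's pointer width.
    static Value *expandPtrAdd(SCEVExpander &Exp, ScalarEvolution &SE,
                               Value *Ptr, const SCEV *Offset,
                               Instruction *IP) {
      const SCEV *S = SE.getAddExpr(SE.getSCEV(Ptr), Offset);
      return Exp.expandCodeFor(S, Ptr->getType(), IP); // GEP when possible
    }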
lib/Transforms/Scalar/IndVarSimplify.cpp

@@ -1492,7 +1492,7 @@ static Value *genLoopLimit(PHINode *IndVar, const SCEV *IVCount, Loop *L,
     assert(AR->getStart() == SE->getSCEV(GEPBase) && "bad loop counter");
     // We could handle pointer IVs other than i8*, but we need to compensate for
     // gep index scaling. See canExpandBackedgeTakenCount comments.
-    assert(SE->getSizeOfExpr(
+    assert(SE->getSizeOfExpr(IntegerType::getInt64Ty(IndVar->getContext()),
                cast<PointerType>(GEPBase->getType())->getElementType())->isOne()
            && "unit stride pointer IV must be i8*");
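Note: this assert encodes the invariant that a pointer IV can stand in for
the trip counter only when its element type has unit size, so that gep
indices and iteration counts coincide; the patch now passes the result
width (i64) explicitly. Restated as a hedged helper (hypothetical, not
patch code):

    #include "llvm/Analysis/ScalarEvolution.h"
    #include "llvm/IR/DerivedTypes.h"
    using namespace llvm;

    // True only for i8-like element types, i.e. unit-stride pointer IVs.
    static bool isUnitStridePointerIV(ScalarEvolution &SE, Value *GEPBase) {
      Type *ElTy = cast<PointerType>(GEPBase->getType())->getElementType();
      Type *I64 = IntegerType::getInt64Ty(GEPBase->getContext());
      return SE.getSizeOfExpr(I64, ElTy)->isOne();
    }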
New regression test (all lines added):

@@ -0,0 +1,68 @@
+; RUN: opt < %s -analyze -scalar-evolution | FileCheck %s
+
+; ScalarEvolution should be able to understand the loop and eliminate the casts.
+
+target datalayout = "e-p:32:32:32-p1:16:16:16-p2:8:8:8-p4:64:64:64-n16:32:64"
+
+; CHECK: {%d,+,4}<%bb> Exits: ((4 * (trunc i32 (-1 + %n) to i16)) + %d)
+
+
+define void @foo(i32 addrspace(1)* nocapture %d, i32 %n) nounwind {
+; CHECK: @foo
+entry:
+  %0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
+  br i1 %0, label %bb.nph, label %return
+
+bb.nph: ; preds = %entry
+  br label %bb
+
+bb: ; preds = %bb1, %bb.nph
+  %i.02 = phi i32 [ %5, %bb1 ], [ 0, %bb.nph ] ; <i32> [#uses=2]
+  %p.01 = phi i8 [ %4, %bb1 ], [ -1, %bb.nph ] ; <i8> [#uses=2]
+  %1 = sext i8 %p.01 to i32 ; <i32> [#uses=1]
+  %2 = sext i32 %i.02 to i64 ; <i64> [#uses=1]
+  %3 = getelementptr i32 addrspace(1)* %d, i64 %2 ; <i32*> [#uses=1]
+  store i32 %1, i32 addrspace(1)* %3, align 4
+  %4 = add i8 %p.01, 1 ; <i8> [#uses=1]
+  %5 = add i32 %i.02, 1 ; <i32> [#uses=2]
+  br label %bb1
+
+bb1: ; preds = %bb
+  %6 = icmp slt i32 %5, %n ; <i1> [#uses=1]
+  br i1 %6, label %bb, label %bb1.return_crit_edge
+
+bb1.return_crit_edge: ; preds = %bb1
+  br label %return
+
+return: ; preds = %bb1.return_crit_edge, %entry
+  ret void
+}
+
+define void @test(i8 addrspace(1)* %a, i32 %n) nounwind {
+; CHECK: @test
+entry:
+  %cmp1 = icmp sgt i32 %n, 0
+  br i1 %cmp1, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+  %tmp = zext i32 %n to i64
+  br label %for.body
+
+for.body: ; preds = %for.body, %for.body.lr.ph
+  %indvar = phi i64 [ %indvar.next, %for.body ], [ 0, %for.body.lr.ph ]
+  %arrayidx = getelementptr i8 addrspace(1)* %a, i64 %indvar
+  store i8 0, i8 addrspace(1)* %arrayidx, align 1
+  %indvar.next = add i64 %indvar, 1
+  %exitcond = icmp ne i64 %indvar.next, %tmp
+  br i1 %exitcond, label %for.body, label %for.cond.for.end_crit_edge
+
+for.cond.for.end_crit_edge: ; preds = %for.body
+  br label %for.end
+
+for.end: ; preds = %for.cond.for.end_crit_edge, %entry
+  ret void
+}
+
+; CHECK: Determining loop execution counts for: @test
+; CHECK-NEXT: backedge-taken count is
+; CHECK-NEXT: max backedge-taken count is -1
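Note: both test functions exercise the new address-space awareness. Under
this datalayout, addrspace(1) pointers are 16 bits wide, which is why the
first CHECK line expects %d's exit value to be computed through a trunc to
i16. A hedged way to see the declared widths (illustrative, not part of
the test; 3.4-era API):

    #include "llvm/IR/DataLayout.h"
    using namespace llvm;

    static void showWidths() {
      DataLayout TD("e-p:32:32:32-p1:16:16:16-p2:8:8:8-p4:64:64:64-n16:32:64");
      unsigned W0 = TD.getPointerSizeInBits(0); // 32: default address space
      unsigned W1 = TD.getPointerSizeInBits(1); // 16: hence the trunc to i16
      (void)W0; (void)W1;
    }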