Generalize target-independent folding rules for sizeof to handle more
cases, and implement target-independent folding rules for alignof and
offsetof. Also, reassociate reassociative operators when it leads to
more folding.

Generalize ScalarEvolution's isOffsetOf to recognize offsetof on arrays.
Rename getAllocSizeExpr to getSizeOfExpr, and getFieldOffsetExpr to
getOffsetOfExpr, for consistency with analogous ConstantExpr routines.

Make the target-dependent folder promote GEP array indices to
pointer-sized integers, to make implicit casting explicit and exposed
to subsequent folding.

And add a bunch of testcases for this new functionality, and a bunch of
related existing functionality.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@94987 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent d569561835
commit 4f8eea82d8
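Background for the folds below: LLVM has no first-class sizeof, alignof, or offsetof constants. ConstantExpr::getSizeOf, getAlignOf, and getOffsetOf lower them to getelementptr-on-null idioms, and everything in this commit works by recognizing and simplifying those patterns. A minimal sketch of the three idioms in the LLVM IR of this era (the global names are illustrative, not part of the commit):

  @sizeof.double  = constant i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64)
  @alignof.double = constant i64 ptrtoint (double* getelementptr ({ i1, double }* null, i64 0, i32 1) to i64)
  @offsetof.f1    = constant i64 ptrtoint (double* getelementptr ({ i32, double }* null, i64 0, i32 1) to i64)

sizeof(T) is the address of element 1 of a T* based at null; alignof(T) is the offset of a T field forced to be padded out to its alignment; offsetof is the address of the field itself.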
@@ -452,11 +452,25 @@ namespace llvm {
     const SCEV *getUMaxExpr(SmallVectorImpl<const SCEV *> &Operands);
     const SCEV *getSMinExpr(const SCEV *LHS, const SCEV *RHS);
     const SCEV *getUMinExpr(const SCEV *LHS, const SCEV *RHS);
-    const SCEV *getFieldOffsetExpr(const StructType *STy, unsigned FieldNo);
-    const SCEV *getAllocSizeExpr(const Type *AllocTy);
     const SCEV *getUnknown(Value *V);
     const SCEV *getCouldNotCompute();
 
+    /// getSizeOfExpr - Return an expression for sizeof on the given type.
+    ///
+    const SCEV *getSizeOfExpr(const Type *AllocTy);
+
+    /// getAlignOfExpr - Return an expression for alignof on the given type.
+    ///
+    const SCEV *getAlignOfExpr(const Type *AllocTy);
+
+    /// getOffsetOfExpr - Return an expression for offsetof on the given field.
+    ///
+    const SCEV *getOffsetOfExpr(const StructType *STy, unsigned FieldNo);
+
+    /// getOffsetOfExpr - Return an expression for offsetof on the given field.
+    ///
+    const SCEV *getOffsetOfExpr(const Type *CTy, Constant *FieldNo);
+
     /// getNegativeSCEV - Return the SCEV object corresponding to -V.
     ///
     const SCEV *getNegativeSCEV(const SCEV *V);
@@ -534,7 +534,7 @@ namespace llvm {
       /// where it isn't absolutely required for these to succeed.
       bool isSizeOf(const Type *&AllocTy) const;
       bool isAlignOf(const Type *&AllocTy) const;
-      bool isOffsetOf(const StructType *&STy, Constant *&FieldNo) const;
+      bool isOffsetOf(const Type *&STy, Constant *&FieldNo) const;
 
       virtual bool isLoopInvariant(const Loop *L) const;
       virtual bool hasComputableLoopEvolution(const Loop *QL) const {
@@ -517,6 +517,42 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
   return 0;
 }
 
+/// CastGEPIndices - If array indices are not pointer-sized integers,
+/// explicitly cast them so that they aren't implicitly cast by the
+/// getelementptr.
+static Constant *CastGEPIndices(Constant *const *Ops, unsigned NumOps,
+                                const Type *ResultTy,
+                                const TargetData *TD) {
+  if (!TD) return 0;
+  const Type *IntPtrTy = TD->getIntPtrType(ResultTy->getContext());
+
+  bool Any = false;
+  SmallVector<Constant*, 32> NewIdxs;
+  for (unsigned i = 1; i != NumOps; ++i) {
+    if ((i == 1 ||
+         !isa<StructType>(GetElementPtrInst::getIndexedType(Ops[0]->getType(),
+                                        reinterpret_cast<Value *const *>(Ops+1),
+                                                            i-1))) &&
+        Ops[i]->getType() != IntPtrTy) {
+      Any = true;
+      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
+                                                                      true,
+                                                                      IntPtrTy,
+                                                                      true),
+                                              Ops[i], IntPtrTy));
+    } else
+      NewIdxs.push_back(Ops[i]);
+  }
+  if (!Any) return 0;
+
+  Constant *C =
+    ConstantExpr::getGetElementPtr(Ops[0], &NewIdxs[0], NewIdxs.size());
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
+      C = Folded;
+  return C;
+}
+
 /// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
 /// constant expression, do so.
 static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
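The effect of CastGEPIndices is easiest to see on a constant like the @O testcase further down. On a 64-bit target, the i32 array indices are rewritten to the pointer width before folding; struct field indices are deliberately skipped, since those are required to be i32. A sketch (not tool output):

  getelementptr ([2 x i64]* null, i32 0, i32 1)
  ; after index promotion:
  getelementptr ([2 x i64]* null, i64 0, i64 1)
  ; which the target-dependent folder then reduces to inttoptr (i64 8 to i64*)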
@@ -810,6 +846,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
   case Instruction::ShuffleVector:
     return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
   case Instruction::GetElementPtr:
+    if (Constant *C = CastGEPIndices(Ops, NumOps, DestTy, TD))
+      return C;
     if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD))
       return C;
 
@@ -347,26 +347,6 @@ const Type *SCEVUnknown::getType() const {
   return V->getType();
 }
 
-bool SCEVUnknown::isOffsetOf(const StructType *&STy, Constant *&FieldNo) const {
-  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
-    if (VCE->getOpcode() == Instruction::PtrToInt)
-      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
-        if (CE->getOpcode() == Instruction::GetElementPtr)
-          if (CE->getOperand(0)->isNullValue()) {
-            const Type *Ty =
-              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
-            if (const StructType *StructTy = dyn_cast<StructType>(Ty))
-              if (CE->getNumOperands() == 3 &&
-                  CE->getOperand(1)->isNullValue()) {
-                STy = StructTy;
-                FieldNo = CE->getOperand(2);
-                return true;
-              }
-          }
-
-  return false;
-}
-
 bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
   if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
     if (VCE->getOpcode() == Instruction::PtrToInt)
@@ -395,7 +375,8 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
           const Type *Ty =
             cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
           if (const StructType *STy = dyn_cast<StructType>(Ty))
-            if (CE->getNumOperands() == 3 &&
+            if (!STy->isPacked() &&
+                CE->getNumOperands() == 3 &&
                 CE->getOperand(1)->isNullValue()) {
               if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
                 if (CI->isOne() &&
@@ -410,6 +391,28 @@ bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
   return false;
 }
 
+bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+  if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+    if (VCE->getOpcode() == Instruction::PtrToInt)
+      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+        if (CE->getOpcode() == Instruction::GetElementPtr &&
+            CE->getNumOperands() == 3 &&
+            CE->getOperand(0)->isNullValue() &&
+            CE->getOperand(1)->isNullValue()) {
+          const Type *Ty =
+            cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+          // Ignore vector types here so that ScalarEvolutionExpander doesn't
+          // emit getelementptrs that index into vectors.
+          if (isa<StructType>(Ty) || isa<ArrayType>(Ty)) {
+            CTy = Ty;
+            FieldNo = CE->getOperand(2);
+            return true;
+          }
+        }
+
+  return false;
+}
+
 void SCEVUnknown::print(raw_ostream &OS) const {
   const Type *AllocTy;
   if (isSizeOf(AllocTy)) {
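The generalized matcher now accepts array as well as struct base types. For example, it recognizes

  ptrtoint (double* getelementptr ([13 x double]* null, i64 0, i32 11) to i64)

as offsetof-like, yielding CTy = [13 x double] and FieldNo = i32 11; this is the same shape as the @d testcase below.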
@@ -421,10 +424,10 @@ void SCEVUnknown::print(raw_ostream &OS) const {
     return;
   }
 
-  const StructType *STy;
+  const Type *CTy;
   Constant *FieldNo;
-  if (isOffsetOf(STy, FieldNo)) {
-    OS << "offsetof(" << *STy << ", ";
+  if (isOffsetOf(CTy, FieldNo)) {
+    OS << "offsetof(" << *CTy << ", ";
     WriteAsOperand(OS, FieldNo, false);
     OS << ")";
     return;
@@ -2231,8 +2234,24 @@ const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
   return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
 }
 
-const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
-                                                unsigned FieldNo) {
+const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+  Constant *C = ConstantExpr::getSizeOf(AllocTy);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    C = ConstantFoldConstantExpression(CE, TD);
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
+
+const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+  Constant *C = ConstantExpr::getAlignOf(AllocTy);
+  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+    C = ConstantFoldConstantExpression(CE, TD);
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
+
+const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+                                             unsigned FieldNo) {
   Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
     C = ConstantFoldConstantExpression(CE, TD);
@@ -2240,11 +2259,12 @@ const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
-const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
-  Constant *C = ConstantExpr::getSizeOf(AllocTy);
+const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+                                             Constant *FieldNo) {
+  Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
     C = ConstantFoldConstantExpression(CE, TD);
-  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+  const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
   return getTruncateOrZeroExtend(getSCEV(C), Ty);
 }
 
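With these helpers plus the generalized isOffsetOf, SCEV can render such constants symbolically instead of leaving them opaque. The @fe testcase below shows the printed form:

  ; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64) to i64
  ; SCEV: --> offsetof({ double, float, double, double }, 2)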
@@ -2695,7 +2715,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
       // For a struct, add the member offset.
       unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
       TotalOffset = getAddExpr(TotalOffset,
-                               getFieldOffsetExpr(STy, FieldNo),
+                               getOffsetOfExpr(STy, FieldNo),
                                /*HasNUW=*/false, /*HasNSW=*/InBounds);
     } else {
       // For an array, add the element offset, explicitly scaled.
@@ -2704,7 +2724,7 @@ const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
       // Getelementptr indices are signed.
       LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
       // Lower "inbounds" GEPs to NSW arithmetic.
-      LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI),
+      LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
                                /*HasNUW=*/false, /*HasNSW=*/InBounds);
       TotalOffset = getAddExpr(TotalOffset, LocalOffset,
                                /*HasNUW=*/false, /*HasNSW=*/InBounds);
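With getSizeOfExpr, array GEPs still get a usable SCEV even without targetdata, since the element size stays symbolic. A hedged sketch (%p and %i are illustrative, and the exact printed form may vary):

  define double* @idx(double* %p, i64 %i) {
    %a = getelementptr inbounds double* %p, i64 %i
    ret double* %a
  }
  ; without targetdata, ScalarEvolution models %a roughly as
  ;   ((sizeof(double) * %i) + %p)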
@@ -3197,7 +3217,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
   case Instruction::Shl:
     // Turn shift left of a constant amount into a multiply.
     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
-      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
       Constant *X = ConstantInt::get(getContext(),
         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
@@ -3207,7 +3227,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
   case Instruction::LShr:
     // Turn logical shift right of a constant into an unsigned divide.
    if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
-      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+      uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
       Constant *X = ConstantInt::get(getContext(),
         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
       return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
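For context on the two one-line changes above: both cases model constant shifts arithmetically, so SCEV treats

  %m = shl i64 %x, 3    ; as (8 * %x)
  %d = lshr i64 %x, 3   ; as (%x /u 8)

(the printed forms here are a sketch, not verified output).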
@@ -3248,10 +3268,10 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
       return getSCEV(U->getOperand(0));
     break;
 
-  // It's tempting to handle inttoptr and ptrtoint, however this can
-  // lead to pointer expressions which cannot be expanded to GEPs
-  // (because they may overflow). For now, the only pointer-typed
-  // expressions we handle are GEPs and address literals.
+  // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
+  // lead to pointer expressions which cannot safely be expanded to GEPs,
+  // because ScalarEvolution doesn't respect the GEP aliasing rules when
+  // simplifying integer expressions.
 
   case Instruction::GetElementPtr:
     return createNodeForGEP(cast<GEPOperator>(U));
@@ -369,7 +369,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
   // array indexing.
   SmallVector<const SCEV *, 8> ScaledOps;
   if (ElTy->isSized()) {
-    const SCEV *ElSize = SE.getAllocSizeExpr(ElTy);
+    const SCEV *ElSize = SE.getSizeOfExpr(ElTy);
     if (!ElSize->isZero()) {
       SmallVector<const SCEV *, 8> NewOps;
       for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
@@ -433,9 +433,9 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
     // appropriate struct type.
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Ops[i])) {
-        const StructType *StructTy;
+        const Type *CTy;
         Constant *FieldNo;
-        if (U->isOffsetOf(StructTy, FieldNo) && StructTy == STy) {
+        if (U->isOffsetOf(CTy, FieldNo) && CTy == STy) {
           GepIndices.push_back(FieldNo);
           ElTy =
             STy->getTypeAtIndex(cast<ConstantInt>(FieldNo)->getZExtValue());
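On the expander side, an offsetof constant whose base type matches the struct being indexed becomes a GEP field index rather than raw byte arithmetic. A hedged sketch of the intent: when expanding an add of offsetof({ i1, double }, 1) against a pointer %p of type { i1, double }*, the expander can emit

  %f = getelementptr { i1, double }* %p, i64 0, i32 1

instead of casting %p and adding a byte offset.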
@@ -323,6 +323,116 @@ static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
   }
 }
 
+/// getFoldedSizeOf - Return a ConstantExpr with type DestTy for sizeof
+/// on Ty, with any known factors factored out. If Folded is false,
+/// return null if no factoring was possible, to avoid endlessly
+/// bouncing an unfoldable expression back into the top-level folder.
+///
+static Constant *getFoldedSizeOf(const Type *Ty, const Type *DestTy,
+                                 bool Folded) {
+  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+    Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
+    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
+    return ConstantExpr::getNUWMul(E, N);
+  }
+  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+    Constant *N = ConstantInt::get(DestTy, VTy->getNumElements());
+    Constant *E = getFoldedSizeOf(VTy->getElementType(), DestTy, true);
+    return ConstantExpr::getNUWMul(E, N);
+  }
+  if (const StructType *STy = dyn_cast<StructType>(Ty))
+    if (!STy->isPacked()) {
+      unsigned NumElems = STy->getNumElements();
+      // An empty struct has size zero.
+      if (NumElems == 0)
+        return ConstantExpr::getNullValue(DestTy);
+      // Check for a struct with all members having the same type.
+      const Type *MemberTy = STy->getElementType(0);
+      bool AllSame = true;
+      for (unsigned i = 1; i != NumElems; ++i)
+        if (MemberTy != STy->getElementType(i)) {
+          AllSame = false;
+          break;
+        }
+      if (AllSame) {
+        Constant *N = ConstantInt::get(DestTy, NumElems);
+        Constant *E = getFoldedSizeOf(MemberTy, DestTy, true);
+        return ConstantExpr::getNUWMul(E, N);
+      }
+    }
+
+  // If there's no interesting folding happening, bail so that we don't create
+  // a constant that looks like it needs folding but really doesn't.
+  if (!Folded)
+    return 0;
+
+  // Base case: Get a regular sizeof expression.
+  Constant *C = ConstantExpr::getSizeOf(Ty);
+  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+                                                    DestTy, false),
+                            C, DestTy);
+  return C;
+}
+
+/// getFoldedOffsetOf - Return a ConstantExpr with type DestTy for offsetof
+/// on Ty and FieldNo, with any known factors factored out. If Folded is false,
+/// return null if no factoring was possible, to avoid endlessly
+/// bouncing an unfoldable expression back into the top-level folder.
+///
+static Constant *getFoldedOffsetOf(const Type *Ty, Constant *FieldNo,
+                                   const Type *DestTy,
+                                   bool Folded) {
+  if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
+    Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
+                                                                DestTy, false),
+                                        FieldNo, DestTy);
+    Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
+    return ConstantExpr::getNUWMul(E, N);
+  }
+  if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
+    Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
+                                                                DestTy, false),
+                                        FieldNo, DestTy);
+    Constant *E = getFoldedSizeOf(VTy->getElementType(), DestTy, true);
+    return ConstantExpr::getNUWMul(E, N);
+  }
+  if (const StructType *STy = dyn_cast<StructType>(Ty))
+    if (!STy->isPacked()) {
+      unsigned NumElems = STy->getNumElements();
+      // An empty struct has no members.
+      if (NumElems == 0)
+        return 0;
+      // Check for a struct with all members having the same type.
+      const Type *MemberTy = STy->getElementType(0);
+      bool AllSame = true;
+      for (unsigned i = 1; i != NumElems; ++i)
+        if (MemberTy != STy->getElementType(i)) {
+          AllSame = false;
+          break;
+        }
+      if (AllSame) {
+        Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo,
+                                                                    false,
+                                                                    DestTy,
+                                                                    false),
+                                            FieldNo, DestTy);
+        Constant *E = getFoldedSizeOf(MemberTy, DestTy, true);
+        return ConstantExpr::getNUWMul(E, N);
+      }
+    }
+
+  // If there's no interesting folding happening, bail so that we don't create
+  // a constant that looks like it needs folding but really doesn't.
+  if (!Folded)
+    return 0;
+
+  // Base case: Get a regular offsetof expression.
+  Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
+  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+                                                    DestTy, false),
+                            C, DestTy);
+  return C;
+}
+
 Constant *llvm::ConstantFoldCastInstruction(LLVMContext &Context,
                                             unsigned opc, Constant *V,
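getFoldedSizeOf is what lets the target-independent folder make progress on aggregate sizeofs with no targetdata at all, by factoring out known element counts. A sketch of the key step, in IR form (mirrors the mul nuw results in the testcases below):

  ptrtoint ([13 x double]* getelementptr ([13 x double]* null, i32 1) to i64)
  ; i.e. sizeof([13 x double]), factors into 13 * sizeof(double):
  mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 13)

Note the nuw, matching the getNUWMul calls above.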
@@ -418,33 +528,59 @@ Constant *llvm::ConstantFoldCastInstruction(LLVMContext &Context,
     // Is it a null pointer value?
     if (V->isNullValue())
       return ConstantInt::get(DestTy, 0);
-    // If this is a sizeof of an array or vector, pull out a multiplication
-    // by the element size to expose it to subsequent folding.
+    // If this is a sizeof-like expression, pull out multiplications by
+    // known factors to expose them to subsequent folding. If it's an
+    // alignof-like expression, factor out known factors.
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
       if (CE->getOpcode() == Instruction::GetElementPtr &&
-          CE->getNumOperands() == 2 &&
-          CE->getOperand(0)->isNullValue())
-        if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
-          if (CI->isOne()) {
-            const Type *Ty =
-              cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
-            if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
-              Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
-              Constant *E = ConstantExpr::getSizeOf(ATy->getElementType());
-              E = ConstantExpr::getCast(CastInst::getCastOpcode(E, false,
-                                                                DestTy, false),
-                                        E, DestTy);
-              return ConstantExpr::getMul(N, E);
-            }
-            if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
-              Constant *N = ConstantInt::get(DestTy, VTy->getNumElements());
-              Constant *E = ConstantExpr::getSizeOf(VTy->getElementType());
-              E = ConstantExpr::getCast(CastInst::getCastOpcode(E, false,
-                                                                DestTy, false),
-                                        E, DestTy);
-              return ConstantExpr::getMul(N, E);
-            }
-          }
+          CE->getOperand(0)->isNullValue()) {
+        const Type *Ty =
+          cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+        if (CE->getNumOperands() == 2) {
+          // Handle a sizeof-like expression.
+          Constant *Idx = CE->getOperand(1);
+          bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne();
+          if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) {
+            Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true,
+                                                                DestTy, false),
+                                        Idx, DestTy);
+            return ConstantExpr::getMul(C, Idx);
+          }
+        } else if (CE->getNumOperands() == 3 &&
+                   CE->getOperand(1)->isNullValue()) {
+          // Handle an alignof-like expression.
+          if (const StructType *STy = dyn_cast<StructType>(Ty))
+            if (!STy->isPacked()) {
+              ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
+              if (CI->isOne() &&
+                  STy->getNumElements() == 2 &&
+                  STy->getElementType(0)->isInteger(1)) {
+                // The alignment of an array is equal to the alignment of the
+                // array element. Note that this is not always true for vectors.
+                if (const ArrayType *ATy =
+                      dyn_cast<ArrayType>(STy->getElementType(1))) {
+                  Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
+                  C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
+                                                                    DestTy,
+                                                                    false),
+                                            C, DestTy);
+                  return C;
+                }
+                // Packed structs always have an alignment of 1.
+                if (const StructType *InnerSTy =
+                      dyn_cast<StructType>(STy->getElementType(1)))
+                  if (InnerSTy->isPacked())
+                    return ConstantInt::get(DestTy, 1);
+              }
+            }
+          // Handle an offsetof-like expression.
+          if (isa<StructType>(Ty) || isa<ArrayType>(Ty) || isa<VectorType>(Ty)) {
+            if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
+                                                DestTy, false))
+              return C;
+          }
+        }
+      }
+    // Other pointer types cannot be cast.
     return 0;
   case Instruction::UIToFP:
@@ -1156,10 +1292,19 @@ Constant *llvm::ConstantFoldBinaryInstruction(LLVMContext &Context,
       }
     }
 
-  if (isa<ConstantExpr>(C1)) {
+  if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
     // There are many possible foldings we could do here. We should probably
     // at least fold add of a pointer with an integer into the appropriate
     // getelementptr. This will improve alias analysis a bit.
+
+    // Given ((a + b) + c), if (b + c) folds to something interesting, return
+    // (a + (b + c)).
+    if (Instruction::isAssociative(Opcode, C1->getType()) &&
+        CE1->getOpcode() == Opcode) {
+      Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
+      if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
+        return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
+    }
   } else if (isa<ConstantExpr>(C2)) {
     // If C2 is a constant expr and C1 isn't, flop them around and fold the
     // other way if possible.
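The reassociation fold lets constant factors combine across nesting even when one operand never folds. A sketch, with S standing for an unfoldable sizeof constant:

  mul (i64 mul (i64 S, i64 5), i64 3)
  ; (5 * 3) folds to a plain constant, so the whole thing becomes:
  mul (i64 S, i64 15)

This is how the @a testcase below collapses the factors 3, 5, 11, and 14 into the single multiplier 2310.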
@@ -2004,7 +2149,7 @@ Constant *llvm::ConstantFoldGetElementPtr(LLVMContext &Context,
   }
 
   // Implement folding of:
-  //    int* getelementptr ([2 x int]* cast ([3 x int]* %X to [2 x int]*),
+  //    int* getelementptr ([2 x int]* bitcast ([3 x int]* %X to [2 x int]*),
   //                        long 0, long 0)
   // To: int* getelementptr ([3 x int]* %X, long 0, long 0)
   //
@@ -1,9 +1,32 @@
+; "PLAIN" - No optimizations. This tests the target-independent
+; constant folder.
 ; RUN: opt -S -o - < %s | FileCheck --check-prefix=PLAIN %s
+
+; "OPT" - Optimizations but no targetdata. This tests target-independent
+; folding in the optimizers.
 ; RUN: opt -S -o - -instcombine -globalopt < %s | FileCheck --check-prefix=OPT %s
 
+; "TO" - Optimizations and targetdata. This tests target-dependent
+; folding in the optimizers.
+; RUN: opt -S -o - -instcombine -globalopt -default-data-layout="e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64" < %s | FileCheck --check-prefix=TO %s
+
+; "SCEV" - ScalarEvolution but no targetdata.
+; RUN: opt -analyze -scalar-evolution < %s | FileCheck --check-prefix=SCEV %s
+
+; ScalarEvolution with targetdata isn't interesting on these testcases
+; because ScalarEvolution doesn't attempt to duplicate all of instcombine's
+; and the constant folders' folding.
+
+; PLAIN: %0 = type { i1, double }
+; PLAIN: %1 = type { double, float, double, double }
+; PLAIN: %2 = type { i64, i64 }
+; OPT: %0 = type { i1, double }
+; OPT: %1 = type { double, float, double, double }
+; OPT: %2 = type { i64, i64 }
+
 ; The automatic constant folder in opt does not have targetdata access, so
 ; it can't fold gep arithmetic, in general. However, the constant folder run
-; from instcombine and global opt does, and can.
+; from instcombine and global opt can use targetdata.
 
 ; PLAIN: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
 ; PLAIN: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
@@ -11,51 +34,18 @@
 ; PLAIN: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
 ; PLAIN: @H8 = global i8* getelementptr (i8* null, i32 -1)
 ; PLAIN: @H1 = global i1* getelementptr (i1* null, i32 -1)
-; PLAIN: define i8* @goo8() nounwind {
-; PLAIN: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
-; PLAIN: }
-; PLAIN: define i1* @goo1() nounwind {
-; PLAIN: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
-; PLAIN: }
-; PLAIN: define i8* @foo8() nounwind {
-; PLAIN: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
-; PLAIN: }
-; PLAIN: define i1* @foo1() nounwind {
-; PLAIN: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
-; PLAIN: }
-; PLAIN: define i8* @hoo8() nounwind {
-; PLAIN: ret i8* getelementptr (i8* null, i32 -1)
-; PLAIN: }
-; PLAIN: define i1* @hoo1() nounwind {
-; PLAIN: ret i1* getelementptr (i1* null, i32 -1)
-; PLAIN: }
-
-; OPT: @G8 = global i8* null
-; OPT: @G1 = global i1* null
-; OPT: @F8 = global i8* inttoptr (i64 -1 to i8*)
-; OPT: @F1 = global i1* inttoptr (i64 -1 to i1*)
-; OPT: @H8 = global i8* inttoptr (i64 -1 to i8*)
-; OPT: @H1 = global i1* inttoptr (i64 -1 to i1*)
-; OPT: define i8* @goo8() nounwind {
-; OPT: ret i8* null
-; OPT: }
-; OPT: define i1* @goo1() nounwind {
-; OPT: ret i1* null
-; OPT: }
-; OPT: define i8* @foo8() nounwind {
-; OPT: ret i8* inttoptr (i64 -1 to i8*)
-; OPT: }
-; OPT: define i1* @foo1() nounwind {
-; OPT: ret i1* inttoptr (i64 -1 to i1*)
-; OPT: }
-; OPT: define i8* @hoo8() nounwind {
-; OPT: ret i8* inttoptr (i64 -1 to i8*)
-; OPT: }
-; OPT: define i1* @hoo1() nounwind {
-; OPT: ret i1* inttoptr (i64 -1 to i1*)
-; OPT: }
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
+; OPT: @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
+; OPT: @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
+; OPT: @F8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
+; OPT: @F1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
+; OPT: @H8 = global i8* getelementptr (i8* null, i32 -1)
+; OPT: @H1 = global i1* getelementptr (i1* null, i32 -1)
+; TO: @G8 = global i8* null
+; TO: @G1 = global i1* null
+; TO: @F8 = global i8* inttoptr (i64 -1 to i8*)
+; TO: @F1 = global i1* inttoptr (i64 -1 to i1*)
+; TO: @H8 = global i8* inttoptr (i64 -1 to i8*)
+; TO: @H1 = global i1* inttoptr (i64 -1 to i1*)
 
 @G8 = global i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
 @G1 = global i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
@@ -64,21 +54,310 @@ target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64"
 @H8 = global i8* getelementptr (i8* inttoptr (i32 0 to i8*), i32 -1)
 @H1 = global i1* getelementptr (i1* inttoptr (i32 0 to i1*), i32 -1)
 
+; The target-independent folder should be able to do some clever
+; simplifications on sizeof, alignof, and offsetof expressions. The
+; target-dependent folder should fold these down to constants.
+
+; PLAIN: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
+; PLAIN: @b = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; PLAIN: @c = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
+; PLAIN: @d = constant i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
+; PLAIN: @e = constant i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; PLAIN: @f = constant i64 1
+; OPT: @a = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
+; OPT: @b = constant i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT: @c = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
+; OPT: @d = constant i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
+; OPT: @e = constant i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; OPT: @f = constant i64 1
+; TO: @a = constant i64 18480
+; TO: @b = constant i64 8
+; TO: @c = constant i64 16
+; TO: @d = constant i64 88
+; TO: @e = constant i64 16
+; TO: @f = constant i64 1
+
+@a = constant i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5))
+@b = constant i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}* null, i64 0, i32 1) to i64)
+@c = constant i64 ptrtoint (double* getelementptr ({double, double, double, double}* null, i64 0, i32 2) to i64)
+@d = constant i64 ptrtoint (double* getelementptr ([13 x double]* null, i64 0, i32 11) to i64)
+@e = constant i64 ptrtoint (double* getelementptr ({double, float, double, double}* null, i64 0, i32 2) to i64)
+@f = constant i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64)
+
+; The target-dependent folder should cast GEP indices to integer-sized pointers.
+
+; PLAIN: @M = constant i64* getelementptr (i64* null, i32 1)
+; PLAIN: @N = constant i64* getelementptr (%2* null, i32 0, i32 1)
+; PLAIN: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: @M = constant i64* getelementptr (i64* null, i32 1)
+; OPT: @N = constant i64* getelementptr (%2* null, i32 0, i32 1)
+; OPT: @O = constant i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; TO: @M = constant i64* inttoptr (i64 8 to i64*)
+; TO: @N = constant i64* inttoptr (i64 8 to i64*)
+; TO: @O = constant i64* inttoptr (i64 8 to i64*)
+
+@M = constant i64* getelementptr (i64 *null, i32 1)
+@N = constant i64* getelementptr ({ i64, i64 } *null, i32 0, i32 1)
+@O = constant i64* getelementptr ([2 x i64] *null, i32 0, i32 1)
+
+; Duplicate all of the above as function return values rather than
+; global initializers.
+
+; PLAIN: define i8* @goo8() nounwind {
+; PLAIN: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
+; PLAIN: ret i8* %t
+; PLAIN: }
+; PLAIN: define i1* @goo1() nounwind {
+; PLAIN: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
+; PLAIN: ret i1* %t
+; PLAIN: }
+; PLAIN: define i8* @foo8() nounwind {
+; PLAIN: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
+; PLAIN: ret i8* %t
+; PLAIN: }
+; PLAIN: define i1* @foo1() nounwind {
+; PLAIN: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
+; PLAIN: ret i1* %t
+; PLAIN: }
+; PLAIN: define i8* @hoo8() nounwind {
+; PLAIN: %t = bitcast i8* getelementptr (i8* null, i32 -1) to i8*
+; PLAIN: ret i8* %t
+; PLAIN: }
+; PLAIN: define i1* @hoo1() nounwind {
+; PLAIN: %t = bitcast i1* getelementptr (i1* null, i32 -1) to i1*
+; PLAIN: ret i1* %t
+; PLAIN: }
+; OPT: define i8* @goo8() nounwind {
+; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
+; OPT: }
+; OPT: define i1* @goo1() nounwind {
+; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
+; OPT: }
+; OPT: define i8* @foo8() nounwind {
+; OPT: ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
+; OPT: }
+; OPT: define i1* @foo1() nounwind {
+; OPT: ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
+; OPT: }
+; OPT: define i8* @hoo8() nounwind {
+; OPT: ret i8* getelementptr (i8* null, i32 -1)
+; OPT: }
+; OPT: define i1* @hoo1() nounwind {
+; OPT: ret i1* getelementptr (i1* null, i32 -1)
+; OPT: }
+; TO: define i8* @goo8() nounwind {
+; TO: ret i8* null
+; TO: }
+; TO: define i1* @goo1() nounwind {
+; TO: ret i1* null
+; TO: }
+; TO: define i8* @foo8() nounwind {
+; TO: ret i8* inttoptr (i64 -1 to i8*)
+; TO: }
+; TO: define i1* @foo1() nounwind {
+; TO: ret i1* inttoptr (i64 -1 to i1*)
+; TO: }
+; TO: define i8* @hoo8() nounwind {
+; TO: ret i8* inttoptr (i64 -1 to i8*)
+; TO: }
+; TO: define i1* @hoo1() nounwind {
+; TO: ret i1* inttoptr (i64 -1 to i1*)
+; TO: }
+; SCEV: Classifying expressions for: @goo8
+; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
+; SCEV: --> ((-1 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: Classifying expressions for: @goo1
+; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
+; SCEV: --> ((-1 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: Classifying expressions for: @foo8
+; SCEV: %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
+; SCEV: --> ((-2 * sizeof(i8)) + inttoptr (i32 1 to i8*))
+; SCEV: Classifying expressions for: @foo1
+; SCEV: %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
+; SCEV: --> ((-2 * sizeof(i1)) + inttoptr (i32 1 to i1*))
+; SCEV: Classifying expressions for: @hoo8
+; SCEV: --> (-1 * sizeof(i8))
+; SCEV: Classifying expressions for: @hoo1
+; SCEV: --> (-1 * sizeof(i1))
+
 define i8* @goo8() nounwind {
-  ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1)
+  %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -1) to i8*
+  ret i8* %t
 }
 define i1* @goo1() nounwind {
-  ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1)
+  %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -1) to i1*
+  ret i1* %t
 }
 define i8* @foo8() nounwind {
-  ret i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2)
+  %t = bitcast i8* getelementptr (i8* inttoptr (i32 1 to i8*), i32 -2) to i8*
+  ret i8* %t
 }
 define i1* @foo1() nounwind {
-  ret i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2)
+  %t = bitcast i1* getelementptr (i1* inttoptr (i32 1 to i1*), i32 -2) to i1*
+  ret i1* %t
 }
 define i8* @hoo8() nounwind {
-  ret i8* getelementptr (i8* inttoptr (i32 0 to i8*), i32 -1)
+  %t = bitcast i8* getelementptr (i8* inttoptr (i32 0 to i8*), i32 -1) to i8*
+  ret i8* %t
 }
 define i1* @hoo1() nounwind {
-  ret i1* getelementptr (i1* inttoptr (i32 0 to i1*), i32 -1)
+  %t = bitcast i1* getelementptr (i1* inttoptr (i32 0 to i1*), i32 -1) to i1*
+  ret i1* %t
 }
+
+; PLAIN: define i64 @fa() nounwind {
+; PLAIN: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; PLAIN: define i64 @fb() nounwind {
+; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; PLAIN: define i64 @fc() nounwind {
+; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; PLAIN: define i64 @fd() nounwind {
+; PLAIN: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; PLAIN: define i64 @fe() nounwind {
+; PLAIN: %t = bitcast i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64) to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; PLAIN: define i64 @ff() nounwind {
+; PLAIN: %t = bitcast i64 1 to i64
+; PLAIN: ret i64 %t
+; PLAIN: }
+; OPT: define i64 @fa() nounwind {
+; OPT: ret i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310)
+; OPT: }
+; OPT: define i64 @fb() nounwind {
+; OPT: ret i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64)
+; OPT: }
+; OPT: define i64 @fc() nounwind {
+; OPT: ret i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2)
+; OPT: }
+; OPT: define i64 @fd() nounwind {
+; OPT: ret i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11)
+; OPT: }
+; OPT: define i64 @fe() nounwind {
+; OPT: ret i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64)
+; OPT: }
+; OPT: define i64 @ff() nounwind {
+; OPT: ret i64 1
+; OPT: }
+; TO: define i64 @fa() nounwind {
+; TO: ret i64 18480
+; TO: }
+; TO: define i64 @fb() nounwind {
+; TO: ret i64 8
+; TO: }
+; TO: define i64 @fc() nounwind {
+; TO: ret i64 16
+; TO: }
+; TO: define i64 @fd() nounwind {
+; TO: ret i64 88
+; TO: }
+; TO: define i64 @fe() nounwind {
+; TO: ret i64 16
+; TO: }
+; TO: define i64 @ff() nounwind {
+; TO: ret i64 1
+; TO: }
+; SCEV: Classifying expressions for: @fa
+; SCEV: %t = bitcast i64 mul (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2310) to i64
+; SCEV: --> (2310 * sizeof(double))
+; SCEV: Classifying expressions for: @fb
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr (%0* null, i64 0, i32 1) to i64) to i64
+; SCEV: --> alignof(double)
+; SCEV: Classifying expressions for: @fc
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 2) to i64
+; SCEV: --> (2 * sizeof(double))
+; SCEV: Classifying expressions for: @fd
+; SCEV: %t = bitcast i64 mul nuw (i64 ptrtoint (double* getelementptr (double* null, i32 1) to i64), i64 11) to i64
+; SCEV: --> (11 * sizeof(double))
+; SCEV: Classifying expressions for: @fe
+; SCEV: %t = bitcast i64 ptrtoint (double* getelementptr (%1* null, i64 0, i32 2) to i64) to i64
+; SCEV: --> offsetof({ double, float, double, double }, 2)
+; SCEV: Classifying expressions for: @ff
+; SCEV: %t = bitcast i64 1 to i64
+; SCEV: --> 1
+
+define i64 @fa() nounwind {
+  %t = bitcast i64 mul (i64 3, i64 mul (i64 ptrtoint ({[7 x double], [7 x double]}* getelementptr ({[7 x double], [7 x double]}* null, i64 11) to i64), i64 5)) to i64
+  ret i64 %t
+}
+define i64 @fb() nounwind {
+  %t = bitcast i64 ptrtoint ([13 x double]* getelementptr ({i1, [13 x double]}* null, i64 0, i32 1) to i64) to i64
+  ret i64 %t
+}
+define i64 @fc() nounwind {
+  %t = bitcast i64 ptrtoint (double* getelementptr ({double, double, double, double}* null, i64 0, i32 2) to i64) to i64
+  ret i64 %t
+}
+define i64 @fd() nounwind {
+  %t = bitcast i64 ptrtoint (double* getelementptr ([13 x double]* null, i64 0, i32 11) to i64) to i64
+  ret i64 %t
+}
+define i64 @fe() nounwind {
+  %t = bitcast i64 ptrtoint (double* getelementptr ({double, float, double, double}* null, i64 0, i32 2) to i64) to i64
+  ret i64 %t
+}
+define i64 @ff() nounwind {
+  %t = bitcast i64 ptrtoint (<{ i16, i128 }>* getelementptr ({i1, <{ i16, i128 }>}* null, i64 0, i32 1) to i64) to i64
+  ret i64 %t
+}
+
+; PLAIN: define i64* @fM() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+; PLAIN: define i64* @fN() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr (%2* null, i32 0, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+; PLAIN: define i64* @fO() nounwind {
+; PLAIN: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+; PLAIN: ret i64* %t
+; PLAIN: }
+; OPT: define i64* @fM() nounwind {
+; OPT: ret i64* getelementptr (i64* null, i32 1)
+; OPT: }
+; OPT: define i64* @fN() nounwind {
+; OPT: ret i64* getelementptr (%2* null, i32 0, i32 1)
+; OPT: }
+; OPT: define i64* @fO() nounwind {
+; OPT: ret i64* getelementptr ([2 x i64]* null, i32 0, i32 1)
+; OPT: }
+; TO: define i64* @fM() nounwind {
+; TO: ret i64* inttoptr (i64 8 to i64*)
+; TO: }
+; TO: define i64* @fN() nounwind {
+; TO: ret i64* inttoptr (i64 8 to i64*)
+; TO: }
+; TO: define i64* @fO() nounwind {
+; TO: ret i64* inttoptr (i64 8 to i64*)
+; TO: }
+; SCEV: Classifying expressions for: @fM
+; SCEV: %t = bitcast i64* getelementptr (i64* null, i32 1) to i64*
+; SCEV: --> sizeof(i64)
+; SCEV: Classifying expressions for: @fN
+; SCEV: %t = bitcast i64* getelementptr (%2* null, i32 0, i32 1) to i64*
+; SCEV: --> sizeof(i64)
+; SCEV: Classifying expressions for: @fO
+; SCEV: %t = bitcast i64* getelementptr ([2 x i64]* null, i32 0, i32 1) to i64*
+; SCEV: --> sizeof(i64)
+
+define i64* @fM() nounwind {
+  %t = bitcast i64* getelementptr (i64 *null, i32 1) to i64*
+  ret i64* %t
+}
+define i64* @fN() nounwind {
+  %t = bitcast i64* getelementptr ({ i64, i64 } *null, i32 0, i32 1) to i64*
+  ret i64* %t
+}
+define i64* @fO() nounwind {
+  %t = bitcast i64* getelementptr ([2 x i64] *null, i32 0, i32 1) to i64*
+  ret i64* %t
+}
@@ -246,7 +246,7 @@ bc0:
   store i32 0, i32* %tmp53
   ret void
 ; CHECK: @test24
-; CHECK: store i32 0, i32* getelementptr (%"java/lang/StringBuffer"* null, i32 0, i32 1)
+; CHECK: store i32 0, i32* getelementptr (%"java/lang/StringBuffer"* null, i64 0, i32 1)
 }
 
 define void @test25() {
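The CHECK update above is fallout from CastGEPIndices: with targetdata, the leading array index of the constant GEP is promoted to the 64-bit pointer width, while the struct field index must remain i32. Sketch:

  ; before: getelementptr (%"java/lang/StringBuffer"* null, i32 0, i32 1)
  ; after:  getelementptr (%"java/lang/StringBuffer"* null, i64 0, i32 1)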