Mirror of https://github.com/RPCS3/llvm.git (synced 2025-01-17 23:44:43 +00:00)

remove a bunch of extraneous LLVMContext arguments from various APIs, addressing PR5325.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@86231 91177308-0d34-0410-b5e6-96231b3b80d8

This commit is contained in:
parent 6580da4755
commit 7b550ccfc5
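The change is mechanical at call sites: the LLVMContext parameter simply disappears, because the context can be recovered from the values (instructions, constants, types) the functions already receive. A minimal before/after sketch for the constant-folding entry point, assuming the headers of this era; the caller's function and variable names are illustrative only:

```cpp
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Instruction.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

// Before this commit callers had to thread a context through:
//   Constant *C = ConstantFoldInstruction(I, I->getContext(), TD);
// After it, the context argument is gone; the folder derives the context
// from the instruction itself.
static Constant *tryFold(Instruction *I, const TargetData *TD) {
  return ConstantFoldInstruction(I, TD);  // returns null if I cannot be folded
}
```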
@@ -26,20 +26,18 @@ namespace llvm {
class TargetData;
class Function;
class Type;
class LLVMContext;

/// ConstantFoldInstruction - Attempt to constant fold the specified
/// instruction. If successful, the constant result is returned, if not, null
/// is returned. Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
///
Constant *ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
const TargetData *TD = 0);
Constant *ConstantFoldInstruction(Instruction *I, const TargetData *TD = 0);

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext &Context,
Constant *ConstantFoldConstantExpression(ConstantExpr *CE,
const TargetData *TD = 0);

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -49,8 +47,7 @@ Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext &Context,
/// form.
///
Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant*const * Ops, unsigned NumOps,
LLVMContext &Context,
Constant *const *Ops, unsigned NumOps,
const TargetData *TD = 0);

/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -58,8 +55,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant*const * Ops, unsigned NumOps,
LLVMContext &Context,
Constant *const *Ops, unsigned NumOps,
const TargetData *TD = 0);

/// ConstantFoldLoadFromConstPtr - Return the value that a load from C would
@@ -79,7 +75,7 @@ bool canConstantFoldCallTo(const Function *F);
/// ConstantFoldCall - Attempt to constant fold a call to the specified function
/// with the specified arguments, returning null if unsuccessful.
Constant *
ConstantFoldCall(Function *F, Constant* const* Operands, unsigned NumOperands);
ConstantFoldCall(Function *F, Constant *const *Operands, unsigned NumOperands);
}

#endif
@@ -17,7 +17,6 @@

namespace llvm {
class CallInst;
class LLVMContext;
class PointerType;
class TargetData;
class Type;
@@ -29,43 +28,42 @@ class Value;

/// isMalloc - Returns true if the value is either a malloc call or a bitcast of
/// the result of a malloc call
bool isMalloc(const Value* I);
bool isMalloc(const Value *I);

/// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here.
const CallInst* extractMallocCall(const Value* I);
CallInst* extractMallocCall(Value* I);
const CallInst* extractMallocCall(const Value *I);
CallInst* extractMallocCall(Value *I);

/// extractMallocCallFromBitCast - Returns the corresponding CallInst if the
/// instruction is a bitcast of the result of a malloc call.
const CallInst* extractMallocCallFromBitCast(const Value* I);
CallInst* extractMallocCallFromBitCast(Value* I);
const CallInst* extractMallocCallFromBitCast(const Value *I);
CallInst* extractMallocCallFromBitCast(Value *I);

/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
CallInst* isArrayMalloc(Value* I, LLVMContext &Context, const TargetData* TD);
const CallInst* isArrayMalloc(const Value* I, LLVMContext &Context,
const TargetData* TD);
CallInst* isArrayMalloc(Value *I, const TargetData *TD);
const CallInst* isArrayMalloc(const Value *I,
const TargetData *TD);

/// getMallocType - Returns the PointerType resulting from the malloc call.
/// This PointerType is the result type of the call's only bitcast use.
/// If there is no unique bitcast use, then return NULL.
const PointerType* getMallocType(const CallInst* CI);
const PointerType* getMallocType(const CallInst *CI);

/// getMallocAllocatedType - Returns the Type allocated by malloc call. This
/// Type is the result type of the call's only bitcast use. If there is no
/// unique bitcast use, then return NULL.
const Type* getMallocAllocatedType(const CallInst* CI);
const Type* getMallocAllocatedType(const CallInst *CI);

/// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type,
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value* getMallocArraySize(CallInst* CI, LLVMContext &Context,
const TargetData* TD);
Value* getMallocArraySize(CallInst *CI, const TargetData *TD);

//===----------------------------------------------------------------------===//
// free Call Utility Functions.
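Callers of the malloc-analysis helpers follow the same pattern: drop the context argument. A hedged sketch of a post-change call site; the header path is an assumption for this era, and the surrounding logic is illustrative rather than taken from the commit:

```cpp
#include "llvm/Analysis/MallocHelpers.h"   // header name assumed for this era
#include "llvm/Instructions.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

// Old: isArrayMalloc(V, V->getContext(), TD); getMallocArraySize(CI, Context, TD)
// New: only the TargetData is passed.
static Value *arraySizeIfArrayMalloc(Value *V, const TargetData *TD) {
  if (CallInst *CI = extractMallocCall(V))   // non-null only if V is a malloc call
    if (isArrayMalloc(V, TD))                // non-null only for array mallocs
      return getMallocArraySize(CI, TD);     // may still be null if size is unknown
  return 0;
}
```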
@@ -28,7 +28,6 @@ namespace llvm {

class Module;
class Constant;
class LLVMContext;
template<typename ValueSubClass, typename ItemParentClass>
class SymbolTableListTraits;

@@ -50,8 +49,7 @@ public:
}
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified modules global list.
GlobalVariable(LLVMContext &Context, const Type *Ty, bool isConstant,
LinkageTypes Linkage,
GlobalVariable(const Type *Ty, bool isConstant, LinkageTypes Linkage,
Constant *Initializer = 0, const Twine &Name = "",
bool ThreadLocal = false, unsigned AddressSpace = 0);
/// GlobalVariable ctor - This creates a global and inserts it before the
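The GlobalVariable constructor loses its leading context parameter because the element type already carries its LLVMContext. A small hedged sketch of the new construction pattern; the variable names and the chosen linkage are illustrative only:

```cpp
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Old: new GlobalVariable(Ctx, Int32Ty, /*isConstant=*/false, ...);
// New: the leading LLVMContext argument is dropped.
static GlobalVariable *makeCounter(LLVMContext &Ctx) {
  const Type *Int32Ty = Type::getInt32Ty(Ctx);
  return new GlobalVariable(Int32Ty, /*isConstant=*/false,
                            GlobalValue::InternalLinkage,
                            ConstantInt::get(Int32Ty, 0), "counter");
}
```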
@@ -26,24 +26,22 @@
namespace llvm {

class TargetData;
class LLVMContext;

/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
const TargetData *TD;
LLVMContext &Context;

/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
if (Constant *CF = ConstantFoldConstantExpression(CE, Context, TD))
if (Constant *CF = ConstantFoldConstantExpression(CE, TD))
return CF;
return C;
}

public:
explicit TargetFolder(const TargetData *TheTD, LLVMContext &C) :
TD(TheTD), Context(C) {}
TD(TheTD) {}

//===--------------------------------------------------------------------===//
// Binary Operators
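TargetFolder itself now only carries the TargetData. A hedged usage sketch of the new constructor; the folding call shown is one of the folder's standard Create methods and the caller code is illustrative, not from this commit:

```cpp
#include "llvm/Constants.h"
#include "llvm/Support/TargetFolder.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

// Old: TargetFolder Folder(TD, SomeContext);
// New: the Context member is gone; folding derives it from the constants.
static Constant *foldAdd(Constant *L, Constant *R, const TargetData *TD) {
  TargetFolder Folder(TD);
  return Folder.CreateAdd(L, R);  // constant-folds using target information
}
```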
@@ -23,7 +23,6 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Target/TargetData.h"
@@ -99,7 +98,7 @@ static bool isNonEscapingLocalObject(const Value *V) {
/// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, unsigned Size,
LLVMContext &Context, const TargetData &TD) {
const TargetData &TD) {
const Type *AccessTy;
if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
AccessTy = GV->getType()->getElementType();
@@ -109,7 +108,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
else
return false;
} else if (const CallInst* CI = extractMallocCall(V)) {
if (!isArrayMalloc(V, Context, &TD))
if (!isArrayMalloc(V, &TD))
// The size is the argument to the malloc call.
if (const ConstantInt* C = dyn_cast<ConstantInt>(CI->getOperand(1)))
return (C->getZExtValue() < Size);
@@ -665,10 +664,9 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,

// If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias.
LLVMContext &Context = V1->getContext();
if (TD)
if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, Context, *TD)) ||
(V2Size != ~0U && isObjectSmallerThan(O1, V2Size, Context, *TD)))
if ((V1Size != ~0U && isObjectSmallerThan(O2, V1Size, *TD)) ||
(V2Size != ~0U && isObjectSmallerThan(O1, V2Size, *TD)))
return NoAlias;

// If one pointer is the result of a call/invoke and the other is a
@@ -707,16 +705,16 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,

// This function is used to determine if the indices of two GEP instructions are
// equal. V1 and V2 are the indices.
static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext &Context) {
static bool IndexOperandsEqual(Value *V1, Value *V2) {
if (V1->getType() == V2->getType())
return V1 == V2;
if (Constant *C1 = dyn_cast<Constant>(V1))
if (Constant *C2 = dyn_cast<Constant>(V2)) {
// Sign extend the constants to long types, if necessary
if (C1->getType() != Type::getInt64Ty(Context))
C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(Context));
if (C2->getType() != Type::getInt64Ty(Context))
C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(Context));
if (C1->getType() != Type::getInt64Ty(C1->getContext()))
C1 = ConstantExpr::getSExt(C1, Type::getInt64Ty(C1->getContext()));
if (C2->getType() != Type::getInt64Ty(C1->getContext()))
C2 = ConstantExpr::getSExt(C2, Type::getInt64Ty(C1->getContext()));
return C1 == C2;
}
return false;
@@ -737,8 +735,6 @@ BasicAliasAnalysis::CheckGEPInstructions(

const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);

LLVMContext &Context = GEPPointerTy->getContext();

// Find the (possibly empty) initial sequence of equal values... which are not
// necessarily constants.
unsigned NumGEP1Operands = NumGEP1Ops, NumGEP2Operands = NumGEP2Ops;
@@ -746,8 +742,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
unsigned MaxOperands = std::max(NumGEP1Operands, NumGEP2Operands);
unsigned UnequalOper = 0;
while (UnequalOper != MinOperands &&
IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper],
Context)) {
IndexOperandsEqual(GEP1Ops[UnequalOper], GEP2Ops[UnequalOper])) {
// Advance through the type as we go...
++UnequalOper;
if (const CompositeType *CT = dyn_cast<CompositeType>(BasePtr1Ty))
@@ -811,10 +806,11 @@ BasicAliasAnalysis::CheckGEPInstructions(
if (Constant *G2OC = dyn_cast<ConstantInt>(const_cast<Value*>(G2Oper))){
if (G1OC->getType() != G2OC->getType()) {
// Sign extend both operands to long.
if (G1OC->getType() != Type::getInt64Ty(Context))
G1OC = ConstantExpr::getSExt(G1OC, Type::getInt64Ty(Context));
if (G2OC->getType() != Type::getInt64Ty(Context))
G2OC = ConstantExpr::getSExt(G2OC, Type::getInt64Ty(Context));
const Type *Int64Ty = Type::getInt64Ty(G1OC->getContext());
if (G1OC->getType() != Int64Ty)
G1OC = ConstantExpr::getSExt(G1OC, Int64Ty);
if (G2OC->getType() != Int64Ty)
G2OC = ConstantExpr::getSExt(G2OC, Int64Ty);
GEP1Ops[FirstConstantOper] = G1OC;
GEP2Ops[FirstConstantOper] = G2OC;
}
@@ -950,7 +946,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
for (unsigned i = 0; i != FirstConstantOper; ++i) {
if (!isa<StructType>(ZeroIdxTy))
GEP1Ops[i] = GEP2Ops[i] =
Constant::getNullValue(Type::getInt32Ty(Context));
Constant::getNullValue(Type::getInt32Ty(ZeroIdxTy->getContext()));

if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);
@@ -992,11 +988,11 @@ BasicAliasAnalysis::CheckGEPInstructions(
//
if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
GEP1Ops[i] =
ConstantInt::get(Type::getInt64Ty(Context),
ConstantInt::get(Type::getInt64Ty(AT->getContext()),
AT->getNumElements()-1);
else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
GEP1Ops[i] =
ConstantInt::get(Type::getInt64Ty(Context),
ConstantInt::get(Type::getInt64Ty(VT->getContext()),
VT->getNumElements()-1);
}
}
@@ -23,7 +23,6 @@
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
@@ -493,8 +492,7 @@ static Constant *ConstantFoldLoadInst(const LoadInst *LI, const TargetData *TD){
/// these together. If target data info is available, it is provided as TD,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const TargetData *TD,
LLVMContext &Context){
Constant *Op1, const TargetData *TD){
// SROA

// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@@ -521,15 +519,15 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,

/// SymbolicallyEvaluateGEP - If we can symbolically evaluate the specified GEP
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
static Constant *SymbolicallyEvaluateGEP(Constant *const *Ops, unsigned NumOps,
const Type *ResultTy,
LLVMContext &Context,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
return 0;

unsigned BitWidth = TD->getTypeSizeInBits(TD->getIntPtrType(Context));
unsigned BitWidth =
TD->getTypeSizeInBits(TD->getIntPtrType(Ptr->getContext()));
APInt BasePtr(BitWidth, 0);
bool BaseIsInt = true;
if (!Ptr->isNullValue()) {
@@ -558,7 +556,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
// If the base value for this address is a literal integer value, fold the
// getelementptr to the resulting integer value casted to the pointer type.
if (BaseIsInt) {
Constant *C = ConstantInt::get(Context, Offset+BasePtr);
Constant *C = ConstantInt::get(Ptr->getContext(), Offset+BasePtr);
return ConstantExpr::getIntToPtr(C, ResultTy);
}

@@ -579,7 +577,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
return 0;
APInt NewIdx = Offset.udiv(ElemSize);
Offset -= NewIdx * ElemSize;
NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Context), NewIdx));
NewIdxs.push_back(ConstantInt::get(TD->getIntPtrType(Ty->getContext()),
NewIdx));
Ty = ATy->getElementType();
} else if (const StructType *STy = dyn_cast<StructType>(Ty)) {
// Determine which field of the struct the offset points into. The
@@ -587,7 +586,8 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
// know the offset is within the struct at this point.
const StructLayout &SL = *TD->getStructLayout(STy);
unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Context), ElIdx));
NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
ElIdx));
Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
Ty = STy->getTypeAtIndex(ElIdx);
} else {
@@ -628,8 +628,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
/// is returned. Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
///
Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
const TargetData *TD) {
Constant *llvm::ConstantFoldInstruction(Instruction *I, const TargetData *TD) {
if (PHINode *PN = dyn_cast<PHINode>(I)) {
if (PN->getNumIncomingValues() == 0)
return UndefValue::get(PN->getType());
@@ -657,21 +656,19 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context,

if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(),
Ops.data(), Ops.size(),
Context, TD);
Ops.data(), Ops.size(), TD);

if (const LoadInst *LI = dyn_cast<LoadInst>(I))
return ConstantFoldLoadInst(LI, TD);

return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
Ops.data(), Ops.size(), Context, TD);
Ops.data(), Ops.size(), TD);
}

/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// result is returned, if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
LLVMContext &Context,
const TargetData *TD) {
SmallVector<Constant*, 8> Ops;
for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
@@ -679,10 +676,9 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,

if (CE->isCompare())
return ConstantFoldCompareInstOperands(CE->getPredicate(),
Ops.data(), Ops.size(),
Context, TD);
Ops.data(), Ops.size(), TD);
return ConstantFoldInstOperands(CE->getOpcode(), CE->getType(),
Ops.data(), Ops.size(), Context, TD);
Ops.data(), Ops.size(), TD);
}

/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -693,13 +689,11 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant* const* Ops, unsigned NumOps,
LLVMContext &Context,
const TargetData *TD) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
if (isa<ConstantExpr>(Ops[0]) || isa<ConstantExpr>(Ops[1]))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD,
Context))
if (Constant *C = SymbolicallyEvaluateBinop(Opcode, Ops[0], Ops[1], TD))
return C;

return ConstantExpr::get(Opcode, Ops[0], Ops[1]);
@@ -724,7 +718,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
unsigned InWidth = Input->getType()->getScalarSizeInBits();
if (TD->getPointerSizeInBits() < InWidth) {
Constant *Mask =
ConstantInt::get(Context, APInt::getLowBitsSet(InWidth,
ConstantInt::get(CE->getContext(), APInt::getLowBitsSet(InWidth,
TD->getPointerSizeInBits()));
Input = ConstantExpr::getAnd(Input, Mask);
}
@@ -766,7 +760,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
AT->getNumElements()))) {
Constant *Index[] = {
Constant::getNullValue(CE->getType()),
ConstantInt::get(Context, ElemIdx)
ConstantInt::get(ElTy->getContext(), ElemIdx)
};
return
ConstantExpr::getGetElementPtr(GV, &Index[0], 2);
@@ -800,7 +794,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
case Instruction::ShuffleVector:
return ConstantExpr::getShuffleVector(Ops[0], Ops[1], Ops[2]);
case Instruction::GetElementPtr:
if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, Context, TD))
if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, TD))
return C;

return ConstantExpr::getGetElementPtr(Ops[0], Ops+1, NumOps-1);
@@ -812,9 +806,8 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
/// returns a constant expression of the specified operands.
///
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant*const * Ops,
Constant *const *Ops,
unsigned NumOps,
LLVMContext &Context,
const TargetData *TD) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -825,15 +818,14 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
// around to know if bit truncation is happening.
if (ConstantExpr *CE0 = dyn_cast<ConstantExpr>(Ops[0])) {
if (TD && Ops[1]->isNullValue()) {
const Type *IntPtrTy = TD->getIntPtrType(Context);
const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());
if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
// proper extension or truncation.
Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
IntPtrTy, false);
Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
Context, TD);
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
}

// Only do this transformation if the int is intptrty in size, otherwise
@@ -843,14 +835,13 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *C = CE0->getOperand(0);
Constant *NewOps[] = { C, Constant::getNullValue(C->getType()) };
// FIXME!
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
Context, TD);
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
}
}

if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(Ops[1])) {
if (TD && CE0->getOpcode() == CE1->getOpcode()) {
const Type *IntPtrTy = TD->getIntPtrType(Context);
const Type *IntPtrTy = TD->getIntPtrType(CE0->getContext());

if (CE0->getOpcode() == Instruction::IntToPtr) {
// Convert the integer value to the right size to ensure we get the
@@ -860,8 +851,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
IntPtrTy, false);
Constant *NewOps[] = { C0, C1 };
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
Context, TD);
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
}

// Only do this transformation if the int is intptrty in size, otherwise
@@ -872,8 +862,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant *NewOps[] = {
CE0->getOperand(0), CE1->getOperand(0)
};
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
Context, TD);
return ConstantFoldCompareInstOperands(Predicate, NewOps, 2, TD);
}
}
}
@@ -996,7 +985,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}

static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
const Type *Ty, LLVMContext &Context) {
const Type *Ty) {
errno = 0;
V = NativeFP(V);
if (errno != 0) {
@@ -1005,17 +994,15 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
}

if (Ty->isFloatTy())
return ConstantFP::get(Context, APFloat((float)V));
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
return ConstantFP::get(Context, APFloat(V));
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
return 0; // dummy return to suppress warning
}

static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
double V, double W,
const Type *Ty,
LLVMContext &Context) {
double V, double W, const Type *Ty) {
errno = 0;
V = NativeFP(V, W);
if (errno != 0) {
@@ -1024,9 +1011,9 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
}

if (Ty->isFloatTy())
return ConstantFP::get(Context, APFloat((float)V));
return ConstantFP::get(Ty->getContext(), APFloat((float)V));
if (Ty->isDoubleTy())
return ConstantFP::get(Context, APFloat(V));
return ConstantFP::get(Ty->getContext(), APFloat(V));
llvm_unreachable("Can only constant fold float/double");
return 0; // dummy return to suppress warning
}
@@ -1037,7 +1024,6 @@ Constant *
llvm::ConstantFoldCall(Function *F,
Constant *const *Operands, unsigned NumOperands) {
if (!F->hasName()) return 0;
LLVMContext &Context = F->getContext();
StringRef Name = F->getName();

const Type *Ty = F->getReturnType();
@@ -1054,62 +1040,62 @@ llvm::ConstantFoldCall(Function *F,
switch (Name[0]) {
case 'a':
if (Name == "acos")
return ConstantFoldFP(acos, V, Ty, Context);
return ConstantFoldFP(acos, V, Ty);
else if (Name == "asin")
return ConstantFoldFP(asin, V, Ty, Context);
return ConstantFoldFP(asin, V, Ty);
else if (Name == "atan")
return ConstantFoldFP(atan, V, Ty, Context);
return ConstantFoldFP(atan, V, Ty);
break;
case 'c':
if (Name == "ceil")
return ConstantFoldFP(ceil, V, Ty, Context);
return ConstantFoldFP(ceil, V, Ty);
else if (Name == "cos")
return ConstantFoldFP(cos, V, Ty, Context);
return ConstantFoldFP(cos, V, Ty);
else if (Name == "cosh")
return ConstantFoldFP(cosh, V, Ty, Context);
return ConstantFoldFP(cosh, V, Ty);
else if (Name == "cosf")
return ConstantFoldFP(cos, V, Ty, Context);
return ConstantFoldFP(cos, V, Ty);
break;
case 'e':
if (Name == "exp")
return ConstantFoldFP(exp, V, Ty, Context);
return ConstantFoldFP(exp, V, Ty);
break;
case 'f':
if (Name == "fabs")
return ConstantFoldFP(fabs, V, Ty, Context);
return ConstantFoldFP(fabs, V, Ty);
else if (Name == "floor")
return ConstantFoldFP(floor, V, Ty, Context);
return ConstantFoldFP(floor, V, Ty);
break;
case 'l':
if (Name == "log" && V > 0)
return ConstantFoldFP(log, V, Ty, Context);
return ConstantFoldFP(log, V, Ty);
else if (Name == "log10" && V > 0)
return ConstantFoldFP(log10, V, Ty, Context);
return ConstantFoldFP(log10, V, Ty);
else if (Name == "llvm.sqrt.f32" ||
Name == "llvm.sqrt.f64") {
if (V >= -0.0)
return ConstantFoldFP(sqrt, V, Ty, Context);
return ConstantFoldFP(sqrt, V, Ty);
else // Undefined
return Constant::getNullValue(Ty);
}
break;
case 's':
if (Name == "sin")
return ConstantFoldFP(sin, V, Ty, Context);
return ConstantFoldFP(sin, V, Ty);
else if (Name == "sinh")
return ConstantFoldFP(sinh, V, Ty, Context);
return ConstantFoldFP(sinh, V, Ty);
else if (Name == "sqrt" && V >= 0)
return ConstantFoldFP(sqrt, V, Ty, Context);
return ConstantFoldFP(sqrt, V, Ty);
else if (Name == "sqrtf" && V >= 0)
return ConstantFoldFP(sqrt, V, Ty, Context);
return ConstantFoldFP(sqrt, V, Ty);
else if (Name == "sinf")
return ConstantFoldFP(sin, V, Ty, Context);
return ConstantFoldFP(sin, V, Ty);
break;
case 't':
if (Name == "tan")
return ConstantFoldFP(tan, V, Ty, Context);
return ConstantFoldFP(tan, V, Ty);
else if (Name == "tanh")
return ConstantFoldFP(tanh, V, Ty, Context);
return ConstantFoldFP(tanh, V, Ty);
break;
default:
break;
@@ -1120,7 +1106,7 @@ llvm::ConstantFoldCall(Function *F,

if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
if (Name.startswith("llvm.bswap"))
return ConstantInt::get(Context, Op->getValue().byteSwap());
return ConstantInt::get(F->getContext(), Op->getValue().byteSwap());
else if (Name.startswith("llvm.ctpop"))
return ConstantInt::get(Ty, Op->getValue().countPopulation());
else if (Name.startswith("llvm.cttz"))
@@ -1149,18 +1135,20 @@ llvm::ConstantFoldCall(Function *F,
Op2->getValueAPF().convertToDouble();

if (Name == "pow")
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty, Context);
return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
if (Name == "fmod")
return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty, Context);
return ConstantFoldBinaryFP(fmod, Op1V, Op2V, Ty);
if (Name == "atan2")
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty, Context);
return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
} else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
if (Name == "llvm.powi.f32")
return ConstantFP::get(Context, APFloat((float)std::pow((float)Op1V,
return ConstantFP::get(F->getContext(),
APFloat((float)std::pow((float)Op1V,
(int)Op2C->getZExtValue())));
if (Name == "llvm.powi.f64")
return ConstantFP::get(Context, APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
return ConstantFP::get(F->getContext(),
APFloat((double)std::pow((double)Op1V,
(int)Op2C->getZExtValue())));
}
return 0;
}
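The exported ConstantFoldCall entry point keeps its signature; only the internal ConstantFoldFP/ConstantFoldBinaryFP helpers stop taking a context and instead read it from the return type. A hedged sketch of a call site; the function lookup and names are illustrative, not from this commit:

```cpp
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/LLVMContext.h"

using namespace llvm;

// Folds a call to a libm-style function such as "cos" when its argument is a
// constant. CosFn is assumed to be a declaration of double cos(double).
static Constant *foldCosOfOne(Function *CosFn, LLVMContext &Ctx) {
  Constant *Arg = ConstantFP::get(Type::getDoubleTy(Ctx), 1.0);
  Constant *Ops[] = { Arg };
  return ConstantFoldCall(CosFn, Ops, 1);  // null if the call cannot be folded
}
```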
@@ -91,8 +91,7 @@ static bool isConstantOne(Value *val) {
return isa<ConstantInt>(val) && cast<ConstantInt>(val)->isOne();
}

static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context,
const TargetData *TD) {
static Value *isArrayMallocHelper(const CallInst *CI, const TargetData *TD) {
if (!CI)
return NULL;

@@ -109,7 +108,7 @@ static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context,
ElementSize = ConstantExpr::getTruncOrBitCast(ElementSize,
MallocArg->getType());
Constant *FoldedElementSize =
ConstantFoldConstantExpression(cast<ConstantExpr>(ElementSize), Context, TD);
ConstantFoldConstantExpression(cast<ConstantExpr>(ElementSize), TD);

// First, check if CI is a non-array malloc.
if (CO && ((CO == ElementSize) ||
@@ -159,7 +158,7 @@ static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context,

APInt Op1Int = Op1CI->getValue();
uint64_t BitToSet = Op1Int.getLimitedValue(Op1Int.getBitWidth() - 1);
Value *Op1Pow = ConstantInt::get(Context,
Value *Op1Pow = ConstantInt::get(Op1CI->getContext(),
APInt(Op1Int.getBitWidth(), 0).set(BitToSet));
if (Op0 == ElementSize || (FoldedElementSize && Op0 == FoldedElementSize))
// ArraySize << log2(ElementSize)
@@ -178,10 +177,9 @@ static Value *isArrayMallocHelper(const CallInst *CI, LLVMContext &Context,
/// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL.
CallInst *llvm::isArrayMalloc(Value *I, LLVMContext &Context,
const TargetData *TD) {
CallInst *llvm::isArrayMalloc(Value *I, const TargetData *TD) {
CallInst *CI = extractMallocCall(I);
Value *ArraySize = isArrayMallocHelper(CI, Context, TD);
Value *ArraySize = isArrayMallocHelper(CI, TD);

if (ArraySize &&
ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1))
@@ -191,10 +189,9 @@ CallInst *llvm::isArrayMalloc(Value *I, LLVMContext &Context,
return NULL;
}

const CallInst *llvm::isArrayMalloc(const Value *I, LLVMContext &Context,
const TargetData *TD) {
const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
const CallInst *CI = extractMallocCall(I);
Value *ArraySize = isArrayMallocHelper(CI, Context, TD);
Value *ArraySize = isArrayMallocHelper(CI, TD);

if (ArraySize &&
ArraySize != ConstantInt::get(CI->getOperand(1)->getType(), 1))
@@ -244,9 +241,8 @@ const Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// then return that multiple. For non-array mallocs, the multiple is
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined.
Value *llvm::getMallocArraySize(CallInst *CI, LLVMContext &Context,
const TargetData *TD) {
return isArrayMallocHelper(CI, Context, TD);
Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD) {
return isArrayMallocHelper(CI, TD);
}

//===----------------------------------------------------------------------===//
@@ -10,6 +10,7 @@
// This file implements tracking of pointer bounds.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
@@ -101,7 +102,7 @@ const SCEV *PointerTracking::computeAllocationCount(Value *P,
}

if (CallInst *CI = extractMallocCall(V)) {
Value *arraySize = getMallocArraySize(CI, P->getContext(), TD);
Value *arraySize = getMallocArraySize(CI, TD);
const Type* AllocTy = getMallocAllocatedType(CI);
if (!AllocTy || !arraySize) return SE->getCouldNotCompute();
Ty = AllocTy;
@@ -3816,7 +3816,6 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
if (Constant *C = dyn_cast<Constant>(V)) return C;
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
Instruction *I = cast<Instruction>(V);
LLVMContext &Context = I->getParent()->getContext();

std::vector<Constant*> Operands;
Operands.resize(I->getNumOperands());
@@ -3828,12 +3827,10 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {

if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(),
&Operands[0], Operands.size(),
Context);
&Operands[0], Operands.size());
else
return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
&Operands[0], Operands.size(),
Context);
&Operands[0], Operands.size());
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
@@ -4040,12 +4037,10 @@ const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
Constant *C;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
&Operands[0], Operands.size(),
getContext());
&Operands[0], Operands.size());
else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
&Operands[0], Operands.size(),
getContext());
&Operands[0], Operands.size());
return getSCEV(C);
}
}
@ -20,7 +20,6 @@
|
||||
#include "llvm/DerivedTypes.h"
|
||||
#include "llvm/Instructions.h"
|
||||
#include "llvm/IntrinsicInst.h"
|
||||
#include "llvm/LLVMContext.h"
|
||||
#include "llvm/Module.h"
|
||||
#include "llvm/Pass.h"
|
||||
#include "llvm/Analysis/ConstantFolding.h"
|
||||
@ -245,8 +244,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
|
||||
return false;
|
||||
}
|
||||
|
||||
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
|
||||
LLVMContext &Context) {
|
||||
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
|
||||
ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
|
||||
if (!CI) return 0;
|
||||
unsigned IdxV = CI->getZExtValue();
|
||||
@ -282,8 +280,7 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
|
||||
/// users of the global, cleaning up the obvious ones. This is largely just a
|
||||
/// quick scan over the use list to clean up the easy and obvious cruft. This
|
||||
/// returns true if it made a change.
|
||||
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
|
||||
LLVMContext &Context) {
|
||||
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
|
||||
bool Changed = false;
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
|
||||
User *U = *UI++;
|
||||
@ -304,11 +301,11 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
|
||||
Constant *SubInit = 0;
|
||||
if (Init)
|
||||
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
|
||||
Changed |= CleanupConstantGlobalUsers(CE, SubInit, Context);
|
||||
Changed |= CleanupConstantGlobalUsers(CE, SubInit);
|
||||
} else if (CE->getOpcode() == Instruction::BitCast &&
|
||||
isa<PointerType>(CE->getType())) {
|
||||
// Pointer cast, delete any stores and memsets to the global.
|
||||
Changed |= CleanupConstantGlobalUsers(CE, 0, Context);
|
||||
Changed |= CleanupConstantGlobalUsers(CE, 0);
|
||||
}
|
||||
|
||||
if (CE->use_empty()) {
|
||||
@ -322,11 +319,11 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
|
||||
Constant *SubInit = 0;
|
||||
if (!isa<ConstantExpr>(GEP->getOperand(0))) {
|
||||
ConstantExpr *CE =
|
||||
dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP, Context));
|
||||
dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
|
||||
if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
|
||||
SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
|
||||
}
|
||||
Changed |= CleanupConstantGlobalUsers(GEP, SubInit, Context);
|
||||
Changed |= CleanupConstantGlobalUsers(GEP, SubInit);
|
||||
|
||||
if (GEP->use_empty()) {
|
||||
GEP->eraseFromParent();
|
||||
@ -344,7 +341,7 @@ static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
|
||||
if (SafeToDestroyConstant(C)) {
|
||||
C->destroyConstant();
|
||||
// This could have invalidated UI, start over from scratch.
|
||||
CleanupConstantGlobalUsers(V, Init, Context);
|
||||
CleanupConstantGlobalUsers(V, Init);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@ -469,8 +466,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
|
||||
/// behavior of the program in a more fine-grained way. We have determined that
|
||||
/// this transformation is safe already. We return the first global variable we
|
||||
/// insert so that the caller can reprocess it.
|
||||
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
|
||||
LLVMContext &Context) {
|
||||
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
|
||||
// Make sure this global only has simple uses that we can SRA.
|
||||
if (!GlobalUsersSafeToSRA(GV))
|
||||
return 0;
|
||||
@ -492,11 +488,9 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
|
||||
const StructLayout &Layout = *TD.getStructLayout(STy);
|
||||
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
|
||||
Constant *In = getAggregateConstantElement(Init,
|
||||
ConstantInt::get(Type::getInt32Ty(Context), i),
|
||||
Context);
|
||||
ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
|
||||
assert(In && "Couldn't get element of initializer?");
|
||||
GlobalVariable *NGV = new GlobalVariable(Context,
|
||||
STy->getElementType(i), false,
|
||||
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
|
||||
GlobalVariable::InternalLinkage,
|
||||
In, GV->getName()+"."+Twine(i),
|
||||
GV->isThreadLocal(),
|
||||
@ -527,12 +521,10 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
|
||||
unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
|
||||
for (unsigned i = 0, e = NumElements; i != e; ++i) {
|
||||
Constant *In = getAggregateConstantElement(Init,
|
||||
ConstantInt::get(Type::getInt32Ty(Context), i),
|
||||
Context);
|
||||
ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
|
||||
assert(In && "Couldn't get element of initializer?");
|
||||
|
||||
GlobalVariable *NGV = new GlobalVariable(Context,
|
||||
STy->getElementType(), false,
|
||||
GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
|
||||
GlobalVariable::InternalLinkage,
|
||||
In, GV->getName()+"."+Twine(i),
|
||||
GV->isThreadLocal(),
|
||||
@ -554,7 +546,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
|
||||
|
||||
DEBUG(errs() << "PERFORMING GLOBAL SRA ON: " << *GV);
|
||||
|
||||
Constant *NullInt = Constant::getNullValue(Type::getInt32Ty(Context));
|
||||
Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));
|
||||
|
||||
// Loop over all of the uses of the global, replacing the constantexpr geps,
|
||||
// with smaller constantexpr geps or direct references.
|
||||
@ -678,8 +670,7 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
|
||||
LLVMContext &Context) {
|
||||
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
|
||||
bool Changed = false;
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
|
||||
Instruction *I = cast<Instruction>(*UI++);
|
||||
@ -712,7 +703,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
|
||||
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
|
||||
Changed |= OptimizeAwayTrappingUsesOfValue(CI,
|
||||
ConstantExpr::getCast(CI->getOpcode(),
|
||||
NewV, CI->getType()), Context);
|
||||
NewV, CI->getType()));
|
||||
if (CI->use_empty()) {
|
||||
Changed = true;
|
||||
CI->eraseFromParent();
|
||||
@ -730,7 +721,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
|
||||
if (Idxs.size() == GEPI->getNumOperands()-1)
|
||||
Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
|
||||
ConstantExpr::getGetElementPtr(NewV, &Idxs[0],
|
||||
Idxs.size()), Context);
|
||||
Idxs.size()));
|
||||
if (GEPI->use_empty()) {
|
||||
Changed = true;
|
||||
GEPI->eraseFromParent();
|
||||
@ -746,8 +737,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
|
||||
/// value stored into it. If there are uses of the loaded value that would trap
|
||||
/// if the loaded value is dynamically null, then we know that they cannot be
|
||||
/// reachable with a null optimize away the load.
|
||||
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
||||
LLVMContext &Context) {
|
||||
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
|
||||
bool Changed = false;
|
||||
|
||||
// Keep track of whether we are able to remove all the uses of the global
|
||||
@ -758,7 +748,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
||||
for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
|
||||
User *GlobalUser = *GUI++;
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
|
||||
Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV, Context);
|
||||
Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
|
||||
// If we were able to delete all uses of the loads
|
||||
if (LI->use_empty()) {
|
||||
LI->eraseFromParent();
|
||||
@ -789,7 +779,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
||||
// nor is the global.
|
||||
if (AllNonStoreUsesGone) {
|
||||
DEBUG(errs() << " *** GLOBAL NOW DEAD!\n");
|
||||
CleanupConstantGlobalUsers(GV, 0, Context);
|
||||
CleanupConstantGlobalUsers(GV, 0);
|
||||
if (GV->use_empty()) {
|
||||
GV->eraseFromParent();
|
||||
++NumDeleted;
|
||||
@ -801,10 +791,10 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
|
||||
|
||||
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
|
||||
/// instructions that are foldable.
|
||||
static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
|
||||
static void ConstantPropUsersOf(Value *V) {
|
||||
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
|
||||
if (Instruction *I = dyn_cast<Instruction>(*UI++))
|
||||
if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
|
||||
if (Constant *NewC = ConstantFoldInstruction(I)) {
|
||||
I->replaceAllUsesWith(NewC);
|
||||
|
||||
// Advance UI to the next non-I use to avoid invalidating it!
|
||||
@ -824,12 +814,11 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
||||
CallInst *CI,
|
||||
BitCastInst *BCI,
|
||||
Value* NElems,
|
||||
LLVMContext &Context,
|
||||
TargetData* TD) {
|
||||
DEBUG(errs() << "PROMOTING MALLOC GLOBAL: " << *GV
|
||||
<< " CALL = " << *CI << " BCI = " << *BCI << '\n');
|
||||
|
||||
const Type *IntPtrTy = TD->getIntPtrType(Context);
|
||||
const Type *IntPtrTy = TD->getIntPtrType(GV->getContext());
|
||||
|
||||
ConstantInt *NElements = cast<ConstantInt>(NElems);
|
||||
if (NElements->getZExtValue() != 1) {
|
||||
@ -872,10 +861,10 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
||||
// If there is a comparison against null, we will insert a global bool to
|
||||
// keep track of whether the global was initialized yet or not.
|
||||
GlobalVariable *InitBool =
|
||||
new GlobalVariable(Context, Type::getInt1Ty(Context), false,
|
||||
new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
|
||||
GlobalValue::InternalLinkage,
|
||||
ConstantInt::getFalse(Context), GV->getName()+".init",
|
||||
GV->isThreadLocal());
|
||||
ConstantInt::getFalse(GV->getContext()),
|
||||
GV->getName()+".init", GV->isThreadLocal());
|
||||
bool InitBoolUsed = false;
|
||||
|
||||
// Loop over all uses of GV, processing them in turn.
|
||||
@ -894,8 +883,8 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
||||
switch (ICI->getPredicate()) {
|
||||
default: llvm_unreachable("Unknown ICmp Predicate!");
|
||||
case ICmpInst::ICMP_ULT:
|
||||
case ICmpInst::ICMP_SLT:
|
||||
LV = ConstantInt::getFalse(Context); // X < null -> always false
|
||||
case ICmpInst::ICMP_SLT: // X < null -> always false
|
||||
LV = ConstantInt::getFalse(GV->getContext());
|
||||
break;
|
||||
case ICmpInst::ICMP_ULE:
|
||||
case ICmpInst::ICMP_SLE:
|
||||
@ -917,7 +906,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
||||
} else {
|
||||
StoreInst *SI = cast<StoreInst>(GV->use_back());
|
||||
// The global is initialized when the store to it occurs.
|
||||
new StoreInst(ConstantInt::getTrue(Context), InitBool, SI);
|
||||
new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
|
||||
SI->eraseFromParent();
|
||||
}
|
||||
|
||||
@ -938,9 +927,9 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
|
||||
// To further other optimizations, loop over all users of NewGV and try to
|
||||
// constant prop them. This will promote GEP instructions with constant
|
||||
// indices into GEP constant-exprs, which will allow global-opt to hack on it.
|
||||
ConstantPropUsersOf(NewGV, Context);
|
||||
ConstantPropUsersOf(NewGV);
|
||||
if (RepValue != NewGV)
|
||||
ConstantPropUsersOf(RepValue, Context);
|
||||
ConstantPropUsersOf(RepValue);
|
||||
|
||||
return NewGV;
|
||||
}
|
||||
@ -1142,8 +1131,7 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
|
||||
|
||||
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
|
||||
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
|
||||
LLVMContext &Context) {
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
|
||||
std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
|
||||
|
||||
if (FieldNo >= FieldVals.size())
|
||||
@ -1161,7 +1149,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
|
||||
// a new Load of the scalarized global.
|
||||
Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
|
||||
InsertedScalarizedValues,
|
||||
PHIsToRewrite, Context),
|
||||
PHIsToRewrite),
|
||||
LI->getName()+".f"+Twine(FieldNo), LI);
|
||||
} else if (PHINode *PN = dyn_cast<PHINode>(V)) {
|
||||
// PN's type is pointer to struct. Make a new PHI of pointer to struct
|
||||
@ -1185,16 +1173,14 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
|
||||
/// the load, rewrite the derived value to use the HeapSRoA'd load.
|
||||
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
||||
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
|
||||
LLVMContext &Context) {
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
|
||||
// If this is a comparison against null, handle it.
|
||||
if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
|
||||
assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
|
||||
// If we have a setcc of the loaded pointer, we can use a setcc of any
|
||||
// field.
|
||||
Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
|
||||
InsertedScalarizedValues, PHIsToRewrite,
|
||||
Context);
|
||||
InsertedScalarizedValues, PHIsToRewrite);
|
||||
|
||||
Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
|
||||
Constant::getNullValue(NPtr->getType()),
|
||||
@ -1212,8 +1198,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
||||
// Load the pointer for this field.
|
||||
unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
|
||||
Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
|
||||
InsertedScalarizedValues, PHIsToRewrite,
|
||||
Context);
|
||||
InsertedScalarizedValues, PHIsToRewrite);
|
||||
|
||||
// Create the new GEP idx vector.
|
||||
SmallVector<Value*, 8> GEPIdx;
|
||||
@ -1245,8 +1230,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
||||
// users.
|
||||
for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
|
||||
Context);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1256,13 +1240,11 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
|
||||
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
|
||||
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
|
||||
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
|
||||
LLVMContext &Context) {
|
||||
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
|
||||
for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
|
||||
UI != E; ) {
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite,
|
||||
Context);
|
||||
RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
|
||||
}
|
||||
|
||||
if (Load->use_empty()) {
|
||||
@ -1276,7 +1258,6 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
|
||||
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
CallInst *CI, BitCastInst* BCI,
|
||||
Value* NElems,
|
||||
LLVMContext &Context,
|
||||
TargetData *TD) {
|
||||
DEBUG(errs() << "SROA HEAP ALLOC: " << *GV << " MALLOC CALL = " << *CI
|
||||
<< " BITCAST = " << *BCI << '\n');
|
||||
@ -1306,7 +1287,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
GV->isThreadLocal());
|
||||
FieldGlobals.push_back(NGV);
|
||||
|
||||
Value *NMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(Context),
|
||||
Value *NMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(CI->getContext()),
|
||||
FieldTy, NElems,
|
||||
BCI->getName() + ".f" + Twine(FieldNo));
|
||||
FieldMallocs.push_back(NMI);
|
||||
@ -1342,7 +1323,8 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
|
||||
// Create the block to check the first condition. Put all these blocks at the
|
||||
// end of the function as they are unlikely to be executed.
|
||||
BasicBlock *NullPtrBlock = BasicBlock::Create(Context, "malloc_ret_null",
|
||||
BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
|
||||
"malloc_ret_null",
|
||||
OrigBB->getParent());
|
||||
|
||||
// Remove the uncond branch from OrigBB to ContBB, turning it into a cond
|
||||
@ -1357,9 +1339,9 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
|
||||
Constant::getNullValue(GVVal->getType()),
|
||||
"tmp");
|
||||
BasicBlock *FreeBlock = BasicBlock::Create(Context, "free_it",
|
||||
BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
|
||||
OrigBB->getParent());
|
||||
BasicBlock *NextBlock = BasicBlock::Create(Context, "next",
|
||||
BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
|
||||
OrigBB->getParent());
|
||||
Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
|
||||
Cmp, NullPtrBlock);
|
||||
@ -1394,8 +1376,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
Instruction *User = cast<Instruction>(*UI++);
|
||||
|
||||
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
|
||||
RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite,
|
||||
Context);
|
||||
RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
|
||||
continue;
|
||||
}
|
||||
|
||||
@ -1426,7 +1407,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV,
|
||||
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
|
||||
Value *InVal = PN->getIncomingValue(i);
|
||||
InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
|
||||
PHIsToRewrite, Context);
|
||||
PHIsToRewrite);
|
||||
FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
|
||||
}
|
||||
}
|
||||
@ -1465,8 +1446,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CallInst *CI,
BitCastInst *BCI,
Module::global_iterator &GVI,
TargetData *TD,
LLVMContext &Context) {
TargetData *TD) {
// If we can't figure out the type being malloced, then we can't optimize.
const Type *AllocTy = getMallocAllocatedType(CI);
assert(AllocTy);
@ -1499,7 +1479,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// transform the program to use global memory instead of malloc'd memory.
// This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt.
Value *NElems = getMallocArraySize(CI, Context, TD);
Value *NElems = getMallocArraySize(CI, TD);
// We cannot optimize the malloc if we cannot determine malloc array size.
if (NElems) {
if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
@ -1508,7 +1488,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// something.
if (TD &&
NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
GVI = OptimizeGlobalAddressOfMalloc(GV, CI, BCI, NElems, Context, TD);
GVI = OptimizeGlobalAddressOfMalloc(GV, CI, BCI, NElems, TD);
return true;
}

@ -1532,9 +1512,11 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// structs. malloc [100 x struct],1 -> malloc struct, 100
if (const ArrayType *AT =
dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
Value* NumElements = ConstantInt::get(Type::getInt32Ty(Context),
AT->getNumElements());
Value* NewMI = CallInst::CreateMalloc(CI, TD->getIntPtrType(Context),
Value *NumElements =
ConstantInt::get(Type::getInt32Ty(CI->getContext()),
AT->getNumElements());
Value *NewMI = CallInst::CreateMalloc(CI,
TD->getIntPtrType(CI->getContext()),
AllocSTy, NumElements,
BCI->getName());
Value *Cast = new BitCastInst(NewMI, getMallocType(CI), "tmp", CI);
@ -1545,7 +1527,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = extractMallocCallFromBitCast(NewMI);
}

GVI = PerformHeapAllocSRoA(GV, CI, BCI, NElems, Context, TD);
GVI = PerformHeapAllocSRoA(GV, CI, BCI, NElems, TD);
return true;
}
}
@ -1558,7 +1540,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
Module::global_iterator &GVI,
TargetData *TD, LLVMContext &Context) {
TargetData *TD) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();

@ -1574,7 +1556,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context))
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
if (getMallocAllocatedType(CI)) {
@ -1582,8 +1564,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
for (Value::use_iterator UI = CI->use_begin(), E = CI->use_end();
UI != E; )
BCI = dyn_cast<BitCastInst>(cast<Instruction>(*UI++));
if (BCI &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, BCI, GVI, TD, Context))
if (BCI && TryToOptimizeStoreOfMallocToGlobal(GV, CI, BCI, GVI, TD))
return true;
}
}
@ -1596,8 +1577,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
/// two values ever stored into GV are its initializer and OtherVal. See if we
/// can shrink the global into a boolean and select between the two values
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
LLVMContext &Context) {
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
const Type *GVElType = GV->getType()->getElementType();

// If GVElType is already i1, it is already shrunk. If the type of the GV is
@ -1605,7 +1585,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
// between them is very expensive and unlikely to lead to later
// simplification. In these cases, we typically end up with "cond ? v1 : v2"
// where v1 and v2 both require constant pool loads, a big loss.
if (GVElType == Type::getInt1Ty(Context) || GVElType->isFloatingPoint() ||
if (GVElType == Type::getInt1Ty(GV->getContext()) ||
GVElType->isFloatingPoint() ||
isa<PointerType>(GVElType) || isa<VectorType>(GVElType))
return false;

@ -1618,15 +1599,16 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
DEBUG(errs() << " *** SHRINKING TO BOOL: " << *GV);

// Create the new global, initializing it to false.
GlobalVariable *NewGV = new GlobalVariable(Context,
Type::getInt1Ty(Context), false,
GlobalValue::InternalLinkage, ConstantInt::getFalse(Context),
GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
false,
GlobalValue::InternalLinkage,
ConstantInt::getFalse(GV->getContext()),
GV->getName()+".b",
GV->isThreadLocal());
GV->getParent()->getGlobalList().insert(GV, NewGV);

Constant *InitVal = GV->getInitializer();
assert(InitVal->getType() != Type::getInt1Ty(Context) &&
assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
"No reason to shrink to bool!");

// If initialized to zero and storing one into the global, we can use a cast
@ -1643,7 +1625,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
// Only do this if we weren't storing a loaded value.
Value *StoreVal;
if (StoringOther || SI->getOperand(0) == InitVal)
StoreVal = ConstantInt::get(Type::getInt1Ty(Context), StoringOther);
StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
StoringOther);
else {
// Otherwise, we are storing a previously loaded copy. To do this,
// change the copy from copying the original value to just copying the
@ -1758,8 +1741,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,

// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
GV->getContext());
bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());

// If the global is dead now, delete it.
if (GV->use_empty()) {
@ -1774,7 +1756,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);

// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer(), GV->getContext());
CleanupConstantGlobalUsers(GV, GV->getInitializer());

// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@ -1788,8 +1770,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
return true;
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD,
GV->getContext())) {
if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
}
@ -1804,8 +1785,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);

// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer(),
GV->getContext());
CleanupConstantGlobalUsers(GV, GV->getInitializer());

if (GV->use_empty()) {
DEBUG(errs() << " *** Substituting initializer allowed us to "
@ -1822,14 +1802,13 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
getAnalysisIfAvailable<TargetData>(),
GV->getContext()))
getAnalysisIfAvailable<TargetData>()))
return true;

// Otherwise, if the global was not a boolean, we can shrink it to be a
// boolean.
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
if (TryToShrinkGlobalToBoolean(GV, SOVConstant, GV->getContext())) {
if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
++NumShrunkToBool;
return true;
}
@ -1981,11 +1960,10 @@ static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
/// specified array, returning the new global to use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
const std::vector<Function*> &Ctors,
LLVMContext &Context) {
const std::vector<Function*> &Ctors) {
// If we made a change, reassemble the initializer list.
std::vector<Constant*> CSVals;
CSVals.push_back(ConstantInt::get(Type::getInt32Ty(Context), 65535));
CSVals.push_back(ConstantInt::get(Type::getInt32Ty(GCL->getContext()),65535));
CSVals.push_back(0);

// Create the new init list.
@ -1994,12 +1972,14 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
if (Ctors[i]) {
CSVals[1] = Ctors[i];
} else {
const Type *FTy = FunctionType::get(Type::getVoidTy(Context), false);
const Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
false);
const PointerType *PFTy = PointerType::getUnqual(FTy);
CSVals[1] = Constant::getNullValue(PFTy);
CSVals[0] = ConstantInt::get(Type::getInt32Ty(Context), 2147483647);
CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
2147483647);
}
CAList.push_back(ConstantStruct::get(Context, CSVals, false));
CAList.push_back(ConstantStruct::get(GCL->getContext(), CSVals, false));
}

// Create the array initializer.
@ -2015,8 +1995,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}

// Create the new global and insert it next to the existing list.
GlobalVariable *NGV = new GlobalVariable(Context, CA->getType(),
GCL->isConstant(),
GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
GCL->getLinkage(), CA, "",
GCL->isThreadLocal());
GCL->getParent()->getGlobalList().insert(GCL, NGV);
@ -2050,7 +2029,7 @@ static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
/// enough for us to understand. In particular, if it is a cast of something,
/// we punt. We basically just support direct accesses to globals and GEP's of
/// globals. This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
static bool isSimpleEnoughPointerToCommit(Constant *C) {
// Conservatively, avoid aggregate types. This is because we don't
// want to worry about them partially overlapping other stores.
if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
@ -2090,8 +2069,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
/// initializer. This returns 'Init' modified to reflect 'Val' stored into it.
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
ConstantExpr *Addr, unsigned OpNo,
LLVMContext &Context) {
ConstantExpr *Addr, unsigned OpNo) {
// Base case of the recursion.
if (OpNo == Addr->getNumOperands()) {
assert(Val->getType() == Init->getType() && "Type mismatch!");
@ -2120,10 +2098,11 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
unsigned Idx = CU->getZExtValue();
assert(Idx < STy->getNumElements() && "Struct index out of range!");
Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1, Context);
Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);

// Return the modified struct.
return ConstantStruct::get(Context, &Elts[0], Elts.size(), STy->isPacked());
return ConstantStruct::get(Init->getContext(), &Elts[0], Elts.size(),
STy->isPacked());
} else {
ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
const ArrayType *ATy = cast<ArrayType>(Init->getType());
@ -2146,15 +2125,14 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,

assert(CI->getZExtValue() < ATy->getNumElements());
Elts[CI->getZExtValue()] =
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1, Context);
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
return ConstantArray::get(ATy, Elts);
}
}

/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr,
LLVMContext &Context) {
static void CommitValueTo(Constant *Val, Constant *Addr) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
assert(GV->hasInitializer());
GV->setInitializer(Val);
@ -2165,7 +2143,7 @@ static void CommitValueTo(Constant *Val, Constant *Addr,
GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));

Constant *Init = GV->getInitializer();
Init = EvaluateStoreInto(Init, Val, CE, 2, Context);
Init = EvaluateStoreInto(Init, Val, CE, 2);
GV->setInitializer(Init);
}

@ -2173,8 +2151,7 @@ static void CommitValueTo(Constant *Val, Constant *Addr,
/// P after the stores reflected by 'memory' have been performed. If we can't
/// decide, return null.
static Constant *ComputeLoadResult(Constant *P,
const DenseMap<Constant*, Constant*> &Memory,
LLVMContext &Context) {
const DenseMap<Constant*, Constant*> &Memory) {
// If this memory location has been recently stored, use the stored value: it
// is the most up-to-date.
DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
@ -2212,8 +2189,6 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
return false;

LLVMContext &Context = F->getContext();

CallStack.push_back(F);

/// Values - As we compute SSA register values, we store their contents here.
@ -2240,7 +2215,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
if (SI->isVolatile()) return false; // no volatile accesses.
Constant *Ptr = getVal(Values, SI->getOperand(1));
if (!isSimpleEnoughPointerToCommit(Ptr, Context))
if (!isSimpleEnoughPointerToCommit(Ptr))
// If this is too complex for us to commit, reject it.
return false;
Constant *Val = getVal(Values, SI->getOperand(0));
@ -2274,12 +2249,12 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
if (LI->isVolatile()) return false; // no volatile accesses.
InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
MutatedMemory, Context);
MutatedMemory);
if (InstResult == 0) return false; // Could not evaluate load.
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
const Type *Ty = AI->getType()->getElementType();
AllocaTmps.push_back(new GlobalVariable(Context, Ty, false,
AllocaTmps.push_back(new GlobalVariable(Ty, false,
GlobalValue::InternalLinkage,
UndefValue::get(Ty),
AI->getName()));
@ -2417,7 +2392,7 @@ static bool EvaluateStaticConstructor(Function *F) {
<< " stores.\n");
for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
E = MutatedMemory.end(); I != E; ++I)
CommitValueTo(I->second, I->first, F->getContext());
CommitValueTo(I->second, I->first);
}

// At this point, we are done interpreting. If we created any 'alloca'
@ -2474,7 +2449,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {

if (!MadeChange) return false;

GCL = InstallGlobalCtors(GCL, Ctors, GCL->getContext());
GCL = InstallGlobalCtors(GCL, Ctors);
return true;
}

@ -66,7 +66,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...

if (!I->use_empty()) // Don't muck with dead instructions...
if (Constant *C = ConstantFoldInstruction(I, F.getContext())) {
if (Constant *C = ConstantFoldInstruction(I)) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();

@ -12824,7 +12824,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,

// ConstantProp instruction if trivially constant.
if (!Inst->use_empty() && isa<Constant>(Inst->getOperand(0)))
if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
DEBUG(errs() << "IC: ConstFold to: " << *C << " from: "
<< *Inst << '\n');
Inst->replaceAllUsesWith(C);
@ -12846,8 +12846,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
if (!FoldedConstants.insert(CE))
continue;

Constant *NewC =
ConstantFoldConstantExpression(CE, BB->getContext(), TD);
Constant *NewC = ConstantFoldConstantExpression(CE, TD);
if (NewC && NewC != CE) {
*i = NewC;
MadeIRChange = true;
@ -12954,7 +12953,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {

// Instruction isn't dead, see if we can constant propagate it.
if (!I->use_empty() && isa<Constant>(I->getOperand(0)))
if (Constant *C = ConstantFoldInstruction(I, F.getContext(), TD)) {
if (Constant *C = ConstantFoldInstruction(I, TD)) {
DEBUG(errs() << "IC: ConstFold to: " << *C << " from: " << *I << '\n');

// Add operands to the worklist.

@ -1044,7 +1044,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, BasicBlock *PredBB,
BI = NewBB->begin();
for (BasicBlock::iterator E = NewBB->end(); BI != E; ) {
Instruction *Inst = BI++;
if (Constant *C = ConstantFoldInstruction(Inst, BB->getContext(), TD)) {
if (Constant *C = ConstantFoldInstruction(Inst, TD)) {
Inst->replaceAllUsesWith(C);
Inst->eraseFromParent();
continue;

@ -32,7 +32,6 @@
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/LoopInfo.h"
@ -961,7 +960,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
Worklist.pop_back();

// Simple constant folding.
if (Constant *C = ConstantFoldInstruction(I, I->getContext())) {
if (Constant *C = ConstantFoldInstruction(I)) {
ReplaceUsesOfWith(I, C, Worklist, L, LPM);
continue;
}

@ -359,8 +359,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
Instruction *Inst = BI++;
if (isInstructionTriviallyDead(Inst))
Inst->eraseFromParent();
else if (Constant *C = ConstantFoldInstruction(Inst,
Inst->getContext())) {
else if (Constant *C = ConstantFoldInstruction(Inst)) {
Inst->replaceAllUsesWith(C);
Inst->eraseFromParent();
}

@ -322,8 +322,6 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
/// mapping its operands through ValueMap if they are available.
Constant *PruningFunctionCloner::
ConstantFoldMappedInstruction(const Instruction *I) {
LLVMContext &Context = I->getContext();

SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
if (Constant *Op = dyn_cast_or_null<Constant>(MapValue(I->getOperand(i),
@ -334,8 +332,7 @@ ConstantFoldMappedInstruction(const Instruction *I) {

if (const CmpInst *CI = dyn_cast<CmpInst>(I))
return ConstantFoldCompareInstOperands(CI->getPredicate(),
&Ops[0], Ops.size(),
Context, TD);
&Ops[0], Ops.size(), TD);

if (const LoadInst *LI = dyn_cast<LoadInst>(I))
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ops[0]))
@ -346,7 +343,7 @@ ConstantFoldMappedInstruction(const Instruction *I) {
CE);

return ConstantFoldInstOperands(I->getOpcode(), I->getType(), &Ops[0],
Ops.size(), Context, TD);
Ops.size(), TD);
}

/// CloneAndPruneFunctionInto - This works exactly like CloneFunctionInto,

@ -362,8 +362,7 @@ bool llvm::UnrollLoop(Loop *L, unsigned Count, LoopInfo* LI, LPPassManager* LPM)

if (isInstructionTriviallyDead(Inst))
(*BB)->getInstList().erase(Inst);
else if (Constant *C = ConstantFoldInstruction(Inst,
Header->getContext())) {
else if (Constant *C = ConstantFoldInstruction(Inst)) {
Inst->replaceAllUsesWith(C);
(*BB)->getInstList().erase(Inst);
}

@ -1217,7 +1217,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
}

// Check for trivial simplification.
if (Constant *C = ConstantFoldInstruction(N, BB->getContext())) {
if (Constant *C = ConstantFoldInstruction(N)) {
TranslateMap[BBI] = C;
delete N; // Constant folded away, don't need actual inst
} else {

@ -16,7 +16,6 @@
#include "llvm/GlobalVariable.h"
#include "llvm/GlobalAlias.h"
#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/ErrorHandling.h"
@ -95,8 +94,7 @@ void GlobalValue::copyAttributesFrom(const GlobalValue *Src) {
// GlobalVariable Implementation
//===----------------------------------------------------------------------===//

GlobalVariable::GlobalVariable(LLVMContext &Context, const Type *Ty,
bool constant, LinkageTypes Link,
GlobalVariable::GlobalVariable(const Type *Ty, bool constant, LinkageTypes Link,
Constant *InitVal, const Twine &Name,
bool ThreadLocal, unsigned AddressSpace)
: GlobalValue(PointerType::get(Ty, AddressSpace),

@ -31,8 +31,7 @@ using namespace llvm;
//

GlobalVariable *ilist_traits<GlobalVariable>::createSentinel() {
GlobalVariable *Ret = new GlobalVariable(getGlobalContext(),
Type::getInt32Ty(getGlobalContext()),
GlobalVariable *Ret = new GlobalVariable(Type::getInt32Ty(getGlobalContext()),
false, GlobalValue::ExternalLinkage);
// This should not be garbage monitored.
LeakDetector::removeGarbageObject(Ret);
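For illustration only, here is a minimal sketch of what a call site looks like after this change; it is not code from this commit, and it assumes the TargetData-era LLVM C++ API and header paths visible in the hunks above. The point is that each API now recovers the LLVMContext from the values it is handed instead of taking it as an extra parameter.

// Illustrative sketch only: a caller after this change. The context is
// implied by the Instruction, so no LLVMContext argument is passed.
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/Target/TargetData.h"

using namespace llvm;

static bool FoldTrivially(Instruction *I, const TargetData *TD) {
  // Before this commit: ConstantFoldInstruction(I, I->getContext(), TD)
  if (Constant *C = ConstantFoldInstruction(I, TD)) {
    I->replaceAllUsesWith(C);   // rewrite users to the folded constant
    I->eraseFromParent();       // the instruction is now dead
    return true;
  }
  return false;
}

// The GlobalVariable constructor drops its leading LLVMContext the same way;
// per the hunk above, a use would now look roughly like:
//   new GlobalVariable(Ty, /*isConstant=*/false,
//                      GlobalValue::InternalLinkage, Init, "tmp");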