Mirror of https://github.com/RPCS3/llvm-mirror.git (synced 2024-11-26 12:50:30 +00:00)

Get rid of the Pass+Context magic.

llvm-svn: 76702

Parent: 48dffde0d7
Commit: cc287b28c9
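The diff below is one mechanical change applied tree-wide: LLVMContext stops being smuggled around as a nullable pointer (and cached in a protected Pass::Context member filled in by doInitialization -- the "Pass+Context magic" of the title) and is instead passed and returned by reference, fetched on demand from the IR object at hand. A minimal before/after sketch of the calling convention this patch installs; myFoldOld/myFoldNew and the exact includes are illustrative assumptions, not code from the patch:

#include "llvm/BasicBlock.h"     // assumed 2009-era header layout
#include "llvm/Constants.h"
#include "llvm/Instruction.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

// Before: the context travels as a pointer that may be null and must be
// threaded through (or cached on) every consumer.
Constant *myFoldOld(Instruction *I, LLVMContext *Context) {
  return Context->getNullValue(I->getType());
}

// After: the context is a reference recovered from the IR itself
// (Instruction -> BasicBlock -> LLVMContext), so it can never be null
// and no Pass-level caching is needed.
Constant *myFoldNew(Instruction *I) {
  LLVMContext &Context = I->getParent()->getContext();
  return Context.getNullValue(I->getType());
}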
@@ -856,7 +856,7 @@ the loop again and exiting the loop. Any future code is emitted in the
   NamedValues.erase(VarName);
 
   // for expr always returns 0.0.
-  return TheFunction->getContext()->getNullValue(Type::DoubleTy);
+  return TheFunction->getContext().getNullValue(Type::DoubleTy);
 }
 </pre>
 </div>

@@ -1570,7 +1570,7 @@ Value *ForExprAST::Codegen() {
 
 
   // for expr always returns 0.0.
-  return TheFunction->getContext()->getNullValue(Type::DoubleTy);
+  return TheFunction->getContext().getNullValue(Type::DoubleTy);
 }
 
 Function *PrototypeAST::Codegen() {

@@ -1858,7 +1858,7 @@ Value *ForExprAST::Codegen() {
 
 
   // for expr always returns 0.0.
-  return TheFunction->getContext()->getNullValue(Type::DoubleTy);
+  return TheFunction->getContext().getNullValue(Type::DoubleTy);
 }
 
 Value *VarExprAST::Codegen() {

@@ -856,7 +856,7 @@ Value *ForExprAST::Codegen() {
 
 
   // for expr always returns 0.0.
-  return TheFunction->getContext()->getNullValue(Type::DoubleTy);
+  return TheFunction->getContext().getNullValue(Type::DoubleTy);
 }
 
 Value *VarExprAST::Codegen() {
@@ -29,13 +29,13 @@ namespace llvm {
 /// is returned.  Note that this function can only fail when attempting to fold
 /// instructions like loads and stores, which have no constant expression form.
 ///
-Constant *ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
+Constant *ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
                                   const TargetData *TD = 0);
 
 /// ConstantFoldConstantExpression - Attempt to fold the constant expression
 /// using the specified TargetData.  If successful, the constant result is
 /// result is returned, if not, null is returned.
-Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext *Context,
+Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext &Context,
                                          const TargetData *TD = 0);
 
 /// ConstantFoldInstOperands - Attempt to constant fold an instruction with the

@@ -46,7 +46,7 @@ Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext *Context,
 ///
 Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                    Constant*const * Ops, unsigned NumOps,
-                                   LLVMContext *Context,
+                                   LLVMContext &Context,
                                    const TargetData *TD = 0);
 
 /// ConstantFoldCompareInstOperands - Attempt to constant fold a compare

@@ -55,7 +55,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
 ///
 Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
                                           Constant*const * Ops, unsigned NumOps,
-                                          LLVMContext *Context,
+                                          LLVMContext &Context,
                                           const TargetData *TD = 0);
 
 

@@ -63,7 +63,7 @@ Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
 /// getelementptr constantexpr, return the constant value being addressed by the
 /// constant expression, or null if something is funny and we can't decide.
 Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE,
-                                                 LLVMContext *Context);
+                                                 LLVMContext &Context);
 
 /// canConstantFoldCallTo - Return true if its even possible to fold a call to
 /// the specified function.
@@ -37,7 +37,6 @@ public:
 
   // Initialization and finalization hooks.
   virtual bool doInitialization(Loop *L, LPPassManager &LPM) {
-    Context = L->getHeader()->getContext();
     return false;
   }
 
@@ -368,7 +368,7 @@ namespace llvm {
     static char ID; // Pass identification, replacement for typeid
     ScalarEvolution();
 
-    LLVMContext *getContext() const { return Context; }
+    LLVMContext &getContext() const { return F->getContext(); }
 
     /// isSCEVable - Test if values of the given type are analyzable within
     /// the SCEV framework. This primarily includes integer types, and it
@@ -38,8 +38,8 @@ namespace llvm {
     friend struct SCEVVisitor<SCEVExpander, Value*>;
   public:
     explicit SCEVExpander(ScalarEvolution &se)
-      : SE(se), Builder(*se.getContext(),
-                        TargetFolder(se.TD, *se.getContext())) {}
+      : SE(se), Builder(se.getContext(),
+                        TargetFolder(se.TD, se.getContext())) {}
 
     /// clear - Erase the contents of the InsertedExpressions map so that users
     /// trying to expand the same expression into multiple BasicBlocks or

@@ -61,7 +61,7 @@ namespace llvm {
     }
 
   private:
-    LLVMContext *getContext() const { return SE.getContext(); }
+    LLVMContext &getContext() const { return SE.getContext(); }
 
     /// InsertBinop - Insert the specified binary operator, doing a small amount
     /// of work to avoid inserting an obviously redundant operation.
@@ -65,13 +65,13 @@ namespace llvm {
 Value *FindInsertedValue(Value *V,
                          const unsigned *idx_begin,
                          const unsigned *idx_end,
-                         LLVMContext *Context,
+                         LLVMContext &Context,
                          Instruction *InsertBefore = 0);
 
 /// This is a convenience wrapper for finding values indexed by a single index
 /// only.
 inline Value *FindInsertedValue(Value *V, const unsigned Idx,
-                                LLVMContext *Context,
+                                LLVMContext &Context,
                                 Instruction *InsertBefore = 0) {
   const unsigned Idxs[1] = { Idx };
   return FindInsertedValue(V, &Idxs[0], &Idxs[1], Context, InsertBefore);
@@ -88,7 +88,7 @@ private:
 public:
   /// getContext - Get the context in which this basic block lives,
   /// or null if it is not currently attached to a function.
-  LLVMContext *getContext() const;
+  LLVMContext &getContext() const;
 
   /// Instruction iterators...
   typedef InstListType::iterator iterator;
@@ -38,7 +38,6 @@ struct CallGraphSCCPass : public Pass {
   /// doInitialization - This method is called before the SCC's of the program
   /// has been processed, allowing the pass to do initialization as necessary.
   virtual bool doInitialization(CallGraph &CG) {
-    Context = &CG.getModule().getContext();
     return false;
   }
 
@@ -142,19 +142,19 @@ class CCState {
   const TargetMachine &TM;
   const TargetRegisterInfo &TRI;
   SmallVector<CCValAssign, 16> &Locs;
-  LLVMContext *Context;
+  LLVMContext &Context;
 
   unsigned StackOffset;
   SmallVector<uint32_t, 16> UsedRegs;
 public:
   CCState(unsigned CC, bool isVarArg, const TargetMachine &TM,
-          SmallVector<CCValAssign, 16> &locs, LLVMContext *C);
+          SmallVector<CCValAssign, 16> &locs, LLVMContext &C);
 
   void addLoc(const CCValAssign &V) {
     Locs.push_back(V);
   }
 
-  LLVMContext *getContext() const { return Context; }
+  LLVMContext &getContext() const { return Context; }
   const TargetMachine &getTarget() const { return TM; }
   unsigned getCallingConv() const { return CallingConv; }
   bool isVarArg() const { return IsVarArg; }
@@ -60,7 +60,6 @@ protected:
   const TargetData &TD;
   const TargetInstrInfo &TII;
   const TargetLowering &TLI;
-  LLVMContext *Context;
 
 public:
   /// startNewBlock - Set the current block to which generated machine
@@ -129,7 +129,7 @@ public:
 
   /// getContext - Return a pointer to the LLVMContext associated with this
   /// function, or NULL if this function is not bound to a context yet.
-  LLVMContext *getContext() const;
+  LLVMContext &getContext() const;
 
   /// isVarArg - Return true if this function takes a variable number of
   /// arguments.
@@ -645,8 +645,7 @@ public:
     Value *LHS,      ///< The left-hand-side of the expression
     Value *RHS,      ///< The right-hand-side of the expression
     const std::string &NameStr = ""  ///< Name of the instruction
-  ) : CmpInst(InsertBefore->getParent()->getContext()->
-                makeCmpResultType(LHS->getType()),
+  ) : CmpInst(InsertBefore->getContext().makeCmpResultType(LHS->getType()),
               Instruction::ICmp, pred, LHS, RHS, NameStr,
               InsertBefore) {
     assert(pred >= CmpInst::FIRST_ICMP_PREDICATE &&

@@ -667,7 +666,7 @@ public:
     Value *LHS,      ///< The left-hand-side of the expression
     Value *RHS,      ///< The right-hand-side of the expression
     const std::string &NameStr = ""  ///< Name of the instruction
-  ) : CmpInst(InsertAtEnd.getContext()->makeCmpResultType(LHS->getType()),
+  ) : CmpInst(InsertAtEnd.getContext().makeCmpResultType(LHS->getType()),
               Instruction::ICmp, pred, LHS, RHS, NameStr,
               &InsertAtEnd) {
     assert(pred >= CmpInst::FIRST_ICMP_PREDICATE &&

@@ -821,8 +820,7 @@ public:
     Value *LHS,      ///< The left-hand-side of the expression
     Value *RHS,      ///< The right-hand-side of the expression
     const std::string &NameStr = ""  ///< Name of the instruction
-  ) : CmpInst(InsertBefore->getParent()->getContext()->
-                makeCmpResultType(LHS->getType()),
+  ) : CmpInst(InsertBefore->getContext().makeCmpResultType(LHS->getType()),
               Instruction::FCmp, pred, LHS, RHS, NameStr,
               InsertBefore) {
     assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&

@@ -841,7 +839,7 @@ public:
     Value *LHS,      ///< The left-hand-side of the expression
     Value *RHS,      ///< The right-hand-side of the expression
     const std::string &NameStr = ""  ///< Name of the instruction
-  ) : CmpInst(InsertAtEnd.getContext()->makeCmpResultType(LHS->getType()),
+  ) : CmpInst(InsertAtEnd.getContext().makeCmpResultType(LHS->getType()),
               Instruction::FCmp, pred, LHS, RHS, NameStr,
               &InsertAtEnd) {
     assert(pred <= FCmpInst::LAST_FCMP_PREDICATE &&
@@ -48,7 +48,6 @@ class ImmutablePass;
 class PMStack;
 class AnalysisResolver;
 class PMDataManager;
-class LLVMContext;
 
 // AnalysisID - Use the PassInfo to identify a pass...
 typedef const PassInfo* AnalysisID;

@@ -78,9 +77,6 @@ class Pass {
   void operator=(const Pass&);  // DO NOT IMPLEMENT
   Pass(const Pass &);           // DO NOT IMPLEMENT
 
-protected:
-  LLVMContext *Context;
-
 public:
   explicit Pass(intptr_t pid) : Resolver(0), PassID(pid) {
     assert(pid && "pid cannot be 0");

@@ -281,11 +277,8 @@ public:
   /// doInitialization - Virtual method overridden by subclasses to do
   /// any necessary per-module initialization.
   ///
-  virtual bool doInitialization(Module &M) {
-    Context = &M.getContext();
-    return false;
-  }
+  virtual bool doInitialization(Module &M) { return false; }
 
   /// runOnFunction - Virtual method overriden by subclasses to do the
   /// per-function processing of the pass.
   ///

@@ -336,10 +329,7 @@ public:
   /// doInitialization - Virtual method overridden by subclasses to do
   /// any necessary per-module initialization.
   ///
-  virtual bool doInitialization(Module &M) {
-    Context = &M.getContext();
-    return false;
-  }
+  virtual bool doInitialization(Module &M) { return false; }
 
   /// doInitialization - Virtual method overridden by BasicBlockPass subclasses
   /// to do any necessary per-function initialization.
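The Pass.h hunks above are the commit title in action: the protected Context member and the doInitialization overloads that populated it are deleted, so a pass can no longer lean on an ambient, possibly-uninitialized pointer. A sketch of the resulting idiom in a hypothetical pass (MyPass is not from the patch; registration follows the 2009-era static char ID convention):

struct MyPass : public FunctionPass {   // hypothetical example, not in the patch
  static char ID;
  MyPass() : FunctionPass(&ID) {}
  virtual bool runOnFunction(Function &F) {
    // Formerly: the inherited Pass::Context pointer, filled in lazily by
    // doInitialization.  Now: ask the unit being processed directly.
    LLVMContext &Context = F.getContext();
    (void)Context;  // ... use Context.getConstantInt(...), Context.getNullValue(...), etc.
    return false;
  }
};
char MyPass::ID = 0;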
@@ -51,22 +51,22 @@ public:
   IRBuilder(LLVMContext &C) : Context(C), Folder(C) { ClearInsertionPoint(); }
 
   explicit IRBuilder(BasicBlock *TheBB, const T& F)
-    : Context(*TheBB->getParent()->getContext()), Folder(F) {
+    : Context(TheBB->getContext()), Folder(F) {
     SetInsertPoint(TheBB);
   }
 
   explicit IRBuilder(BasicBlock *TheBB)
-    : Context(*TheBB->getParent()->getContext()), Folder(Context) {
+    : Context(TheBB->getContext()), Folder(Context) {
     SetInsertPoint(TheBB);
   }
 
   IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP, const T& F)
-    : Context(*TheBB->getParent()->getContext()), Folder(F) {
+    : Context(TheBB->getContext()), Folder(F) {
     SetInsertPoint(TheBB, IP);
   }
 
   IRBuilder(BasicBlock *TheBB, BasicBlock::iterator IP)
-    : Context(*TheBB->getParent()->getContext()), Folder(Context) {
+    : Context(TheBB->getContext()), Folder(Context) {
     SetInsertPoint(TheBB, IP);
   }
 
@@ -35,7 +35,7 @@ class TargetFolder {
   /// Fold - Fold the constant using target specific information.
   Constant *Fold(Constant *C) const {
     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
-      if (Constant *CF = ConstantFoldConstantExpression(CE, &Context, TD))
+      if (Constant *CF = ConstantFoldConstantExpression(CE, Context, TD))
         return CF;
     return C;
   }
@@ -40,7 +40,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
 ///
 void PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
                      DominatorTree &DT, DominanceFrontier &DF,
-                     LLVMContext *Context,
+                     LLVMContext &Context,
                      AliasSetTracker *AST = 0);
 
 } // End llvm namespace
@@ -23,7 +23,7 @@ namespace llvm {
   class LLVMContext;
   typedef DenseMap<const Value *, Value *> ValueMapTy;
 
-  Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext *Context);
+  Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext &Context);
   void RemapInstruction(Instruction *I, ValueMapTy &VM);
 } // End llvm namespace
 
@@ -40,6 +40,7 @@ typedef StringMapEntry<Value*> ValueName;
 class raw_ostream;
 class AssemblyAnnotationWriter;
 class ValueHandleBase;
+class LLVMContext;
 
 //===----------------------------------------------------------------------===//
 //                                 Value Class

@@ -101,6 +102,9 @@ public:
   ///
   inline const Type *getType() const { return VTy; }
 
+  /// All values hold a context through their type.
+  LLVMContext &getContext() const;
+
   // All values can potentially be named...
   inline bool hasName() const { return Name != 0; }
   ValueName *getValueName() const { return Name; }
@@ -309,7 +309,7 @@ BasicAliasAnalysis::getModRefInfo(CallSite CS1, CallSite CS2) {
 AliasAnalysis::AliasResult
 BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
                           const Value *V2, unsigned V2Size) {
-  Context = &V1->getType()->getContext();
+  LLVMContext &Context = V1->getType()->getContext();
 
   // Strip off any constant expression casts if they exist
   if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(V1))

@@ -395,13 +395,13 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
   // the base pointers.
   while (isGEP(GEP1->getOperand(0)) &&
          GEP1->getOperand(1) ==
-         Context->getNullValue(GEP1->getOperand(1)->getType()))
+         Context.getNullValue(GEP1->getOperand(1)->getType()))
     GEP1 = cast<User>(GEP1->getOperand(0));
   const Value *BasePtr1 = GEP1->getOperand(0);
 
   while (isGEP(GEP2->getOperand(0)) &&
          GEP2->getOperand(1) ==
-         Context->getNullValue(GEP2->getOperand(1)->getType()))
+         Context.getNullValue(GEP2->getOperand(1)->getType()))
     GEP2 = cast<User>(GEP2->getOperand(0));
   const Value *BasePtr2 = GEP2->getOperand(0);

@@ -481,7 +481,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
     for (unsigned i = 0; i != GEPOperands.size(); ++i)
       if (!isa<ConstantInt>(GEPOperands[i]))
         GEPOperands[i] =
-          Context->getNullValue(GEPOperands[i]->getType());
+          Context.getNullValue(GEPOperands[i]->getType());
     int64_t Offset =
       getTargetData().getIndexedOffset(BasePtr->getType(),
                                        &GEPOperands[0],

@@ -499,16 +499,16 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
 
 // This function is used to determine if the indices of two GEP instructions are
 // equal. V1 and V2 are the indices.
-static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext *Context) {
+static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext &Context) {
   if (V1->getType() == V2->getType())
     return V1 == V2;
   if (Constant *C1 = dyn_cast<Constant>(V1))
     if (Constant *C2 = dyn_cast<Constant>(V2)) {
       // Sign extend the constants to long types, if necessary
       if (C1->getType() != Type::Int64Ty)
-        C1 = Context->getConstantExprSExt(C1, Type::Int64Ty);
+        C1 = Context.getConstantExprSExt(C1, Type::Int64Ty);
       if (C2->getType() != Type::Int64Ty)
-        C2 = Context->getConstantExprSExt(C2, Type::Int64Ty);
+        C2 = Context.getConstantExprSExt(C2, Type::Int64Ty);
       return C1 == C2;
     }
   return false;

@@ -529,7 +529,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
 
   const PointerType *GEPPointerTy = cast<PointerType>(BasePtr1Ty);
 
-  Context = &GEPPointerTy->getContext();
+  LLVMContext &Context = GEPPointerTy->getContext();
 
   // Find the (possibly empty) initial sequence of equal values... which are not
   // necessarily constants.

@@ -604,9 +604,9 @@ BasicAliasAnalysis::CheckGEPInstructions(
       if (G1OC->getType() != G2OC->getType()) {
         // Sign extend both operands to long.
         if (G1OC->getType() != Type::Int64Ty)
-          G1OC = Context->getConstantExprSExt(G1OC, Type::Int64Ty);
+          G1OC = Context.getConstantExprSExt(G1OC, Type::Int64Ty);
         if (G2OC->getType() != Type::Int64Ty)
-          G2OC = Context->getConstantExprSExt(G2OC, Type::Int64Ty);
+          G2OC = Context.getConstantExprSExt(G2OC, Type::Int64Ty);
         GEP1Ops[FirstConstantOper] = G1OC;
         GEP2Ops[FirstConstantOper] = G2OC;
       }

@@ -693,7 +693,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
     // TargetData::getIndexedOffset.
     for (i = 0; i != MaxOperands; ++i)
       if (!isa<ConstantInt>(GEP1Ops[i]))
-        GEP1Ops[i] = Context->getNullValue(GEP1Ops[i]->getType());
+        GEP1Ops[i] = Context.getNullValue(GEP1Ops[i]->getType());
     // Okay, now get the offset.  This is the relative offset for the full
     // instruction.
     const TargetData &TD = getTargetData();

@@ -738,7 +738,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
   const Type *ZeroIdxTy = GEPPointerTy;
   for (unsigned i = 0; i != FirstConstantOper; ++i) {
     if (!isa<StructType>(ZeroIdxTy))
-      GEP1Ops[i] = GEP2Ops[i] = Context->getNullValue(Type::Int32Ty);
+      GEP1Ops[i] = GEP2Ops[i] = Context.getNullValue(Type::Int32Ty);
 
     if (const CompositeType *CT = dyn_cast<CompositeType>(ZeroIdxTy))
       ZeroIdxTy = CT->getTypeAtIndex(GEP1Ops[i]);

@@ -753,7 +753,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
     // If they are equal, use a zero index...
     if (Op1 == Op2 && BasePtr1Ty == BasePtr2Ty) {
       if (!isa<ConstantInt>(Op1))
-        GEP1Ops[i] = GEP2Ops[i] = Context->getNullValue(Op1->getType());
+        GEP1Ops[i] = GEP2Ops[i] = Context.getNullValue(Op1->getType());
       // Otherwise, just keep the constants we have.
     } else {
       if (Op1) {

@@ -780,10 +780,10 @@ BasicAliasAnalysis::CheckGEPInstructions(
           //
           if (const ArrayType *AT = dyn_cast<ArrayType>(BasePtr1Ty))
             GEP1Ops[i] =
-              Context->getConstantInt(Type::Int64Ty,AT->getNumElements()-1);
+              Context.getConstantInt(Type::Int64Ty,AT->getNumElements()-1);
           else if (const VectorType *VT = dyn_cast<VectorType>(BasePtr1Ty))
             GEP1Ops[i] =
-              Context->getConstantInt(Type::Int64Ty,VT->getNumElements()-1);
+              Context.getConstantInt(Type::Int64Ty,VT->getNumElements()-1);
         }
       }
 

@@ -798,7 +798,7 @@ BasicAliasAnalysis::CheckGEPInstructions(
           return MayAlias;  // Be conservative with out-of-range accesses
         }
       } else {  // Conservatively assume the minimum value for this index
-        GEP2Ops[i] = Context->getNullValue(Op2->getType());
+        GEP2Ops[i] = Context.getNullValue(Op2->getType());
       }
     }
   }
@@ -95,7 +95,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
 /// otherwise TD is null.
 static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
                                            Constant *Op1, const TargetData *TD,
-                                           LLVMContext *Context){
+                                           LLVMContext &Context){
   // SROA
 
   // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.

@@ -113,7 +113,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
     if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, *TD) &&
         GV1 == GV2) {
       // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
-      return Context->getConstantInt(Op0->getType(), Offs1-Offs2);
+      return Context.getConstantInt(Op0->getType(), Offs1-Offs2);
     }
   }
 

@@ -124,7 +124,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
 /// constant expression, do so.
 static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
                                          const Type *ResultTy,
-                                         LLVMContext *Context,
+                                         LLVMContext &Context,
                                          const TargetData *TD) {
   Constant *Ptr = Ops[0];
   if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())

@@ -151,14 +151,14 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
 
   uint64_t Offset = TD->getIndexedOffset(Ptr->getType(),
                                          (Value**)Ops+1, NumOps-1);
-  Constant *C = Context->getConstantInt(TD->getIntPtrType(), Offset+BasePtr);
-  return Context->getConstantExprIntToPtr(C, ResultTy);
+  Constant *C = Context.getConstantInt(TD->getIntPtrType(), Offset+BasePtr);
+  return Context.getConstantExprIntToPtr(C, ResultTy);
 }
 
 /// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
 /// targetdata.  Return 0 if unfoldable.
 static Constant *FoldBitCast(Constant *C, const Type *DestTy,
-                             const TargetData &TD, LLVMContext *Context) {
+                             const TargetData &TD, LLVMContext &Context) {
   // If this is a bitcast from constant vector -> vector, fold it.
   if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
     if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {

@@ -184,24 +184,24 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
       if (DstEltTy->isFloatingPoint()) {
         // Fold to an vector of integers with same size as our FP type.
         unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
-        const Type *DestIVTy = Context->getVectorType(
-                                 Context->getIntegerType(FPWidth), NumDstElt);
+        const Type *DestIVTy = Context.getVectorType(
+                                 Context.getIntegerType(FPWidth), NumDstElt);
         // Recursively handle this integer conversion, if possible.
         C = FoldBitCast(C, DestIVTy, TD, Context);
         if (!C) return 0;
 
         // Finally, VMCore can handle this now that #elts line up.
-        return Context->getConstantExprBitCast(C, DestTy);
+        return Context.getConstantExprBitCast(C, DestTy);
       }
 
       // Okay, we know the destination is integer, if the input is FP, convert
       // it to integer first.
       if (SrcEltTy->isFloatingPoint()) {
         unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
-        const Type *SrcIVTy = Context->getVectorType(
-                                Context->getIntegerType(FPWidth), NumSrcElt);
+        const Type *SrcIVTy = Context.getVectorType(
+                                Context.getIntegerType(FPWidth), NumSrcElt);
         // Ask VMCore to do the conversion now that #elts line up.
-        C = Context->getConstantExprBitCast(C, SrcIVTy);
+        C = Context.getConstantExprBitCast(C, SrcIVTy);
         CV = dyn_cast<ConstantVector>(C);
         if (!CV) return 0;  // If VMCore wasn't able to fold it, bail out.
       }

@@ -215,7 +215,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
       SmallVector<Constant*, 32> Result;
       if (NumDstElt < NumSrcElt) {
         // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
-        Constant *Zero = Context->getNullValue(DstEltTy);
+        Constant *Zero = Context.getNullValue(DstEltTy);
         unsigned Ratio = NumSrcElt/NumDstElt;
         unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
         unsigned SrcElt = 0;

@@ -228,15 +228,15 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
             if (!Src) return 0;  // Reject constantexpr elements.
 
             // Zero extend the element to the right size.
-            Src = Context->getConstantExprZExt(Src, Elt->getType());
+            Src = Context.getConstantExprZExt(Src, Elt->getType());
 
             // Shift it to the right place, depending on endianness.
-            Src = Context->getConstantExprShl(Src,
-                    Context->getConstantInt(Src->getType(), ShiftAmt));
+            Src = Context.getConstantExprShl(Src,
+                    Context.getConstantInt(Src->getType(), ShiftAmt));
             ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;
 
             // Mix it in.
-            Elt = Context->getConstantExprOr(Elt, Src);
+            Elt = Context.getConstantExprOr(Elt, Src);
           }
           Result.push_back(Elt);
         }

@@ -254,17 +254,17 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
           for (unsigned j = 0; j != Ratio; ++j) {
             // Shift the piece of the value into the right place, depending on
             // endianness.
-            Constant *Elt = Context->getConstantExprLShr(Src,
-                              Context->getConstantInt(Src->getType(), ShiftAmt));
+            Constant *Elt = Context.getConstantExprLShr(Src,
+                              Context.getConstantInt(Src->getType(), ShiftAmt));
             ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;
 
             // Truncate and remember this piece.
-            Result.push_back(Context->getConstantExprTrunc(Elt, DstEltTy));
+            Result.push_back(Context.getConstantExprTrunc(Elt, DstEltTy));
           }
         }
       }
 
-      return Context->getConstantVector(Result.data(), Result.size());
+      return Context.getConstantVector(Result.data(), Result.size());
     }
   }
 
@@ -282,11 +282,11 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
 /// is returned.  Note that this function can only fail when attempting to fold
 /// instructions like loads and stores, which have no constant expression form.
 ///
-Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
+Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext &Context,
                                         const TargetData *TD) {
   if (PHINode *PN = dyn_cast<PHINode>(I)) {
     if (PN->getNumIncomingValues() == 0)
-      return Context->getUndef(PN->getType());
+      return Context.getUndef(PN->getType());
 
     Constant *Result = dyn_cast<Constant>(PN->getIncomingValue(0));
     if (Result == 0) return 0;

@@ -322,7 +322,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
 /// using the specified TargetData.  If successful, the constant result is
 /// result is returned, if not, null is returned.
 Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
-                                               LLVMContext *Context,
+                                               LLVMContext &Context,
                                                const TargetData *TD) {
   SmallVector<Constant*, 8> Ops;
   for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)

@@ -345,7 +345,7 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
 ///
 Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                          Constant* const* Ops, unsigned NumOps,
-                                         LLVMContext *Context,
+                                         LLVMContext &Context,
                                          const TargetData *TD) {
   // Handle easy binops first.
   if (Instruction::isBinaryOp(Opcode)) {

@@ -354,7 +354,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
                                              Context))
         return C;
 
-    return Context->getConstantExpr(Opcode, Ops[0], Ops[1]);
+    return Context.getConstantExpr(Opcode, Ops[0], Ops[1]);
   }
 
   switch (Opcode) {

@@ -376,15 +376,15 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
         unsigned InWidth = Input->getType()->getScalarSizeInBits();
         if (TD->getPointerSizeInBits() < InWidth) {
           Constant *Mask =
-            Context->getConstantInt(APInt::getLowBitsSet(InWidth,
+            Context.getConstantInt(APInt::getLowBitsSet(InWidth,
                                                   TD->getPointerSizeInBits()));
-          Input = Context->getConstantExprAnd(Input, Mask);
+          Input = Context.getConstantExprAnd(Input, Mask);
         }
         // Do a zext or trunc to get to the dest size.
-        return Context->getConstantExprIntegerCast(Input, DestTy, false);
+        return Context.getConstantExprIntegerCast(Input, DestTy, false);
       }
     }
-    return Context->getConstantExprCast(Opcode, Ops[0], DestTy);
+    return Context.getConstantExprCast(Opcode, Ops[0], DestTy);
   case Instruction::IntToPtr:
     // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
     // the int size is >= the ptr size.  This requires knowing the width of a

@@ -396,7 +396,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
       if (CE->getOpcode() == Instruction::PtrToInt) {
         Constant *Input = CE->getOperand(0);
         Constant *C = FoldBitCast(Input, DestTy, *TD, Context);
-        return C ? C : Context->getConstantExprBitCast(Input, DestTy);
+        return C ? C : Context.getConstantExprBitCast(Input, DestTy);
       }
       // If there's a constant offset added to the integer value before
       // it is casted back to a pointer, see if the expression can be

@@ -419,18 +419,18 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
               if (ElemIdx.ult(APInt(ElemIdx.getBitWidth(),
                                     AT->getNumElements()))) {
                 Constant *Index[] = {
-                  Context->getNullValue(CE->getType()),
-                  Context->getConstantInt(ElemIdx)
+                  Context.getNullValue(CE->getType()),
+                  Context.getConstantInt(ElemIdx)
                 };
                 return
-                  Context->getConstantExprGetElementPtr(GV, &Index[0], 2);
+                  Context.getConstantExprGetElementPtr(GV, &Index[0], 2);
               }
             }
           }
         }
       }
     }
-    return Context->getConstantExprCast(Opcode, Ops[0], DestTy);
+    return Context.getConstantExprCast(Opcode, Ops[0], DestTy);
   case Instruction::Trunc:
   case Instruction::ZExt:
   case Instruction::SExt:

@@ -440,25 +440,25 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
   case Instruction::SIToFP:
   case Instruction::FPToUI:
  case Instruction::FPToSI:
-    return Context->getConstantExprCast(Opcode, Ops[0], DestTy);
+    return Context.getConstantExprCast(Opcode, Ops[0], DestTy);
   case Instruction::BitCast:
     if (TD)
       if (Constant *C = FoldBitCast(Ops[0], DestTy, *TD, Context))
        return C;
-    return Context->getConstantExprBitCast(Ops[0], DestTy);
+    return Context.getConstantExprBitCast(Ops[0], DestTy);
   case Instruction::Select:
-    return Context->getConstantExprSelect(Ops[0], Ops[1], Ops[2]);
+    return Context.getConstantExprSelect(Ops[0], Ops[1], Ops[2]);
   case Instruction::ExtractElement:
-    return Context->getConstantExprExtractElement(Ops[0], Ops[1]);
+    return Context.getConstantExprExtractElement(Ops[0], Ops[1]);
   case Instruction::InsertElement:
-    return Context->getConstantExprInsertElement(Ops[0], Ops[1], Ops[2]);
+    return Context.getConstantExprInsertElement(Ops[0], Ops[1], Ops[2]);
   case Instruction::ShuffleVector:
-    return Context->getConstantExprShuffleVector(Ops[0], Ops[1], Ops[2]);
+    return Context.getConstantExprShuffleVector(Ops[0], Ops[1], Ops[2]);
   case Instruction::GetElementPtr:
    if (Constant *C = SymbolicallyEvaluateGEP(Ops, NumOps, DestTy, Context, TD))
       return C;
 
-    return Context->getConstantExprGetElementPtr(Ops[0], Ops+1, NumOps-1);
+    return Context.getConstantExprGetElementPtr(Ops[0], Ops+1, NumOps-1);
   }
 }
 
@@ -469,7 +469,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
 Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                 Constant*const * Ops,
                                                 unsigned NumOps,
-                                                LLVMContext *Context,
+                                                LLVMContext &Context,
                                                 const TargetData *TD) {
   // fold: icmp (inttoptr x), null -> icmp x, 0
   // fold: icmp (ptrtoint x), 0 -> icmp x, null

@@ -484,9 +484,9 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
       if (CE0->getOpcode() == Instruction::IntToPtr) {
         // Convert the integer value to the right size to ensure we get the
         // proper extension or truncation.
-        Constant *C = Context->getConstantExprIntegerCast(CE0->getOperand(0),
+        Constant *C = Context.getConstantExprIntegerCast(CE0->getOperand(0),
                                                           IntPtrTy, false);
-        Constant *NewOps[] = { C, Context->getNullValue(C->getType()) };
+        Constant *NewOps[] = { C, Context.getNullValue(C->getType()) };
         return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                Context, TD);
       }

@@ -496,7 +496,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
       if (CE0->getOpcode() == Instruction::PtrToInt &&
           CE0->getType() == IntPtrTy) {
         Constant *C = CE0->getOperand(0);
-        Constant *NewOps[] = { C, Context->getNullValue(C->getType()) };
+        Constant *NewOps[] = { C, Context.getNullValue(C->getType()) };
         // FIXME!
         return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,
                                                Context, TD);

@@ -510,9 +510,9 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
         if (CE0->getOpcode() == Instruction::IntToPtr) {
           // Convert the integer value to the right size to ensure we get the
           // proper extension or truncation.
-          Constant *C0 = Context->getConstantExprIntegerCast(CE0->getOperand(0),
+          Constant *C0 = Context.getConstantExprIntegerCast(CE0->getOperand(0),
                                                              IntPtrTy, false);
-          Constant *C1 = Context->getConstantExprIntegerCast(CE1->getOperand(0),
+          Constant *C1 = Context.getConstantExprIntegerCast(CE1->getOperand(0),
                                                              IntPtrTy, false);
           Constant *NewOps[] = { C0, C1 };
           return ConstantFoldCompareInstOperands(Predicate, NewOps, 2,

@@ -533,7 +533,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
       }
     }
   }
-  return Context->getConstantExprCompare(Predicate, Ops[0], Ops[1]);
+  return Context.getConstantExprCompare(Predicate, Ops[0], Ops[1]);
 }
 
 
@@ -542,8 +542,8 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
 /// constant expression, or null if something is funny and we can't decide.
 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                        ConstantExpr *CE,
-                                                       LLVMContext *Context) {
-  if (CE->getOperand(1) != Context->getNullValue(CE->getOperand(1)->getType()))
+                                                       LLVMContext &Context) {
+  if (CE->getOperand(1) != Context.getNullValue(CE->getOperand(1)->getType()))
     return 0;  // Do not allow stepping over the value!
 
   // Loop over all of the operands, tracking down which value we are

@@ -558,9 +558,9 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
       if (ConstantStruct *CS = dyn_cast<ConstantStruct>(C)) {
         C = CS->getOperand(El);
       } else if (isa<ConstantAggregateZero>(C)) {
-        C = Context->getNullValue(STy->getElementType(El));
+        C = Context.getNullValue(STy->getElementType(El));
       } else if (isa<UndefValue>(C)) {
-        C = Context->getUndef(STy->getElementType(El));
+        C = Context.getUndef(STy->getElementType(El));
       } else {
         return 0;
       }

@@ -571,9 +571,9 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
       if (ConstantArray *CA = dyn_cast<ConstantArray>(C))
         C = CA->getOperand(CI->getZExtValue());
       else if (isa<ConstantAggregateZero>(C))
-        C = Context->getNullValue(ATy->getElementType());
+        C = Context.getNullValue(ATy->getElementType());
       else if (isa<UndefValue>(C))
-        C = Context->getUndef(ATy->getElementType());
+        C = Context.getUndef(ATy->getElementType());
       else
         return 0;
     } else if (const VectorType *PTy = dyn_cast<VectorType>(*I)) {

@@ -582,9 +582,9 @@ Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
       if (ConstantVector *CP = dyn_cast<ConstantVector>(C))
         C = CP->getOperand(CI->getZExtValue());
       else if (isa<ConstantAggregateZero>(C))
-        C = Context->getNullValue(PTy->getElementType());
+        C = Context.getNullValue(PTy->getElementType());
       else if (isa<UndefValue>(C))
-        C = Context->getUndef(PTy->getElementType());
+        C = Context.getUndef(PTy->getElementType());
       else
         return 0;
     } else {
@@ -679,7 +679,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
 }
 
 static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
-                                const Type *Ty, LLVMContext *Context) {
+                                const Type *Ty, LLVMContext &Context) {
   errno = 0;
   V = NativeFP(V);
   if (errno != 0) {

@@ -688,9 +688,9 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
   }
 
   if (Ty == Type::FloatTy)
-    return Context->getConstantFP(APFloat((float)V));
+    return Context.getConstantFP(APFloat((float)V));
   if (Ty == Type::DoubleTy)
-    return Context->getConstantFP(APFloat(V));
+    return Context.getConstantFP(APFloat(V));
   llvm_unreachable("Can only constant fold float/double");
   return 0; // dummy return to suppress warning
 }

@@ -698,7 +698,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
 static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
                                       double V, double W,
                                       const Type *Ty,
-                                      LLVMContext *Context) {
+                                      LLVMContext &Context) {
   errno = 0;
   V = NativeFP(V, W);
   if (errno != 0) {

@@ -707,9 +707,9 @@ static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
   }
 
   if (Ty == Type::FloatTy)
-    return Context->getConstantFP(APFloat((float)V));
+    return Context.getConstantFP(APFloat((float)V));
   if (Ty == Type::DoubleTy)
-    return Context->getConstantFP(APFloat(V));
+    return Context.getConstantFP(APFloat(V));
   llvm_unreachable("Can only constant fold float/double");
   return 0; // dummy return to suppress warning
 }

@@ -721,7 +721,7 @@ Constant *
 llvm::ConstantFoldCall(Function *F,
                        Constant* const* Operands, unsigned NumOperands) {
   if (!F->hasName()) return 0;
-  LLVMContext *Context = F->getContext();
+  LLVMContext &Context = F->getContext();
   const char *Str = F->getNameStart();
   unsigned Len = F->getNameLen();
 

@@ -775,7 +775,7 @@ llvm::ConstantFoldCall(Function *F,
         if (V >= -0.0)
           return ConstantFoldFP(sqrt, V, Ty, Context);
         else // Undefined
-          return Context->getNullValue(Ty);
+          return Context.getNullValue(Ty);
       }
       break;
     case 's':

@@ -801,13 +801,13 @@ llvm::ConstantFoldCall(Function *F,
       }
     } else if (ConstantInt *Op = dyn_cast<ConstantInt>(Operands[0])) {
       if (Len > 11 && !memcmp(Str, "llvm.bswap", 10))
-        return Context->getConstantInt(Op->getValue().byteSwap());
+        return Context.getConstantInt(Op->getValue().byteSwap());
       else if (Len > 11 && !memcmp(Str, "llvm.ctpop", 10))
-        return Context->getConstantInt(Ty, Op->getValue().countPopulation());
+        return Context.getConstantInt(Ty, Op->getValue().countPopulation());
      else if (Len > 10 && !memcmp(Str, "llvm.cttz", 9))
-        return Context->getConstantInt(Ty, Op->getValue().countTrailingZeros());
+        return Context.getConstantInt(Ty, Op->getValue().countTrailingZeros());
       else if (Len > 10 && !memcmp(Str, "llvm.ctlz", 9))
-        return Context->getConstantInt(Ty, Op->getValue().countLeadingZeros());
+        return Context.getConstantInt(Ty, Op->getValue().countLeadingZeros());
     }
   } else if (NumOperands == 2) {
     if (ConstantFP *Op1 = dyn_cast<ConstantFP>(Operands[0])) {

@@ -830,10 +830,10 @@ llvm::ConstantFoldCall(Function *F,
       }
     } else if (ConstantInt *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
       if (!strcmp(Str, "llvm.powi.f32")) {
-        return Context->getConstantFP(APFloat((float)std::pow((float)Op1V,
+        return Context.getConstantFP(APFloat((float)std::pow((float)Op1V,
                                               (int)Op2C->getZExtValue())));
       } else if (!strcmp(Str, "llvm.powi.f64")) {
-        return Context->getConstantFP(APFloat((double)std::pow((double)Op1V,
+        return Context.getConstantFP(APFloat((double)std::pow((double)Op1V,
                                               (int)Op2C->getZExtValue())));
       }
     }
 
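Every ConstantFolding entry point above now takes LLVMContext&. What that means at an arbitrary call site, sketched as comments (not a hunk from this patch; I and TD stand for an Instruction* and TargetData* already in scope):

  // Before: callers threaded a LLVMContext* through (often Pass::Context):
  //   Constant *C = ConstantFoldInstruction(I, Context, TD);   // Context: LLVMContext*
  // After: the reference is recovered from the IR at the point of use:
  //   Constant *C = ConstantFoldInstruction(I, I->getParent()->getContext(), TD);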
@@ -693,7 +693,7 @@ void Andersens::getMustAliases(Value *P, std::vector<Value*> &RetVals) {
       // If the object in the points-to set is the null object, then the null
       // pointer is a must alias.
       if (Pointee == &GraphNodes[NullObject])
-        RetVals.push_back(Context->getNullValue(P->getType()));
+        RetVals.push_back(P->getContext().getNullValue(P->getType()));
     }
   }
   AliasAnalysis::getMustAliases(P, RetVals);
@@ -42,6 +42,8 @@ ConstantRange LoopVR::getRange(const SCEV *S, const SCEV *T, ScalarEvolution &SE
 
   if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
     return ConstantRange(C->getValue()->getValue());
 
+  LLVMContext &Context = SE.getContext();
+
   ConstantRange FullSet(cast<IntegerType>(S->getType())->getBitWidth(), true);
 

@@ -73,8 +75,8 @@ ConstantRange LoopVR::getRange(const SCEV *S, const SCEV *T, ScalarEvolution &SE
   ConstantRange X = getRange(Mul->getOperand(0), T, SE);
   if (X.isFullSet()) return FullSet;
 
-  const IntegerType *Ty = Context->getIntegerType(X.getBitWidth());
-  const IntegerType *ExTy = Context->getIntegerType(X.getBitWidth() *
+  const IntegerType *Ty = Context.getIntegerType(X.getBitWidth());
+  const IntegerType *ExTy = Context.getIntegerType(X.getBitWidth() *
                                                     Mul->getNumOperands());
   ConstantRange XExt = X.zeroExtend(ExTy->getBitWidth());
 
@@ -192,13 +192,13 @@ const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
 }
 
 const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
-  return getConstant(Context->getConstantInt(Val));
+  return getConstant(getContext().getConstantInt(Val));
 }
 
 const SCEV *
 ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
   return getConstant(
-    Context->getConstantInt(cast<IntegerType>(Ty), V, isSigned));
+    getContext().getConstantInt(cast<IntegerType>(Ty), V, isSigned));
 }
 
 const Type *SCEVConstant::getType() const { return V->getType(); }

@@ -1518,7 +1518,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
     ++Idx;
     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
       // We found two constants, fold them together!
-      ConstantInt *Fold = Context->getConstantInt(LHSC->getValue()->getValue() *
+      ConstantInt *Fold = getContext().getConstantInt(LHSC->getValue()->getValue() *
                                                   RHSC->getValue()->getValue());
       Ops[0] = getConstant(Fold);
       Ops.erase(Ops.begin()+1);  // Erase the folded element

@@ -1740,7 +1740,7 @@ const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
     if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
       Constant *LHSCV = LHSC->getValue();
       Constant *RHSCV = RHSC->getValue();
-      return getConstant(cast<ConstantInt>(Context->getConstantExprUDiv(LHSCV,
+      return getConstant(cast<ConstantInt>(getContext().getConstantExprUDiv(LHSCV,
                                                                          RHSCV)));
     }
   }

@@ -1869,7 +1869,7 @@ ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   assert(Idx < Ops.size());
   while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
     // We found two constants, fold them together!
-    ConstantInt *Fold = Context->getConstantInt(
+    ConstantInt *Fold = getContext().getConstantInt(
                               APIntOps::smax(LHSC->getValue()->getValue(),
                                              RHSC->getValue()->getValue()));
     Ops[0] = getConstant(Fold);

@@ -1966,7 +1966,7 @@ ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
   assert(Idx < Ops.size());
   while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
     // We found two constants, fold them together!
-    ConstantInt *Fold = Context->getConstantInt(
+    ConstantInt *Fold = getContext().getConstantInt(
                               APIntOps::umax(LHSC->getValue()->getValue(),
                                              RHSC->getValue()->getValue()));
     Ops[0] = getConstant(Fold);

@@ -2133,7 +2133,7 @@ const SCEV *ScalarEvolution::getSCEV(Value *V) {
 /// specified signed integer value and return a SCEV for the constant.
 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
   const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
-  return getConstant(Context->getConstantInt(ITy, Val));
+  return getConstant(getContext().getConstantInt(ITy, Val));
 }
 
 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V

@@ -2141,24 +2141,24 @@ const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
     return getConstant(
-               cast<ConstantInt>(Context->getConstantExprNeg(VC->getValue())));
+               cast<ConstantInt>(getContext().getConstantExprNeg(VC->getValue())));
 
   const Type *Ty = V->getType();
   Ty = getEffectiveSCEVType(Ty);
   return getMulExpr(V,
-                  getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty))));
+                  getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty))));
 }
 
 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
   if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
     return getConstant(
-               cast<ConstantInt>(Context->getConstantExprNot(VC->getValue())));
+               cast<ConstantInt>(getContext().getConstantExprNot(VC->getValue())));
 
   const Type *Ty = V->getType();
   Ty = getEffectiveSCEVType(Ty);
   const SCEV *AllOnes =
-                   getConstant(cast<ConstantInt>(Context->getAllOnesValue(Ty)));
+                   getConstant(cast<ConstantInt>(getContext().getAllOnesValue(Ty)));
   return getMinusSCEV(AllOnes, V);
 }
 
@@ -2896,7 +2896,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
     // Turn shift left of a constant amount into a multiply.
     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
       uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-      Constant *X = Context->getConstantInt(
+      Constant *X = getContext().getConstantInt(
         APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
       return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
     }

@@ -2906,7 +2906,7 @@ const SCEV *ScalarEvolution::createSCEV(Value *V) {
     // Turn logical shift right of a constant into a unsigned divide.
     if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
      uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
-      Constant *X = Context->getConstantInt(
+      Constant *X = getContext().getConstantInt(
        APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
      return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
     }

@@ -3477,7 +3477,7 @@ EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
 /// the addressed element of the initializer or null if the index expression is
 /// invalid.
 static Constant *
-GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV,
+GetAddressedElementFromGlobal(LLVMContext &Context, GlobalVariable *GV,
                               const std::vector<ConstantInt*> &Indices) {
   Constant *Init = GV->getInitializer();
   for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
@@ -3491,10 +3491,10 @@ GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV,
     } else if (isa<ConstantAggregateZero>(Init)) {
       if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
         assert(Idx < STy->getNumElements() && "Bad struct index!");
-        Init = Context->getNullValue(STy->getElementType(Idx));
+        Init = Context.getNullValue(STy->getElementType(Idx));
       } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
         if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
-        Init = Context->getNullValue(ATy->getElementType());
+        Init = Context.getNullValue(ATy->getElementType());
       } else {
         llvm_unreachable("Unknown constant aggregate type!");
       }

@@ -3558,14 +3558,14 @@ ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
 
   unsigned MaxSteps = MaxBruteForceIterations;
   for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
-    ConstantInt *ItCst = Context->getConstantInt(
+    ConstantInt *ItCst = getContext().getConstantInt(
                            cast<IntegerType>(IdxExpr->getType()), IterationNum);
     ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
 
     // Form the GEP offset.
     Indexes[VarIdxNum] = Val;
 
-    Constant *Result = GetAddressedElementFromGlobal(Context, GV, Indexes);
+    Constant *Result = GetAddressedElementFromGlobal(getContext(), GV, Indexes);
     if (Result == 0) break;  // Cannot compute!
 
     // Evaluate the condition for this iteration.

@@ -3649,7 +3649,7 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
   if (Constant *C = dyn_cast<Constant>(V)) return C;
   if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
   Instruction *I = cast<Instruction>(V);
-  LLVMContext *Context = I->getParent()->getContext();
+  LLVMContext &Context = I->getParent()->getContext();
 
   std::vector<Constant*> Operands;
   Operands.resize(I->getNumOperands());

@@ -3869,10 +3869,11 @@ const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
         if (const CmpInst *CI = dyn_cast<CmpInst>(I))
           C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                               &Operands[0], Operands.size(),
-                                              Context);
+                                              getContext());
         else
           C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
-                                       &Operands[0], Operands.size(), Context);
+                                       &Operands[0], Operands.size(),
+                                       getContext());
         Pair.first->second = C;
         return getSCEV(C);
       }
@@ -4068,12 +4069,12 @@ SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
       return std::make_pair(CNC, CNC);
     }
 
-    LLVMContext *Context = SE.getContext();
+    LLVMContext &Context = SE.getContext();
 
     ConstantInt *Solution1 =
-      Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
+      Context.getConstantInt((NegB + SqrtVal).sdiv(TwoA));
     ConstantInt *Solution2 =
-      Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));
+      Context.getConstantInt((NegB - SqrtVal).sdiv(TwoA));
 
     return std::make_pair(SE.getConstant(Solution1),
                           SE.getConstant(Solution2));

@@ -4141,7 +4142,7 @@ const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
 #endif
     // Pick the smallest positive root value.
     if (ConstantInt *CB =
-        dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
+        dyn_cast<ConstantInt>(getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
                                    R1->getValue(), R2->getValue()))) {
       if (CB->getZExtValue() == false)
         std::swap(R1, R2);   // R1 is the minimum root now.

@@ -4681,7 +4682,7 @@ const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
 
   // Check Add for unsigned overflow.
   // TODO: More sophisticated things could be done here.
-  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
+  const Type *WideTy = getContext().getIntegerType(getTypeSizeInBits(Ty) + 1);
   const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
   const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
   const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);

@@ -4835,7 +4836,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
 
     // The exit value should be (End+A)/A.
     APInt ExitVal = (End + A).udiv(A);
-    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);
+    ConstantInt *ExitValue = SE.getContext().getConstantInt(ExitVal);
 
     // Evaluate at the exit value.  If we really did fall out of the valid
     // range, then we computed our trip count, otherwise wrap around or other

@@ -4847,7 +4848,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
     // Ensure that the previous value is in the range.  This is a sanity check.
     assert(Range.contains(
            EvaluateConstantChrecAtConstant(this,
-             SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
+             SE.getContext().getConstantInt(ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
     return SE.getConstant(ExitValue);
   } else if (isQuadratic()) {

@@ -4868,7 +4869,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
       // Pick the smallest positive root value.
      if (ConstantInt *CB =
           dyn_cast<ConstantInt>(
-                SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
+                SE.getContext().getConstantExprICmp(ICmpInst::ICMP_ULT,
                       R1->getValue(), R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

@@ -4882,7 +4883,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
         if (Range.contains(R1Val->getValue())) {
           // The next iteration must be out of the range...
           ConstantInt *NextVal =
-                 SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);
+                 SE.getContext().getConstantInt(R1->getValue()->getValue()+1);
 
           R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
           if (!Range.contains(R1Val->getValue()))

@@ -4893,7 +4894,7 @@ const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
         // If R1 was not in the range, then it is a good return value.  Make
         // sure that R1-1 WAS in the range though, just in case.
         ConstantInt *NextVal =
-               SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
+               SE.getContext().getConstantInt(R1->getValue()->getValue()-1);
         R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
         if (Range.contains(R1Val->getValue()))
           return R1;
@ -55,7 +55,7 @@ Value *SCEVExpander::InsertNoopCastOfTo(Value *V, const Type *Ty) {

// FIXME: keep track of the cast instruction.
if (Constant *C = dyn_cast<Constant>(V))
return getContext()->getConstantExprCast(Op, C, Ty);
return getContext().getConstantExprCast(Op, C, Ty);

if (Argument *A = dyn_cast<Argument>(V)) {
// Check to see if there is already a cast!
@ -126,7 +126,7 @@ Value *SCEVExpander::InsertBinop(Instruction::BinaryOps Opcode,
// Fold a binop with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return getContext()->getConstantExpr(Opcode, CLHS, CRHS);
return getContext().getConstantExpr(Opcode, CLHS, CRHS);

// Do a quick scan to see if we have this binop nearby. If so, reuse it.
unsigned ScanLimit = 6;
@ -167,7 +167,7 @@ static bool FactorOutConstant(const SCEV *&S,
// For a Constant, check for a multiple of the given factor.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
ConstantInt *CI =
SE.getContext()->getConstantInt(C->getValue()->getValue().sdiv(Factor));
SE.getContext().getConstantInt(C->getValue()->getValue().sdiv(Factor));
// If the quotient is zero and the remainder is non-zero, reject
// the value at this scale. It will be considered for subsequent
// smaller scales.
@ -285,7 +285,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
Ops = NewOps;
AnyNonZeroIndices |= !ScaledOps.empty();
Value *Scaled = ScaledOps.empty() ?
getContext()->getNullValue(Ty) :
getContext().getNullValue(Ty) :
expandCodeFor(SE.getAddExpr(ScaledOps), Ty);
GepIndices.push_back(Scaled);

@ -299,7 +299,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
if (FullOffset < SL.getSizeInBytes()) {
unsigned ElIdx = SL.getElementContainingOffset(FullOffset);
GepIndices.push_back(
getContext()->getConstantInt(Type::Int32Ty, ElIdx));
getContext().getConstantInt(Type::Int32Ty, ElIdx));
ElTy = STy->getTypeAtIndex(ElIdx);
Ops[0] =
SE.getConstant(Ty, FullOffset - SL.getElementOffset(ElIdx));
@ -328,7 +328,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEV *const *op_begin,
// Fold a GEP with constant operands.
if (Constant *CLHS = dyn_cast<Constant>(V))
if (Constant *CRHS = dyn_cast<Constant>(Idx))
return getContext()->getConstantExprGetElementPtr(CLHS, &CRHS, 1);
return getContext().getConstantExprGetElementPtr(CLHS, &CRHS, 1);

// Do a quick scan to see if we have this GEP nearby. If so, reuse it.
unsigned ScanLimit = 6;
@ -400,7 +400,7 @@ Value *SCEVExpander::visitMulExpr(const SCEVMulExpr *S) {

// -1 * ... ---> 0 - ...
if (FirstOp == 1)
V = InsertBinop(Instruction::Sub, getContext()->getNullValue(Ty), V);
V = InsertBinop(Instruction::Sub, getContext().getNullValue(Ty), V);
return V;
}

@ -412,7 +412,7 @@ Value *SCEVExpander::visitUDivExpr(const SCEVUDivExpr *S) {
const APInt &RHS = SC->getValue()->getValue();
if (RHS.isPowerOf2())
return InsertBinop(Instruction::LShr, LHS,
getContext()->getConstantInt(Ty, RHS.logBase2()));
getContext().getConstantInt(Ty, RHS.logBase2()));
}

Value *RHS = expandCodeFor(S->getRHS(), Ty);
@ -522,7 +522,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {
BasicBlock *Preheader = L->getLoopPreheader();
PHINode *PN = PHINode::Create(Ty, "indvar", Header->begin());
InsertedValues.insert(PN);
PN->addIncoming(getContext()->getNullValue(Ty), Preheader);
PN->addIncoming(getContext().getNullValue(Ty), Preheader);

pred_iterator HPI = pred_begin(Header);
assert(HPI != pred_end(Header) && "Loop with zero preds???");
@ -532,7 +532,7 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {

// Insert a unit add instruction right before the terminator corresponding
// to the back-edge.
Constant *One = getContext()->getConstantInt(Ty, 1);
Constant *One = getContext().getConstantInt(Ty, 1);
Instruction *Add = BinaryOperator::CreateAdd(PN, One, "indvar.next",
(*HPI)->getTerminator());
InsertedValues.insert(Add);

@ -824,7 +824,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
LLVMContext *Context,
LLVMContext &Context,
Instruction *InsertBefore) {
const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
@ -882,13 +882,13 @@ Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
//
// All inserted insertvalue instructions are inserted before InsertBefore
Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
const unsigned *idx_end, LLVMContext *Context,
const unsigned *idx_end, LLVMContext &Context,
Instruction *InsertBefore) {
assert(InsertBefore && "Must have someplace to insert!");
const Type *IndexedType = ExtractValueInst::getIndexedType(From->getType(),
idx_begin,
idx_end);
Value *To = Context->getUndef(IndexedType);
Value *To = Context.getUndef(IndexedType);
SmallVector<unsigned, 10> Idxs(idx_begin, idx_end);
unsigned IdxSkip = Idxs.size();

@ -903,7 +903,7 @@ Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
const unsigned *idx_end, LLVMContext *Context,
const unsigned *idx_end, LLVMContext &Context,
Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
// recursion)
@ -917,11 +917,11 @@ Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
const CompositeType *PTy = cast<CompositeType>(V->getType());

if (isa<UndefValue>(V))
return Context->getUndef(ExtractValueInst::getIndexedType(PTy,
return Context.getUndef(ExtractValueInst::getIndexedType(PTy,
idx_begin,
idx_end));
else if (isa<ConstantAggregateZero>(V))
return Context->getNullValue(ExtractValueInst::getIndexedType(PTy,
return Context.getNullValue(ExtractValueInst::getIndexedType(PTy,
idx_begin,
idx_end));
else if (Constant *C = dyn_cast<Constant>(V)) {

@ -314,7 +314,7 @@ bool DwarfEHPrepare::PromoteStackTemporaries() {
if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
// Turn the exception temporary into registers and phi nodes if possible.
std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
PromoteMemToReg(Allocas, *DT, *DF, Context);
PromoteMemToReg(Allocas, *DT, *DF, ExceptionValueVar->getContext());
return true;
}
return false;
@ -355,7 +355,7 @@ Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
// Create the temporary if we didn't already.
if (!ExceptionValueVar) {
ExceptionValueVar = new AllocaInst(
Context->getPointerTypeUnqual(Type::Int8Ty),
BB->getContext().getPointerTypeUnqual(Type::Int8Ty),
"eh.value", F->begin()->begin());
++NumStackTempsIntroduced;
}

@ -320,7 +320,7 @@ static void ReplaceFPIntrinsicWithCall(CallInst *CI, const char *Fname,

void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
IRBuilder<> Builder(CI->getParent(), CI);
LLVMContext *Context = CI->getParent()->getContext();
LLVMContext &Context = CI->getContext();

Function *Callee = CI->getCalledFunction();
assert(Callee && "Cannot lower an indirect call!");
@ -346,7 +346,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
}
case Intrinsic::sigsetjmp:
if (CI->getType() != Type::VoidTy)
CI->replaceAllUsesWith(Context->getNullValue(CI->getType()));
CI->replaceAllUsesWith(Context.getNullValue(CI->getType()));
break;

case Intrinsic::longjmp: {
@ -362,15 +362,15 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
break;
}
case Intrinsic::ctpop:
CI->replaceAllUsesWith(LowerCTPOP(*Context, CI->getOperand(1), CI));
CI->replaceAllUsesWith(LowerCTPOP(Context, CI->getOperand(1), CI));
break;

case Intrinsic::bswap:
CI->replaceAllUsesWith(LowerBSWAP(*Context, CI->getOperand(1), CI));
CI->replaceAllUsesWith(LowerBSWAP(Context, CI->getOperand(1), CI));
break;

case Intrinsic::ctlz:
CI->replaceAllUsesWith(LowerCTLZ(*Context, CI->getOperand(1), CI));
CI->replaceAllUsesWith(LowerCTLZ(Context, CI->getOperand(1), CI));
break;

case Intrinsic::cttz: {
@ -378,9 +378,9 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
Value *Src = CI->getOperand(1);
Value *NotSrc = Builder.CreateNot(Src);
NotSrc->setName(Src->getName() + ".not");
Value *SrcM1 = Context->getConstantInt(Src->getType(), 1);
Value *SrcM1 = Context.getConstantInt(Src->getType(), 1);
SrcM1 = Builder.CreateSub(Src, SrcM1);
Src = LowerCTPOP(*Context, Builder.CreateAnd(NotSrc, SrcM1), CI);
Src = LowerCTPOP(Context, Builder.CreateAnd(NotSrc, SrcM1), CI);
CI->replaceAllUsesWith(Src);
break;
}
@ -393,7 +393,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
"save" : "restore") << " intrinsic.\n";
Warned = true;
if (Callee->getIntrinsicID() == Intrinsic::stacksave)
CI->replaceAllUsesWith(Context->getNullValue(CI->getType()));
CI->replaceAllUsesWith(Context.getNullValue(CI->getType()));
break;
}

@ -414,7 +414,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::readcyclecounter: {
cerr << "WARNING: this target does not support the llvm.readcyclecoun"
<< "ter intrinsic. It is being lowered to a constant 0\n";
CI->replaceAllUsesWith(Context->getConstantInt(Type::Int64Ty, 0));
CI->replaceAllUsesWith(Context.getConstantInt(Type::Int64Ty, 0));
break;
}

@ -428,13 +428,13 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::eh_exception:
case Intrinsic::eh_selector_i32:
case Intrinsic::eh_selector_i64:
CI->replaceAllUsesWith(Context->getNullValue(CI->getType()));
CI->replaceAllUsesWith(Context.getNullValue(CI->getType()));
break;

case Intrinsic::eh_typeid_for_i32:
case Intrinsic::eh_typeid_for_i64:
// Return something different to eh_selector.
CI->replaceAllUsesWith(Context->getConstantInt(CI->getType(), 1));
CI->replaceAllUsesWith(Context.getConstantInt(CI->getType(), 1));
break;

case Intrinsic::var_annotation:
@ -506,7 +506,7 @@ void IntrinsicLowering::LowerIntrinsicCall(CallInst *CI) {
case Intrinsic::flt_rounds:
// Lower to "round to the nearest"
if (CI->getType() != Type::VoidTy)
CI->replaceAllUsesWith(Context->getConstantInt(CI->getType(), 1));
CI->replaceAllUsesWith(Context.getConstantInt(CI->getType(), 1));
break;
}

@ -21,7 +21,7 @@
using namespace llvm;

CCState::CCState(unsigned CC, bool isVarArg, const TargetMachine &tm,
SmallVector<CCValAssign, 16> &locs, LLVMContext *C)
SmallVector<CCValAssign, 16> &locs, LLVMContext &C)
: CallingConv(CC), IsVarArg(isVarArg), TM(tm),
TRI(*TM.getRegisterInfo()), Locs(locs), Context(C) {
// No stack is used.

@ -92,7 +92,7 @@ unsigned FastISel::getRegForValue(Value *V) {
} else if (isa<ConstantPointerNull>(V)) {
// Translate this as an integer zero so that it can be
// local-CSE'd with actual integer zeros.
Reg = getRegForValue(Context->getNullValue(TD.getIntPtrType()));
Reg = getRegForValue(V->getContext().getNullValue(TD.getIntPtrType()));
} else if (ConstantFP *CF = dyn_cast<ConstantFP>(V)) {
Reg = FastEmit_f(VT, VT, ISD::ConstantFP, CF);

@ -108,7 +108,8 @@ unsigned FastISel::getRegForValue(Value *V) {
if (isExact) {
APInt IntVal(IntBitWidth, 2, x);

unsigned IntegerReg = getRegForValue(Context->getConstantInt(IntVal));
unsigned IntegerReg =
getRegForValue(V->getContext().getConstantInt(IntVal));
if (IntegerReg != 0)
Reg = FastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg);
}
@ -480,7 +481,7 @@ bool FastISel::SelectCall(User *I) {
UpdateValueMap(I, ResultReg);
} else {
unsigned ResultReg =
getRegForValue(Context->getNullValue(I->getType()));
getRegForValue(I->getContext().getNullValue(I->getType()));
UpdateValueMap(I, ResultReg);
}
return true;
@ -753,8 +754,7 @@ FastISel::FastISel(MachineFunction &mf,
TM(MF.getTarget()),
TD(*TM.getTargetData()),
TII(*TM.getInstrInfo()),
TLI(*TM.getTargetLowering()),
Context(mf.getFunction()->getContext()) {
TLI(*TM.getTargetLowering()) {
}

FastISel::~FastISel() {}

@ -816,7 +816,7 @@ void SelectionDAG::init(MachineFunction &mf, MachineModuleInfo *mmi,
MF = &mf;
MMI = mmi;
DW = dw;
Context = mf.getFunction()->getContext();
Context = &mf.getFunction()->getContext();
}

SelectionDAG::~SelectionDAG() {

@ -62,10 +62,10 @@ namespace {
Constant *GetFrameMap(Function &F);
const Type* GetConcreteStackEntryType(Function &F);
void CollectRoots(Function &F);
static GetElementPtrInst *CreateGEP(LLVMContext *Context,
static GetElementPtrInst *CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Value *BasePtr,
int Idx1, const char *Name);
static GetElementPtrInst *CreateGEP(LLVMContext *Context,
static GetElementPtrInst *CreateGEP(LLVMContext &Context,
IRBuilder<> &B, Value *BasePtr,
int Idx1, int Idx2, const char *Name);
};
@ -95,7 +95,7 @@ namespace {

public:
EscapeEnumerator(Function &F, const char *N = "cleanup")
: F(F), CleanupBBName(N), State(0), Builder(*F.getContext()) {}
: F(F), CleanupBBName(N), State(0), Builder(F.getContext()) {}

IRBuilder<> *Next() {
switch (State) {
@ -188,7 +188,7 @@ ShadowStackGC::ShadowStackGC() : Head(0), StackEntryTy(0) {

Constant *ShadowStackGC::GetFrameMap(Function &F) {
// doInitialization creates the abstract type of this value.
LLVMContext *Context = F.getContext();
LLVMContext &Context = F.getContext();

Type *VoidPtr = PointerType::getUnqual(Type::Int8Ty);

@ -203,17 +203,17 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
}

Constant *BaseElts[] = {
Context->getConstantInt(Type::Int32Ty, Roots.size(), false),
Context->getConstantInt(Type::Int32Ty, NumMeta, false),
Context.getConstantInt(Type::Int32Ty, Roots.size(), false),
Context.getConstantInt(Type::Int32Ty, NumMeta, false),
};

Constant *DescriptorElts[] = {
Context->getConstantStruct(BaseElts, 2),
Context->getConstantArray(Context->getArrayType(VoidPtr, NumMeta),
Context.getConstantStruct(BaseElts, 2),
Context.getConstantArray(Context.getArrayType(VoidPtr, NumMeta),
Metadata.begin(), NumMeta)
};

Constant *FrameMap = Context->getConstantStruct(DescriptorElts, 2);
Constant *FrameMap = Context.getConstantStruct(DescriptorElts, 2);

std::string TypeName("gc_map.");
TypeName += utostr(NumMeta);
@ -236,9 +236,9 @@ Constant *ShadowStackGC::GetFrameMap(Function &F) {
GlobalVariable::InternalLinkage,
FrameMap, "__gc_" + F.getName());

Constant *GEPIndices[2] = { Context->getConstantInt(Type::Int32Ty, 0),
Context->getConstantInt(Type::Int32Ty, 0) };
return Context->getConstantExprGetElementPtr(GV, GEPIndices, 2);
Constant *GEPIndices[2] = { Context.getConstantInt(Type::Int32Ty, 0),
Context.getConstantInt(Type::Int32Ty, 0) };
return Context.getConstantExprGetElementPtr(GV, GEPIndices, 2);
}

const Type* ShadowStackGC::GetConcreteStackEntryType(Function &F) {
@ -340,11 +340,11 @@ void ShadowStackGC::CollectRoots(Function &F) {
}

GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext *Context, IRBuilder<> &B, Value *BasePtr,
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
int Idx, int Idx2, const char *Name) {
Value *Indices[] = { Context->getConstantInt(Type::Int32Ty, 0),
Context->getConstantInt(Type::Int32Ty, Idx),
Context->getConstantInt(Type::Int32Ty, Idx2) };
Value *Indices[] = { Context.getConstantInt(Type::Int32Ty, 0),
Context.getConstantInt(Type::Int32Ty, Idx),
Context.getConstantInt(Type::Int32Ty, Idx2) };
Value* Val = B.CreateGEP(BasePtr, Indices, Indices + 3, Name);

assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
@ -353,10 +353,10 @@ ShadowStackGC::CreateGEP(LLVMContext *Context, IRBuilder<> &B, Value *BasePtr,
}

GetElementPtrInst *
ShadowStackGC::CreateGEP(LLVMContext *Context, IRBuilder<> &B, Value *BasePtr,
ShadowStackGC::CreateGEP(LLVMContext &Context, IRBuilder<> &B, Value *BasePtr,
int Idx, const char *Name) {
Value *Indices[] = { Context->getConstantInt(Type::Int32Ty, 0),
Context->getConstantInt(Type::Int32Ty, Idx) };
Value *Indices[] = { Context.getConstantInt(Type::Int32Ty, 0),
Context.getConstantInt(Type::Int32Ty, Idx) };
Value *Val = B.CreateGEP(BasePtr, Indices, Indices + 2, Name);

assert(isa<GetElementPtrInst>(Val) && "Unexpected folded constant");
@ -366,7 +366,7 @@ ShadowStackGC::CreateGEP(LLVMContext *Context, IRBuilder<> &B, Value *BasePtr,

/// runOnFunction - Insert code to maintain the shadow stack.
bool ShadowStackGC::performCustomLowering(Function &F) {
LLVMContext *Context = F.getContext();
LLVMContext &Context = F.getContext();

// Find calls to llvm.gcroot.
CollectRoots(F);

@ -68,7 +68,7 @@ bool UnreachableBlockElim::runOnFunction(Function &F) {
BasicBlock *BB = I;
DeadBlocks.push_back(BB);
while (PHINode *PN = dyn_cast<PHINode>(BB->begin())) {
PN->replaceAllUsesWith(Context->getNullValue(PN->getType()));
PN->replaceAllUsesWith(F.getContext().getNullValue(PN->getType()));
BB->getInstList().pop_front();
}
for (succ_iterator SI = succ_begin(BB), E = succ_end(BB); SI != E; ++SI)

@ -368,7 +368,7 @@ void JIT::deleteModuleProvider(ModuleProvider *MP, std::string *E) {
GenericValue JIT::runFunction(Function *F,
const std::vector<GenericValue> &ArgValues) {
assert(F && "Function *F was null at entry to run()");
LLVMContext *Context = F->getContext();
LLVMContext &Context = F->getContext();

void *FPtr = getPointerToFunction(F);
assert(FPtr && "Pointer to fn's code was null after getPointerToFunction");
@ -470,7 +470,7 @@ GenericValue JIT::runFunction(Function *F,
// arguments. Make this function and return.

// First, create the function.
FunctionType *STy=Context->getFunctionType(RetTy, false);
FunctionType *STy=Context.getFunctionType(RetTy, false);
Function *Stub = Function::Create(STy, Function::InternalLinkage, "",
F->getParent());

@ -487,27 +487,27 @@ GenericValue JIT::runFunction(Function *F,
switch (ArgTy->getTypeID()) {
default: llvm_unreachable("Unknown argument type for function call!");
case Type::IntegerTyID:
C = Context->getConstantInt(AV.IntVal);
C = Context.getConstantInt(AV.IntVal);
break;
case Type::FloatTyID:
C = Context->getConstantFP(APFloat(AV.FloatVal));
C = Context.getConstantFP(APFloat(AV.FloatVal));
break;
case Type::DoubleTyID:
C = Context->getConstantFP(APFloat(AV.DoubleVal));
C = Context.getConstantFP(APFloat(AV.DoubleVal));
break;
case Type::PPC_FP128TyID:
case Type::X86_FP80TyID:
case Type::FP128TyID:
C = Context->getConstantFP(APFloat(AV.IntVal));
C = Context.getConstantFP(APFloat(AV.IntVal));
break;
case Type::PointerTyID:
void *ArgPtr = GVTOP(AV);
if (sizeof(void*) == 4)
C = Context->getConstantInt(Type::Int32Ty, (int)(intptr_t)ArgPtr);
C = Context.getConstantInt(Type::Int32Ty, (int)(intptr_t)ArgPtr);
else
C = Context->getConstantInt(Type::Int64Ty, (intptr_t)ArgPtr);
C = Context.getConstantInt(Type::Int64Ty, (intptr_t)ArgPtr);
// Cast the integer to pointer
C = Context->getConstantExprIntToPtr(C, ArgTy);
C = Context.getConstantExprIntToPtr(C, ArgTy);
break;
}
Args.push_back(C);

@ -1073,7 +1073,7 @@ static bool LinkFunctionBody(Function *Dest, Function *Src,
for (Instruction::op_iterator OI = I->op_begin(), OE = I->op_end();
OI != OE; ++OI)
if (!isa<Instruction>(*OI) && !isa<BasicBlock>(*OI))
*OI = RemapOperand(*OI, ValueMap, *Dest->getContext());
*OI = RemapOperand(*OI, ValueMap, Dest->getContext());

// There is no need to map the arguments anymore.
for (Function::arg_iterator I = Src->arg_begin(), E = Src->arg_end();

@ -893,7 +893,7 @@ emitLoadConstPool(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C =
MF.getFunction()->getContext()->getConstantInt(Type::Int32Ty, Val);
MF.getFunction()->getContext().getConstantInt(Type::Int32Ty, Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))

@ -696,7 +696,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
SmallVector<CCValAssign, 16> RVLocs;
bool isVarArg = TheCall->isVarArg();
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall,
CCAssignFnForNode(CallingConv, /* Return*/ true));

@ -832,7 +832,7 @@ SDValue ARMTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC, /* Return*/ false));

// Get a count of how many bytes are to be pushed on the stack.
@ -1032,7 +1032,7 @@ SDValue ARMTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();

// CCState - Info about the registers and stack slots.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

// Analyze return values of ISD::RET.
CCInfo.AnalyzeReturn(Op.getNode(), CCAssignFnForNode(CC, /* Return */ true));
@ -1384,7 +1384,7 @@ ARMTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(),
CCAssignFnForNode(CC, /* Return*/ false));

@ -59,7 +59,7 @@ void Thumb1RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C =
MF.getFunction()->getContext()->getConstantInt(Type::Int32Ty, Val);
MF.getFunction()->getContext().getConstantInt(Type::Int32Ty, Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRcp))

@ -53,7 +53,7 @@ void Thumb2RegisterInfo::emitLoadConstPool(MachineBasicBlock &MBB,
MachineFunction &MF = *MBB.getParent();
MachineConstantPool *ConstantPool = MF.getConstantPool();
Constant *C =
MF.getFunction()->getContext()->getConstantInt(Type::Int32Ty, Val);
MF.getFunction()->getContext().getConstantInt(Type::Int32Ty, Val);
unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))

@ -238,7 +238,7 @@ SDValue AlphaTargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

CCInfo.AnalyzeCallOperands(TheCall, CC_Alpha);

@ -356,7 +356,7 @@ AlphaTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs,
DAG.getContext());
*DAG.getContext());

CCInfo.AnalyzeCallResult(TheCall, RetCC_Alpha);
SmallVector<SDValue, 8> ResultVals;

@ -1238,7 +1238,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
Out << '{';
if (AT->getNumElements()) {
Out << ' ';
Constant *CZ = Context->getNullValue(AT->getElementType());
Constant *CZ = CPV->getContext().getNullValue(AT->getElementType());
printConstant(CZ, Static);
for (unsigned i = 1, e = AT->getNumElements(); i != e; ++i) {
Out << ", ";
@ -1263,7 +1263,7 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
assert(isa<ConstantAggregateZero>(CPV) || isa<UndefValue>(CPV));
const VectorType *VT = cast<VectorType>(CPV->getType());
Out << "{ ";
Constant *CZ = Context->getNullValue(VT->getElementType());
Constant *CZ = CPV->getContext().getNullValue(VT->getElementType());
printConstant(CZ, Static);
for (unsigned i = 1, e = VT->getNumElements(); i != e; ++i) {
Out << ", ";
@ -1285,10 +1285,12 @@ void CWriter::printConstant(Constant *CPV, bool Static) {
Out << '{';
if (ST->getNumElements()) {
Out << ' ';
printConstant(Context->getNullValue(ST->getElementType(0)), Static);
printConstant(
CPV->getContext().getNullValue(ST->getElementType(0)), Static);
for (unsigned i = 1, e = ST->getNumElements(); i != e; ++i) {
Out << ", ";
printConstant(Context->getNullValue(ST->getElementType(i)), Static);
printConstant(
CPV->getContext().getNullValue(ST->getElementType(i)), Static);
}
}
Out << " }";
@ -3498,7 +3500,7 @@ void CWriter::visitStoreInst(StoreInst &I) {
if (!ITy->isPowerOf2ByteWidth())
// We have a bit width that doesn't match an even power-of-2 byte
// size. Consequently we must & the value with the type's bit mask
BitMask = Context->getConstantInt(ITy, ITy->getBitMask());
BitMask = I.getContext().getConstantInt(ITy, ITy->getBitMask());
if (BitMask)
Out << "((";
writeOperand(Operand);

@ -1385,7 +1385,7 @@ LowerRET(SDValue Op, SelectionDAG &DAG, TargetMachine &TM) {
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
CCState CCInfo(CC, isVarArg, TM, RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, TM, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_SPU);

// If this is the first return lowered for this function, add the regs to the

@ -195,7 +195,7 @@ SDValue MSP430TargetLowering::LowerCCCArguments(SDValue Op,

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_MSP430);

assert(!isVarArg && "Varargs not supported yet");
@ -272,7 +272,7 @@ SDValue MSP430TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();

// CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

// Analize return values of ISD::RET
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_MSP430);
@ -324,7 +324,7 @@ SDValue MSP430TargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

CCInfo.AnalyzeCallOperands(TheCall, CC_MSP430);

@ -452,7 +452,7 @@ MSP430TargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());

CCInfo.AnalyzeCallResult(TheCall, RetCC_MSP430);
SmallVector<SDValue, 8> ResultVals;

@ -735,7 +735,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG)

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

// To meet O32 ABI, Mips must always allocate 16 bytes on
// the stack (even if less than 4 are used as arguments)
@ -919,7 +919,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());

CCInfo.AnalyzeCallResult(TheCall, RetCC_Mips);
SmallVector<SDValue, 8> ResultVals;
@ -963,7 +963,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG)

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

if (Subtarget->isABI_O32())
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_MipsO32);
@ -1111,7 +1111,7 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
DebugLoc dl = Op.getDebugLoc();

// CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

// Analize return values of ISD::RET
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_Mips);

@ -1527,7 +1527,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
@ -1586,7 +1586,7 @@ PPCTargetLowering::LowerFORMAL_ARGUMENTS_SVR4(SDValue Op,
// caller's stack frame, right above the parameter list area.
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CC, isVarArg, getTargetMachine(),
ByValArgLocs, DAG.getContext());
ByValArgLocs, *DAG.getContext());

// Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@ -2455,7 +2455,7 @@ static SDValue LowerCallReturn(SDValue Op, SelectionDAG &DAG, TargetMachine &TM,
SmallVector<SDValue, 16> ResultVals;
SmallVector<CCValAssign, 16> RVLocs;
unsigned CallerCC = DAG.getMachineFunction().getFunction()->getCallingConv();
CCState CCRetInfo(CallerCC, isVarArg, TM, RVLocs, DAG.getContext());
CCState CCRetInfo(CallerCC, isVarArg, TM, RVLocs, *DAG.getContext());
CCRetInfo.AnalyzeCallResult(TheCall, RetCC_PPC);

// Copy all of the result registers out of their specified physreg.
@ -2561,7 +2561,7 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,

// Assign locations to all of the outgoing arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

// Reserve space for the linkage area on the stack.
CCInfo.AllocateStack(PPCFrameInfo::getLinkageSize(false, false), PtrByteSize);
@ -2602,7 +2602,7 @@ SDValue PPCTargetLowering::LowerCALL_SVR4(SDValue Op, SelectionDAG &DAG,
// Assign locations to all of the outgoing aggregate by value arguments.
SmallVector<CCValAssign, 16> ByValArgLocs;
CCState CCByValInfo(CC, isVarArg, getTargetMachine(), ByValArgLocs,
DAG.getContext());
*DAG.getContext());

// Reserve stack space for the allocations in CCInfo.
CCByValInfo.AllocateStack(CCInfo.getNextStackOffset(), PtrByteSize);
@ -3067,7 +3067,7 @@ SDValue PPCTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG,
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
DebugLoc dl = Op.getDebugLoc();
CCState CCInfo(CC, isVarArg, TM, RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, TM, RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_PPC);

// If this is the first return lowered for this function, add the regs to the

@ -40,7 +40,7 @@ static SDValue LowerRET(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();

// CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, DAG.getTarget(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, DAG.getTarget(), RVLocs, *DAG.getContext());

// Analize return values of ISD::RET
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_Sparc32);
@ -90,7 +90,7 @@ SparcTargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op,

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_Sparc32);

static const unsigned ArgRegs[] = {
@ -469,7 +469,7 @@ static SDValue LowerCALL(SDValue Op, SelectionDAG &DAG) {
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState RVInfo(CallingConv, isVarArg, DAG.getTarget(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());

RVInfo.AnalyzeCallResult(TheCall, RetCC_Sparc32);
SmallVector<SDValue, 8> ResultVals;

@ -213,7 +213,7 @@ SDValue SystemZTargetLowering::LowerCCCArguments(SDValue Op,

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_SystemZ);

if (isVarArg)
@ -305,7 +305,7 @@ SDValue SystemZTargetLowering::LowerCCCCallTo(SDValue Op, SelectionDAG &DAG,

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

CCInfo.AnalyzeCallOperands(TheCall, CC_SystemZ);

@ -436,7 +436,7 @@ SystemZTargetLowering::LowerCallResult(SDValue Chain, SDValue InFlag,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(), RVLocs,
DAG.getContext());
*DAG.getContext());

CCInfo.AnalyzeCallResult(TheCall, RetCC_SystemZ);
SmallVector<SDValue, 8> ResultVals;
@ -482,7 +482,7 @@ SDValue SystemZTargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
DebugLoc dl = Op.getDebugLoc();

// CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

// Analize return values of ISD::RET
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_SystemZ);

@ -272,7 +272,7 @@ bool X86FastISel::X86FastEmitStore(MVT VT, Value *Val,
const X86AddressMode &AM) {
// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Val))
Val = Context->getNullValue(TD.getIntPtrType());
Val = Val->getContext().getNullValue(TD.getIntPtrType());

// If this is a store of a simple constant, fold the constant into the store.
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
@ -672,7 +672,7 @@ bool X86FastISel::X86FastEmitCompare(Value *Op0, Value *Op1, MVT VT) {

// Handle 'null' like i32/i64 0.
if (isa<ConstantPointerNull>(Op1))
Op1 = Context->getNullValue(TD.getIntPtrType());
Op1 = Op0->getContext().getNullValue(TD.getIntPtrType());

// We have two options: compare with register or immediate. If the RHS of
// the compare is an immediate that we can fold into this compare, use

@ -1049,7 +1049,7 @@ SDValue X86TargetLowering::LowerRET(SDValue Op, SelectionDAG &DAG) {
SmallVector<CCValAssign, 16> RVLocs;
unsigned CC = DAG.getMachineFunction().getFunction()->getCallingConv();
bool isVarArg = DAG.getMachineFunction().getFunction()->isVarArg();
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_X86);

// If this is the first return lowered for this function, add the regs to the
@ -1176,7 +1176,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
bool isVarArg = TheCall->isVarArg();
bool Is64Bit = Subtarget->is64Bit();
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());
CCInfo.AnalyzeCallResult(TheCall, RetCC_X86);

SmallVector<SDValue, 8> ResultVals;
@ -1385,7 +1385,7 @@ X86TargetLowering::LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG) {

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeFormalArguments(Op.getNode(), CCAssignFnForNode(CC));

SmallVector<SDValue, 8> ArgValues;
@ -1680,7 +1680,7 @@ SDValue X86TargetLowering::LowerCALL(SDValue Op, SelectionDAG &DAG) {

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());
CCInfo.AnalyzeCallOperands(TheCall, CCAssignFnForNode(CC));

// Get a count of how many bytes are to be pushed on the stack.

@ -2284,8 +2284,8 @@ MachineInstr* X86InstrInfo::foldMemoryOperandImpl(MachineFunction &MF,
MachineConstantPool &MCP = *MF.getConstantPool();
const VectorType *Ty = VectorType::get(Type::Int32Ty, 4);
Constant *C = LoadMI->getOpcode() == X86::V_SET0 ?
MF.getFunction()->getContext()->getNullValue(Ty) :
MF.getFunction()->getContext()->getAllOnesValue(Ty);
MF.getFunction()->getContext().getNullValue(Ty) :
MF.getFunction()->getContext().getAllOnesValue(Ty);
unsigned CPI = MCP.getConstantPoolIndex(C, 16);

// Create operands to load from the constant pool entry.

@ -648,7 +648,7 @@ LowerCCCCallTo(SDValue Op, SelectionDAG &DAG, unsigned CC)

// Analyze operands of the call, assigning locations to each operand.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

// The ABI dictates there should be one stack slot available to the callee
// on function entry (for saving lr).
@ -775,7 +775,7 @@ LowerCallResult(SDValue Chain, SDValue InFlag, CallSDNode *TheCall,
// Assign locations to each value returned by this call.
SmallVector<CCValAssign, 16> RVLocs;
CCState CCInfo(CallingConv, isVarArg, getTargetMachine(),
RVLocs, DAG.getContext());
RVLocs, *DAG.getContext());

CCInfo.AnalyzeCallResult(TheCall, RetCC_XCore);
SmallVector<SDValue, 8> ResultVals;
@ -831,7 +831,7 @@ LowerCCCArguments(SDValue Op, SelectionDAG &DAG)

// Assign locations to all of the incoming arguments.
SmallVector<CCValAssign, 16> ArgLocs;
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), ArgLocs, *DAG.getContext());

CCInfo.AnalyzeFormalArguments(Op.getNode(), CC_XCore);

@ -948,7 +948,7 @@ LowerRET(SDValue Op, SelectionDAG &DAG)
DebugLoc dl = Op.getDebugLoc();

// CCState - Info about the registers and stack slot.
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, DAG.getContext());
CCState CCInfo(CC, isVarArg, getTargetMachine(), RVLocs, *DAG.getContext());

// Analize return values of ISD::RET
CCInfo.AnalyzeReturn(Op.getNode(), RetCC_XCore);

@ -576,6 +576,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));

const Type *RetTy = FTy->getReturnType();
LLVMContext &Context = RetTy->getContext();

// Work around LLVM bug PR56: the CWriter cannot emit varargs functions which
// have zero fixed arguments.
@ -586,7 +587,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
}

// Construct the new function type using the new arguments.
FunctionType *NFTy = Context->getFunctionType(RetTy, Params, FTy->isVarArg());
FunctionType *NFTy = Context.getFunctionType(RetTy, Params, FTy->isVarArg());

// Create the new function body and insert it into the module...
Function *NF = Function::Create(NFTy, F->getLinkage(), F->getName());
@ -637,9 +638,9 @@ Function *ArgPromotion::DoPromotion(Function *F,
// Emit a GEP and load for each element of the struct.
const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
const StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = { Context->getConstantInt(Type::Int32Ty, 0), 0 };
Value *Idxs[2] = { Context.getConstantInt(Type::Int32Ty, 0), 0 };
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = Context->getConstantInt(Type::Int32Ty, i);
Idxs[1] = Context.getConstantInt(Type::Int32Ty, i);
Value *Idx = GetElementPtrInst::Create(*AI, Idxs, Idxs+2,
(*AI)->getName()+"."+utostr(i),
Call);
@ -664,7 +665,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
// Use i32 to index structs, and i64 for others (pointers/arrays).
// This satisfies GEP constraints.
const Type *IdxTy = (isa<StructType>(ElTy) ? Type::Int32Ty : Type::Int64Ty);
Ops.push_back(Context->getConstantInt(IdxTy, *II));
Ops.push_back(Context.getConstantInt(IdxTy, *II));
// Keep track of the type we're currently indexing
ElTy = cast<CompositeType>(ElTy)->getTypeAtIndex(*II);
}
@ -680,7 +681,7 @@ Function *ArgPromotion::DoPromotion(Function *F,
}

if (ExtraArgHack)
Args.push_back(Context->getNullValue(Type::Int32Ty));
Args.push_back(Context.getNullValue(Type::Int32Ty));

// Push any varargs arguments on the list
for (; AI != CS.arg_end(); ++AI, ++ArgIndex) {
@ -757,10 +758,10 @@ Function *ArgPromotion::DoPromotion(Function *F,
const Type *AgTy = cast<PointerType>(I->getType())->getElementType();
Value *TheAlloca = new AllocaInst(AgTy, 0, "", InsertPt);
const StructType *STy = cast<StructType>(AgTy);
Value *Idxs[2] = { Context->getConstantInt(Type::Int32Ty, 0), 0 };
Value *Idxs[2] = { Context.getConstantInt(Type::Int32Ty, 0), 0 };

for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Idxs[1] = Context->getConstantInt(Type::Int32Ty, i);
Idxs[1] = Context.getConstantInt(Type::Int32Ty, i);
std::string Name = TheAlloca->getName()+"."+utostr(i);
Value *Idx = GetElementPtrInst::Create(TheAlloca, Idxs, Idxs+2,
Name, InsertPt);
@ -843,7 +844,7 @@ Function *ArgPromotion::DoPromotion(Function *F,

// Notify the alias analysis implementation that we inserted a new argument.
if (ExtraArgHack)
AA.copyValue(Context->getNullValue(Type::Int32Ty), NF->arg_begin());
AA.copyValue(Context.getNullValue(Type::Int32Ty), NF->arg_begin());


// Tell the alias analysis that the old function is about to disappear.

@ -196,8 +196,10 @@ bool DAE::DeleteDeadVarargs(Function &Fn) {
// Start by computing a new prototype for the function, which is the same as
// the old function, but doesn't have isVarArg set.
const FunctionType *FTy = Fn.getFunctionType();
LLVMContext &Context = FTy->getContext();

std::vector<const Type*> Params(FTy->param_begin(), FTy->param_end());
FunctionType *NFTy = Context->getFunctionType(FTy->getReturnType(),
FunctionType *NFTy = Context.getFunctionType(FTy->getReturnType(),
Params, false);
unsigned NumArgs = Params.size();

@ -598,6 +600,9 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
const Type *RetTy = FTy->getReturnType();
const Type *NRetTy = NULL;
unsigned RetCount = NumRetVals(F);

LLVMContext &Context = RetTy->getContext();

// -1 means unused, other numbers are the new index
SmallVector<int, 5> NewRetIdxs(RetCount, -1);
std::vector<const Type*> RetTypes;
@ -635,7 +640,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// something and {} into void.
// Make the new struct packed if we used to return a packed struct
// already.
NRetTy = Context->getStructType(RetTypes, STy->isPacked());
NRetTy = Context.getStructType(RetTypes, STy->isPacked());
else if (RetTypes.size() == 1)
// One return type? Just a simple value then, but only if we didn't use to
// return a struct with that simple value before.
@ -703,7 +708,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}

// Create the new function type based on the recomputed parameters.
FunctionType *NFTy = Context->getFunctionType(NRetTy, Params,
FunctionType *NFTy = Context.getFunctionType(NRetTy, Params,
FTy->isVarArg());

// No change?
@ -753,7 +758,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
}

if (ExtraArgHack)
Args.push_back(Context->getUndef(Type::Int32Ty));
Args.push_back(Context.getUndef(Type::Int32Ty));

// Push any varargs arguments on the list. Don't forget their attributes.
for (CallSite::arg_iterator E = CS.arg_end(); I != E; ++I, ++i) {
@ -792,7 +797,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else if (New->getType() == Type::VoidTy) {
// Our return value has uses, but they will get removed later on.
// Replace by null for now.
Call->replaceAllUsesWith(Context->getNullValue(Call->getType()));
Call->replaceAllUsesWith(Context.getNullValue(Call->getType()));
} else {
assert(isa<StructType>(RetTy) &&
"Return type changed, but not into a void. The old return type"
@ -809,7 +814,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// extract/insertvalue chaining and let instcombine clean that up.
//
// Start out building up our return value from undef
Value *RetVal = Context->getUndef(RetTy);
Value *RetVal = Context.getUndef(RetTy);
for (unsigned i = 0; i != RetCount; ++i)
if (NewRetIdxs[i] != -1) {
Value *V;
@ -855,7 +860,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
} else {
// If this argument is dead, replace any uses of it with null constants
// (these are guaranteed to become unused later on).
I->replaceAllUsesWith(Context->getNullValue(I->getType()));
I->replaceAllUsesWith(Context.getNullValue(I->getType()));
}

// If we change the return value of the function we must rewrite any return
@ -876,7 +881,7 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {
// clean that up.
Value *OldRet = RI->getOperand(0);
// Start out building up our return value from undef
RetVal = Context->getUndef(NRetTy);
RetVal = Context.getUndef(NRetTy);
for (unsigned i = 0; i != RetCount; ++i)
if (NewRetIdxs[i] != -1) {
ExtractValueInst *EV = ExtractValueInst::Create(OldRet, i,
@ -908,7 +913,6 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) {

bool DAE::runOnModule(Module &M) {
bool Changed = false;
Context = &M.getContext();

// First pass: Do a simple check to see if any functions can have their "..."
// removed. We can do this if they never call va_start. This loop cannot be

@ -77,7 +77,6 @@ static inline bool ShouldNukeSymtabEntry(const Type *Ty){
//
bool DTE::runOnModule(Module &M) {
bool Changed = false;
Context = &M.getContext();

TypeSymbolTable &ST = M.getTypeSymbolTable();
std::set<const Type *> UsedTypes = getAnalysis<FindUsedTypes>().getTypes();

@ -44,7 +44,6 @@ namespace {
return false;  // Nothing to extract
}

Context = &M.getContext();

if (deleteStuff)
return deleteGV();
@ -87,6 +86,8 @@ namespace {
}

bool isolateGV(Module &M) {
LLVMContext &Context = M.getContext();

// Mark all globals internal
// FIXME: what should we do with private linkage?
for (Module::global_iterator I = M.global_begin(), E = M.global_end(); I != E; ++I)
@ -102,14 +103,14 @@ namespace {
// by putting them in the used array
{
std::vector<Constant *> AUGs;
const Type *SBP= Context->getPointerTypeUnqual(Type::Int8Ty);
const Type *SBP= Context.getPointerTypeUnqual(Type::Int8Ty);
for (std::vector<GlobalValue*>::iterator GI = Named.begin(),
GE = Named.end(); GI != GE; ++GI) {
(*GI)->setLinkage(GlobalValue::ExternalLinkage);
AUGs.push_back(Context->getConstantExprBitCast(*GI, SBP));
AUGs.push_back(Context.getConstantExprBitCast(*GI, SBP));
}
ArrayType *AT = Context->getArrayType(SBP, AUGs.size());
Constant *Init = Context->getConstantArray(AT, AUGs);
ArrayType *AT = Context.getArrayType(SBP, AUGs.size());
Constant *Init = Context.getConstantArray(AT, AUGs);
GlobalValue *gv = new GlobalVariable(M, AT, false,
GlobalValue::AppendingLinkage,
Init, "llvm.used");

@ -58,7 +58,6 @@ ModulePass *llvm::createGlobalDCEPass() { return new GlobalDCE(); }

bool GlobalDCE::runOnModule(Module &M) {
bool Changed = false;
Context = &M.getContext();

// Loop over the module, adding globals which are obviously necessary.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {

@ -247,7 +247,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
}

static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
LLVMContext *Context) {
LLVMContext &Context) {
ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
if (!CI) return 0;
unsigned IdxV = CI->getZExtValue();
@ -261,18 +261,18 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
} else if (isa<ConstantAggregateZero>(Agg)) {
if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
if (IdxV < STy->getNumElements())
return Context->getNullValue(STy->getElementType(IdxV));
return Context.getNullValue(STy->getElementType(IdxV));
} else if (const SequentialType *STy =
dyn_cast<SequentialType>(Agg->getType())) {
return Context->getNullValue(STy->getElementType());
return Context.getNullValue(STy->getElementType());
}
} else if (isa<UndefValue>(Agg)) {
if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
if (IdxV < STy->getNumElements())
return Context->getUndef(STy->getElementType(IdxV));
return Context.getUndef(STy->getElementType(IdxV));
} else if (const SequentialType *STy =
dyn_cast<SequentialType>(Agg->getType())) {
return Context->getUndef(STy->getElementType());
return Context.getUndef(STy->getElementType());
}
}
return 0;
@ -284,7 +284,7 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
LLVMContext *Context) {
LLVMContext &Context) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
@ -466,7 +466,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
LLVMContext *Context) {
LLVMContext &Context) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
@ -488,10 +488,10 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
const StructLayout &Layout = *TD.getStructLayout(STy);
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
Constant *In = getAggregateConstantElement(Init,
Context->getConstantInt(Type::Int32Ty, i),
Context.getConstantInt(Type::Int32Ty, i),
Context);
assert(In && "Couldn't get element of initializer?");
GlobalVariable *NGV = new GlobalVariable(*Context,
GlobalVariable *NGV = new GlobalVariable(Context,
STy->getElementType(i), false,
GlobalVariable::InternalLinkage,
In, GV->getName()+"."+utostr(i),
@ -523,11 +523,11 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
for (unsigned i = 0, e = NumElements; i != e; ++i) {
Constant *In = getAggregateConstantElement(Init,
Context->getConstantInt(Type::Int32Ty, i),
Context.getConstantInt(Type::Int32Ty, i),
Context);
assert(In && "Couldn't get element of initializer?");

GlobalVariable *NGV = new GlobalVariable(*Context,
GlobalVariable *NGV = new GlobalVariable(Context,
STy->getElementType(), false,
GlobalVariable::InternalLinkage,
In, GV->getName()+"."+utostr(i),
@ -550,7 +550,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,

DOUT << "PERFORMING GLOBAL SRA ON: " << *GV;

Constant *NullInt = Context->getNullValue(Type::Int32Ty);
Constant *NullInt = Context.getNullValue(Type::Int32Ty);

// Loop over all of the uses of the global, replacing the constantexpr geps,
// with smaller constantexpr geps or direct references.
@ -575,7 +575,7 @@ static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
Idxs.push_back(NullInt);
for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
Idxs.push_back(CE->getOperand(i));
NewPtr = Context->getConstantExprGetElementPtr(cast<Constant>(NewPtr),
NewPtr = Context.getConstantExprGetElementPtr(cast<Constant>(NewPtr),
&Idxs[0], Idxs.size());
} else {
GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
@ -675,7 +675,7 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
LLVMContext *Context) {
LLVMContext &Context) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
Instruction *I = cast<Instruction>(*UI++);
@ -707,7 +707,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
}
} else if (CastInst *CI = dyn_cast<CastInst>(I)) {
Changed |= OptimizeAwayTrappingUsesOfValue(CI,
Context->getConstantExprCast(CI->getOpcode(),
Context.getConstantExprCast(CI->getOpcode(),
NewV, CI->getType()), Context);
if (CI->use_empty()) {
Changed = true;
@ -725,7 +725,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
break;
if (Idxs.size() == GEPI->getNumOperands()-1)
Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
Context->getConstantExprGetElementPtr(NewV, &Idxs[0],
Context.getConstantExprGetElementPtr(NewV, &Idxs[0],
Idxs.size()), Context);
if (GEPI->use_empty()) {
Changed = true;
@ -743,7 +743,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
LLVMContext *Context) {
LLVMContext &Context) {
bool Changed = false;

// Keep track of whether we are able to remove all the uses of the global
@ -797,7 +797,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,

/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, LLVMContext *Context) {
static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
@ -818,20 +818,20 @@ static void ConstantPropUsersOf(Value *V, LLVMContext &Context) {
/// malloc into a global, and any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
MallocInst *MI,
LLVMContext *Context) {
LLVMContext &Context) {
DOUT << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI;
ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());

if (NElements->getZExtValue() != 1) {
// If we have an array allocation, transform it to a single element
// allocation to make the code below simpler.
Type *NewTy = Context->getArrayType(MI->getAllocatedType(),
Type *NewTy = Context.getArrayType(MI->getAllocatedType(),
NElements->getZExtValue());
MallocInst *NewMI =
new MallocInst(NewTy, Context->getNullValue(Type::Int32Ty),
new MallocInst(NewTy, Context.getNullValue(Type::Int32Ty),
MI->getAlignment(), MI->getName(), MI);
Value* Indices[2];
Indices[0] = Indices[1] = Context->getNullValue(Type::Int32Ty);
Indices[0] = Indices[1] = Context.getNullValue(Type::Int32Ty);
Value *NewGEP = GetElementPtrInst::Create(NewMI, Indices, Indices + 2,
NewMI->getName()+".el0", MI);
MI->replaceAllUsesWith(NewGEP);
@ -844,7 +844,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
// FIXME: This new global should have the alignment returned by malloc. Code
// could depend on malloc returning large alignment (on the mac, 16 bytes) but
// this would only guarantee some lower alignment.
Constant *Init = Context->getUndef(MI->getAllocatedType());
Constant *Init = Context.getUndef(MI->getAllocatedType());
GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
MI->getAllocatedType(), false,
GlobalValue::InternalLinkage, Init,
@ -857,15 +857,15 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,

Constant *RepValue = NewGV;
if (NewGV->getType() != GV->getType()->getElementType())
RepValue = Context->getConstantExprBitCast(RepValue,
RepValue = Context.getConstantExprBitCast(RepValue,
GV->getType()->getElementType());

// If there is a comparison against null, we will insert a global bool to
// keep track of whether the global was initialized yet or not.
GlobalVariable *InitBool =
new GlobalVariable(*Context, Type::Int1Ty, false,
new GlobalVariable(Context, Type::Int1Ty, false,
GlobalValue::InternalLinkage,
Context->getFalse(), GV->getName()+".init",
Context.getFalse(), GV->getName()+".init",
GV->isThreadLocal());
bool InitBoolUsed = false;

@ -886,12 +886,12 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
default: llvm_unreachable("Unknown ICmp Predicate!");
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_SLT:
LV = Context->getFalse(); // X < null -> always false
LV = Context.getFalse(); // X < null -> always false
break;
case ICmpInst::ICMP_ULE:
case ICmpInst::ICMP_SLE:
case ICmpInst::ICMP_EQ:
LV = BinaryOperator::CreateNot(*Context, LV, "notinit", CI);
LV = BinaryOperator::CreateNot(Context, LV, "notinit", CI);
break;
case ICmpInst::ICMP_NE:
case ICmpInst::ICMP_UGE:
@ -908,7 +908,7 @@ static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
} else {
StoreInst *SI = cast<StoreInst>(GV->use_back());
// The global is initialized when the store to it occurs.
new StoreInst(Context->getTrue(), InitBool, SI);
new StoreInst(Context.getTrue(), InitBool, SI);
SI->eraseFromParent();
}

@ -1133,7 +1133,7 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext *Context) {
LLVMContext &Context) {
std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

if (FieldNo >= FieldVals.size())
@ -1160,7 +1160,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

Result =
PHINode::Create(Context->getPointerTypeUnqual(ST->getElementType(FieldNo)),
PHINode::Create(Context.getPointerTypeUnqual(ST->getElementType(FieldNo)),
PN->getName()+".f"+utostr(FieldNo), PN);
PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
} else {
@ -1176,7 +1176,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext *Context) {
LLVMContext &Context) {
// If this is a comparison against null, handle it.
if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
@ -1187,7 +1187,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
Context);

Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
Context->getNullValue(NPtr->getType()),
Context.getNullValue(NPtr->getType()),
SCI->getName());
SCI->replaceAllUsesWith(New);
SCI->eraseFromParent();
@ -1247,7 +1247,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext *Context) {
LLVMContext &Context) {
for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
UI != E; ) {
Instruction *User = cast<Instruction>(*UI++);
@ -1264,7 +1264,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
LLVMContext *Context){
LLVMContext &Context){
DOUT << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI;
const StructType *STy = cast<StructType>(MI->getAllocatedType());

@ -1281,12 +1281,12 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,

for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
const Type *FieldTy = STy->getElementType(FieldNo);
const Type *PFieldTy = Context->getPointerTypeUnqual(FieldTy);
const Type *PFieldTy = Context.getPointerTypeUnqual(FieldTy);

GlobalVariable *NGV =
new GlobalVariable(*GV->getParent(),
PFieldTy, false, GlobalValue::InternalLinkage,
Context->getNullValue(PFieldTy),
Context.getNullValue(PFieldTy),
GV->getName() + ".f" + utostr(FieldNo), GV,
GV->isThreadLocal());
FieldGlobals.push_back(NGV);
@ -1312,7 +1312,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
Value *RunningOr = 0;
for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
Value *Cond = new ICmpInst(MI, ICmpInst::ICMP_EQ, FieldMallocs[i],
Context->getNullValue(FieldMallocs[i]->getType()),
Context.getNullValue(FieldMallocs[i]->getType()),
"isnull");
if (!RunningOr)
RunningOr = Cond; // First seteq
@ -1339,7 +1339,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
Context->getNullValue(GVVal->getType()),
Context.getNullValue(GVVal->getType()),
"tmp");
BasicBlock *FreeBlock = BasicBlock::Create("free_it", OrigBB->getParent());
BasicBlock *NextBlock = BasicBlock::Create("next", OrigBB->getParent());
@ -1347,7 +1347,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,

// Fill in FreeBlock.
new FreeInst(GVVal, FreeBlock);
new StoreInst(Context->getNullValue(GVVal->getType()), FieldGlobals[i],
new StoreInst(Context.getNullValue(GVVal->getType()), FieldGlobals[i],
FreeBlock);
BranchInst::Create(NextBlock, FreeBlock);

@ -1387,7 +1387,7 @@ static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
// Insert a store of null into each global.
for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
Constant *Null = Context->getNullValue(PT->getElementType());
Constant *Null = Context.getNullValue(PT->getElementType());
new StoreInst(Null, FieldGlobals[i], SI);
}
// Erase the original store.
@ -1445,7 +1445,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
MallocInst *MI,
Module::global_iterator &GVI,
TargetData &TD,
LLVMContext *Context) {
LLVMContext &Context) {
// If this is a malloc of an abstract type, don't touch it.
if (!MI->getAllocatedType()->isSized())
return false;
@ -1508,7 +1508,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
if (const ArrayType *AT = dyn_cast<ArrayType>(MI->getAllocatedType())) {
MallocInst *NewMI =
new MallocInst(AllocSTy,
Context->getConstantInt(Type::Int32Ty, AT->getNumElements()),
Context.getConstantInt(Type::Int32Ty, AT->getNumElements()),
"", MI);
NewMI->takeName(MI);
Value *Cast = new BitCastInst(NewMI, MI->getType(), "tmp", MI);
@ -1529,7 +1529,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
Module::global_iterator &GVI,
TargetData &TD, LLVMContext *Context) {
TargetData &TD, LLVMContext &Context) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();

@ -1542,7 +1542,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
if (GV->getInitializer()->getType() != SOVC->getType())
SOVC =
Context->getConstantExprBitCast(SOVC, GV->getInitializer()->getType());
Context.getConstantExprBitCast(SOVC, GV->getInitializer()->getType());

// Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, Context))
@ -1561,7 +1561,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
/// can shrink the global into a boolean and select between the two values
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
LLVMContext *Context) {
LLVMContext &Context) {
const Type *GVElType = GV->getType()->getElementType();

// If GVElType is already i1, it is already shrunk. If the type of the GV is
@ -1582,8 +1582,8 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
DOUT << " *** SHRINKING TO BOOL: " << *GV;

// Create the new global, initializing it to false.
GlobalVariable *NewGV = new GlobalVariable(*Context, Type::Int1Ty, false,
GlobalValue::InternalLinkage, Context->getFalse(),
GlobalVariable *NewGV = new GlobalVariable(Context, Type::Int1Ty, false,
GlobalValue::InternalLinkage, Context.getFalse(),
GV->getName()+".b",
GV->isThreadLocal());
GV->getParent()->getGlobalList().insert(GV, NewGV);
@ -1605,7 +1605,7 @@ static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
// Only do this if we weren't storing a loaded value.
Value *StoreVal;
if (StoringOther || SI->getOperand(0) == InitVal)
StoreVal = Context->getConstantInt(Type::Int1Ty, StoringOther);
StoreVal = Context.getConstantInt(Type::Int1Ty, StoringOther);
else {
// Otherwise, we are storing a previously loaded copy. To do this,
// change the copy from copying the original value to just copying the
@ -1721,7 +1721,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Delete any stores we can find to the global. We may not be able to
// make it completely dead though.
bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(),
Context);
GV->getContext());

// If the global is dead now, delete it.
if (GV->use_empty()) {
@ -1736,7 +1736,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setConstant(true);

// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer(), Context);
CleanupConstantGlobalUsers(GV, GV->getInitializer(), GV->getContext());

// If the global is dead now, just nuke it.
if (GV->use_empty()) {
@ -1751,7 +1751,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
} else if (!GV->getInitializer()->getType()->isSingleValueType()) {
if (GlobalVariable *FirstNewGV = SRAGlobal(GV,
getAnalysis<TargetData>(),
Context)) {
GV->getContext())) {
GVI = FirstNewGV; // Don't skip the newly produced globals!
return true;
}
@ -1766,7 +1766,8 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
GV->setInitializer(SOVConstant);

// Clean up any obviously simplifiable users now.
CleanupConstantGlobalUsers(GV, GV->getInitializer(), Context);
CleanupConstantGlobalUsers(GV, GV->getInitializer(),
GV->getContext());

if (GV->use_empty()) {
DOUT << " *** Substituting initializer allowed us to "
@ -1783,13 +1784,13 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
// Try to optimize globals based on the knowledge that only one value
// (besides its initializer) is ever stored to the global.
if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
getAnalysis<TargetData>(), Context))
getAnalysis<TargetData>(), GV->getContext()))
return true;

// Otherwise, if the global was not a boolean, we can shrink it to be a
// boolean.
if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
if (TryToShrinkGlobalToBoolean(GV, SOVConstant, Context)) {
if (TryToShrinkGlobalToBoolean(GV, SOVConstant, GV->getContext())) {
++NumShrunkToBool;
return true;
}
@ -1943,10 +1944,10 @@ static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
/// specified array, returning the new global to use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
const std::vector<Function*> &Ctors,
LLVMContext *Context) {
LLVMContext &Context) {
// If we made a change, reassemble the initializer list.
std::vector<Constant*> CSVals;
CSVals.push_back(Context->getConstantInt(Type::Int32Ty, 65535));
CSVals.push_back(Context.getConstantInt(Type::Int32Ty, 65535));
CSVals.push_back(0);

// Create the new init list.
@ -1955,18 +1956,18 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
if (Ctors[i]) {
CSVals[1] = Ctors[i];
} else {
const Type *FTy = Context->getFunctionType(Type::VoidTy, false);
const PointerType *PFTy = Context->getPointerTypeUnqual(FTy);
CSVals[1] = Context->getNullValue(PFTy);
CSVals[0] = Context->getConstantInt(Type::Int32Ty, 2147483647);
const Type *FTy = Context.getFunctionType(Type::VoidTy, false);
const PointerType *PFTy = Context.getPointerTypeUnqual(FTy);
CSVals[1] = Context.getNullValue(PFTy);
CSVals[0] = Context.getConstantInt(Type::Int32Ty, 2147483647);
}
CAList.push_back(Context->getConstantStruct(CSVals));
CAList.push_back(Context.getConstantStruct(CSVals));
}

// Create the array initializer.
const Type *StructTy =
cast<ArrayType>(GCL->getType()->getElementType())->getElementType();
Constant *CA = Context->getConstantArray(ArrayType::get(StructTy,
Constant *CA = Context.getConstantArray(ArrayType::get(StructTy,
CAList.size()), CAList);

// If we didn't change the number of elements, don't create a new GV.
@ -1976,7 +1977,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
}

// Create the new global and insert it next to the existing list.
GlobalVariable *NGV = new GlobalVariable(*Context, CA->getType(),
GlobalVariable *NGV = new GlobalVariable(Context, CA->getType(),
GCL->isConstant(),
GCL->getLinkage(), CA, "",
GCL->isThreadLocal());
@ -1987,7 +1988,7 @@ static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
if (!GCL->use_empty()) {
Constant *V = NGV;
if (V->getType() != GCL->getType())
V = Context->getConstantExprBitCast(V, GCL->getType());
V = Context.getConstantExprBitCast(V, GCL->getType());
GCL->replaceAllUsesWith(V);
}
GCL->eraseFromParent();
@ -2011,7 +2012,7 @@ static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
/// enough for us to understand. In particular, if it is a cast of something,
/// we punt. We basically just support direct accesses to globals and GEP's of
/// globals. This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext *Context) {
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage())
return false; // do not allow weak/linkonce/dllimport/dllexport linkage.
@ -2036,7 +2037,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext &Context) {
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
ConstantExpr *Addr, unsigned OpNo,
LLVMContext *Context) {
LLVMContext &Context) {
// Base case of the recursion.
if (OpNo == Addr->getNumOperands()) {
assert(Val->getType() == Init->getType() && "Type mismatch!");
@ -2052,10 +2053,10 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
Elts.push_back(cast<Constant>(*i));
} else if (isa<ConstantAggregateZero>(Init)) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
Elts.push_back(Context->getNullValue(STy->getElementType(i)));
Elts.push_back(Context.getNullValue(STy->getElementType(i)));
} else if (isa<UndefValue>(Init)) {
for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
Elts.push_back(Context->getUndef(STy->getElementType(i)));
Elts.push_back(Context.getUndef(STy->getElementType(i)));
} else {
llvm_unreachable("This code is out of sync with "
" ConstantFoldLoadThroughGEPConstantExpr");
@ -2068,7 +2069,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1, Context);

// Return the modified struct.
return Context->getConstantStruct(&Elts[0], Elts.size(), STy->isPacked());
return Context.getConstantStruct(&Elts[0], Elts.size(), STy->isPacked());
} else {
ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
const ArrayType *ATy = cast<ArrayType>(Init->getType());
@ -2079,10 +2080,10 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
Elts.push_back(cast<Constant>(*i));
} else if (isa<ConstantAggregateZero>(Init)) {
Constant *Elt = Context->getNullValue(ATy->getElementType());
Constant *Elt = Context.getNullValue(ATy->getElementType());
Elts.assign(ATy->getNumElements(), Elt);
} else if (isa<UndefValue>(Init)) {
Constant *Elt = Context->getUndef(ATy->getElementType());
Constant *Elt = Context.getUndef(ATy->getElementType());
Elts.assign(ATy->getNumElements(), Elt);
} else {
llvm_unreachable("This code is out of sync with "
@ -2092,14 +2093,14 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
assert(CI->getZExtValue() < ATy->getNumElements());
Elts[CI->getZExtValue()] =
EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1, Context);
return Context->getConstantArray(ATy, Elts);
return Context.getConstantArray(ATy, Elts);
}
}

/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr,
LLVMContext *Context) {
LLVMContext &Context) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
assert(GV->hasInitializer());
GV->setInitializer(Val);
@ -2119,7 +2120,7 @@ static void CommitValueTo(Constant *Val, Constant *Addr,
/// decide, return null.
static Constant *ComputeLoadResult(Constant *P,
const DenseMap<Constant*, Constant*> &Memory,
LLVMContext *Context) {
LLVMContext &Context) {
// If this memory location has been recently stored, use the stored value: it
// is the most up-to-date.
DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
@ -2158,7 +2159,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
return false;

LLVMContext *Context = F->getContext();
LLVMContext &Context = F->getContext();

CallStack.push_back(F);

@ -2192,20 +2193,20 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
Constant *Val = getVal(Values, SI->getOperand(0));
MutatedMemory[Ptr] = Val;
} else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
InstResult = Context->getConstantExpr(BO->getOpcode(),
InstResult = Context.getConstantExpr(BO->getOpcode(),
getVal(Values, BO->getOperand(0)),
getVal(Values, BO->getOperand(1)));
} else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
InstResult = Context->getConstantExprCompare(CI->getPredicate(),
InstResult = Context.getConstantExprCompare(CI->getPredicate(),
getVal(Values, CI->getOperand(0)),
getVal(Values, CI->getOperand(1)));
} else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
InstResult = Context->getConstantExprCast(CI->getOpcode(),
InstResult = Context.getConstantExprCast(CI->getOpcode(),
getVal(Values, CI->getOperand(0)),
CI->getType());
} else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
InstResult =
Context->getConstantExprSelect(getVal(Values, SI->getOperand(0)),
Context.getConstantExprSelect(getVal(Values, SI->getOperand(0)),
getVal(Values, SI->getOperand(1)),
getVal(Values, SI->getOperand(2)));
} else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
@ -2215,7 +2216,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
i != e; ++i)
GEPOps.push_back(getVal(Values, *i));
InstResult =
Context->getConstantExprGetElementPtr(P, &GEPOps[0], GEPOps.size());
Context.getConstantExprGetElementPtr(P, &GEPOps[0], GEPOps.size());
} else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
if (LI->isVolatile()) return false; // no volatile accesses.
InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
@ -2224,9 +2225,9 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
} else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
if (AI->isArrayAllocation()) return false; // Cannot handle array allocs.
const Type *Ty = AI->getType()->getElementType();
AllocaTmps.push_back(new GlobalVariable(*Context, Ty, false,
AllocaTmps.push_back(new GlobalVariable(Context, Ty, false,
GlobalValue::InternalLinkage,
Context->getUndef(Ty),
Context.getUndef(Ty),
AI->getName()));
InstResult = AllocaTmps.back();
} else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {
@ -2368,7 +2369,7 @@ static bool EvaluateStaticConstructor(Function *F) {
// silly, e.g. storing the address of the alloca somewhere and using it
// later. Since this is undefined, we'll just make it be null.
if (!Tmp->use_empty())
Tmp->replaceAllUsesWith(F->getContext()->getNullValue(Tmp->getType()));
Tmp->replaceAllUsesWith(F->getContext().getNullValue(Tmp->getType()));
delete Tmp;
}

@ -2412,7 +2413,7 @@ bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {

if (!MadeChange) return false;

GCL = InstallGlobalCtors(GCL, Ctors, Context);
GCL = InstallGlobalCtors(GCL, Ctors, GCL->getContext());
return true;
}

@ -2476,7 +2477,6 @@ bool GlobalOpt::OptimizeGlobalAliases(Module &M) {

bool GlobalOpt::runOnModule(Module &M) {
bool Changed = false;
Context = &M.getContext();

// Try to find the llvm.globalctors list.
GlobalVariable *GlobalCtors = FindGlobalCtors(M);
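GlobalOpt's driver above shows the second idiom in this commit: instead of threading the pass's cached Context member into every helper, call sites pull the context off the IR object at hand with GV->getContext(). A minimal sketch of that idiom, assuming only accessors this diff already uses (the function itself is hypothetical):

#include "llvm/GlobalVariable.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

// Every Value knows the context it lives in, so the helper needs no
// extra parameter and no pass state at all.
static void replaceWithUndef(GlobalVariable *GV) {
  LLVMContext &Context = GV->getContext();
  GV->replaceAllUsesWith(Context.getUndef(GV->getType()));
}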
@ -56,8 +56,6 @@ bool IPCP::runOnModule(Module &M) {
bool Changed = false;
bool LocalChange = true;

Context = &M.getContext();

// FIXME: instead of using smart algorithms, we just iterate until we stop
// making changes.
while (LocalChange) {
@ -136,7 +134,7 @@ bool IPCP::PropagateConstantsIntoArguments(Function &F) {
continue;

Value *V = ArgumentConstants[i].first;
if (V == 0) V = Context->getUndef(AI->getType());
if (V == 0) V = F.getContext().getUndef(AI->getType());
AI->replaceAllUsesWith(V);
++NumArgumentsProped;
MadeChange = true;
@ -161,15 +159,17 @@ bool IPCP::PropagateConstantReturn(Function &F) {
// propagate information about its results into callers.
if (F.mayBeOverridden())
return false;

LLVMContext &Context = F.getContext();

// Check to see if this function returns a constant.
SmallVector<Value *,4> RetVals;
const StructType *STy = dyn_cast<StructType>(F.getReturnType());
if (STy)
for (unsigned i = 0, e = STy->getNumElements(); i < e; ++i)
RetVals.push_back(Context->getUndef(STy->getElementType(i)));
RetVals.push_back(Context.getUndef(STy->getElementType(i)));
else
RetVals.push_back(Context->getUndef(F.getReturnType()));
RetVals.push_back(Context.getUndef(F.getReturnType()));

unsigned NumNonConstant = 0;
for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
@ -44,8 +44,6 @@ static RegisterPass<IndMemRemPass>
X("indmemrem","Indirect Malloc and Free Removal");

bool IndMemRemPass::runOnModule(Module &M) {
Context = &M.getContext();

// In theory, all direct calls of malloc and free should be promoted
// to intrinsics. Therefore, this goes through and finds where the
// address of free or malloc are taken and replaces those with bounce
@ -102,8 +102,6 @@ bool InternalizePass::runOnModule(Module &M) {
CallGraph *CG = getAnalysisIfAvailable<CallGraph>();
CallGraphNode *ExternalNode = CG ? CG->getExternalCallingNode() : 0;

Context = &M.getContext();

if (ExternalNames.empty()) {
// Return if we're not in 'all but main' mode and have no external api
if (!AllButMain)
@ -134,8 +134,6 @@ static RegisterPass<LowerSetJmp> X("lowersetjmp", "Lower Set Jump");
bool LowerSetJmp::runOnModule(Module& M) {
bool Changed = false;

Context = &M.getContext();

// These are what the functions are called.
Function* SetJmp = M.getFunction("llvm.setjmp");
Function* LongJmp = M.getFunction("llvm.longjmp");
@ -203,8 +201,9 @@ bool LowerSetJmp::runOnModule(Module& M) {
// This function is always successful, unless it isn't.
bool LowerSetJmp::doInitialization(Module& M)
{
const Type *SBPTy = Context->getPointerTypeUnqual(Type::Int8Ty);
const Type *SBPPTy = Context->getPointerTypeUnqual(SBPTy);
LLVMContext &Context = M.getContext();
const Type *SBPTy = Context.getPointerTypeUnqual(Type::Int8Ty);
const Type *SBPPTy = Context.getPointerTypeUnqual(SBPTy);

// N.B. See llvm/runtime/GCCLibraries/libexception/SJLJ-Exception.h for
// a description of the following library functions.
@ -260,7 +259,7 @@ bool LowerSetJmp::IsTransformableFunction(const std::string& Name) {
// throwing the exception for us.
void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
{
const Type* SBPTy = Context->getPointerTypeUnqual(Type::Int8Ty);
const Type* SBPTy = Inst->getContext().getPointerTypeUnqual(Type::Int8Ty);

// Create the call to "__llvm_sjljeh_throw_longjmp". This takes the
// same parameters as "longjmp", except that the buffer is cast to a
@ -291,7 +290,8 @@ void LowerSetJmp::TransformLongJmpCall(CallInst* Inst)
Removed = &BB->back();
// If the removed instructions have any users, replace them now.
if (!Removed->use_empty())
Removed->replaceAllUsesWith(Context->getUndef(Removed->getType()));
Removed->replaceAllUsesWith(
Inst->getContext().getUndef(Removed->getType()));
Removed->eraseFromParent();
} while (Removed != Inst);

@ -312,7 +312,7 @@ AllocaInst* LowerSetJmp::GetSetJmpMap(Function* Func)
assert(Inst && "Couldn't find even ONE instruction in entry block!");

// Fill in the alloca and call to initialize the SJ map.
const Type *SBPTy = Context->getPointerTypeUnqual(Type::Int8Ty);
const Type *SBPTy = Func->getContext().getPointerTypeUnqual(Type::Int8Ty);
AllocaInst* Map = new AllocaInst(SBPTy, 0, "SJMap", Inst);
CallInst::Create(InitSJMap, Map, "", Inst);
return SJMap[Func] = Map;
@ -378,12 +378,12 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
Function* Func = ABlock->getParent();

// Add this setjmp to the setjmp map.
const Type* SBPTy = Context->getPointerTypeUnqual(Type::Int8Ty);
const Type* SBPTy = Inst->getContext().getPointerTypeUnqual(Type::Int8Ty);
CastInst* BufPtr =
new BitCastInst(Inst->getOperand(1), SBPTy, "SBJmpBuf", Inst);
std::vector<Value*> Args =
make_vector<Value*>(GetSetJmpMap(Func), BufPtr,
Context->getConstantInt(Type::Int32Ty,
Inst->getContext().getConstantInt(Type::Int32Ty,
SetJmpIDMap[Func]++), 0);
CallInst::Create(AddSJToMap, Args.begin(), Args.end(), "", Inst);

@ -430,11 +430,11 @@ void LowerSetJmp::TransformSetJmpCall(CallInst* Inst)
PHINode* PHI = PHINode::Create(Type::Int32Ty, "SetJmpReturn", Inst);

// Coming from a call to setjmp, the return is 0.
PHI->addIncoming(Context->getNullValue(Type::Int32Ty), ABlock);
PHI->addIncoming(Inst->getContext().getNullValue(Type::Int32Ty), ABlock);

// Add the case for this setjmp's number...
SwitchValuePair SVP = GetSJSwitch(Func, GetRethrowBB(Func));
SVP.first->addCase(Context->getConstantInt(Type::Int32Ty,
SVP.first->addCase(Inst->getContext().getConstantInt(Type::Int32Ty,
SetJmpIDMap[Func] - 1),
SetJmpContBlock);
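LowerSetJmp illustrates the same cleanup inside instruction-level transforms: the removed Context member is replaced by Inst->getContext() at each use. A short sketch in that style (hypothetical helper; getConstantInt is the transitional factory used above):

#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Type.h"
using namespace llvm;

// Build a per-call setjmp ID constant from the call's own context.
static Constant *getSetJmpID(CallInst *Inst, unsigned ID) {
  return Inst->getContext().getConstantInt(Type::Int32Ty, ID);
}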
@ -520,7 +520,7 @@ static void AliasGToF(Function *F, Function *G) {

GlobalAlias *GA = new GlobalAlias(
G->getType(), G->getLinkage(), "",
F->getContext()->getConstantExprBitCast(F, G->getType()), G->getParent());
F->getContext().getConstantExprBitCast(F, G->getType()), G->getParent());
F->setAlignment(std::max(F->getAlignment(), G->getAlignment()));
GA->takeName(G);
GA->setVisibility(G->getVisibility());
@ -616,8 +616,6 @@ static bool fold(std::vector<Function *> &FnVec, unsigned i, unsigned j) {
bool MergeFunctions::runOnModule(Module &M) {
bool Changed = false;

Context = &M.getContext();

std::map<unsigned long, std::vector<Function *> > FnMap;

for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
@ -141,8 +141,6 @@ Function* PartialInliner::unswitchFunction(Function* F) {
}

bool PartialInliner::runOnModule(Module& M) {
Context = &M.getContext();

std::vector<Function*> worklist;
worklist.reserve(M.size());
for (Module::iterator FI = M.begin(), FE = M.end(); FI != FE; ++FI)
@ -108,8 +108,6 @@ SpecializeFunction(Function* F,


bool PartSpec::runOnModule(Module &M) {
Context = &M.getContext();

bool Changed = false;
for (Module::iterator I = M.begin(); I != M.end(); ++I) {
Function &F = *I;
@ -243,7 +243,7 @@ void PruneEH::DeleteBasicBlock(BasicBlock *BB) {
} else if (InvokeInst *II = dyn_cast<InvokeInst>(I))
CGN->removeCallEdgeFor(II);
if (!I->use_empty())
I->replaceAllUsesWith(Context->getUndef(I->getType()));
I->replaceAllUsesWith(BB->getContext().getUndef(I->getType()));
}

// Get the list of successors of this block.
@ -70,8 +70,8 @@ ModulePass *llvm::createRaiseAllocationsPass() {
// function into the appropriate instruction.
//
void RaiseAllocations::doInitialization(Module &M) {
Context = &M.getContext();

LLVMContext &Context = M.getContext();

// Get Malloc and free prototypes if they exist!
MallocFunc = M.getFunction("malloc");
if (MallocFunc) {
@ -79,7 +79,7 @@ void RaiseAllocations::doInitialization(Module &M) {

// Get the expected prototype for malloc
const FunctionType *Malloc1Type =
Context->getFunctionType(Context->getPointerTypeUnqual(Type::Int8Ty),
Context.getFunctionType(Context.getPointerTypeUnqual(Type::Int8Ty),
std::vector<const Type*>(1, Type::Int64Ty), false);

// Check to see if we got the expected malloc
@ -87,14 +87,14 @@ void RaiseAllocations::doInitialization(Module &M) {
// Check to see if the prototype is wrong, giving us i8*(i32) * malloc
// This handles the common declaration of: 'void *malloc(unsigned);'
const FunctionType *Malloc2Type =
Context->getFunctionType(Context->getPointerTypeUnqual(Type::Int8Ty),
Context.getFunctionType(Context.getPointerTypeUnqual(Type::Int8Ty),
std::vector<const Type*>(1, Type::Int32Ty), false);
if (TyWeHave != Malloc2Type) {
// Check to see if the prototype is missing, giving us
// i8*(...) * malloc
// This handles the common declaration of: 'void *malloc();'
const FunctionType *Malloc3Type =
Context->getFunctionType(Context->getPointerTypeUnqual(Type::Int8Ty),
Context.getFunctionType(Context.getPointerTypeUnqual(Type::Int8Ty),
true);
if (TyWeHave != Malloc3Type)
// Give up
@ -108,21 +108,21 @@ void RaiseAllocations::doInitialization(Module &M) {
const FunctionType* TyWeHave = FreeFunc->getFunctionType();

// Get the expected prototype for void free(i8*)
const FunctionType *Free1Type = Context->getFunctionType(Type::VoidTy,
std::vector<const Type*>(1, Context->getPointerTypeUnqual(Type::Int8Ty)),
const FunctionType *Free1Type = Context.getFunctionType(Type::VoidTy,
std::vector<const Type*>(1, Context.getPointerTypeUnqual(Type::Int8Ty)),
false);

if (TyWeHave != Free1Type) {
// Check to see if the prototype was forgotten, giving us
// void (...) * free
// This handles the common forward declaration of: 'void free();'
const FunctionType* Free2Type = Context->getFunctionType(Type::VoidTy,
const FunctionType* Free2Type = Context.getFunctionType(Type::VoidTy,
true);

if (TyWeHave != Free2Type) {
// One last try, check to see if we can find free as
// int (...)* free. This handles the case where NOTHING was declared.
const FunctionType* Free3Type = Context->getFunctionType(Type::Int32Ty,
const FunctionType* Free3Type = Context.getFunctionType(Type::Int32Ty,
true);

if (TyWeHave != Free3Type) {
@ -143,6 +143,8 @@ void RaiseAllocations::doInitialization(Module &M) {
bool RaiseAllocations::runOnModule(Module &M) {
// Find the malloc/free prototypes...
doInitialization(M);

LLVMContext &Context = M.getContext();

bool Changed = false;

@ -222,7 +224,7 @@ bool RaiseAllocations::runOnModule(Module &M) {
Value *Source = *CS.arg_begin();
if (!isa<PointerType>(Source->getType()))
Source = new IntToPtrInst(Source,
Context->getPointerTypeUnqual(Type::Int8Ty),
Context.getPointerTypeUnqual(Type::Int8Ty),
"FreePtrCast", I);
new FreeInst(Source, I);

@ -233,7 +235,7 @@ bool RaiseAllocations::runOnModule(Module &M) {

// Delete the old call site
if (I->getType() != Type::VoidTy)
I->replaceAllUsesWith(Context->getUndef(I->getType()));
I->replaceAllUsesWith(Context.getUndef(I->getType()));
I->eraseFromParent();
Changed = true;
++NumRaised;
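RaiseAllocations keeps a local LLVMContext &Context in scope and routes every type it builds through it; with the reference bound, each factory call simply loses one dereference. A sketch of the same malloc-prototype check under that API (function name hypothetical, calls as in the diff):

#include "llvm/DerivedTypes.h"
#include "llvm/LLVMContext.h"
#include <vector>
using namespace llvm;

// The expected prototype i8* malloc(i64), built via the context factories.
static const FunctionType *getExpectedMallocType(LLVMContext &Context) {
  return Context.getFunctionType(Context.getPointerTypeUnqual(Type::Int8Ty),
                                 std::vector<const Type*>(1, Type::Int64Ty),
                                 false);
}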
@ -42,7 +42,6 @@ X("strip-dead-prototypes", "Strip Unused Function Prototypes");

bool StripDeadPrototypesPass::runOnModule(Module &M) {
bool MadeChange = false;
Context = &M.getContext();

// Erase dead function prototypes.
for (Module::iterator I = M.begin(), E = M.end(); I != E; ) {
@ -369,7 +369,6 @@ bool StripDebugInfo(Module &M) {
}

bool StripSymbols::runOnModule(Module &M) {
Context = &M.getContext();
bool Changed = false;
Changed |= StripDebugInfo(M);
if (!OnlyDebugInfo)
@ -230,7 +230,8 @@ Function *SRETPromotion::cloneFunctionBody(Function *F,
AttributesVec.push_back(AttributeWithIndex::get(~0, attrs));


FunctionType *NFTy = Context->getFunctionType(STy, Params, FTy->isVarArg());
FunctionType *NFTy =
F->getContext().getFunctionType(STy, Params, FTy->isVarArg());
Function *NF = Function::Create(NFTy, F->getLinkage());
NF->takeName(F);
NF->copyAttributesFrom(F);
@ -63,10 +63,10 @@ bool FunctionProfiler::runOnModule(Module &M) {
if (!I->isDeclaration())
++NumFunctions;

const Type *ATy = Context->getArrayType(Type::Int32Ty, NumFunctions);
const Type *ATy = M.getContext().getArrayType(Type::Int32Ty, NumFunctions);
GlobalVariable *Counters =
new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
Context->getNullValue(ATy), "FuncProfCounters");
M.getContext().getNullValue(ATy), "FuncProfCounters");

// Instrument all of the functions...
unsigned i = 0;
@ -108,10 +108,10 @@ bool BlockProfiler::runOnModule(Module &M) {
for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I)
NumBlocks += I->size();

const Type *ATy = Context->getArrayType(Type::Int32Ty, NumBlocks);
const Type *ATy = M.getContext().getArrayType(Type::Int32Ty, NumBlocks);
GlobalVariable *Counters =
new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
Context->getNullValue(ATy), "BlockProfCounters");
M.getContext().getNullValue(ATy), "BlockProfCounters");

// Instrument all of the blocks...
unsigned i = 0;
@ -64,10 +64,10 @@ bool EdgeProfiler::runOnModule(Module &M) {
NumEdges += BB->getTerminator()->getNumSuccessors();
}

const Type *ATy = Context->getArrayType(Type::Int32Ty, NumEdges);
const Type *ATy = M.getContext().getArrayType(Type::Int32Ty, NumEdges);
GlobalVariable *Counters =
new GlobalVariable(M, ATy, false, GlobalValue::InternalLinkage,
Context->getNullValue(ATy), "EdgeProfCounters");
M.getContext().getNullValue(ATy), "EdgeProfCounters");

// Instrument all of the edges...
unsigned i = 0;
@ -23,10 +23,10 @@

void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
GlobalValue *Array) {
LLVMContext *Context = MainFn->getContext();
LLVMContext &Context = MainFn->getContext();
const Type *ArgVTy =
Context->getPointerTypeUnqual(Context->getPointerTypeUnqual(Type::Int8Ty));
const PointerType *UIntPtr = Context->getPointerTypeUnqual(Type::Int32Ty);
Context.getPointerTypeUnqual(Context.getPointerTypeUnqual(Type::Int8Ty));
const PointerType *UIntPtr = Context.getPointerTypeUnqual(Type::Int32Ty);
Module &M = *MainFn->getParent();
Constant *InitFn = M.getOrInsertFunction(FnName, Type::Int32Ty, Type::Int32Ty,
ArgVTy, UIntPtr, Type::Int32Ty,
@ -35,27 +35,27 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
// This could force argc and argv into programs that wouldn't otherwise have
// them, but instead we just pass null values in.
std::vector<Value*> Args(4);
Args[0] = Context->getNullValue(Type::Int32Ty);
Args[1] = Context->getNullValue(ArgVTy);
Args[0] = Context.getNullValue(Type::Int32Ty);
Args[1] = Context.getNullValue(ArgVTy);

// Skip over any allocas in the entry block.
BasicBlock *Entry = MainFn->begin();
BasicBlock::iterator InsertPos = Entry->begin();
while (isa<AllocaInst>(InsertPos)) ++InsertPos;

std::vector<Constant*> GEPIndices(2, Context->getNullValue(Type::Int32Ty));
std::vector<Constant*> GEPIndices(2, Context.getNullValue(Type::Int32Ty));
unsigned NumElements = 0;
if (Array) {
Args[2] = Context->getConstantExprGetElementPtr(Array, &GEPIndices[0],
Args[2] = Context.getConstantExprGetElementPtr(Array, &GEPIndices[0],
GEPIndices.size());
NumElements =
cast<ArrayType>(Array->getType()->getElementType())->getNumElements();
} else {
// If this profiling instrumentation doesn't have a constant array, just
// pass null.
Args[2] = Context->getConstantPointerNull(UIntPtr);
Args[2] = Context.getConstantPointerNull(UIntPtr);
}
Args[3] = Context->getConstantInt(Type::Int32Ty, NumElements);
Args[3] = Context.getConstantInt(Type::Int32Ty, NumElements);

Instruction *InitCall = CallInst::Create(InitFn, Args.begin(), Args.end(),
"newargc", InsertPos);
@ -101,7 +101,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,

void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
GlobalValue *CounterArray) {
LLVMContext *Context = BB->getContext();
LLVMContext &Context = BB->getContext();

// Insert the increment after any alloca or PHI instructions...
BasicBlock::iterator InsertPos = BB->getFirstNonPHI();
@ -110,16 +110,16 @@ void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,

// Create the getelementptr constant expression
std::vector<Constant*> Indices(2);
Indices[0] = Context->getNullValue(Type::Int32Ty);
Indices[1] = Context->getConstantInt(Type::Int32Ty, CounterNum);
Indices[0] = Context.getNullValue(Type::Int32Ty);
Indices[1] = Context.getConstantInt(Type::Int32Ty, CounterNum);
Constant *ElementPtr =
Context->getConstantExprGetElementPtr(CounterArray, &Indices[0],
Context.getConstantExprGetElementPtr(CounterArray, &Indices[0],
Indices.size());

// Load, increment and store the value back.
Value *OldVal = new LoadInst(ElementPtr, "OldFuncCounter", InsertPos);
Value *NewVal = BinaryOperator::Create(Instruction::Add, OldVal,
Context->getConstantInt(Type::Int32Ty, 1),
Context.getConstantInt(Type::Int32Ty, 1),
"NewFuncCounter", InsertPos);
new StoreInst(NewVal, ElementPtr, InsertPos);
}
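The counter bump emitted by IncrementCounterInBlock above only needs the context for its two integer constants, so the reference parameter is enough. A compressed sketch of that load/add/store sequence (helper name hypothetical, calls as in the diff):

#include "llvm/BasicBlock.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
using namespace llvm;

static void bumpCounter(Value *Slot, BasicBlock::iterator InsertPos,
                        LLVMContext &Context) {
  // Load the current count, add one, and store it back in place.
  Value *Old = new LoadInst(Slot, "OldCounter", InsertPos);
  Value *New = BinaryOperator::Create(Instruction::Add, Old,
                                      Context.getConstantInt(Type::Int32Ty, 1),
                                      "NewCounter", InsertPos);
  new StoreInst(New, Slot, InsertPos);
}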
@ -209,16 +209,16 @@ void GlobalRandomCounter::PrepFunction(Function* F) {}

void GlobalRandomCounter::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext *Context = bb->getContext();
LLVMContext &Context = bb->getContext();

//decrement counter
LoadInst* l = new LoadInst(Counter, "counter", t);

ICmpInst* s = new ICmpInst(t, ICmpInst::ICMP_EQ, l,
Context->getConstantInt(T, 0),
Context.getConstantInt(T, 0),
"countercc");

Value* nv = BinaryOperator::CreateSub(l, Context->getConstantInt(T, 1),
Value* nv = BinaryOperator::CreateSub(l, Context.getConstantInt(T, 1),
"counternew", t);
new StoreInst(nv, Counter, t);
t->setCondition(s);
@ -283,16 +283,16 @@ void GlobalRandomCounterOpt::PrepFunction(Function* F) {

void GlobalRandomCounterOpt::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext *Context = bb->getContext();
LLVMContext &Context = bb->getContext();

//decrement counter
LoadInst* l = new LoadInst(AI, "counter", t);

ICmpInst* s = new ICmpInst(t, ICmpInst::ICMP_EQ, l,
Context->getConstantInt(T, 0),
Context.getConstantInt(T, 0),
"countercc");

Value* nv = BinaryOperator::CreateSub(l, Context->getConstantInt(T, 1),
Value* nv = BinaryOperator::CreateSub(l, Context.getConstantInt(T, 1),
"counternew", t);
new StoreInst(nv, AI, t);
t->setCondition(s);
@ -318,15 +318,15 @@ void CycleCounter::PrepFunction(Function* F) {}

void CycleCounter::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext *Context = bb->getContext();
LLVMContext &Context = bb->getContext();

CallInst* c = CallInst::Create(F, "rdcc", t);
BinaryOperator* b =
BinaryOperator::CreateAnd(c, Context->getConstantInt(Type::Int64Ty, rm),
BinaryOperator::CreateAnd(c, Context.getConstantInt(Type::Int64Ty, rm),
"mrdcc", t);

ICmpInst *s = new ICmpInst(t, ICmpInst::ICMP_EQ, b,
Context->getConstantInt(Type::Int64Ty, 0),
Context.getConstantInt(Type::Int64Ty, 0),
"mrdccc");

t->setCondition(s);
@ -352,16 +352,17 @@ void RSProfilers_std::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNu

// Create the getelementptr constant expression
std::vector<Constant*> Indices(2);
Indices[0] = Context->getNullValue(Type::Int32Ty);
Indices[1] = Context->getConstantInt(Type::Int32Ty, CounterNum);
Constant *ElementPtr = Context->getConstantExprGetElementPtr(CounterArray,
Indices[0] = BB->getContext().getNullValue(Type::Int32Ty);
Indices[1] = BB->getContext().getConstantInt(Type::Int32Ty, CounterNum);
Constant *ElementPtr =
BB->getContext().getConstantExprGetElementPtr(CounterArray,
&Indices[0], 2);

// Load, increment and store the value back.
Value *OldVal = new LoadInst(ElementPtr, "OldCounter", InsertPos);
profcode.insert(OldVal);
Value *NewVal = BinaryOperator::CreateAdd(OldVal,
Context->getConstantInt(Type::Int32Ty, 1),
BB->getContext().getConstantInt(Type::Int32Ty, 1),
"NewCounter", InsertPos);
profcode.insert(NewVal);
profcode.insert(new StoreInst(NewVal, ElementPtr, InsertPos));
@ -395,7 +396,7 @@ Value* ProfilerRS::Translate(Value* v) {
return i;
} else {
//translate this
Instruction* i2 = i->clone(*Context);
Instruction* i2 = i->clone(v->getContext());
if (i->hasName())
i2->setName("dup_" + i->getName());
TransCache[i] = i2;
@ -482,7 +483,7 @@ void ProfilerRS::ProcessBackEdge(BasicBlock* src, BasicBlock* dst, Function& F)
//b:
BranchInst::Create(cast<BasicBlock>(Translate(dst)), bbC);
BranchInst::Create(dst, cast<BasicBlock>(Translate(dst)),
Context->getConstantInt(Type::Int1Ty, true), bbCp);
F.getContext().getConstantInt(Type::Int1Ty, true), bbCp);
//c:
{
TerminatorInst* iB = src->getTerminator();
@ -539,7 +540,7 @@ bool ProfilerRS::runOnFunction(Function& F) {
ReplaceInstWithInst(T, BranchInst::Create(T->getSuccessor(0),
cast<BasicBlock>(
Translate(T->getSuccessor(0))),
Context->getConstantInt(Type::Int1Ty,
F.getContext().getConstantInt(Type::Int1Ty,
true)));

//do whatever is needed now that the function is duplicated
@ -518,7 +518,7 @@ static bool OptimizeCmpExpression(CmpInst *CI) {
|
||||
BasicBlock::iterator InsertPt = UserBB->getFirstNonPHI();
|
||||
|
||||
InsertedCmp =
|
||||
CmpInst::Create(*DefBB->getContext(), CI->getOpcode(),
|
||||
CmpInst::Create(DefBB->getContext(), CI->getOpcode(),
|
||||
CI->getPredicate(), CI->getOperand(0),
|
||||
CI->getOperand(1), "", InsertPt);
|
||||
MadeChange = true;
|
||||
@ -559,6 +559,8 @@ static bool IsNonLocalValue(Value *V, BasicBlock *BB) {
|
||||
bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
|
||||
const Type *AccessTy,
|
||||
DenseMap<Value*,Value*> &SunkAddrs) {
|
||||
LLVMContext &Context = MemoryInst->getContext();
|
||||
|
||||
// Figure out what addressing mode will be built up for this operation.
|
||||
SmallVector<Instruction*, 16> AddrModeInsts;
|
||||
ExtAddrMode AddrMode = AddressingModeMatcher::Match(Addr, AccessTy,MemoryInst,
|
||||
@ -615,7 +617,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
|
||||
V = new SExtInst(V, IntPtrTy, "sunkaddr", InsertPt);
|
||||
}
|
||||
if (AddrMode.Scale != 1)
|
||||
V = BinaryOperator::CreateMul(V, Context->getConstantInt(IntPtrTy,
|
||||
V = BinaryOperator::CreateMul(V, Context.getConstantInt(IntPtrTy,
|
||||
AddrMode.Scale),
|
||||
"sunkaddr", InsertPt);
|
||||
Result = V;
|
||||
@ -647,7 +649,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
|
||||
|
||||
// Add in the Base Offset if present.
|
||||
if (AddrMode.BaseOffs) {
|
||||
Value *V = Context->getConstantInt(IntPtrTy, AddrMode.BaseOffs);
|
||||
Value *V = Context.getConstantInt(IntPtrTy, AddrMode.BaseOffs);
|
||||
if (Result)
|
||||
Result = BinaryOperator::CreateAdd(Result, V, "sunkaddr", InsertPt);
|
||||
else
|
||||
@ -655,7 +657,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
|
||||
}
|
||||
|
||||
if (Result == 0)
|
||||
SunkAddr = Context->getNullValue(Addr->getType());
|
||||
SunkAddr = Context.getNullValue(Addr->getType());
|
||||
else
|
||||
SunkAddr = new IntToPtrInst(Result, Addr->getType(), "sunkaddr",InsertPt);
|
||||
}
|
||||
|
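The OptimizeMemoryInst hunks sink the address computation as integer arithmetic, SunkAddr = Base + Scale*Index + BaseOffs on IntPtrTy, and only cast back to a pointer at the end. A sketch of the scale-and-offset step under the reference-based context (names follow the diff; the addressing-mode matching around it is elided):

    // V holds the index, already sign-extended to IntPtrTy.
    if (AddrMode.Scale != 1)
      V = BinaryOperator::CreateMul(
          V, Context.getConstantInt(IntPtrTy, AddrMode.Scale),
          "sunkaddr", InsertPt);
    Result = V;
    // Fold in the constant displacement, if any.
    if (AddrMode.BaseOffs) {
      Value *Off = Context.getConstantInt(IntPtrTy, AddrMode.BaseOffs);
      Result = Result ? BinaryOperator::CreateAdd(Result, Off,
                                                  "sunkaddr", InsertPt)
                      : Off;
    }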
@ -67,7 +67,7 @@ bool ConstantPropagation::runOnFunction(Function &F) {
WorkList.erase(WorkList.begin()); // Get an element from the worklist...

if (!I->use_empty()) // Don't muck with dead instructions...
if (Constant *C = ConstantFoldInstruction(I, Context)) {
if (Constant *C = ConstantFoldInstruction(I, F.getContext())) {
// Add all of the users of this instruction to the worklist, they might
// be constant propagatable now...
for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();

@ -797,7 +797,7 @@ Value *GVN::GetValueForBlock(BasicBlock *BB, Instruction* orig,
// If the block is unreachable, just return undef, since this path
// can't actually occur at runtime.
if (!DT->isReachableFromEntry(BB))
return Phis[BB] = Context->getUndef(orig->getType());
return Phis[BB] = BB->getContext().getUndef(orig->getType());

if (BasicBlock *Pred = BB->getSinglePredecessor()) {
Value *ret = GetValueForBlock(Pred, orig, Phis);

@ -985,7 +985,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
// Loading the allocation -> undef.
if (isa<AllocationInst>(DepInst)) {
ValuesPerBlock.push_back(std::make_pair(DepBB,
Context->getUndef(LI->getType())));
DepBB->getContext().getUndef(LI->getType())));
continue;
}

@ -1272,7 +1272,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
// undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example.
if (isa<AllocationInst>(DepInst)) {
L->replaceAllUsesWith(Context->getUndef(L->getType()));
L->replaceAllUsesWith(DepInst->getContext().getUndef(L->getType()));
toErase.push_back(L);
NumGVNLoad++;
return true;

@ -1384,9 +1384,9 @@ bool GVN::processInstruction(Instruction *I,
BasicBlock* falseSucc = BI->getSuccessor(1);

if (trueSucc->getSinglePredecessor())
localAvail[trueSucc]->table[condVN] = Context->getTrue();
localAvail[trueSucc]->table[condVN] = trueSucc->getContext().getTrue();
if (falseSucc->getSinglePredecessor())
localAvail[falseSucc]->table[condVN] = Context->getFalse();
localAvail[falseSucc]->table[condVN] = trueSucc->getContext().getFalse();

return false;

@ -1628,7 +1628,7 @@ bool GVN::performPRE(Function& F) {
// will be available in the predecessor by the time we need them. Any
// that weren't original present will have been instantiated earlier
// in this loop.
Instruction* PREInstr = CurInst->clone(*Context);
Instruction* PREInstr = CurInst->clone(CurInst->getContext());
bool success = true;
for (unsigned i = 0, e = CurInst->getNumOperands(); i != e; ++i) {
Value *Op = PREInstr->getOperand(i);

@ -800,6 +800,8 @@ void GVNPRE::val_replace(ValueNumberedSet& s, Value* v) {
Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) {
if (V == 0)
return 0;

LLVMContext &Context = V->getContext();

// Unary Operations
if (CastInst* U = dyn_cast<CastInst>(V)) {

@ -862,7 +864,7 @@ Value* GVNPRE::phi_translate(Value* V, BasicBlock* pred, BasicBlock* succ) {
newOp1, newOp2,
BO->getName()+".expr");
else if (CmpInst* C = dyn_cast<CmpInst>(U))
newVal = CmpInst::Create(*Context, C->getOpcode(),
newVal = CmpInst::Create(Context, C->getOpcode(),
C->getPredicate(),
newOp1, newOp2,
C->getName()+".expr");

@ -1594,6 +1596,7 @@ void GVNPRE::buildsets(Function& F) {
void GVNPRE::insertion_pre(Value* e, BasicBlock* BB,
DenseMap<BasicBlock*, Value*>& avail,
std::map<BasicBlock*, ValueNumberedSet>& new_sets) {
LLVMContext &Context = e->getContext();
for (pred_iterator PI = pred_begin(BB), PE = pred_end(BB); PI != PE; ++PI) {
Value* e2 = avail[*PI];
if (!availableOut[*PI].test(VN.lookup(e2))) {

@ -1680,7 +1683,7 @@ void GVNPRE::insertion_pre(Value* e, BasicBlock* BB,
BO->getName()+".gvnpre",
(*PI)->getTerminator());
else if (CmpInst* C = dyn_cast<CmpInst>(U))
newVal = CmpInst::Create(*Context, C->getOpcode(),
newVal = CmpInst::Create(Context, C->getOpcode(),
C->getPredicate(), s1, s2,
C->getName()+".gvnpre",
(*PI)->getTerminator());

@ -292,7 +292,7 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L,
if (NumPreds != 1) {
// Clone the PHI and delete the original one. This lets IVUsers and
// any other maps purge the original user from their records.
PHINode *NewPN = PN->clone(*Context);
PHINode *NewPN = PN->clone(PN->getContext());
NewPN->takeName(PN);
NewPN->insertBefore(PN);
PN->replaceAllUsesWith(NewPN);

@ -713,21 +713,23 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
}
if (NewPred == CmpInst::BAD_ICMP_PREDICATE) return;

LLVMContext &Context = PH->getContext();

// Insert new integer induction variable.
PHINode *NewPHI = PHINode::Create(Type::Int32Ty,
PH->getName()+".int", PH);
NewPHI->addIncoming(Context->getConstantInt(Type::Int32Ty, newInitValue),
NewPHI->addIncoming(Context.getConstantInt(Type::Int32Ty, newInitValue),
PH->getIncomingBlock(IncomingEdge));

Value *NewAdd = BinaryOperator::CreateAdd(NewPHI,
Context->getConstantInt(Type::Int32Ty,
Context.getConstantInt(Type::Int32Ty,
newIncrValue),
Incr->getName()+".int", Incr);
NewPHI->addIncoming(NewAdd, PH->getIncomingBlock(BackEdge));

// The back edge is edge 1 of newPHI, whatever it may have been in the
// original PHI.
ConstantInt *NewEV = Context->getConstantInt(Type::Int32Ty, intEV);
ConstantInt *NewEV = Context.getConstantInt(Type::Int32Ty, intEV);
Value *LHS = (EVIndex == 1 ? NewPHI->getIncomingValue(1) : NewEV);
Value *RHS = (EVIndex == 1 ? NewEV : NewPHI->getIncomingValue(1));
ICmpInst *NewEC = new ICmpInst(EC->getParent()->getTerminator(),

@ -743,7 +745,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PH) {
RecursivelyDeleteTriviallyDeadInstructions(EC);

// Delete old, floating point, increment instruction.
Incr->replaceAllUsesWith(Context->getUndef(Incr->getType()));
Incr->replaceAllUsesWith(Context.getUndef(Incr->getType()));
RecursivelyDeleteTriviallyDeadInstructions(Incr);

// Replace floating induction variable, if it isn't already deleted.

@ -85,7 +85,8 @@ namespace {
static char ID; // Pass identification, replacement for typeid
InstCombiner() : FunctionPass(&ID) {}

LLVMContext *getContext() { return Context; }
LLVMContext *Context;
LLVMContext *getContext() const { return Context; }

/// AddToWorkList - Add the specified instruction to the worklist if it
/// isn't already in it.

@ -11557,7 +11558,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
Context))
*Context))
return ReplaceInstUsesWith(LI, V);
if (CE->getOperand(0)->isNullValue()) {
// Insert a new store to null instruction before the load to indicate

@ -13082,6 +13083,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {

bool InstCombiner::runOnFunction(Function &F) {
MustPreserveLCSSA = mustPreserveAnalysisID(LCSSAID);
Context = &F.getContext();

bool EverMadeChange = false;

@ -435,7 +435,8 @@ bool JumpThreading::ProcessBranchOnDuplicateCond(BasicBlock *PredBB,
<< "' folding condition to '" << BranchDir << "': "
<< *BB->getTerminator();
++NumFolds;
DestBI->setCondition(Context->getConstantInt(Type::Int1Ty, BranchDir));
DestBI->setCondition(BB->getContext().getConstantInt(Type::Int1Ty,
BranchDir));
ConstantFoldTerminator(BB);
return true;
}

@ -564,7 +565,8 @@ bool JumpThreading::SimplifyPartiallyRedundantLoad(LoadInst *LI) {

// If the returned value is the load itself, replace with an undef. This can
// only happen in dead loops.
if (AvailableVal == LI) AvailableVal = Context->getUndef(LI->getType());
if (AvailableVal == LI) AvailableVal =
AvailableVal->getContext().getUndef(LI->getType());
LI->replaceAllUsesWith(AvailableVal);
LI->eraseFromParent();
return true;

@ -718,7 +720,7 @@ bool JumpThreading::ProcessJumpOnPHI(PHINode *PN) {
// Next, figure out which successor we are threading to.
BasicBlock *SuccBB;
if (BranchInst *BI = dyn_cast<BranchInst>(BB->getTerminator()))
SuccBB = BI->getSuccessor(PredCst == Context->getFalse());
SuccBB = BI->getSuccessor(PredCst == PredBB->getContext().getFalse());
else {
SwitchInst *SI = cast<SwitchInst>(BB->getTerminator());
SuccBB = SI->getSuccessor(SI->findCaseValue(PredCst));

@ -756,7 +758,7 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
// We can only do the simplification for phi nodes of 'false' with AND or
// 'true' with OR. See if we have any entries in the phi for this.
unsigned PredNo = ~0U;
ConstantInt *PredCst = Context->getConstantInt(Type::Int1Ty, !isAnd);
ConstantInt *PredCst = V->getContext().getConstantInt(Type::Int1Ty, !isAnd);
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
if (PN->getIncomingValue(i) == PredCst) {
PredNo = i;

@ -795,15 +797,15 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
/// result can not be determined, a null pointer is returned.
static Constant *GetResultOfComparison(CmpInst::Predicate pred,
Value *LHS, Value *RHS,
LLVMContext *Context) {
LLVMContext &Context) {
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return Context->getConstantExprCompare(pred, CLHS, CRHS);
return Context.getConstantExprCompare(pred, CLHS, CRHS);

if (LHS == RHS)
if (isa<IntegerType>(LHS->getType()) || isa<PointerType>(LHS->getType()))
return ICmpInst::isTrueWhenEqual(pred) ?
Context->getTrue() : Context->getFalse();
Context.getTrue() : Context.getFalse();

return 0;
}

@ -829,7 +831,7 @@ bool JumpThreading::ProcessBranchOnCompare(CmpInst *Cmp, BasicBlock *BB) {
PredVal = PN->getIncomingValue(i);

Constant *Res = GetResultOfComparison(Cmp->getPredicate(), PredVal,
RHS, Context);
RHS, Cmp->getContext());
if (!Res) {
PredVal = 0;
continue;

@ -931,7 +933,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB, BasicBlock *PredBB,
// Clone the non-phi instructions of BB into NewBB, keeping track of the
// mapping and using it to remap operands in the cloned instructions.
for (; !isa<TerminatorInst>(BI); ++BI) {
Instruction *New = BI->clone(*Context);
Instruction *New = BI->clone(BI->getContext());
New->setName(BI->getNameStart());
NewBB->getInstList().push_back(New);
ValueMapping[BI] = New;

@ -475,6 +475,8 @@ void LICM::sink(Instruction &I) {
++NumSunk;
Changed = true;

LLVMContext &Context = I.getContext();

// The case where there is only a single exit node of this loop is common
// enough that we handle it as a special (more efficient) case. It is more
// efficient to handle because there are no PHI nodes that need to be placed.

@ -483,7 +485,7 @@ void LICM::sink(Instruction &I) {
// Instruction is not used, just delete it.
CurAST->deleteValue(&I);
if (!I.use_empty()) // If I has users in unreachable blocks, eliminate.
I.replaceAllUsesWith(Context->getUndef(I.getType()));
I.replaceAllUsesWith(Context.getUndef(I.getType()));
I.eraseFromParent();
} else {
// Move the instruction to the start of the exit block, after any PHI

@ -497,7 +499,7 @@ void LICM::sink(Instruction &I) {
// The instruction is actually dead if there ARE NO exit blocks.
CurAST->deleteValue(&I);
if (!I.use_empty()) // If I has users in unreachable blocks, eliminate.
I.replaceAllUsesWith(Context->getUndef(I.getType()));
I.replaceAllUsesWith(Context.getUndef(I.getType()));
I.eraseFromParent();
} else {
// Otherwise, if we have multiple exits, use the PromoteMem2Reg function to

@ -570,7 +572,7 @@ void LICM::sink(Instruction &I) {
ExitBlock->getInstList().insert(InsertPt, &I);
New = &I;
} else {
New = I.clone(*Context);
New = I.clone(Context);
CurAST->copyValue(&I, New);
if (!I.getName().empty())
New->setName(I.getName()+".le");

@ -768,7 +770,7 @@ void LICM::PromoteValuesInLoop() {
PromotedAllocas.reserve(PromotedValues.size());
for (unsigned i = 0, e = PromotedValues.size(); i != e; ++i)
PromotedAllocas.push_back(PromotedValues[i].first);
PromoteMemToReg(PromotedAllocas, *DT, *DF, Context, CurAST);
PromoteMemToReg(PromotedAllocas, *DT, *DF, Preheader->getContext(), CurAST);
}

/// FindPromotableValuesInLoop - Check the current loop for stores to definite

@ -294,15 +294,15 @@ static bool isUsedOutsideLoop(Value *V, Loop *L) {

// Return V+1
static Value *getPlusOne(Value *V, bool Sign, Instruction *InsertPt,
LLVMContext *Context) {
Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
LLVMContext &Context) {
Constant *One = Context.getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt);
}

// Return V-1
static Value *getMinusOne(Value *V, bool Sign, Instruction *InsertPt,
LLVMContext *Context) {
Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
LLVMContext &Context) {
Constant *One = Context.getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateSub(V, One, "lsp", InsertPt);
}

@ -493,6 +493,8 @@ bool LoopIndexSplit::restrictLoopBound(ICmpInst &Op) {
EBR->setSuccessor(1, T);
}

LLVMContext &Context = Op.getContext();

// New upper and lower bounds.
Value *NLB = NULL;
Value *NUB = NULL;

@ -879,6 +881,8 @@ bool LoopIndexSplit::splitLoop() {
BasicBlock *ExitingBlock = ExitCondition->getParent();
if (!cleanBlock(ExitingBlock)) return false;

LLVMContext &Context = Header->getContext();

for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
I != E; ++I) {
BranchInst *BR = dyn_cast<BranchInst>((*I)->getTerminator());

@ -238,7 +238,7 @@ bool LoopRotate::rotateLoop(Loop *Lp, LPPassManager &LPM) {
// This is not a PHI instruction. Insert its clone into original pre-header.
// If this instruction is using a value from same basic block then
// update it to use value from cloned instruction.
Instruction *C = In->clone(*Context);
Instruction *C = In->clone(In->getContext());
C->setName(In->getName());
OrigPreHeader->getInstList().push_back(C);

@ -1576,7 +1576,9 @@ void LoopStrengthReduce::StrengthReduceStridedIVUsers(const SCEV *const &Stride,
BasicBlock *LatchBlock = L->getLoopLatch();
Instruction *IVIncInsertPt = LatchBlock->getTerminator();

Value *CommonBaseV = Context->getNullValue(ReplacedTy);
LLVMContext &Context = Preheader->getContext();

Value *CommonBaseV = Context.getNullValue(ReplacedTy);

const SCEV *RewriteFactor = SE->getIntegerSCEV(0, ReplacedTy);
IVExpr ReuseIV(SE->getIntegerSCEV(0, Type::Int32Ty),

@ -1859,6 +1861,8 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,
const SCEVConstant *SC = dyn_cast<SCEVConstant>(*CondStride);
if (!SC) return Cond;

LLVMContext &Context = Cond->getContext();

ICmpInst::Predicate Predicate = Cond->getPredicate();
int64_t CmpSSInt = SC->getValue()->getSExtValue();
unsigned BitWidth = SE->getTypeSizeInBits((*CondStride)->getType());

@ -1942,7 +1946,7 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,

NewCmpTy = NewCmpLHS->getType();
NewTyBits = SE->getTypeSizeInBits(NewCmpTy);
const Type *NewCmpIntTy = Context->getIntegerType(NewTyBits);
const Type *NewCmpIntTy = Context.getIntegerType(NewTyBits);
if (RequiresTypeConversion(NewCmpTy, CmpTy)) {
// Check if it is possible to rewrite it using
// an iv / stride of a smaller integer type.

@ -1987,10 +1991,10 @@ ICmpInst *LoopStrengthReduce::ChangeCompareStride(Loop *L, ICmpInst *Cond,

NewStride = &IU->StrideOrder[i];
if (!isa<PointerType>(NewCmpTy))
NewCmpRHS = Context->getConstantInt(NewCmpTy, NewCmpVal);
NewCmpRHS = Context.getConstantInt(NewCmpTy, NewCmpVal);
else {
Constant *CI = Context->getConstantInt(NewCmpIntTy, NewCmpVal);
NewCmpRHS = Context->getConstantExprIntToPtr(CI, NewCmpTy);
Constant *CI = Context.getConstantInt(NewCmpIntTy, NewCmpVal);
NewCmpRHS = Context.getConstantExprIntToPtr(CI, NewCmpTy);
}
NewOffset = TyBits == NewTyBits
? SE->getMulExpr(CondUse->getOffset(),

@ -2171,6 +2175,8 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
const SCEV *BackedgeTakenCount = SE->getBackedgeTakenCount(L);
if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
return;

LLVMContext &Context = L->getHeader()->getContext();

for (unsigned Stride = 0, e = IU->StrideOrder.size(); Stride != e;
++Stride) {

@ -2233,7 +2239,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {

ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
if (!Init) continue;
Constant *NewInit = Context->getConstantFP(DestTy, Init->getZExtValue());
Constant *NewInit = Context.getConstantFP(DestTy, Init->getZExtValue());

BinaryOperator *Incr =
dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));

@ -2257,7 +2263,7 @@ void LoopStrengthReduce::OptimizeShadowIV(Loop *L) {
PHINode *NewPH = PHINode::Create(DestTy, "IV.S.", PH);

/* create new increment. '++d' in above example. */
Constant *CFP = Context->getConstantFP(DestTy, C->getZExtValue());
Constant *CFP = Context.getConstantFP(DestTy, C->getZExtValue());
BinaryOperator *NewIncr =
BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
Instruction::FAdd : Instruction::FSub,

@ -2293,6 +2299,8 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
// one register value.
BasicBlock *LatchBlock = L->getLoopLatch();
BasicBlock *ExitingBlock = L->getExitingBlock();
LLVMContext &Context = LatchBlock->getContext();

if (!ExitingBlock)
// Multiple exits, just look at the exit in the latch block if there is one.
ExitingBlock = LatchBlock;

@ -2382,7 +2390,7 @@ void LoopStrengthReduce::OptimizeLoopTermCond(Loop *L) {
Cond->moveBefore(TermBr);
} else {
// Otherwise, clone the terminating condition and insert into the loopend.
Cond = cast<ICmpInst>(Cond->clone(*Context));
Cond = cast<ICmpInst>(Cond->clone(Context));
Cond->setName(L->getHeader()->getName() + ".termcond");
LatchBlock->getInstList().insert(TermBr, Cond);

@ -2424,6 +2432,8 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
if (!ExitingBlock)
return; // More than one block exiting!

LLVMContext &Context = ExitingBlock->getContext();

// Okay, we've computed the exiting block. See what condition causes us to
// exit.
//

@ -2496,7 +2506,7 @@ void LoopStrengthReduce::OptimizeLoopCountIV(Loop *L) {
Value *startVal = phi->getIncomingValue(inBlock);
Value *endVal = Cond->getOperand(1);
// FIXME check for case where both are constant
Constant* Zero = Context->getConstantInt(Cond->getOperand(1)->getType(), 0);
Constant* Zero = Context.getConstantInt(Cond->getOperand(1)->getType(), 0);
BinaryOperator *NewStartVal =
BinaryOperator::Create(Instruction::Sub, endVal, startVal,
"tmp", PreInsertPt);

@ -216,6 +216,7 @@ bool LoopUnswitch::runOnLoop(Loop *L, LPPassManager &LPM_Ref) {
/// and profitable.
bool LoopUnswitch::processCurrentLoop() {
bool Changed = false;
LLVMContext &Context = currentLoop->getHeader()->getContext();

// Loop over all of the basic blocks in the loop. If we find an interior
// block that is branching on a loop-invariant condition, we can unswitch this

@ -233,7 +234,7 @@ bool LoopUnswitch::processCurrentLoop() {
Value *LoopCond = FindLIVLoopCondition(BI->getCondition(),
currentLoop, Changed);
if (LoopCond && UnswitchIfProfitable(LoopCond,
Context->getTrue())) {
Context.getTrue())) {
++NumBranches;
return true;
}

@ -263,7 +264,7 @@ bool LoopUnswitch::processCurrentLoop() {
Value *LoopCond = FindLIVLoopCondition(SI->getCondition(),
currentLoop, Changed);
if (LoopCond && UnswitchIfProfitable(LoopCond,
Context->getTrue())) {
Context.getTrue())) {
++NumSelects;
return true;
}

@ -337,6 +338,7 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
BasicBlock **LoopExit) {
BasicBlock *Header = currentLoop->getHeader();
TerminatorInst *HeaderTerm = Header->getTerminator();
LLVMContext &Context = Header->getContext();

BasicBlock *LoopExitBB = 0;
if (BranchInst *BI = dyn_cast<BranchInst>(HeaderTerm)) {

@ -351,10 +353,10 @@ bool LoopUnswitch::IsTrivialUnswitchCondition(Value *Cond, Constant **Val,
// this.
if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(0)))) {
if (Val) *Val = Context->getTrue();
if (Val) *Val = Context.getTrue();
} else if ((LoopExitBB = isTrivialLoopExitBlock(currentLoop,
BI->getSuccessor(1)))) {
if (Val) *Val = Context->getFalse();
if (Val) *Val = Context.getFalse();
}
} else if (SwitchInst *SI = dyn_cast<SwitchInst>(HeaderTerm)) {
// If this isn't a switch on Cond, we can't handle it.

@ -510,7 +512,7 @@ void LoopUnswitch::EmitPreheaderBranchOnCondition(Value *LIC, Constant *Val,
Value *BranchVal = LIC;
if (!isa<ConstantInt>(Val) || Val->getType() != Type::Int1Ty)
BranchVal = new ICmpInst(InsertPt, ICmpInst::ICMP_EQ, LIC, Val, "tmp");
else if (Val != Context->getTrue())
else if (Val != Val->getContext().getTrue())
// We want to enter the new loop when the condition is true.
std::swap(TrueDest, FalseDest);

@ -818,7 +820,7 @@ void LoopUnswitch::RemoveBlockIfDead(BasicBlock *BB,
// Anything that uses the instructions in this basic block should have their
// uses replaced with undefs.
if (!I->use_empty())
I->replaceAllUsesWith(Context->getUndef(I->getType()));
I->replaceAllUsesWith(I->getContext().getUndef(I->getType()));
}

// If this is the edge to the header block for a loop, remove the loop and

@ -899,6 +901,8 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
// selects, switches.
std::vector<User*> Users(LIC->use_begin(), LIC->use_end());
std::vector<Instruction*> Worklist;
LLVMContext &Context = Val->getContext();

// If we know that LIC == Val, or that LIC == NotVal, just replace uses of LIC
// in the loop with the appropriate one directly.

@ -907,7 +911,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,
if (IsEqual)
Replacement = Val;
else
Replacement = Context->getConstantInt(Type::Int1Ty,
Replacement = Context.getConstantInt(Type::Int1Ty,
!cast<ConstantInt>(Val)->getZExtValue());

for (unsigned i = 0, e = Users.size(); i != e; ++i)

@ -947,7 +951,7 @@ void LoopUnswitch::RewriteLoopBodyWithConditionConstant(Loop *L, Value *LIC,

Instruction* OldTerm = Old->getTerminator();
BranchInst::Create(Split, SISucc,
Context->getTrue(), OldTerm);
Context.getTrue(), OldTerm);

LPM->deleteSimpleAnalysisValue(Old->getTerminator(), L);
Old->getTerminator()->eraseFromParent();

@ -988,7 +992,7 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction*> &Worklist, Loop *L) {
Worklist.pop_back();

// Simple constant folding.
if (Constant *C = ConstantFoldInstruction(I, Context)) {
if (Constant *C = ConstantFoldInstruction(I, I->getContext())) {
ReplaceUsesOfWith(I, C, Worklist, L, LPM);
continue;
}

@ -36,7 +36,7 @@ STATISTIC(NumMemSetInfer, "Number of memsets inferred");
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V, LLVMContext* Context) {
static Value *isBytewiseValue(Value *V, LLVMContext& Context) {
// All byte-wide stores are splatable, even of arbitrary variables.
if (V->getType() == Type::Int8Ty) return V;

@ -44,9 +44,9 @@ static Value *isBytewiseValue(Value *V, LLVMContext* Context) {
// corresponding integer value is "byteable". An important case is 0.0.
if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
if (CFP->getType() == Type::FloatTy)
V = Context->getConstantExprBitCast(CFP, Type::Int32Ty);
V = Context.getConstantExprBitCast(CFP, Type::Int32Ty);
if (CFP->getType() == Type::DoubleTy)
V = Context->getConstantExprBitCast(CFP, Type::Int64Ty);
V = Context.getConstantExprBitCast(CFP, Type::Int64Ty);
// Don't handle long double formats, which have strange constraints.
}

@ -69,7 +69,7 @@ static Value *isBytewiseValue(Value *V, LLVMContext* Context) {
if (Val != Val2)
return 0;
}
return Context->getConstantInt(Val);
return Context.getConstantInt(Val);
}
}

@ -346,7 +346,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
// Ensure that the value being stored is something that can be memset'able a
// byte at a time like "0" or "-1" or any width, as well as things like
// 0xA0A0A0A0 and 0.0.
Value *ByteVal = isBytewiseValue(SI->getOperand(0), Context);
Value *ByteVal = isBytewiseValue(SI->getOperand(0), SI->getContext());
if (!ByteVal)
return false;

@ -385,7 +385,8 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
if (NextStore->isVolatile()) break;

// Check to see if this stored value is of the same byte-splattable value.
if (ByteVal != isBytewiseValue(NextStore->getOperand(0), Context))
if (ByteVal != isBytewiseValue(NextStore->getOperand(0),
NextStore->getContext()))
break;

// Check to see if this store is to a constant offset from the start ptr.

@ -439,15 +440,17 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator& BBI) {
StartPtr = Range.StartPtr;

// Cast the start ptr to be i8* as memset requires.
const Type *i8Ptr = Context->getPointerTypeUnqual(Type::Int8Ty);
const Type *i8Ptr = SI->getContext().getPointerTypeUnqual(Type::Int8Ty);
if (StartPtr->getType() != i8Ptr)
StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getNameStart(),
InsertPt);

Value *Ops[] = {
StartPtr, ByteVal, // Start, value
Context->getConstantInt(Type::Int64Ty, Range.End-Range.Start), // size
Context->getConstantInt(Type::Int32Ty, Range.Alignment) // align
// size
SI->getContext().getConstantInt(Type::Int64Ty, Range.End-Range.Start),
// align
SI->getContext().getConstantInt(Type::Int32Ty, Range.Alignment)
};
Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
DEBUG(cerr << "Replace stores:\n";
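The processStore hunks coalesce a run of adjacent stores of the same splattable byte into a single memset; the size operand is simply the byte distance Range.End - Range.Start. A compact sketch of forming the call with the store's own context (MemSetF, Range and InsertPt as in the diff):

    LLVMContext &Ctx = SI->getContext();
    Value *Ops[] = {
      StartPtr, ByteVal,                                           // dest, value
      Ctx.getConstantInt(Type::Int64Ty, Range.End - Range.Start),  // size
      Ctx.getConstantInt(Type::Int32Ty, Range.Alignment)           // align
    };
    CallInst::Create(MemSetF, Ops, Ops + 4, "", InsertPt);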
@ -1664,7 +1664,7 @@ namespace {
TopBB(TopBB),
TopInst(NULL),
modified(modified),
Context(TopBB->getContext())
Context(&TopBB->getContext())
{
assert(Top && "VRPSolver created for unreachable basic block.");
}

@ -1681,7 +1681,7 @@ namespace {
TopBB(TopInst->getParent()),
TopInst(TopInst),
modified(modified),
Context(TopInst->getParent()->getContext())
Context(&TopInst->getContext())
{
assert(Top && "VRPSolver created for unreachable basic block.");
assert(Top->getBlock() == TopInst->getParent() && "Context mismatch.");

@ -2267,6 +2267,7 @@ namespace {

std::vector<DomTreeDFS::Node *> WorkList;

LLVMContext *Context;
public:
static char ID; // Pass identification, replacement for typeid
PredicateSimplifier() : FunctionPass(&ID) {}

@ -2402,6 +2403,7 @@ namespace {
DominatorTree *DT = &getAnalysis<DominatorTree>();
DTDFS = new DomTreeDFS(DT);
TargetData *TD = &getAnalysis<TargetData>();
Context = &F.getContext();

DOUT << "Entering Function: " << F.getName() << "\n";

@ -2447,7 +2449,7 @@ namespace {
return;
}

LLVMContext *Context = BI.getParent()->getContext();
LLVMContext *Context = &BI.getContext();

for (DomTreeDFS::Node::iterator I = DTNode->begin(), E = DTNode->end();
I != E; ++I) {

@ -2505,7 +2507,7 @@ namespace {

void PredicateSimplifier::Forwards::visitAllocaInst(AllocaInst &AI) {
VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &AI);
VRP.add(AI.getParent()->getContext()->getNullValue(AI.getType()),
VRP.add(AI.getContext().getNullValue(AI.getType()),
&AI, ICmpInst::ICMP_NE);
VRP.solve();
}

@ -2516,7 +2518,7 @@ namespace {
if (isa<Constant>(Ptr)) return;

VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &LI);
VRP.add(LI.getParent()->getContext()->getNullValue(Ptr->getType()),
VRP.add(LI.getContext().getNullValue(Ptr->getType()),
Ptr, ICmpInst::ICMP_NE);
VRP.solve();
}

@ -2526,14 +2528,14 @@ namespace {
if (isa<Constant>(Ptr)) return;

VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &SI);
VRP.add(SI.getParent()->getContext()->getNullValue(Ptr->getType()),
VRP.add(SI.getContext().getNullValue(Ptr->getType()),
Ptr, ICmpInst::ICMP_NE);
VRP.solve();
}

void PredicateSimplifier::Forwards::visitSExtInst(SExtInst &SI) {
VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &SI);
LLVMContext *Context = SI.getParent()->getContext();
LLVMContext *Context = &SI.getContext();
uint32_t SrcBitWidth = cast<IntegerType>(SI.getSrcTy())->getBitWidth();
uint32_t DstBitWidth = cast<IntegerType>(SI.getDestTy())->getBitWidth();
APInt Min(APInt::getHighBitsSet(DstBitWidth, DstBitWidth-SrcBitWidth+1));

@ -2545,7 +2547,7 @@ namespace {

void PredicateSimplifier::Forwards::visitZExtInst(ZExtInst &ZI) {
VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &ZI);
LLVMContext *Context = ZI.getParent()->getContext();
LLVMContext *Context = &ZI.getContext();
uint32_t SrcBitWidth = cast<IntegerType>(ZI.getSrcTy())->getBitWidth();
uint32_t DstBitWidth = cast<IntegerType>(ZI.getDestTy())->getBitWidth();
APInt Max(APInt::getLowBitsSet(DstBitWidth, SrcBitWidth));

@ -2564,7 +2566,7 @@ namespace {
case Instruction::SDiv: {
Value *Divisor = BO.getOperand(1);
VRPSolver VRP(VN, IG, UB, VR, PS->DTDFS, PS->modified, &BO);
VRP.add(BO.getParent()->getContext()->getNullValue(Divisor->getType()),
VRP.add(BO.getContext().getNullValue(Divisor->getType()),
Divisor, ICmpInst::ICMP_NE);
VRP.solve();
break;

@ -2638,7 +2640,7 @@ namespace {

Pred = IC.getPredicate();

LLVMContext *Context = IC.getParent()->getContext();
LLVMContext *Context = &IC.getContext();

if (ConstantInt *Op1 = dyn_cast<ConstantInt>(IC.getOperand(1))) {
ConstantInt *NextVal = 0;

@ -200,8 +200,8 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
///
static Instruction *LowerNegateToMultiply(Instruction *Neg,
std::map<AssertingVH<>, unsigned> &ValueRankMap,
LLVMContext *Context) {
Constant *Cst = Context->getAllOnesValue(Neg->getType());
LLVMContext &Context) {
Constant *Cst = Neg->getContext().getAllOnesValue(Neg->getType());

Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
ValueRankMap.erase(Neg);

@ -256,6 +256,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I,
std::vector<ValueEntry> &Ops) {
Value *LHS = I->getOperand(0), *RHS = I->getOperand(1);
unsigned Opcode = I->getOpcode();
LLVMContext &Context = I->getContext();

// First step, linearize the expression if it is in ((A+B)+(C+D)) form.
BinaryOperator *LHSBO = isReassociableOp(LHS, Opcode);

@ -284,8 +285,8 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I,
Ops.push_back(ValueEntry(getRank(RHS), RHS));

// Clear the leaves out.
I->setOperand(0, Context->getUndef(I->getType()));
I->setOperand(1, Context->getUndef(I->getType()));
I->setOperand(0, Context.getUndef(I->getType()));
I->setOperand(1, Context.getUndef(I->getType()));
return;
} else {
// Turn X+(Y+Z) -> (Y+Z)+X

@ -320,7 +321,7 @@ void Reassociate::LinearizeExprTree(BinaryOperator *I,
Ops.push_back(ValueEntry(getRank(RHS), RHS));

// Clear the RHS leaf out.
I->setOperand(1, Context->getUndef(I->getType()));
I->setOperand(1, Context.getUndef(I->getType()));
}

// RewriteExprTree - Now that the operands for this expression tree are

@ -373,7 +374,7 @@ void Reassociate::RewriteExprTree(BinaryOperator *I,
// version of the value is returned, and BI is left pointing at the instruction
// that should be processed next by the reassociation pass.
//
static Value *NegateValue(LLVMContext *Context, Value *V, Instruction *BI) {
static Value *NegateValue(LLVMContext &Context, Value *V, Instruction *BI) {
// We are trying to expose opportunity for reassociation. One of the things
// that we want to do to achieve this is to push a negation as deep into an
// expression chain as possible, to expose the add instructions. In practice,

@ -402,12 +403,12 @@ static Value *NegateValue(LLVMContext *Context, Value *V, Instruction *BI) {
// Insert a 'neg' instruction that subtracts the value from zero to get the
// negation.
//
return BinaryOperator::CreateNeg(*Context, V, V->getName() + ".neg", BI);
return BinaryOperator::CreateNeg(Context, V, V->getName() + ".neg", BI);
}

/// ShouldBreakUpSubtract - Return true if we should break up this subtract of
/// X-Y into (X + -Y).
static bool ShouldBreakUpSubtract(LLVMContext *Context, Instruction *Sub) {
static bool ShouldBreakUpSubtract(LLVMContext &Context, Instruction *Sub) {
// If this is a negation, we can't split it up!
if (BinaryOperator::isNeg(Sub))
return false;

@ -431,7 +432,7 @@ static bool ShouldBreakUpSubtract(LLVMContext *Context, Instruction *Sub) {
/// BreakUpSubtract - If we have (X-Y), and if either X is an add, or if this is
/// only used by an add, transform this into (X+(0-Y)) to promote better
/// reassociation.
static Instruction *BreakUpSubtract(LLVMContext *Context, Instruction *Sub,
static Instruction *BreakUpSubtract(LLVMContext &Context, Instruction *Sub,
std::map<AssertingVH<>, unsigned> &ValueRankMap) {
// Convert a subtract into an add and a neg instruction... so that sub
// instructions can be commuted with other add instructions...

@ -458,16 +459,16 @@ static Instruction *BreakUpSubtract(LLVMContext *Context, Instruction *Sub,
/// reassociation.
static Instruction *ConvertShiftToMul(Instruction *Shl,
std::map<AssertingVH<>, unsigned> &ValueRankMap,
LLVMContext *Context) {
LLVMContext &Context) {
// If an operand of this shift is a reassociable multiply, or if the shift
// is used by a reassociable multiply or add, turn into a multiply.
if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) ||
(Shl->hasOneUse() &&
(isReassociableOp(Shl->use_back(), Instruction::Mul) ||
isReassociableOp(Shl->use_back(), Instruction::Add)))) {
Constant *MulCst = Context->getConstantInt(Shl->getType(), 1);
Constant *MulCst = Context.getConstantInt(Shl->getType(), 1);
MulCst =
Context->getConstantExprShl(MulCst, cast<Constant>(Shl->getOperand(1)));
Context.getConstantExprShl(MulCst, cast<Constant>(Shl->getOperand(1)));

Instruction *Mul = BinaryOperator::CreateMul(Shl->getOperand(0), MulCst,
"", Shl);

@ -562,12 +563,14 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
bool IterateOptimization = false;
if (Ops.size() == 1) return Ops[0].Op;

LLVMContext &Context = I->getContext();

unsigned Opcode = I->getOpcode();

if (Constant *V1 = dyn_cast<Constant>(Ops[Ops.size()-2].Op))
if (Constant *V2 = dyn_cast<Constant>(Ops.back().Op)) {
Ops.pop_back();
Ops.back().Op = Context->getConstantExpr(Opcode, V1, V2);
Ops.back().Op = Context.getConstantExpr(Opcode, V1, V2);
return OptimizeExpression(I, Ops);
}

@ -623,10 +626,10 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
if (FoundX != i) {
if (Opcode == Instruction::And) { // ...&X&~X = 0
++NumAnnihil;
return Context->getNullValue(X->getType());
return Context.getNullValue(X->getType());
} else if (Opcode == Instruction::Or) { // ...|X|~X = -1
++NumAnnihil;
return Context->getAllOnesValue(X->getType());
return Context.getAllOnesValue(X->getType());
}
}
}

@ -645,7 +648,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
assert(Opcode == Instruction::Xor);
if (e == 2) {
++NumAnnihil;
return Context->getNullValue(Ops[0].Op->getType());
return Context.getNullValue(Ops[0].Op->getType());
}
// ... X^X -> ...
Ops.erase(Ops.begin()+i, Ops.begin()+i+2);

@ -670,7 +673,7 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
// Remove X and -X from the operand list.
if (Ops.size() == 2) {
++NumAnnihil;
return Context->getNullValue(X->getType());
return Context.getNullValue(X->getType());
} else {
Ops.erase(Ops.begin()+i);
if (i < FoundX)

@ -781,6 +784,8 @@ Value *Reassociate::OptimizeExpression(BinaryOperator *I,
/// ReassociateBB - Inspect all of the instructions in this basic block,
/// reassociating them as we go.
void Reassociate::ReassociateBB(BasicBlock *BB) {
LLVMContext &Context = BB->getContext();

for (BasicBlock::iterator BBI = BB->begin(); BBI != BB->end(); ) {
Instruction *BI = BBI++;
if (BI->getOpcode() == Instruction::Shl &&
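LowerNegateToMultiply rewrites a negation 0 - X as X * -1 so the multiply can participate in reassociation; after this commit the all-ones constant comes from the instruction's own context. A sketch of the whole rewrite (the replacement and cleanup tail is an assumption, the hunk only shows the first two statements):

    static Instruction *LowerNegToMul(Instruction *Neg) {
      // -X is canonically 'sub 0, X'; operand 1 is X.
      Constant *Cst = Neg->getContext().getAllOnesValue(Neg->getType());
      Instruction *Res =
          BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "", Neg);
      Res->takeName(Neg);            // assumed, not shown in the hunk
      Neg->replaceAllUsesWith(Res);
      Neg->eraseFromParent();
      return Res;
    }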
@ -69,7 +69,7 @@ namespace {

CastInst *AllocaInsertionPoint =
CastInst::Create(Instruction::BitCast,
Context->getNullValue(Type::Int32Ty), Type::Int32Ty,
F.getContext().getNullValue(Type::Int32Ty), Type::Int32Ty,
"reg2mem alloca point", I);

// Find the escaped instructions. But don't create stack slots for

@ -645,7 +645,7 @@ void SCCPSolver::visitReturnInst(ReturnInst &I) {
DenseMap<std::pair<Function*, unsigned>, LatticeVal>::iterator
It = TrackedMultipleRetVals.find(std::make_pair(F, i));
if (It == TrackedMultipleRetVals.end()) break;
if (Value *Val = FindInsertedValue(I.getOperand(0), i, Context))
if (Value *Val = FindInsertedValue(I.getOperand(0), i, I.getContext()))
mergeInValue(It->second, F, getValueState(Val));
}
}

@ -1162,7 +1162,7 @@ void SCCPSolver::visitLoadInst(LoadInst &I) {
if (GV->isConstant() && GV->hasDefinitiveInitializer())
if (Constant *V =
ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE,
Context)) {
*Context)) {
markConstant(IV, &I, V);
return;
}

@ -1537,7 +1537,7 @@ FunctionPass *llvm::createSCCPPass() {
bool SCCP::runOnFunction(Function &F) {
DOUT << "SCCP on function '" << F.getNameStart() << "'\n";
SCCPSolver Solver;
Solver.setContext(Context);
Solver.setContext(&F.getContext());

// Mark the first block of the function as being executable.
Solver.MarkBlockExecutable(F.begin());

@ -1577,7 +1577,7 @@ bool SCCP::runOnFunction(Function &F) {
Instruction *I = Insts.back();
Insts.pop_back();
if (!I->use_empty())
I->replaceAllUsesWith(Context->getUndef(I->getType()));
I->replaceAllUsesWith(F.getContext().getUndef(I->getType()));
BB->getInstList().erase(I);
MadeChanges = true;
++NumInstRemoved;

@ -1597,7 +1597,7 @@ bool SCCP::runOnFunction(Function &F) {
continue;

Constant *Const = IV.isConstant()
? IV.getConstant() : Context->getUndef(Inst->getType());
? IV.getConstant() : F.getContext().getUndef(Inst->getType());
DOUT << " Constant: " << *Const << " = " << *Inst;

// Replaces all of the uses of a variable with uses of the constant.

@ -1662,7 +1662,7 @@ static bool AddressIsTaken(GlobalValue *GV) {
}

bool IPSCCP::runOnModule(Module &M) {
Context = &M.getContext();
LLVMContext *Context = &M.getContext();

SCCPSolver Solver;
Solver.setContext(Context);

@ -187,7 +187,7 @@ bool SROA::performPromotion(Function &F) {

if (Allocas.empty()) break;

PromoteMemToReg(Allocas, DT, DF, Context);
PromoteMemToReg(Allocas, DT, DF, F.getContext());
NumPromoted += Allocas.size();
Changed = true;
}

@ -243,7 +243,7 @@ bool SROA::performScalarRepl(Function &F) {
DOUT << " memcpy = " << *TheCopy;
Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
AI->replaceAllUsesWith(
Context->getConstantExprBitCast(TheSrc, AI->getType()));
F.getContext().getConstantExprBitCast(TheSrc, AI->getType()));
TheCopy->eraseFromParent(); // Don't mutate the global.
AI->eraseFromParent();
++NumGlobals;

@ -308,7 +308,7 @@ bool SROA::performScalarRepl(Function &F) {
DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";

// Create and insert the integer alloca.
const Type *NewTy = Context->getIntegerType(AllocaSize*8);
const Type *NewTy = F.getContext().getIntegerType(AllocaSize*8);
NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
ConvertUsesToScalar(AI, NewAI, 0);
}

@ -331,6 +331,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
std::vector<AllocationInst*> &WorkList) {
DOUT << "Found inst to SROA: " << *AI;
SmallVector<AllocaInst*, 32> ElementAllocas;
LLVMContext &Context = AI->getContext();
if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
ElementAllocas.reserve(ST->getNumContainedTypes());
for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {

@ -372,7 +373,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
// %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
// (Also works for arrays instead of structs)
if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
Value *Insert = Context->getUndef(LI->getType());
Value *Insert = Context.getUndef(LI->getType());
for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);

@ -419,7 +420,7 @@ void SROA::DoScalarReplacement(AllocationInst *AI,
// expanded itself once the worklist is rerun.
//
SmallVector<Value*, 8> NewArgs;
NewArgs.push_back(Context->getNullValue(Type::Int32Ty));
NewArgs.push_back(Context.getNullValue(Type::Int32Ty));
NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
NewArgs.end(), "", GEPI);

@ -513,6 +514,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
///
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
AllocaInfo &Info) {
LLVMContext &Context = User->getContext();
if (BitCastInst *C = dyn_cast<BitCastInst>(User))
return isSafeUseOfBitCastedAllocation(C, AI, Info);

@ -532,7 +534,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,

// The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
if (I == E ||
I.getOperand() != Context->getNullValue(I.getOperand()->getType())) {
I.getOperand() != Context.getNullValue(I.getOperand()->getType())) {
return MarkUnsafe(Info);
}

@ -728,6 +730,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
// that doesn't have anything to do with the alloca that we are promoting. For
// memset, this Value* stays null.
Value *OtherPtr = 0;
LLVMContext &Context = MI->getContext();
unsigned MemAlignment = MI->getAlignment();
if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
if (BCInst == MTI->getRawDest())

@ -765,7 +768,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
const Type *BytePtrTy = MI->getRawDest()->getType();
bool SROADest = MI->getRawDest() == BCInst;

Constant *Zero = Context->getNullValue(Type::Int32Ty);
Constant *Zero = Context.getNullValue(Type::Int32Ty);

for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// If this is a memcpy/memmove, emit a GEP of the other element address.

@ -773,7 +776,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
unsigned OtherEltAlign = MemAlignment;

if (OtherPtr) {
Value *Idx[2] = { Zero, Context->getConstantInt(Type::Int32Ty, i) };
Value *Idx[2] = { Zero, Context.getConstantInt(Type::Int32Ty, i) };
OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
OtherPtr->getNameStr()+"."+utostr(i),
MI);

@ -820,7 +823,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
Constant *StoreVal;
if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
if (CI->isZero()) {
StoreVal = Context->getNullValue(EltTy); // 0.0, null, 0, <0,0>
StoreVal = Context.getNullValue(EltTy); // 0.0, null, 0, <0,0>
} else {
// If EltTy is a vector type, get the element type.
const Type *ValTy = EltTy->getScalarType();

@ -836,18 +839,18 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
}

// Convert the integer value to the appropriate type.
StoreVal = Context->getConstantInt(TotalVal);
StoreVal = Context.getConstantInt(TotalVal);
if (isa<PointerType>(ValTy))
StoreVal = Context->getConstantExprIntToPtr(StoreVal, ValTy);
StoreVal = Context.getConstantExprIntToPtr(StoreVal, ValTy);
else if (ValTy->isFloatingPoint())
StoreVal = Context->getConstantExprBitCast(StoreVal, ValTy);
StoreVal = Context.getConstantExprBitCast(StoreVal, ValTy);
assert(StoreVal->getType() == ValTy && "Type mismatch!");

// If the requested value was a vector constant, create it.
if (EltTy != ValTy) {
unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
StoreVal = Context->getConstantVector(&Elts[0], NumElts);
StoreVal = Context.getConstantVector(&Elts[0], NumElts);
}
}
new StoreInst(StoreVal, EltPtr, MI);

@ -873,15 +876,15 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
Value *Ops[] = {
SROADest ? EltPtr : OtherElt, // Dest ptr
SROADest ? OtherElt : EltPtr, // Src ptr
Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
Context->getConstantInt(Type::Int32Ty, OtherEltAlign) // Align
Context.getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
Context.getConstantInt(Type::Int32Ty, OtherEltAlign) // Align
};
CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
} else {
assert(isa<MemSetInst>(MI));
Value *Ops[] = {
EltPtr, MI->getOperand(2), // Dest, Value,
Context->getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
Context.getConstantInt(MI->getOperand(3)->getType(), EltSize), // Size
Zero // Align
};
CallInst::Create(TheFn, Ops, Ops + 4, "", MI);

@ -898,6 +901,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
SmallVector<AllocaInst*, 32> &NewElts){
// Extract each element out of the integer according to its structure offset
// and store the element value to the individual alloca.
LLVMContext &Context = SI->getContext();
Value *SrcVal = SI->getOperand(0);
const Type *AllocaEltTy = AI->getType()->getElementType();
uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

@ -911,7 +915,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
// Handle tail padding by extending the operand
if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
SrcVal = new ZExtInst(SrcVal,
Context->getIntegerType(AllocaSizeBits), "", SI);
Context.getIntegerType(AllocaSizeBits), "", SI);

DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

@ -930,7 +934,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,

Value *EltVal = SrcVal;
if (Shift) {
Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift);
Value *ShiftVal = Context.getConstantInt(EltVal->getType(), Shift);
EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
"sroa.store.elt", SI);
}

@ -943,7 +947,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,

if (FieldSizeBits != AllocaSizeBits)
EltVal = new TruncInst(EltVal,
Context->getIntegerType(FieldSizeBits), "", SI);
Context.getIntegerType(FieldSizeBits), "", SI);
Value *DestField = NewElts[i];
if (EltVal->getType() == FieldTy) {
// Storing to an integer field of this size, just do it.

@ -953,7 +957,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
} else {
// Otherwise, bitcast the dest pointer (for aggregates).
DestField = new BitCastInst(DestField,
Context->getPointerTypeUnqual(EltVal->getType()),
Context.getPointerTypeUnqual(EltVal->getType()),
"", SI);
}
new StoreInst(EltVal, DestField, SI);

@ -978,7 +982,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,

Value *EltVal = SrcVal;
if (Shift) {
Value *ShiftVal = Context->getConstantInt(EltVal->getType(), Shift);
Value *ShiftVal = Context.getConstantInt(EltVal->getType(), Shift);
EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
"sroa.store.elt", SI);
}

@ -986,7 +990,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
// Truncate down to an integer of the right size.
if (ElementSizeBits != AllocaSizeBits)
EltVal = new TruncInst(EltVal,
Context->getIntegerType(ElementSizeBits),"",SI);
Context.getIntegerType(ElementSizeBits),"",SI);
Value *DestField = NewElts[i];
if (EltVal->getType() == ArrayEltTy) {
// Storing to an integer field of this size, just do it.

@ -996,7 +1000,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
} else {
// Otherwise, bitcast the dest pointer (for aggregates).
DestField = new BitCastInst(DestField,
Context->getPointerTypeUnqual(EltVal->getType()),
Context.getPointerTypeUnqual(EltVal->getType()),
"", SI);
}
new StoreInst(EltVal, DestField, SI);

@ -1039,9 +1043,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
}

Value *ResultVal =
Context->getNullValue(Context->getIntegerType(AllocaSizeBits));

LLVMContext &Context = LI->getContext();

Value *ResultVal =
Context.getNullValue(Context.getIntegerType(AllocaSizeBits));

for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
// Load the value from the alloca. If the NewElt is an aggregate, cast

@ -1054,11 +1060,11 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
// Ignore zero sized fields like {}, they obviously contain no data.
if (FieldSizeBits == 0) continue;

const IntegerType *FieldIntTy = Context->getIntegerType(FieldSizeBits);
const IntegerType *FieldIntTy = Context.getIntegerType(FieldSizeBits);
if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
!isa<VectorType>(FieldTy))
SrcField = new BitCastInst(SrcField,
Context->getPointerTypeUnqual(FieldIntTy),
Context.getPointerTypeUnqual(FieldIntTy),
"", LI);
SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

@ -1083,7 +1089,7 @@ void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

if (Shift) {
Value *ShiftVal = Context->getConstantInt(SrcField->getType(), Shift);
Value *ShiftVal = Context.getConstantInt(SrcField->getType(), Shift);
SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
}

@ -1186,8 +1192,10 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
if (isa<ConstantInt>(I.getOperand()))
return;

LLVMContext &Context = GEPI->getContext();

if (NumElements == 1) {
GEPI->setOperand(2, Context->getNullValue(Type::Int32Ty));
GEPI->setOperand(2, Context.getNullValue(Type::Int32Ty));
return;
}

@ -1195,16 +1203,16 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
// All users of the GEP must be loads. At each use of the GEP, insert
// two loads of the appropriate indexed GEP and select between them.
Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
Context->getNullValue(I.getOperand()->getType()),
Context.getNullValue(I.getOperand()->getType()),
"isone");
// Insert the new GEP instructions, which are properly indexed.
SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
Indices[1] = Context->getNullValue(Type::Int32Ty);
Indices[1] = Context.getNullValue(Type::Int32Ty);
Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
Indices.begin(),
Indices.end(),
GEPI->getName()+".0", GEPI);
Indices[1] = Context->getConstantInt(Type::Int32Ty, 1);
Indices[1] = Context.getConstantInt(Type::Int32Ty, 1);
Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
Indices.begin(),
Indices.end(),

@ -1262,7 +1270,7 @@ void SROA::CleanupAllocaUsers(AllocationInst *AI) {
/// and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
unsigned AllocaSize, const TargetData &TD,
LLVMContext *Context) {
LLVMContext &Context) {
// If this could be contributing to a vector, analyze it.
if (VecTy != Type::VoidTy) { // either null or a vector type.

@ -1290,7 +1298,7 @@ static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
cast<VectorType>(VecTy)->getElementType()
|
||||
->getPrimitiveSizeInBits()/8 == EltSize)) {
|
||||
if (VecTy == 0)
|
||||
VecTy = Context->getVectorType(In, AllocaSize/EltSize);
|
||||
VecTy = In->getContext().getVectorType(In, AllocaSize/EltSize);
|
||||
return;
|
||||
}
|
||||
}
|
||||
@ -1321,7 +1329,8 @@ bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
|
||||
// Don't break volatile loads.
|
||||
if (LI->isVolatile())
|
||||
return false;
|
||||
MergeInType(LI->getType(), Offset, VecTy, AllocaSize, *TD, Context);
|
||||
MergeInType(LI->getType(), Offset, VecTy,
|
||||
AllocaSize, *TD, V->getContext());
|
||||
SawVec |= isa<VectorType>(LI->getType());
|
||||
continue;
|
||||
}
|
||||
@ -1330,7 +1339,7 @@ bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
|
||||
// Storing the pointer, not into the value?
|
||||
if (SI->getOperand(0) == V || SI->isVolatile()) return 0;
|
||||
MergeInType(SI->getOperand(0)->getType(), Offset,
|
||||
VecTy, AllocaSize, *TD, Context);
|
||||
VecTy, AllocaSize, *TD, V->getContext());
|
||||
SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
|
||||
continue;
|
||||
}
|
||||
@ -1459,7 +1468,8 @@ void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
|
||||
APVal |= APVal << 8;
|
||||
|
||||
Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").c_str());
|
||||
Value *New = ConvertScalar_InsertValue(Context->getConstantInt(APVal),
|
||||
Value *New = ConvertScalar_InsertValue(
|
||||
User->getContext().getConstantInt(APVal),
|
||||
Old, Offset, Builder);
|
||||
Builder.CreateStore(New, NewAI);
|
||||
}
|
||||
@ -1531,6 +1541,8 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
|
||||
if (FromVal->getType() == ToType && Offset == 0)
|
||||
return FromVal;
|
||||
|
||||
LLVMContext &Context = FromVal->getContext();
|
||||
|
||||
// If the result alloca is a vector type, this is either an element
|
||||
// access or a bitcast to another vector type of the same size.
|
||||
if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
|
||||
@ -1546,7 +1558,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
|
||||
}
|
||||
// Return the element extracted out of it.
|
||||
Value *V = Builder.CreateExtractElement(FromVal,
|
||||
Context->getConstantInt(Type::Int32Ty,Elt),
|
||||
Context.getConstantInt(Type::Int32Ty,Elt),
|
||||
"tmp");
|
||||
if (V->getType() != ToType)
|
||||
V = Builder.CreateBitCast(V, ToType, "tmp");
|
||||
@ -1557,7 +1569,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
|
||||
// use insertvalue's to form the FCA.
|
||||
if (const StructType *ST = dyn_cast<StructType>(ToType)) {
|
||||
const StructLayout &Layout = *TD->getStructLayout(ST);
|
||||
Value *Res = Context->getUndef(ST);
|
||||
Value *Res = Context.getUndef(ST);
|
||||
for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
|
||||
Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
|
||||
Offset+Layout.getElementOffsetInBits(i),
|
||||
@ -1569,7 +1581,7 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
|
||||
|
||||
if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
|
||||
uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
|
||||
Value *Res = Context->getUndef(AT);
|
||||
Value *Res = Context.getUndef(AT);
|
||||
for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
|
||||
Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
|
||||
Offset+i*EltSize, Builder);
|
||||
@ -1599,21 +1611,21 @@ Value *SROA::ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
|
||||
// only some bits are used.
|
||||
if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
|
||||
FromVal = Builder.CreateLShr(FromVal,
|
||||
Context->getConstantInt(FromVal->getType(),
|
||||
Context.getConstantInt(FromVal->getType(),
|
||||
ShAmt), "tmp");
|
||||
else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
|
||||
FromVal = Builder.CreateShl(FromVal,
|
||||
Context->getConstantInt(FromVal->getType(),
|
||||
Context.getConstantInt(FromVal->getType(),
|
||||
-ShAmt), "tmp");
|
||||
|
||||
// Finally, unconditionally truncate the integer to the right width.
|
||||
unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
|
||||
if (LIBitWidth < NTy->getBitWidth())
|
||||
FromVal =
|
||||
Builder.CreateTrunc(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
|
||||
Builder.CreateTrunc(FromVal, Context.getIntegerType(LIBitWidth), "tmp");
|
||||
else if (LIBitWidth > NTy->getBitWidth())
|
||||
FromVal =
|
||||
Builder.CreateZExt(FromVal, Context->getIntegerType(LIBitWidth), "tmp");
|
||||
Builder.CreateZExt(FromVal, Context.getIntegerType(LIBitWidth), "tmp");
|
||||
|
||||
// If the result is an integer, this is a trunc or bitcast.
|
||||
if (isa<IntegerType>(ToType)) {
|
||||
@ -1645,6 +1657,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
|
||||
// Convert the stored type to the actual type, shift it left to insert
|
||||
// then 'or' into place.
|
||||
const Type *AllocaType = Old->getType();
|
||||
LLVMContext &Context = Old->getContext();
|
||||
|
||||
if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
|
||||
uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
|
||||
@ -1664,7 +1677,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
|
||||
SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");
|
||||
|
||||
SV = Builder.CreateInsertElement(Old, SV,
|
||||
Context->getConstantInt(Type::Int32Ty, Elt),
|
||||
Context.getConstantInt(Type::Int32Ty, Elt),
|
||||
"tmp");
|
||||
return SV;
|
||||
}
|
||||
@ -1697,7 +1710,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
|
||||
unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
|
||||
unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
|
||||
if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
|
||||
SV = Builder.CreateBitCast(SV, Context->getIntegerType(SrcWidth), "tmp");
|
||||
SV = Builder.CreateBitCast(SV, Context.getIntegerType(SrcWidth), "tmp");
|
||||
else if (isa<PointerType>(SV->getType()))
|
||||
SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");
|
||||
|
||||
@ -1732,11 +1745,11 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
|
||||
// only some bits in the structure are set.
|
||||
APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
|
||||
if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
|
||||
SV = Builder.CreateShl(SV, Context->getConstantInt(SV->getType(),
|
||||
SV = Builder.CreateShl(SV, Context.getConstantInt(SV->getType(),
|
||||
ShAmt), "tmp");
|
||||
Mask <<= ShAmt;
|
||||
} else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
|
||||
SV = Builder.CreateLShr(SV, Context->getConstantInt(SV->getType(),
|
||||
SV = Builder.CreateLShr(SV, Context.getConstantInt(SV->getType(),
|
||||
-ShAmt), "tmp");
|
||||
Mask = Mask.lshr(-ShAmt);
|
||||
}
|
||||
@ -1745,7 +1758,7 @@ Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
|
||||
// in the new bits.
|
||||
if (SrcWidth != DestWidth) {
|
||||
assert(DestWidth > SrcWidth);
|
||||
Old = Builder.CreateAnd(Old, Context->getConstantInt(~Mask), "mask");
|
||||
Old = Builder.CreateAnd(Old, Context.getConstantInt(~Mask), "mask");
|
||||
SV = Builder.CreateOr(Old, SV, "ins");
|
||||
}
|
||||
return SV;
|
||||
|
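Every SROA hunk above is the same mechanical rewrite: instead of threading an LLVMContext* through the pass, the code asks a nearby Value (the store, load, or alloca being rewritten) for its context, and getContext() now returns a reference, so each `->` becomes `.`. A minimal sketch of the before/after shape, using the 2009-era context factory methods shown in this diff (the helper name widenToAllocaSize is hypothetical):

    // Before: the context arrives as a nullable pointer parameter.
    static Value *widenToAllocaSize(Value *SrcVal, uint64_t AllocaSizeBits,
                                    StoreInst *SI, LLVMContext *Context) {
      return new ZExtInst(SrcVal, Context->getIntegerType(AllocaSizeBits),
                          "", SI);
    }

    // After: any Instruction reaches its own context, returned by reference.
    static Value *widenToAllocaSize(Value *SrcVal, uint64_t AllocaSizeBits,
                                    StoreInst *SI) {
      LLVMContext &Context = SI->getContext();
      return new ZExtInst(SrcVal, Context.getIntegerType(AllocaSizeBits),
                          "", SI);
    }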
@@ -58,7 +58,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {

 /// ChangeToUnreachable - Insert an unreachable instruction before the specified
 /// instruction, making it and the rest of the code in the block dead.
-static void ChangeToUnreachable(Instruction *I, LLVMContext *Context) {
+static void ChangeToUnreachable(Instruction *I, LLVMContext &Context) {
   BasicBlock *BB = I->getParent();
   // Loop over all of the successors, removing BB's entry from any PHI
   // nodes.

@@ -71,7 +71,7 @@ static void ChangeToUnreachable(Instruction *I, LLVMContext *Context) {
   BasicBlock::iterator BBI = I, BBE = BB->end();
   while (BBI != BBE) {
     if (!BBI->use_empty())
-      BBI->replaceAllUsesWith(Context->getUndef(BBI->getType()));
+      BBI->replaceAllUsesWith(Context.getUndef(BBI->getType()));
     BB->getInstList().erase(BBI++);
   }
 }

@@ -97,7 +97,7 @@ static void ChangeToCall(InvokeInst *II) {

 static bool MarkAliveBlocks(BasicBlock *BB,
                             SmallPtrSet<BasicBlock*, 128> &Reachable,
-                            LLVMContext *Context) {
+                            LLVMContext &Context) {

   SmallVector<BasicBlock*, 128> Worklist;
   Worklist.push_back(BB);
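The SimplifyCFG hunks make the same change at the signature level: static helpers that took LLVMContext* now take LLVMContext&, which drops the implicit "may be null" contract along with the dereferences. A hedged sketch of the new helper shape (the function below is a made-up reduction of ChangeToUnreachable, not its full body):

    // Dead-code helper in the post-change style: context by reference.
    static void replaceUsesWithUndef(Instruction *I, LLVMContext &Context) {
      if (!I->use_empty())
        I->replaceAllUsesWith(Context.getUndef(I->getType()));
    }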
@@ -65,7 +65,7 @@ public:
     Caller = CI->getParent()->getParent();
     this->TD = &TD;
     if (CI->getCalledFunction())
-      Context = CI->getCalledFunction()->getContext();
+      Context = &CI->getCalledFunction()->getContext();
     return CallOptimizer(CI->getCalledFunction(), CI, B);
   }

@@ -1639,7 +1639,7 @@ bool SimplifyLibCalls::runOnFunction(Function &F) {

   const TargetData &TD = getAnalysis<TargetData>();

-  IRBuilder<> Builder(*Context);
+  IRBuilder<> Builder(F.getContext());

   bool Changed = false;
   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {

@@ -1730,8 +1730,6 @@ void SimplifyLibCalls::setDoesNotAlias(Function &F, unsigned n) {
 /// doInitialization - Add attributes to well-known functions.
 ///
 bool SimplifyLibCalls::doInitialization(Module &M) {
-  Context = &M.getContext();
-
   Modified = false;
   for (Module::iterator I = M.begin(), E = M.end(); I != E; ++I) {
     Function &F = *I;
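SimplifyLibCalls shows the one wrinkle: a class that caches the context across calls keeps an LLVMContext* member, so the reference returned by getContext() is converted back with an explicit address-of. Roughly (a reduced, assumed shape of the LibCallOptimization bookkeeping, not the real class definition):

    struct LibCallOptimizationSketch {
      LLVMContext *Context;                 // cached member stays a pointer
      void captureContext(CallInst *CI) {
        if (Function *Callee = CI->getCalledFunction())
          Context = &Callee->getContext();  // reference -> stored pointer
      }
    };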
@@ -305,7 +305,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
   // keeping track of the mapping...
   //
   for (; BI != DestBlock->end(); ++BI) {
-    Instruction *New = BI->clone(*Context);
+    Instruction *New = BI->clone(BI->getContext());
     New->setName(BI->getName());
     SourceBlock->getInstList().push_back(New);
     ValueMapping[BI] = New;

@@ -359,7 +359,7 @@ void TailDup::eliminateUnconditionalBranch(BranchInst *Branch) {
     Instruction *Inst = BI++;
     if (isInstructionTriviallyDead(Inst))
       Inst->eraseFromParent();
-    else if (Constant *C = ConstantFoldInstruction(Inst, Context)) {
+    else if (Constant *C = ConstantFoldInstruction(Inst, BI->getContext())) {
       Inst->replaceAllUsesWith(C);
       Inst->eraseFromParent();
     }
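In the tail-duplication hunks the pass-wide Context member disappears from the call sites entirely: Instruction::clone() and ConstantFoldInstruction now receive the context fetched from the instruction at hand. A sketch of the new cloning loop (types as in the diff; assumes the era's clone(LLVMContext&) signature):

    // Duplicate DestBlock's instructions into SourceBlock, new-style.
    for (BasicBlock::iterator BI = DestBlock->begin(), BE = DestBlock->end();
         BI != BE; ++BI) {
      Instruction *New = BI->clone(BI->getContext()); // no pass-wide Context
      New->setName(BI->getName());
      SourceBlock->getInstList().push_back(New);
    }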
@@ -97,7 +97,7 @@ bool AddressingModeMatcher::MatchScaledValue(Value *ScaleReg, int64_t Scale,
   ConstantInt *CI = 0; Value *AddLHS = 0;
   if (isa<Instruction>(ScaleReg) &&  // not a constant expr.
       match(ScaleReg, m_Add(m_Value(AddLHS), m_ConstantInt(CI)),
-            *MemoryInst->getParent()->getContext())) {
+            MemoryInst->getContext())) {
     TestAddrMode.ScaledReg = AddLHS;
     TestAddrMode.BaseOffs += CI->getSExtValue()*TestAddrMode.Scale;
@@ -51,7 +51,7 @@ void llvm::DeleteDeadBlock(BasicBlock *BB) {
     // contained within it must dominate their uses, that all uses will
     // eventually be removed (they are themselves dead).
     if (!I.use_empty())
-      I.replaceAllUsesWith(BB->getContext()->getUndef(I.getType()));
+      I.replaceAllUsesWith(BB->getContext().getUndef(I.getType()));
     BB->getInstList().pop_back();
   }

@@ -71,7 +71,7 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB) {
     if (PN->getIncomingValue(0) != PN)
       PN->replaceAllUsesWith(PN->getIncomingValue(0));
     else
-      PN->replaceAllUsesWith(BB->getContext()->getUndef(PN->getType()));
+      PN->replaceAllUsesWith(BB->getContext().getUndef(PN->getType()));
     PN->eraseFromParent();
   }
 }

@@ -252,7 +252,7 @@ void llvm::RemoveSuccessor(TerminatorInst *TI, unsigned SuccNum) {

     // Create a value to return... if the function doesn't return null...
     if (BB->getParent()->getReturnType() != Type::VoidTy)
-      RetVal = TI->getParent()->getContext()->getNullValue(
+      RetVal = TI->getContext().getNullValue(
                  BB->getParent()->getReturnType());

     // Create the return...

@@ -387,7 +387,7 @@ BasicBlock *llvm::SplitBlockPredecessors(BasicBlock *BB,
   if (NumPreds == 0) {
     // Insert dummy values as the incoming value.
     for (BasicBlock::iterator I = BB->begin(); isa<PHINode>(I); ++I)
-      cast<PHINode>(I)->addIncoming(BB->getContext()->getUndef(I->getType()),
+      cast<PHINode>(I)->addIncoming(BB->getContext().getUndef(I->getType()),
                                     NewBB);
     return NewBB;
   }

@@ -618,7 +618,7 @@ void llvm::CopyPrecedingStopPoint(Instruction *I,
   if (I != I->getParent()->begin()) {
     BasicBlock::iterator BBI = I;  --BBI;
     if (DbgStopPointInst *DSPI = dyn_cast<DbgStopPointInst>(BBI)) {
-      CallInst *newDSPI = DSPI->clone(*I->getParent()->getContext());
+      CallInst *newDSPI = DSPI->clone(I->getContext());
       newDSPI->insertBefore(InsertPos);
     }
   }
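Because getContext() now returns a reference, the BasicBlockUtils call sites can chain straight through it without a local variable or null check. A one-function illustration (the helper name is hypothetical; getNullValue on the context is the era's API, as used above):

    // Build the zero return value for a function, reaching the context
    // through any instruction inside it.
    static Value *makeNullReturnValue(TerminatorInst *TI, const Type *RetTy) {
      return TI->getContext().getNullValue(RetTy);
    }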
@@ -43,7 +43,7 @@ BasicBlock *llvm::CloneBasicBlock(const BasicBlock *BB,
   // Loop over all instructions, and copy them over.
   for (BasicBlock::const_iterator II = BB->begin(), IE = BB->end();
        II != IE; ++II) {
-    Instruction *NewInst = II->clone(*BB->getContext());
+    Instruction *NewInst = II->clone(BB->getContext());
     if (II->hasName())
       NewInst->setName(II->getName()+NameSuffix);
     NewBB->getInstList().push_back(NewInst);

@@ -152,7 +152,7 @@ Function *llvm::CloneFunction(const Function *F,

   // Create a new function type...
   FunctionType *FTy =
-    F->getContext()->getFunctionType(F->getFunctionType()->getReturnType(),
+    F->getContext().getFunctionType(F->getFunctionType()->getReturnType(),
                                     ArgTypes, F->getFunctionType()->isVarArg());

   // Create the new function...

@@ -249,7 +249,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
       continue;
     }

-    Instruction *NewInst = II->clone(*BB->getContext());
+    Instruction *NewInst = II->clone(BB->getContext());
     if (II->hasName())
       NewInst->setName(II->getName()+NameSuffix);
     NewBB->getInstList().push_back(NewInst);

@@ -297,7 +297,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
   }

   if (!TerminatorDone) {
-    Instruction *NewInst = OldTI->clone(*BB->getContext());
+    Instruction *NewInst = OldTI->clone(BB->getContext());
     if (OldTI->hasName())
       NewInst->setName(OldTI->getName()+NameSuffix);
     NewBB->getInstList().push_back(NewInst);

@@ -325,7 +325,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
 /// mapping its operands through ValueMap if they are available.
 Constant *PruningFunctionCloner::
 ConstantFoldMappedInstruction(const Instruction *I) {
-  LLVMContext *Context = I->getParent()->getContext();
+  LLVMContext &Context = I->getContext();

   SmallVector<Constant*, 8> Ops;
   for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)

@@ -367,7 +367,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
                                      ClonedCodeInfo *CodeInfo,
                                      const TargetData *TD) {
   assert(NameSuffix && "NameSuffix cannot be null!");
-  LLVMContext *Context = OldFunc->getContext();
+  LLVMContext &Context = OldFunc->getContext();

 #ifndef NDEBUG
   for (Function::const_arg_iterator II = OldFunc->arg_begin(),

@@ -490,7 +490,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
     BasicBlock::iterator I = NewBB->begin();
     BasicBlock::const_iterator OldI = OldBB->begin();
     while ((PN = dyn_cast<PHINode>(I++))) {
-      Value *NV = OldFunc->getContext()->getUndef(PN->getType());
+      Value *NV = OldFunc->getContext().getUndef(PN->getType());
       PN->replaceAllUsesWith(NV);
       assert(ValueMap[OldI] == PN && "ValueMap mismatch");
       ValueMap[OldI] = NV;
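The cloning utilities repeat the pattern for locals: where a function used to capture the context as an LLVMContext*, it now binds a reference once and uses it throughout. A compressed sketch of the new ConstantFoldMappedInstruction shape (a simplified stand-in, not the real implementation; it assumes ConstantFoldInstOperands takes the context by reference, as elsewhere in this commit):

    Constant *foldMappedInstructionSketch(const Instruction *I,
                                          const TargetData *TD) {
      LLVMContext &Context = I->getContext(); // was I->getParent()->getContext()
      SmallVector<Constant*, 8> Ops;
      for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
        if (Constant *Op = dyn_cast<Constant>(I->getOperand(i)))
          Ops.push_back(Op);                  // real code maps through ValueMap
        else
          return 0;                           // operand not constant: give up
      return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                      Ops.empty() ? 0 : &Ops[0], Ops.size(),
                                      Context, TD);
    }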
@@ -90,7 +90,7 @@ Module *llvm::CloneModule(const Module *M,
     if (I->hasInitializer())
       GV->setInitializer(cast<Constant>(MapValue(I->getInitializer(),
                                                  ValueMap,
-                                                 &M->getContext())));
+                                                 M->getContext())));
     GV->setLinkage(I->getLinkage());
     GV->setThreadLocal(I->isThreadLocal());
     GV->setConstant(I->isConstant());

@@ -121,7 +121,7 @@ Module *llvm::CloneModule(const Module *M,
     GlobalAlias *GA = cast<GlobalAlias>(ValueMap[I]);
     GA->setLinkage(I->getLinkage());
     if (const Constant* C = I->getAliasee())
-      GA->setAliasee(cast<Constant>(MapValue(C, ValueMap, &M->getContext())));
+      GA->setAliasee(cast<Constant>(MapValue(C, ValueMap, M->getContext())));
   }

   return New;
Some files were not shown because too many files have changed in this diff.