"LLVMContext* " --> "LLVMContext *"

llvm-svn: 74878
Owen Anderson 2009-07-06 23:00:19 +00:00
parent 5f268720e9
commit 121f736d9c
31 changed files with 109 additions and 109 deletions
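The entire change is the whitespace convention named in the title: the asterisk binds to the declared name rather than to the type name, matching the prevailing style in these files. A minimal before/after sketch in C++ (the function names are hypothetical; only LLVMContext comes from the diff):

namespace llvm {
class Constant;
class Instruction;
class LLVMContext;

// Before this commit: the asterisk attached to the type name.
Constant *foldBefore(Instruction *I, LLVMContext* Context);

// After this commit: the asterisk attached to the parameter name,
// consistent with the other pointer parameters in these signatures.
Constant *foldAfter(Instruction *I, LLVMContext *Context);
} // namespace llvm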


@@ -29,13 +29,13 @@ namespace llvm {
/// is returned. Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
///
Constant *ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
Constant *ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
const TargetData *TD = 0);
/// ConstantFoldConstantExpression - Attempt to fold the constant expression
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext* Context,
Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext *Context,
const TargetData *TD = 0);
/// ConstantFoldInstOperands - Attempt to constant fold an instruction with the
@@ -46,7 +46,7 @@ Constant *ConstantFoldConstantExpression(ConstantExpr *CE, LLVMContext* Context,
///
Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant*const * Ops, unsigned NumOps,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD = 0);
/// ConstantFoldCompareInstOperands - Attempt to constant fold a compare
@@ -55,7 +55,7 @@ Constant *ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
///
Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
Constant*const * Ops, unsigned NumOps,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD = 0);
@@ -63,7 +63,7 @@ Constant *ConstantFoldCompareInstOperands(unsigned Predicate,
/// getelementptr constantexpr, return the constant value being addressed by the
/// constant expression, or null if something is funny and we can't decide.
Constant *ConstantFoldLoadThroughGEPConstantExpr(Constant *C, ConstantExpr *CE,
LLVMContext* Context);
LLVMContext *Context);
/// canConstantFoldCallTo - Return true if it's even possible to fold a call to
/// the specified function.


@@ -355,7 +355,7 @@ namespace llvm {
static char ID; // Pass identification, replacement for typeid
ScalarEvolution();
LLVMContext* getContext() const { return Context; }
LLVMContext *getContext() const { return Context; }
/// isSCEVable - Test if values of the given type are analyzable within
/// the SCEV framework. This primarily includes integer types, and it


@@ -131,7 +131,7 @@ class SparseSolver {
SparseSolver(const SparseSolver&); // DO NOT IMPLEMENT
void operator=(const SparseSolver&); // DO NOT IMPLEMENT
public:
explicit SparseSolver(AbstractLatticeFunction *Lattice, LLVMContext* C)
explicit SparseSolver(AbstractLatticeFunction *Lattice, LLVMContext *C)
: LatticeFunc(Lattice), Context(C) {}
~SparseSolver() {
delete LatticeFunc;


@@ -88,7 +88,7 @@ private:
public:
/// getContext - Get the context in which this basic block lives,
/// or null if it is not currently attached to a function.
LLVMContext* getContext() const;
LLVMContext *getContext() const;
/// Instruction iterators...
typedef InstListType::iterator iterator;


@@ -129,7 +129,7 @@ public:
/// getContext - Return a pointer to the LLVMContext associated with this
/// function, or NULL if this function is not bound to a context yet.
LLVMContext* getContext() const;
LLVMContext *getContext() const;
/// isVarArg - Return true if this function takes a variable number of
/// arguments.


@@ -79,7 +79,7 @@ class Pass {
Pass(const Pass &); // DO NOT IMPLEMENT
protected:
LLVMContext* Context;
LLVMContext *Context;
public:
explicit Pass(intptr_t pid) : Resolver(0), PassID(pid) {


@@ -30,7 +30,7 @@ class LLVMContext;
/// TargetFolder - Create constants with target dependent folding.
class TargetFolder {
const TargetData *TD;
LLVMContext* Context;
LLVMContext *Context;
/// Fold - Fold the constant using target specific information.
Constant *Fold(Constant *C) const {
@@ -41,7 +41,7 @@ class TargetFolder {
}
public:
explicit TargetFolder(const TargetData *TheTD, LLVMContext* C) :
explicit TargetFolder(const TargetData *TheTD, LLVMContext *C) :
TD(TheTD), Context(C) {}
//===--------------------------------------------------------------------===//


@@ -40,7 +40,7 @@ bool isAllocaPromotable(const AllocaInst *AI);
///
void PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
DominatorTree &DT, DominanceFrontier &DF,
LLVMContext* Context,
LLVMContext *Context,
AliasSetTracker *AST = 0);
} // End llvm namespace


@@ -23,7 +23,7 @@ namespace llvm {
class LLVMContext;
typedef DenseMap<const Value *, Value *> ValueMapTy;
Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext* Context);
Value *MapValue(const Value *V, ValueMapTy &VM, LLVMContext *Context);
void RemapInstruction(Instruction *I, ValueMapTy &VM);
} // End llvm namespace


@@ -499,7 +499,7 @@ BasicAliasAnalysis::alias(const Value *V1, unsigned V1Size,
// This function is used to determine if the indices of two GEP instructions are
// equal. V1 and V2 are the indices.
static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext* Context) {
static bool IndexOperandsEqual(Value *V1, Value *V2, LLVMContext *Context) {
if (V1->getType() == V2->getType())
return V1 == V2;
if (Constant *C1 = dyn_cast<Constant>(V1))


@@ -94,7 +94,7 @@ static bool IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
/// otherwise TD is null.
static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
Constant *Op1, const TargetData *TD,
LLVMContext* Context){
LLVMContext *Context){
// SROA
// Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
@@ -123,7 +123,7 @@ static Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0,
/// constant expression, do so.
static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
const Type *ResultTy,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD) {
Constant *Ptr = Ops[0];
if (!TD || !cast<PointerType>(Ptr->getType())->getElementType()->isSized())
@@ -157,7 +157,7 @@ static Constant *SymbolicallyEvaluateGEP(Constant* const* Ops, unsigned NumOps,
/// FoldBitCast - Constant fold bitcast, symbolically evaluating it with
/// targetdata. Return 0 if unfoldable.
static Constant *FoldBitCast(Constant *C, const Type *DestTy,
const TargetData &TD, LLVMContext* Context) {
const TargetData &TD, LLVMContext *Context) {
// If this is a bitcast from constant vector -> vector, fold it.
if (ConstantVector *CV = dyn_cast<ConstantVector>(C)) {
if (const VectorType *DestVTy = dyn_cast<VectorType>(DestTy)) {
@@ -281,7 +281,7 @@ static Constant *FoldBitCast(Constant *C, const Type *DestTy,
/// is returned. Note that this function can only fail when attempting to fold
/// instructions like loads and stores, which have no constant expression form.
///
Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext *Context,
const TargetData *TD) {
if (PHINode *PN = dyn_cast<PHINode>(I)) {
if (PN->getNumIncomingValues() == 0)
@@ -321,7 +321,7 @@ Constant *llvm::ConstantFoldInstruction(Instruction *I, LLVMContext* Context,
/// using the specified TargetData. If successful, the constant result is
/// returned; if not, null is returned.
Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD) {
SmallVector<Constant*, 8> Ops;
for (User::op_iterator i = CE->op_begin(), e = CE->op_end(); i != e; ++i)
@@ -344,7 +344,7 @@ Constant *llvm::ConstantFoldConstantExpression(ConstantExpr *CE,
///
Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant* const* Ops, unsigned NumOps,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD) {
// Handle easy binops first.
if (Instruction::isBinaryOp(Opcode)) {
@@ -470,7 +470,7 @@ Constant *llvm::ConstantFoldInstOperands(unsigned Opcode, const Type *DestTy,
Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
Constant*const * Ops,
unsigned NumOps,
LLVMContext* Context,
LLVMContext *Context,
const TargetData *TD) {
// fold: icmp (inttoptr x), null -> icmp x, 0
// fold: icmp (ptrtoint x), 0 -> icmp x, null
@@ -543,7 +543,7 @@ Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
/// constant expression, or null if something is funny and we can't decide.
Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
ConstantExpr *CE,
LLVMContext* Context) {
LLVMContext *Context) {
if (CE->getOperand(1) != Context->getNullValue(CE->getOperand(1)->getType()))
return 0; // Do not allow stepping over the value!
@@ -680,7 +680,7 @@ llvm::canConstantFoldCallTo(const Function *F) {
}
static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
const Type *Ty, LLVMContext* Context) {
const Type *Ty, LLVMContext *Context) {
errno = 0;
V = NativeFP(V);
if (errno != 0) {
@@ -699,7 +699,7 @@ static Constant *ConstantFoldFP(double (*NativeFP)(double), double V,
static Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
double V, double W,
const Type *Ty,
LLVMContext* Context) {
LLVMContext *Context) {
errno = 0;
V = NativeFP(V, W);
if (errno != 0) {
@@ -722,7 +722,7 @@ Constant *
llvm::ConstantFoldCall(Function *F,
Constant* const* Operands, unsigned NumOperands) {
if (!F->hasName()) return 0;
LLVMContext* Context = F->getContext();
LLVMContext *Context = F->getContext();
const char *Str = F->getNameStart();
unsigned Len = F->getNameLen();


@@ -3421,7 +3421,7 @@ static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
if (Constant *C = dyn_cast<Constant>(V)) return C;
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
Instruction *I = cast<Instruction>(V);
LLVMContext* Context = I->getParent()->getContext();
LLVMContext *Context = I->getParent()->getContext();
std::vector<Constant*> Operands;
Operands.resize(I->getNumOperands());


@@ -835,7 +835,7 @@ bool llvm::CannotBeNegativeZero(const Value *V, unsigned Depth) {
Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType,
SmallVector<unsigned, 10> &Idxs,
unsigned IdxSkip,
LLVMContext* Context,
LLVMContext *Context,
Instruction *InsertBefore) {
const llvm::StructType *STy = llvm::dyn_cast<llvm::StructType>(IndexedType);
if (STy) {
@@ -914,7 +914,7 @@ Value *BuildSubAggregate(Value *From, const unsigned *idx_begin,
/// If InsertBefore is not null, this function will duplicate (modified)
/// insertvalues when a part of a nested struct is extracted.
Value *llvm::FindInsertedValue(Value *V, const unsigned *idx_begin,
const unsigned *idx_end, LLVMContext* Context,
const unsigned *idx_end, LLVMContext *Context,
Instruction *InsertBefore) {
// Nothing to index? Just return V then (this is useful at the end of our
// recursion)


@@ -246,7 +246,7 @@ static bool AnalyzeGlobal(Value *V, GlobalStatus &GS,
}
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
LLVMContext* Context) {
LLVMContext *Context) {
ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
if (!CI) return 0;
unsigned IdxV = CI->getZExtValue();
@@ -283,7 +283,7 @@ static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx,
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
LLVMContext* Context) {
LLVMContext *Context) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
User *U = *UI++;
@@ -465,7 +465,7 @@ static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD,
LLVMContext* Context) {
LLVMContext *Context) {
// Make sure this global only has simple uses that we can SRA.
if (!GlobalUsersSafeToSRA(GV))
return 0;
@@ -674,7 +674,7 @@ static bool AllUsesOfLoadedValueWillTrapIfNull(GlobalVariable *GV) {
}
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
LLVMContext* Context) {
LLVMContext *Context) {
bool Changed = false;
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
Instruction *I = cast<Instruction>(*UI++);
@@ -742,7 +742,7 @@ static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV,
/// if the loaded value is dynamically null, then we know that they cannot be
/// reachable with a null; optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
LLVMContext* Context) {
LLVMContext *Context) {
bool Changed = false;
// Keep track of whether we are able to remove all the uses of the global
@@ -796,7 +796,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V, LLVMContext* Context) {
static void ConstantPropUsersOf(Value *V, LLVMContext *Context) {
for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
if (Instruction *I = dyn_cast<Instruction>(*UI++))
if (Constant *NewC = ConstantFoldInstruction(I, Context)) {
@@ -817,7 +817,7 @@ static void ConstantPropUsersOf(Value *V, LLVMContext* Context) {
/// malloc into a global, and any loads of GV as uses of the new global.
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
MallocInst *MI,
LLVMContext* Context) {
LLVMContext *Context) {
DOUT << "PROMOTING MALLOC GLOBAL: " << *GV << " MALLOC = " << *MI;
ConstantInt *NElements = cast<ConstantInt>(MI->getArraySize());
@@ -1131,7 +1131,7 @@ static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(GlobalVariable *GV,
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext* Context) {
LLVMContext *Context) {
std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];
if (FieldNo >= FieldVals.size())
@@ -1174,7 +1174,7 @@ static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext* Context) {
LLVMContext *Context) {
// If this is a comparison against null, handle it.
if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
@@ -1245,7 +1245,7 @@ static void RewriteHeapSROALoadUser(Instruction *LoadUser,
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite,
LLVMContext* Context) {
LLVMContext *Context) {
for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
UI != E; ) {
Instruction *User = cast<Instruction>(*UI++);
@@ -1262,7 +1262,7 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - MI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, MallocInst *MI,
LLVMContext* Context){
LLVMContext *Context){
DOUT << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *MI;
const StructType *STy = cast<StructType>(MI->getAllocatedType());
@@ -1442,7 +1442,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
MallocInst *MI,
Module::global_iterator &GVI,
TargetData &TD,
LLVMContext* Context) {
LLVMContext *Context) {
// If this is a malloc of an abstract type, don't touch it.
if (!MI->getAllocatedType()->isSized())
return false;
@@ -1526,7 +1526,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
Module::global_iterator &GVI,
TargetData &TD, LLVMContext* Context) {
TargetData &TD, LLVMContext *Context) {
// Ignore no-op GEPs and bitcasts.
StoredOnceVal = StoredOnceVal->stripPointerCasts();
@@ -1558,7 +1558,7 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
/// can shrink the global into a boolean and select between the two values
/// whenever it is used. This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal,
LLVMContext* Context) {
LLVMContext *Context) {
const Type *GVElType = GV->getType()->getElementType();
// If GVElType is already i1, it is already shrunk. If the type of the GV is
@@ -1941,7 +1941,7 @@ static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
/// specified array, returning the new global to use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
const std::vector<Function*> &Ctors,
LLVMContext* Context) {
LLVMContext *Context) {
// If we made a change, reassemble the initializer list.
std::vector<Constant*> CSVals;
CSVals.push_back(Context->getConstantInt(Type::Int32Ty, 65535));
@@ -2009,7 +2009,7 @@ static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues,
/// enough for us to understand. In particular, if it is a cast of something,
/// we punt. We basically just support direct accesses to globals and GEP's of
/// globals. This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext* Context) {
static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext *Context) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C)) {
if (!GV->hasExternalLinkage() && !GV->hasLocalLinkage())
return false; // do not allow weak/linkonce/dllimport/dllexport linkage.
@@ -2034,7 +2034,7 @@ static bool isSimpleEnoughPointerToCommit(Constant *C, LLVMContext* Context) {
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
ConstantExpr *Addr, unsigned OpNo,
LLVMContext* Context) {
LLVMContext *Context) {
// Base case of the recursion.
if (OpNo == Addr->getNumOperands()) {
assert(Val->getType() == Init->getType() && "Type mismatch!");
@@ -2097,7 +2097,7 @@ static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr,
LLVMContext* Context) {
LLVMContext *Context) {
if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
assert(GV->hasInitializer());
GV->setInitializer(Val);
@@ -2117,7 +2117,7 @@ static void CommitValueTo(Constant *Val, Constant *Addr,
/// decide, return null.
static Constant *ComputeLoadResult(Constant *P,
const DenseMap<Constant*, Constant*> &Memory,
LLVMContext* Context) {
LLVMContext *Context) {
// If this memory location has been recently stored, use the stored value: it
// is the most up-to-date.
DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
@@ -2156,7 +2156,7 @@ static bool EvaluateFunction(Function *F, Constant *&RetVal,
if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
return false;
LLVMContext* Context = F->getContext();
LLVMContext *Context = F->getContext();
CallStack.push_back(F);


@@ -23,7 +23,7 @@
void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
GlobalValue *Array) {
LLVMContext* Context = MainFn->getContext();
LLVMContext *Context = MainFn->getContext();
const Type *ArgVTy =
Context->getPointerTypeUnqual(Context->getPointerTypeUnqual(Type::Int8Ty));
const PointerType *UIntPtr = Context->getPointerTypeUnqual(Type::Int32Ty);
@@ -101,7 +101,7 @@ void llvm::InsertProfilingInitCall(Function *MainFn, const char *FnName,
void llvm::IncrementCounterInBlock(BasicBlock *BB, unsigned CounterNum,
GlobalValue *CounterArray) {
LLVMContext* Context = BB->getContext();
LLVMContext *Context = BB->getContext();
// Insert the increment after any alloca or PHI instructions...
BasicBlock::iterator InsertPos = BB->getFirstNonPHI();


@@ -208,7 +208,7 @@ void GlobalRandomCounter::PrepFunction(Function* F) {}
void GlobalRandomCounter::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext* Context = bb->getContext();
LLVMContext *Context = bb->getContext();
//decrement counter
LoadInst* l = new LoadInst(Counter, "counter", t);
@@ -282,7 +282,7 @@ void GlobalRandomCounterOpt::PrepFunction(Function* F) {
void GlobalRandomCounterOpt::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext* Context = bb->getContext();
LLVMContext *Context = bb->getContext();
//decrement counter
LoadInst* l = new LoadInst(AI, "counter", t);
@@ -317,7 +317,7 @@ void CycleCounter::PrepFunction(Function* F) {}
void CycleCounter::ProcessChoicePoint(BasicBlock* bb) {
BranchInst* t = cast<BranchInst>(bb->getTerminator());
LLVMContext* Context = bb->getContext();
LLVMContext *Context = bb->getContext();
CallInst* c = CallInst::Create(F, "rdcc", t);
BinaryOperator* b =


@@ -83,7 +83,7 @@ namespace {
static char ID; // Pass identification, replacement for typeid
InstCombiner() : FunctionPass(&ID) {}
LLVMContext* getContext() { return Context; }
LLVMContext *getContext() { return Context; }
/// AddToWorkList - Add the specified instruction to the worklist if it
/// isn't already in it.
@@ -568,7 +568,7 @@ bool InstCombiner::SimplifyCompare(CmpInst &I) {
// dyn_castNegVal - Given a 'sub' instruction, return the RHS of the instruction
// if the LHS is a constant zero (which is the 'negate' form).
//
static inline Value *dyn_castNegVal(Value *V, LLVMContext* Context) {
static inline Value *dyn_castNegVal(Value *V, LLVMContext *Context) {
if (BinaryOperator::isNeg(V))
return BinaryOperator::getNegArgument(V);
@@ -587,7 +587,7 @@ static inline Value *dyn_castNegVal(Value *V, LLVMContext* Context) {
// instruction if the LHS is a constant negative zero (which is the 'negate'
// form).
//
static inline Value *dyn_castFNegVal(Value *V, LLVMContext* Context) {
static inline Value *dyn_castFNegVal(Value *V, LLVMContext *Context) {
if (BinaryOperator::isFNeg(V))
return BinaryOperator::getFNegArgument(V);
@@ -602,7 +602,7 @@ static inline Value *dyn_castFNegVal(Value *V, LLVMContext* Context) {
return 0;
}
static inline Value *dyn_castNotVal(Value *V, LLVMContext* Context) {
static inline Value *dyn_castNotVal(Value *V, LLVMContext *Context) {
if (BinaryOperator::isNot(V))
return BinaryOperator::getNotArgument(V);
@@ -618,7 +618,7 @@ static inline Value *dyn_castNotVal(Value *V, LLVMContext* Context) {
// Otherwise, return null.
//
static inline Value *dyn_castFoldableMul(Value *V, ConstantInt *&CST,
LLVMContext* Context) {
LLVMContext *Context) {
if (V->hasOneUse() && V->getType()->isInteger())
if (Instruction *I = dyn_cast<Instruction>(V)) {
if (I->getOpcode() == Instruction::Mul)
@@ -658,19 +658,19 @@ static unsigned getOpcode(const Value *V) {
}
/// AddOne - Add one to a ConstantInt
static Constant *AddOne(Constant *C, LLVMContext* Context) {
static Constant *AddOne(Constant *C, LLVMContext *Context) {
return Context->getConstantExprAdd(C,
Context->getConstantInt(C->getType(), 1));
}
/// SubOne - Subtract one from a ConstantInt
static Constant *SubOne(ConstantInt *C, LLVMContext* Context) {
static Constant *SubOne(ConstantInt *C, LLVMContext *Context) {
return Context->getConstantExprSub(C,
Context->getConstantInt(C->getType(), 1));
}
/// MultiplyOverflows - True if the multiply cannot be expressed in an int of
/// this size.
static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign,
LLVMContext* Context) {
LLVMContext *Context) {
uint32_t W = C1->getBitWidth();
APInt LHSExt = C1->getValue(), RHSExt = C2->getValue();
if (sign) {
@@ -697,7 +697,7 @@ static bool MultiplyOverflows(ConstantInt *C1, ConstantInt *C2, bool sign,
/// are any bits set in the constant that are not demanded. If so, shrink the
/// constant and return true.
static bool ShrinkDemandedConstant(Instruction *I, unsigned OpNo,
APInt Demanded, LLVMContext* Context) {
APInt Demanded, LLVMContext *Context) {
assert(I && "No instruction?");
assert(OpNo < I->getNumOperands() && "Operand index too large");
@@ -1800,7 +1800,7 @@ Value *InstCombiner::SimplifyDemandedVectorElts(Value *V, APInt DemandedElts,
///
template<typename Functor>
static Instruction *AssociativeOpt(BinaryOperator &Root, const Functor &F,
LLVMContext* Context) {
LLVMContext *Context) {
unsigned Opcode = Root.getOpcode();
Value *LHS = Root.getOperand(0);
@@ -1872,8 +1872,8 @@ namespace {
// AddRHS - Implements: X + X --> X << 1
struct AddRHS {
Value *RHS;
LLVMContext* Context;
AddRHS(Value *rhs, LLVMContext* C) : RHS(rhs), Context(C) {}
LLVMContext *Context;
AddRHS(Value *rhs, LLVMContext *C) : RHS(rhs), Context(C) {}
bool shouldApply(Value *LHS) const { return LHS == RHS; }
Instruction *apply(BinaryOperator &Add) const {
return BinaryOperator::CreateShl(Add.getOperand(0),
@@ -1885,8 +1885,8 @@ struct AddRHS {
// iff C1&C2 == 0
struct AddMaskingAnd {
Constant *C2;
LLVMContext* Context;
AddMaskingAnd(Constant *c, LLVMContext* C) : C2(c), Context(C) {}
LLVMContext *Context;
AddMaskingAnd(Constant *c, LLVMContext *C) : C2(c), Context(C) {}
bool shouldApply(Value *LHS) const {
ConstantInt *C1;
return match(LHS, m_And(m_Value(), m_ConstantInt(C1))) &&
@@ -1901,7 +1901,7 @@ struct AddMaskingAnd {
static Value *FoldOperationIntoSelectOperand(Instruction &I, Value *SO,
InstCombiner *IC) {
LLVMContext* Context = IC->getContext();
LLVMContext *Context = IC->getContext();
if (CastInst *CI = dyn_cast<CastInst>(&I)) {
return IC->InsertCastBefore(CI->getOpcode(), SO, I.getType(), I);
@@ -3389,7 +3389,7 @@ static unsigned getFCmpCode(FCmpInst::Predicate CC, bool &isOrdered) {
/// new ICmp instruction. The sign is passed in to determine which kind
/// of predicate to use in the new icmp instruction.
static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
LLVMContext* Context) {
LLVMContext *Context) {
switch (code) {
default: assert(0 && "Illegal ICmp code!");
case 0: return Context->getConstantIntFalse();
@@ -3423,7 +3423,7 @@ static Value *getICmpValue(bool sign, unsigned code, Value *LHS, Value *RHS,
/// opcode and two operands into either a FCmp instruction. isordered is passed
/// in to determine which kind of predicate to use in the new fcmp instruction.
static Value *getFCmpValue(bool isordered, unsigned code,
Value *LHS, Value *RHS, LLVMContext* Context) {
Value *LHS, Value *RHS, LLVMContext *Context) {
switch (code) {
default: assert(0 && "Illegal FCmp code!");
case 0:
@@ -5271,7 +5271,7 @@ Instruction *InstCombiner::visitXor(BinaryOperator &I) {
}
static ConstantInt *ExtractElement(Constant *V, Constant *Idx,
LLVMContext* Context) {
LLVMContext *Context) {
return cast<ConstantInt>(Context->getConstantExprExtractElement(V, Idx));
}
@@ -5290,7 +5290,7 @@ static bool HasAddOverflow(ConstantInt *Result,
/// AddWithOverflow - Compute Result = In1+In2, returning true if the result
/// overflowed for this type.
static bool AddWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, LLVMContext* Context,
Constant *In2, LLVMContext *Context,
bool IsSigned = false) {
Result = Context->getConstantExprAdd(In1, In2);
@@ -5326,7 +5326,7 @@ static bool HasSubOverflow(ConstantInt *Result,
/// SubWithOverflow - Compute Result = In1-In2, returning true if the result
/// overflowed for this type.
static bool SubWithOverflow(Constant *&Result, Constant *In1,
Constant *In2, LLVMContext* Context,
Constant *In2, LLVMContext *Context,
bool IsSigned = false) {
Result = Context->getConstantExprSub(In1, In2);
@@ -5354,7 +5354,7 @@ static Value *EmitGEPOffset(User *GEP, Instruction &I, InstCombiner &IC) {
TargetData &TD = IC.getTargetData();
gep_type_iterator GTI = gep_type_begin(GEP);
const Type *IntPtrTy = TD.getIntPtrType();
LLVMContext* Context = IC.getContext();
LLVMContext *Context = IC.getContext();
Value *Result = Context->getNullValue(IntPtrTy);
// Build a mask for high order bits.
@@ -7718,7 +7718,7 @@ Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, ConstantInt *Op1,
/// X*Scale+Offset.
///
static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
int &Offset, LLVMContext* Context) {
int &Offset, LLVMContext *Context) {
assert(Val->getType() == Type::Int32Ty && "Unexpected allocation size type!");
if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
Offset = CI->getZExtValue();
@@ -8089,7 +8089,7 @@ Instruction *InstCombiner::commonCastTransforms(CastInst &CI) {
static const Type *FindElementAtOffset(const Type *Ty, int64_t Offset,
SmallVectorImpl<Value*> &NewIndices,
const TargetData *TD,
LLVMContext* Context) {
LLVMContext *Context) {
if (!Ty->isSized()) return 0;
// Start with the index over the outer type. Note that the type size
@@ -8742,7 +8742,7 @@ Instruction *InstCombiner::visitSExt(SExtInst &CI) {
/// FitsInFPType - Return a Constant* for the specified FP constant if it fits
/// in the specified FP type without changing its value.
static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
LLVMContext* Context) {
LLVMContext *Context) {
bool losesInfo;
APFloat F = CFP->getValueAPF();
(void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
@@ -8753,7 +8753,7 @@ static Constant *FitsInFPType(ConstantFP *CFP, const fltSemantics &Sem,
/// LookThroughFPExtensions - If this is an fp extension instruction, look
/// through it until we get the source value.
static Value *LookThroughFPExtensions(Value *V, LLVMContext* Context) {
static Value *LookThroughFPExtensions(Value *V, LLVMContext *Context) {
if (Instruction *I = dyn_cast<Instruction>(V))
if (I->getOpcode() == Instruction::FPExt)
return LookThroughFPExtensions(I->getOperand(0), Context);
@@ -9076,7 +9076,7 @@ static unsigned GetSelectFoldableOperands(Instruction *I) {
/// GetSelectFoldableConstant - For the same transformation as the previous
/// function, return the identity constant that goes into the select.
static Constant *GetSelectFoldableConstant(Instruction *I,
LLVMContext* Context) {
LLVMContext *Context) {
switch (I->getOpcode()) {
default: assert(0 && "This cannot happen!"); abort();
case Instruction::Add:
@@ -11450,7 +11450,7 @@ static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
const TargetData *TD) {
User *CI = cast<User>(LI.getOperand(0));
Value *CastOp = CI->getOperand(0);
LLVMContext* Context = IC.getContext();
LLVMContext *Context = IC.getContext();
if (TD) {
if (ConstantExpr *CE = dyn_cast<ConstantExpr>(CI)) {
@@ -11675,7 +11675,7 @@ Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
User *CI = cast<User>(SI.getOperand(1));
Value *CastOp = CI->getOperand(0);
LLVMContext* Context = IC.getContext();
LLVMContext *Context = IC.getContext();
const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
@@ -12304,7 +12304,7 @@ static std::vector<unsigned> getShuffleMask(const ShuffleVectorInst *SVI) {
/// value is already around as a register, for example if it were inserted then
/// extracted from the vector.
static Value *FindScalarElement(Value *V, unsigned EltNo,
LLVMContext* Context) {
LLVMContext *Context) {
assert(isa<VectorType>(V->getType()) && "Not looking at a vector?");
const VectorType *PTy = cast<VectorType>(V->getType());
unsigned Width = PTy->getNumElements();
@@ -12480,7 +12480,7 @@ Instruction *InstCombiner::visitExtractElementInst(ExtractElementInst &EI) {
/// Otherwise, return false.
static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
std::vector<Constant*> &Mask,
LLVMContext* Context) {
LLVMContext *Context) {
assert(V->getType() == LHS->getType() && V->getType() == RHS->getType() &&
"Invalid CollectSingleShuffleElements");
unsigned NumElts = cast<VectorType>(V->getType())->getNumElements();
@@ -12550,7 +12550,7 @@ static bool CollectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
/// RHS of the shuffle instruction, if it is not null. Return a shuffle mask
/// that computes V and the LHS value of the shuffle.
static Value *CollectShuffleElements(Value *V, std::vector<Constant*> &Mask,
Value *&RHS, LLVMContext* Context) {
Value *&RHS, LLVMContext *Context) {
assert(isa<VectorType>(V->getType()) &&
(RHS == 0 || V->getType() == RHS->getType()) &&
"Invalid shuffle!");


@@ -795,7 +795,7 @@ bool JumpThreading::ProcessBranchOnLogical(Value *V, BasicBlock *BB,
/// result can not be determined, a null pointer is returned.
static Constant *GetResultOfComparison(CmpInst::Predicate pred,
Value *LHS, Value *RHS,
LLVMContext* Context) {
LLVMContext *Context) {
if (Constant *CLHS = dyn_cast<Constant>(LHS))
if (Constant *CRHS = dyn_cast<Constant>(RHS))
return Context->getConstantExprCompare(pred, CLHS, CRHS);


@@ -294,14 +294,14 @@ static bool isUsedOutsideLoop(Value *V, Loop *L) {
// Return V+1
static Value *getPlusOne(Value *V, bool Sign, Instruction *InsertPt,
LLVMContext* Context) {
LLVMContext *Context) {
Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateAdd(V, One, "lsp", InsertPt);
}
// Return V-1
static Value *getMinusOne(Value *V, bool Sign, Instruction *InsertPt,
LLVMContext* Context) {
LLVMContext *Context) {
Constant *One = Context->getConstantInt(V->getType(), 1, Sign);
return BinaryOperator::CreateSub(V, One, "lsp", InsertPt);
}


@@ -200,7 +200,7 @@ static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
///
static Instruction *LowerNegateToMultiply(Instruction *Neg,
std::map<AssertingVH<>, unsigned> &ValueRankMap,
LLVMContext* Context) {
LLVMContext *Context) {
Constant *Cst = Context->getConstantIntAllOnesValue(Neg->getType());
Instruction *Res = BinaryOperator::CreateMul(Neg->getOperand(1), Cst, "",Neg);
@@ -458,7 +458,7 @@ static Instruction *BreakUpSubtract(Instruction *Sub,
/// reassociation.
static Instruction *ConvertShiftToMul(Instruction *Shl,
std::map<AssertingVH<>, unsigned> &ValueRankMap,
LLVMContext* Context) {
LLVMContext *Context) {
// If an operand of this shift is a reassociable multiply, or if the shift
// is used by a reassociable multiply or add, turn into a multiply.
if (isReassociableOp(Shl->getOperand(0), Instruction::Mul) ||


@@ -139,7 +139,7 @@ public:
/// Constant Propagation.
///
class SCCPSolver : public InstVisitor<SCCPSolver> {
LLVMContext* Context;
LLVMContext *Context;
DenseSet<BasicBlock*> BBExecutable;// The basic blocks that are executable
std::map<Value*, LatticeVal> ValueState; // The state each value is in.
@@ -179,7 +179,7 @@ class SCCPSolver : public InstVisitor<SCCPSolver> {
typedef std::pair<BasicBlock*, BasicBlock*> Edge;
DenseSet<Edge> KnownFeasibleEdges;
public:
void setContext(LLVMContext* C) { Context = C; }
void setContext(LLVMContext *C) { Context = C; }
/// MarkBlockExecutable - This method can be used by clients to mark all of
/// the blocks that are known to be intrinsically live in the processed unit.


@@ -1261,7 +1261,7 @@ void SROA::CleanupAllocaUsers(AllocationInst *AI) {
/// and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
unsigned AllocaSize, const TargetData &TD,
LLVMContext* Context) {
LLVMContext *Context) {
// If this could be contributing to a vector, analyze it.
if (VecTy != Type::VoidTy) { // either null or a vector type.


@@ -58,7 +58,7 @@ FunctionPass *llvm::createCFGSimplificationPass() {
/// ChangeToUnreachable - Insert an unreachable instruction before the specified
/// instruction, making it and the rest of the code in the block dead.
static void ChangeToUnreachable(Instruction *I, LLVMContext* Context) {
static void ChangeToUnreachable(Instruction *I, LLVMContext *Context) {
BasicBlock *BB = I->getParent();
// Loop over all of the successors, removing BB's entry from any PHI
// nodes.
@@ -97,7 +97,7 @@ static void ChangeToCall(InvokeInst *II) {
static bool MarkAliveBlocks(BasicBlock *BB,
SmallPtrSet<BasicBlock*, 128> &Reachable,
LLVMContext* Context) {
LLVMContext *Context) {
SmallVector<BasicBlock*, 128> Worklist;
Worklist.push_back(BB);


@@ -325,7 +325,7 @@ void PruningFunctionCloner::CloneBlock(const BasicBlock *BB,
/// mapping its operands through ValueMap if they are available.
Constant *PruningFunctionCloner::
ConstantFoldMappedInstruction(const Instruction *I) {
LLVMContext* Context = I->getParent()->getContext();
LLVMContext *Context = I->getParent()->getContext();
SmallVector<Constant*, 8> Ops;
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
@@ -367,7 +367,7 @@ void llvm::CloneAndPruneFunctionInto(Function *NewFunc, const Function *OldFunc,
ClonedCodeInfo *CodeInfo,
const TargetData *TD) {
assert(NameSuffix && "NameSuffix cannot be null!");
LLVMContext* Context = OldFunc->getContext();
LLVMContext *Context = OldFunc->getContext();
#ifndef NDEBUG
for (Function::const_arg_iterator II = OldFunc->arg_begin(),


@@ -238,7 +238,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
DOUT << "inputs: " << inputs.size() << "\n";
DOUT << "outputs: " << outputs.size() << "\n";
LLVMContext* Context = header->getContext();
LLVMContext *Context = header->getContext();
// This function returns unsigned, outputs will go back by reference.
switch (NumExitBlocks) {
@@ -352,7 +352,7 @@ Function *CodeExtractor::constructFunction(const Values &inputs,
void CodeExtractor::
emitCallAndSwitchStatement(Function *newFunction, BasicBlock *codeReplacer,
Values &inputs, Values &outputs) {
LLVMContext* Context = codeReplacer->getContext();
LLVMContext *Context = codeReplacer->getContext();
// Emit a call to the new function, passing in: *pointer to struct (if
// aggregating parameters), or plain inputs and allocated memory for outputs


@@ -263,7 +263,7 @@ void llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
/// too, recursively.
void
llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
LLVMContext* Context = PN->getParent()->getContext();
LLVMContext *Context = PN->getParent()->getContext();
// We can remove a PHI if it is on a cycle in the def-use graph
// where each node in the cycle has degree one, i.e. only one use,


@@ -183,7 +183,7 @@ namespace {
///
AliasSetTracker *AST;
LLVMContext* Context;
LLVMContext *Context;
/// AllocaLookup - Reverse mapping of Allocas.
///
@@ -216,7 +216,7 @@ namespace {
public:
PromoteMem2Reg(const std::vector<AllocaInst*> &A, DominatorTree &dt,
DominanceFrontier &df, AliasSetTracker *ast,
LLVMContext* C)
LLVMContext *C)
: Allocas(A), DT(dt), DF(df), AST(ast), Context(C) {}
void run();
@@ -999,7 +999,7 @@ NextIteration:
///
void llvm::PromoteMemToReg(const std::vector<AllocaInst*> &Allocas,
DominatorTree &DT, DominanceFrontier &DF,
LLVMContext* Context, AliasSetTracker *AST) {
LLVMContext *Context, AliasSetTracker *AST) {
// If there is nothing to do, bail out...
if (Allocas.empty()) return;


@@ -1178,7 +1178,7 @@ static bool BlockIsSimpleEnoughToThreadThrough(BasicBlock *BB) {
/// ultimate destination.
static bool FoldCondBranchOnPHI(BranchInst *BI) {
BasicBlock *BB = BI->getParent();
LLVMContext* Context = BB->getContext();
LLVMContext *Context = BB->getContext();
PHINode *PN = dyn_cast<PHINode>(BI->getCondition());
// NOTE: we currently cannot transform this case if the PHI node is used
// outside of the block.
@@ -1276,7 +1276,7 @@ static bool FoldCondBranchOnPHI(BranchInst *BI) {
/// FoldTwoEntryPHINode - Given a BB that starts with the specified two-entry
/// PHI node, see if we can eliminate it.
static bool FoldTwoEntryPHINode(PHINode *PN) {
LLVMContext* Context = PN->getParent()->getContext();
LLVMContext *Context = PN->getParent()->getContext();
// Ok, this is a two entry PHI node. Check to see if this is a simple "if
// statement", which has a very simple dominance structure. Basically, we
@@ -1609,7 +1609,7 @@ bool llvm::FoldBranchToCommonDest(BranchInst *BI) {
static bool SimplifyCondBranchToCondBranch(BranchInst *PBI, BranchInst *BI) {
assert(PBI->isConditional() && BI->isConditional());
BasicBlock *BB = BI->getParent();
LLVMContext* Context = BB->getContext();
LLVMContext *Context = BB->getContext();
// If this block ends with a branch instruction, and if there is a
// predecessor that ends on a branch of the same condition, make


@@ -22,7 +22,7 @@
#include "llvm/ADT/SmallVector.h"
using namespace llvm;
Value *llvm::MapValue(const Value *V, ValueMapTy &VM, LLVMContext* Context) {
Value *llvm::MapValue(const Value *V, ValueMapTy &VM, LLVMContext *Context) {
Value *&VMSlot = VM[V];
if (VMSlot) return VMSlot; // Does it exist in the map yet?


@@ -29,7 +29,7 @@ ValueSymbolTable *BasicBlock::getValueSymbolTable() {
return 0;
}
LLVMContext* BasicBlock::getContext() const {
LLVMContext *BasicBlock::getContext() const {
return Parent ? Parent->getContext() : 0;
}


@@ -114,7 +114,7 @@ void Argument::removeAttr(Attributes attr) {
// Helper Methods in Function
//===----------------------------------------------------------------------===//
LLVMContext* Function::getContext() const {
LLVMContext *Function::getContext() const {
const Module* M = getParent();
if (M) return &M->getContext();
return 0;