Remove AllocationInst. Since MallocInst went away, AllocaInst is the only subclass of AllocationInst, so it is no longer necessary.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@84969 91177308-0d34-0410-b5e6-96231b3b80d8
commit 7b929dad59
parent 4ab74cdc12
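For downstream code the migration is mechanical: every use of the removed abstract base class becomes a use of AllocaInst itself, since the two matched exactly the same instructions once MallocInst was gone. A minimal sketch against headers of this era; the helper and its name are illustrative, not part of this commit:

```cpp
// Hypothetical downstream code; isa<>/dyn_cast<> are LLVM's real casting
// utilities, but this helper function is illustrative only.
#include "llvm/Instructions.h"     // header locations as of this 2009 tree
#include "llvm/Support/Casting.h"

using namespace llvm;

// Before this commit a pass would write:
//   if (const AllocationInst *AI = dyn_cast<AllocationInst>(V)) ...
// With MallocInst already removed, AllocationInst matched exactly the
// stack allocations, so the test simply becomes:
static bool isStackAllocation(const Value *V) {
  return isa<AllocaInst>(V);
}
```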
@@ -455,8 +455,7 @@ void LLVMDisposeTypeHandle(LLVMTypeHandleRef TypeHandle);
       macro(UnreachableInst)            \
       macro(UnwindInst)                 \
     macro(UnaryInstruction)             \
-      macro(AllocationInst)             \
-        macro(AllocaInst)               \
+      macro(AllocaInst)                 \
       macro(CastInst)                   \
         macro(BitCastInst)              \
         macro(FPExtInst)                \
@@ -34,22 +34,28 @@ class LLVMContext;
 class DominatorTree;

 //===----------------------------------------------------------------------===//
-//                            AllocationInst Class
+//                                AllocaInst Class
 //===----------------------------------------------------------------------===//

-/// AllocationInst - This class is the base class of AllocaInst.
+/// AllocaInst - an instruction to allocate memory on the stack
 ///
-class AllocationInst : public UnaryInstruction {
-protected:
-  AllocationInst(const Type *Ty, Value *ArraySize,
-                 unsigned iTy, unsigned Align, const Twine &Name = "",
-                 Instruction *InsertBefore = 0);
-  AllocationInst(const Type *Ty, Value *ArraySize,
-                 unsigned iTy, unsigned Align, const Twine &Name,
-                 BasicBlock *InsertAtEnd);
+class AllocaInst : public UnaryInstruction {
+public:
+  explicit AllocaInst(const Type *Ty, Value *ArraySize = 0,
+                      const Twine &Name = "", Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, Value *ArraySize,
+             const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(const Type *Ty, const Twine &Name, Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, const Twine &Name, BasicBlock *InsertAtEnd);
+
+  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+             const Twine &Name = "", Instruction *InsertBefore = 0);
+  AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+             const Twine &Name, BasicBlock *InsertAtEnd);

   // Out of line virtual method, so the vtable, etc. has a home.
-  virtual ~AllocationInst();
+  virtual ~AllocaInst();

   /// isArrayAllocation - Return true if there is an allocation size parameter
   /// to the allocation instruction that is not 1.
@@ -79,63 +85,13 @@ public:
   unsigned getAlignment() const { return (1u << SubclassData) >> 1; }
   void setAlignment(unsigned Align);

-  virtual AllocationInst *clone() const = 0;
-
-  // Methods for support type inquiry through isa, cast, and dyn_cast:
-  static inline bool classof(const AllocationInst *) { return true; }
-  static inline bool classof(const Instruction *I) {
-    return I->getOpcode() == Instruction::Alloca;
-  }
-  static inline bool classof(const Value *V) {
-    return isa<Instruction>(V) && classof(cast<Instruction>(V));
-  }
-};
-
-
-//===----------------------------------------------------------------------===//
-//                                AllocaInst Class
-//===----------------------------------------------------------------------===//
-
-/// AllocaInst - an instruction to allocate memory on the stack
-///
-class AllocaInst : public AllocationInst {
-public:
-  explicit AllocaInst(const Type *Ty,
-                      Value *ArraySize = 0,
-                      const Twine &NameStr = "",
-                      Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     0, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty,
-             Value *ArraySize, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, ArraySize, Alloca, 0, NameStr, InsertAtEnd) {}
-
-  AllocaInst(const Type *Ty, const Twine &NameStr,
-             Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, 0, Alloca, 0, NameStr, InsertAtEnd) {}
-
-  AllocaInst(const Type *Ty, Value *ArraySize,
-             unsigned Align, const Twine &NameStr = "",
-             Instruction *InsertBefore = 0)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     Align, NameStr, InsertBefore) {}
-  AllocaInst(const Type *Ty, Value *ArraySize,
-             unsigned Align, const Twine &NameStr,
-             BasicBlock *InsertAtEnd)
-    : AllocationInst(Ty, ArraySize, Alloca,
-                     Align, NameStr, InsertAtEnd) {}
-
-  virtual AllocaInst *clone() const;

   /// isStaticAlloca - Return true if this alloca is in the entry block of the
   /// function and is a constant size. If so, the code generator will fold it
   /// into the prolog/epilog code, so it is basically free.
   bool isStaticAlloca() const;

+  virtual AllocaInst *clone() const;
+
   // Methods for support type inquiry through isa, cast, and dyn_cast:
   static inline bool classof(const AllocaInst *) { return true; }
   static inline bool classof(const Instruction *I) {
@@ -165,7 +165,7 @@ public:
   RetTy visitUnreachableInst(UnreachableInst &I) { DELEGATE(TerminatorInst);}
   RetTy visitICmpInst(ICmpInst &I)               { DELEGATE(CmpInst);}
   RetTy visitFCmpInst(FCmpInst &I)               { DELEGATE(CmpInst);}
-  RetTy visitAllocaInst(AllocaInst &I)           { DELEGATE(AllocationInst);}
+  RetTy visitAllocaInst(AllocaInst &I)           { DELEGATE(Instruction); }
   RetTy visitFreeInst(FreeInst &I)               { DELEGATE(Instruction); }
   RetTy visitLoadInst(LoadInst &I)               { DELEGATE(Instruction); }
   RetTy visitStoreInst(StoreInst &I)             { DELEGATE(Instruction); }
@@ -198,7 +198,6 @@ public:
   //
   RetTy visitTerminatorInst(TerminatorInst &I)   { DELEGATE(Instruction); }
   RetTy visitBinaryOperator(BinaryOperator &I)   { DELEGATE(Instruction); }
-  RetTy visitAllocationInst(AllocationInst &I)   { DELEGATE(Instruction); }
   RetTy visitCmpInst(CmpInst &I)                 { DELEGATE(Instruction); }
   RetTy visitCastInst(CastInst &I)               { DELEGATE(Instruction); }

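InstVisitor resolves each visit through a chain of fallbacks, where DELEGATE(X) forwards to the handler for class X; the two hunks above shorten the alloca chain by one hop, so AllocaInst now falls through directly to Instruction. A standalone sketch of the dispatch pattern, using stand-in types rather than the real LLVM classes:

```cpp
// Stand-ins for the LLVM classes; only the dispatch shape matters here.
struct Instruction {};
struct AllocaInst : Instruction {};

// Simplified CRTP visitor in the style of llvm/Support/InstVisitor.h.
template <typename SubClass, typename RetTy = void>
struct MiniVisitor {
  RetTy visitInstruction(Instruction &) { return RetTy(); }  // final fallback
  RetTy visitAllocaInst(AllocaInst &I) {
    // One hop shorter than before: straight to the generic handler instead
    // of going through a visitAllocationInst step.
    return static_cast<SubClass *>(this)->visitInstruction(I);
  }
};

struct CountAllocas : MiniVisitor<CountAllocas> {
  int Count = 0;
  void visitAllocaInst(AllocaInst &) { ++Count; }  // override the hook
};

int main() {
  CountAllocas V;
  AllocaInst AI;
  V.visitAllocaInst(AI);  // resolves to the override above
  return V.Count == 1 ? 0 : 1;
}
```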
@@ -239,7 +239,7 @@ bool llvm::isNoAliasCall(const Value *V) {
 /// NoAlias returns
 ///
 bool llvm::isIdentifiedObject(const Value *V) {
-  if (isa<AllocationInst>(V) || isNoAliasCall(V))
+  if (isa<AllocaInst>(V) || isNoAliasCall(V))
     return true;
   if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
     return true;
@@ -80,7 +80,7 @@ static bool isKnownNonNull(const Value *V) {
 /// object that never escapes from the function.
 static bool isNonEscapingLocalObject(const Value *V) {
   // If this is a local allocation, check to see if it escapes.
-  if (isa<AllocationInst>(V) || isNoAliasCall(V))
+  if (isa<AllocaInst>(V) || isNoAliasCall(V))
     return !PointerMayBeCaptured(V, false);

   // If this is an argument that corresponds to a byval or noalias argument,
@@ -104,7 +104,7 @@ static bool isObjectSmallerThan(const Value *V, unsigned Size,
   const Type *AccessTy;
   if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
     AccessTy = GV->getType()->getElementType();
-  } else if (const AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
+  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
     if (!AI->isArrayAllocation())
       AccessTy = AI->getType()->getElementType();
     else
@@ -587,8 +587,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, unsigned V1Size,
     return NoAlias;

   // Arguments can't alias with local allocations or noalias calls.
-  if ((isa<Argument>(O1) && (isa<AllocationInst>(O2) || isNoAliasCall(O2))) ||
-      (isa<Argument>(O2) && (isa<AllocationInst>(O1) || isNoAliasCall(O1))))
+  if ((isa<Argument>(O1) && (isa<AllocaInst>(O2) || isNoAliasCall(O2))) ||
+      (isa<Argument>(O2) && (isa<AllocaInst>(O1) || isNoAliasCall(O1))))
     return NoAlias;

   // Most objects can't alias null.
@@ -594,11 +594,12 @@ namespace {
     void visitReturnInst(ReturnInst &RI);
     void visitInvokeInst(InvokeInst &II) { visitCallSite(CallSite(&II)); }
     void visitCallInst(CallInst &CI) {
-      if (isMalloc(&CI)) visitAllocationInst(CI);
+      if (isMalloc(&CI)) visitAlloc(CI);
       else visitCallSite(CallSite(&CI));
     }
     void visitCallSite(CallSite CS);
-    void visitAllocationInst(Instruction &I);
+    void visitAllocaInst(AllocaInst &I);
+    void visitAlloc(Instruction &I);
     void visitLoadInst(LoadInst &LI);
     void visitStoreInst(StoreInst &SI);
     void visitGetElementPtrInst(GetElementPtrInst &GEP);
@@ -792,7 +793,7 @@ void Andersens::IdentifyObjects(Module &M) {
       // object.
       if (isa<PointerType>(II->getType())) {
         ValueNodes[&*II] = NumObjects++;
-        if (AllocationInst *AI = dyn_cast<AllocationInst>(&*II))
+        if (AllocaInst *AI = dyn_cast<AllocaInst>(&*II))
           ObjectNodes[AI] = NumObjects++;
         else if (isMalloc(&*II))
           ObjectNodes[&*II] = NumObjects++;
@@ -1167,7 +1168,11 @@ void Andersens::visitInstruction(Instruction &I) {
   }
 }

-void Andersens::visitAllocationInst(Instruction &I) {
+void Andersens::visitAllocaInst(AllocaInst &I) {
+  visitAlloc(I);
+}
+
+void Andersens::visitAlloc(Instruction &I) {
   unsigned ObjectIndex = getObject(&I);
   GraphNodes[ObjectIndex].setValue(&I);
   Constraints.push_back(Constraint(Constraint::AddressOf, getNodeValue(I),
@@ -2819,7 +2824,7 @@ void Andersens::PrintNode(const Node *N) const {
   else
     errs() << "(unnamed)";

-  if (isa<GlobalValue>(V) || isa<AllocationInst>(V) || isMalloc(V))
+  if (isa<GlobalValue>(V) || isa<AllocaInst>(V) || isMalloc(V))
     if (N == &GraphNodes[getObject(V)])
       errs() << "<mem>";
 }
@@ -50,7 +50,7 @@ unsigned InlineCostAnalyzer::FunctionInfo::
     // Unfortunately, we don't know the pointer that may get propagated here,
     // so we can't make this decision.
     if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
-        isa<AllocationInst>(Inst))
+        isa<AllocaInst>(Inst))
       continue;

     bool AllOperandsConstant = true;
@@ -229,7 +229,7 @@ getPointerDependencyFrom(Value *MemPtr, uint64_t MemSize, bool isLoad,
     // a subsequent bitcast of the malloc call result. There can be stores to
     // the malloced memory between the malloc call and its bitcast uses, and we
     // need to continue scanning until the malloc call.
-    if (isa<AllocationInst>(Inst) || extractMallocCall(Inst)) {
+    if (isa<AllocaInst>(Inst) || extractMallocCall(Inst)) {
       Value *AccessPtr = MemPtr->getUnderlyingObject();

       if (AccessPtr == Inst ||
@@ -93,7 +93,7 @@ bool PointerTracking::doInitialization(Module &M) {
 const SCEV *PointerTracking::computeAllocationCount(Value *P,
                                                     const Type *&Ty) const {
   Value *V = P->stripPointerCasts();
-  if (AllocationInst *AI = dyn_cast<AllocationInst>(V)) {
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(V)) {
     Value *arraySize = AI->getArraySize();
     Ty = AI->getAllocatedType();
     // arraySize elements of type Ty.
@@ -470,7 +470,7 @@ void llvm::ComputeMaskedBits(Value *V, const APInt &Mask,
   }

   case Instruction::Alloca: {
-    AllocationInst *AI = cast<AllocationInst>(V);
+    AllocaInst *AI = cast<AllocaInst>(V);
     unsigned Align = AI->getAlignment();
     if (Align == 0 && TD)
       Align = TD->getABITypeAlignment(AI->getType()->getElementType());
@@ -720,7 +720,7 @@ void Interpreter::SwitchToNewBasicBlock(BasicBlock *Dest, ExecutionContext &SF){
 // Memory Instruction Implementations
 //===----------------------------------------------------------------------===//

-void Interpreter::visitAllocationInst(AllocationInst &I) {
+void Interpreter::visitAllocaInst(AllocaInst &I) {
   ExecutionContext &SF = ECStack.back();

   const Type *Ty = I.getType()->getElementType();  // Type to be allocated
@@ -139,7 +139,7 @@ public:
   void visitBinaryOperator(BinaryOperator &I);
   void visitICmpInst(ICmpInst &I);
   void visitFCmpInst(FCmpInst &I);
-  void visitAllocationInst(AllocationInst &I);
+  void visitAllocaInst(AllocaInst &I);
   void visitFreeInst(FreeInst &I);
   void visitLoadInst(LoadInst &I);
   void visitStoreInst(StoreInst &I);
@@ -1243,7 +1243,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI,
     Instruction *DepInst = DepInfo.getInst();

     // Loading the allocation -> undef.
-    if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+    if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
       ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
                                              UndefValue::get(LI->getType())));
       continue;
@@ -1585,7 +1585,7 @@ bool GVN::processLoad(LoadInst *L, SmallVectorImpl<Instruction*> &toErase) {
   // If this load really doesn't depend on anything, then we must be loading an
   // undef value. This can happen when loading for a fresh allocation with no
   // intervening stores, for example.
-  if (isa<AllocationInst>(DepInst) || isMalloc(DepInst)) {
+  if (isa<AllocaInst>(DepInst) || isMalloc(DepInst)) {
     L->replaceAllUsesWith(UndefValue::get(L->getType()));
     toErase.push_back(L);
     NumGVNLoad++;
@@ -1653,7 +1653,7 @@ bool GVN::processInstruction(Instruction *I,

   // Allocations are always uniquely numbered, so we can save time and memory
   // by fast failing them.
-  } else if (isa<AllocationInst>(I) || isa<TerminatorInst>(I)) {
+  } else if (isa<AllocaInst>(I) || isa<TerminatorInst>(I)) {
     localAvail[I->getParent()]->table.insert(std::make_pair(Num, I));
     return false;
   }
@@ -1803,7 +1803,7 @@ bool GVN::performPRE(Function& F) {
          BE = CurrentBlock->end(); BI != BE; ) {
       Instruction *CurInst = BI++;

-      if (isa<AllocationInst>(CurInst) ||
+      if (isa<AllocaInst>(CurInst) ||
           isa<TerminatorInst>(CurInst) || isa<PHINode>(CurInst) ||
           CurInst->getType()->isVoidTy() ||
           CurInst->mayReadFromMemory() || CurInst->mayHaveSideEffects() ||
@@ -284,7 +284,7 @@ namespace {
     Instruction *visitInvokeInst(InvokeInst &II);
     Instruction *visitPHINode(PHINode &PN);
     Instruction *visitGetElementPtrInst(GetElementPtrInst &GEP);
-    Instruction *visitAllocationInst(AllocationInst &AI);
+    Instruction *visitAllocaInst(AllocaInst &AI);
     Instruction *visitFreeInst(FreeInst &FI);
     Instruction *visitLoadInst(LoadInst &LI);
     Instruction *visitStoreInst(StoreInst &SI);
@@ -425,7 +425,7 @@ namespace {
                               bool isSub, Instruction &I);
     Instruction *InsertRangeTest(Value *V, Constant *Lo, Constant *Hi,
                                  bool isSigned, bool Inside, Instruction &IB);
-    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocationInst &AI);
+    Instruction *PromoteCastOfAllocation(BitCastInst &CI, AllocaInst &AI);
     Instruction *MatchBSwap(BinaryOperator &I);
     bool SimplifyStoreAtEndOfBlock(StoreInst &SI);
     Instruction *SimplifyMemTransfer(MemIntrinsic *MI);
@@ -7745,7 +7745,7 @@ static Value *DecomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
 /// PromoteCastOfAllocation - If we find a cast of an allocation instruction,
 /// try to eliminate the cast by moving the type information into the alloc.
 Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
-                                                   AllocationInst &AI) {
+                                                   AllocaInst &AI) {
   const PointerType *PTy = cast<PointerType>(CI.getType());

   BuilderTy AllocaBuilder(*Builder);
@@ -7817,7 +7817,7 @@ Instruction *InstCombiner::PromoteCastOfAllocation(BitCastInst &CI,
     Amt = AllocaBuilder.CreateAdd(Amt, Off, "tmp");
   }

-  AllocationInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
+  AllocaInst *New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
   New->setAlignment(AI.getAlignment());
   New->takeName(&AI);

@@ -8878,7 +8878,7 @@ Instruction *InstCombiner::visitBitCast(BitCastInst &CI) {
   // size, rewrite the allocation instruction to allocate the "right" type.
   // There is no need to modify malloc calls because it is their bitcast that
   // needs to be cleaned up.
-  if (AllocationInst *AI = dyn_cast<AllocationInst>(Src))
+  if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
     if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
       return V;

@@ -11199,7 +11199,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
       if (Offset == 0) {
         // If the bitcast is of an allocation, and the allocation will be
         // converted to match the type of the cast, don't touch this.
-        if (isa<AllocationInst>(BCI->getOperand(0)) ||
+        if (isa<AllocaInst>(BCI->getOperand(0)) ||
             isMalloc(BCI->getOperand(0))) {
           // See if the bitcast simplifies, if so, don't nuke this GEP yet.
           if (Instruction *I = visitBitCast(*BCI)) {
@@ -11238,21 +11238,21 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
   return 0;
 }

-Instruction *InstCombiner::visitAllocationInst(AllocationInst &AI) {
+Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
   // Convert: malloc Ty, C - where C is a constant != 1 into: malloc [C x Ty], 1
   if (AI.isArrayAllocation()) {  // Check C != 1
     if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
       const Type *NewTy =
         ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
-      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
-      AllocationInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
+      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
       New->setAlignment(AI.getAlignment());

       // Scan to the end of the allocation instructions, to skip over a block of
       // allocas if possible...also skip interleaved debug info
       //
       BasicBlock::iterator It = New;
-      while (isa<AllocationInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
+      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

       // Now that I is pointing to the first non-allocation-inst in the block,
       // insert our getelementptr instruction...
@@ -410,7 +410,7 @@ private:
     void visitCallSite      (CallSite CS);
     void visitUnwindInst    (TerminatorInst &I) { /*returns void*/ }
     void visitUnreachableInst(TerminatorInst &I) { /*returns void*/ }
-    void visitAllocationInst(Instruction &I) { markOverdefined(&I); }
+    void visitAllocaInst    (Instruction &I) { markOverdefined(&I); }
     void visitVANextInst    (Instruction &I) { markOverdefined(&I); }
     void visitVAArgInst     (Instruction &I) { markOverdefined(&I); }
     void visitFreeInst      (Instruction &I) { /*returns void*/ }
@@ -100,32 +100,32 @@ namespace {

     void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

-    int isSafeAllocaToScalarRepl(AllocationInst *AI);
+    int isSafeAllocaToScalarRepl(AllocaInst *AI);

-    void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+    void isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                                AllocaInfo &Info);
-    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+    void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                           AllocaInfo &Info);
-    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+    void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                         unsigned OpNo, AllocaInfo &Info);
-    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
+    void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocaInst *AI,
                                         AllocaInfo &Info);

-    void DoScalarReplacement(AllocationInst *AI,
-                             std::vector<AllocationInst*> &WorkList);
+    void DoScalarReplacement(AllocaInst *AI,
+                             std::vector<AllocaInst*> &WorkList);
     void CleanupGEP(GetElementPtrInst *GEP);
-    void CleanupAllocaUsers(AllocationInst *AI);
-    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);
+    void CleanupAllocaUsers(AllocaInst *AI);
+    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocaInst *Base);

-    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+    void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);

     void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
-                                      AllocationInst *AI,
+                                      AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);
-    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
+    void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts);
-    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+    void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts);

     bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
@@ -135,7 +135,7 @@ namespace {
                                      uint64_t Offset, IRBuilder<> &Builder);
     Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                      uint64_t Offset, IRBuilder<> &Builder);
-    static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
+    static Instruction *isOnlyCopiedFromConstantGlobal(AllocaInst *AI);
   };
 }

@@ -213,18 +213,18 @@ static uint64_t getNumSAElements(const Type *T) {
 // them if they are only used by getelementptr instructions.
 //
 bool SROA::performScalarRepl(Function &F) {
-  std::vector<AllocationInst*> WorkList;
+  std::vector<AllocaInst*> WorkList;

   // Scan the entry basic block, adding any alloca's and mallocs to the worklist
   BasicBlock &BB = F.getEntryBlock();
   for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
-    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
+    if (AllocaInst *A = dyn_cast<AllocaInst>(I))
       WorkList.push_back(A);

   // Process the worklist
   bool Changed = false;
   while (!WorkList.empty()) {
-    AllocationInst *AI = WorkList.back();
+    AllocaInst *AI = WorkList.back();
     WorkList.pop_back();

     // Handle dead allocas trivially. These can be formed by SROA'ing arrays
@@ -335,8 +335,8 @@ bool SROA::performScalarRepl(Function &F) {

 /// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
 /// predicate, do SROA now.
-void SROA::DoScalarReplacement(AllocationInst *AI,
-                               std::vector<AllocationInst*> &WorkList) {
+void SROA::DoScalarReplacement(AllocaInst *AI,
+                               std::vector<AllocaInst*> &WorkList) {
   DEBUG(errs() << "Found inst to SROA: " << *AI << '\n');
   SmallVector<AllocaInst*, 32> ElementAllocas;
   if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
@@ -455,7 +455,7 @@ void SROA::DoScalarReplacement(AllocaInst *AI,
 /// getelementptr instruction of an array aggregate allocation. isFirstElt
 /// indicates whether Ptr is known to the start of the aggregate.
 ///
-void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
+void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocaInst *AI,
                             AllocaInfo &Info) {
   for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
        I != E; ++I) {
@@ -520,7 +520,7 @@ static bool AllUsersAreLoads(Value *Ptr) {
 /// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
 /// aggregate allocation.
 ///
-void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
+void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
                                  AllocaInfo &Info) {
   if (BitCastInst *C = dyn_cast<BitCastInst>(User))
     return isSafeUseOfBitCastedAllocation(C, AI, Info);
@@ -605,7 +605,7 @@ void SROA::isSafeUseOfAllocation(Instruction *User, AllocaInst *AI,
 /// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
 /// intrinsic can be promoted by SROA. At this point, we know that the operand
 /// of the memintrinsic is a pointer to the beginning of the allocation.
-void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
+void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,
                                           unsigned OpNo, AllocaInfo &Info) {
   // If not constant length, give up.
   ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
@@ -632,7 +632,7 @@ void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocaInst *AI,

 /// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
 /// are
-void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
+void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
                                           AllocaInfo &Info) {
   for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
        UI != E; ++UI) {
@@ -690,7 +690,7 @@ void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocaInst *AI,
 /// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
 /// to its first element. Transform users of the cast to use the new values
 /// instead.
-void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
+void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
                                       SmallVector<AllocaInst*, 32> &NewElts) {
   Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
   while (UI != UE) {
@@ -729,7 +729,7 @@ void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocaInst *AI,
 /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
 /// Rewrite it to copy or set the elements of the scalarized memory.
 void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
-                                        AllocationInst *AI,
+                                        AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts) {

   // If this is a memcpy/memmove, construct the other pointer as the
@@ -905,8 +905,7 @@ void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
 /// RewriteStoreUserOfWholeAlloca - We found an store of an integer that
 /// overwrites the entire allocation. Extract out the pieces of the stored
 /// integer and store them individually.
-void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
-                                         AllocationInst *AI,
+void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                          SmallVector<AllocaInst*, 32> &NewElts){
   // Extract each element out of the integer according to its structure offset
   // and store the element value to the individual alloca.
@@ -1029,7 +1028,7 @@ void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,

 /// RewriteLoadUserOfWholeAlloca - We found an load of the entire allocation to
 /// an integer. Load the individual pieces to form the aggregate value.
-void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
+void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts) {
   // Extract each element out of the NewElts according to its structure offset
   // and form the result value.
@@ -1162,7 +1161,7 @@ static bool HasPadding(const Type *Ty, const TargetData &TD) {
 /// an aggregate can be broken down into elements. Return 0 if not, 3 if safe,
 /// or 1 if safe after canonicalization has been performed.
 ///
-int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
+int SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
   // Loop over the use list of the alloca. We can only transform it if all of
   // the users are safe to transform.
   AllocaInfo Info;
@@ -1245,7 +1244,7 @@ void SROA::CleanupGEP(GetElementPtrInst *GEPI) {

 /// CleanupAllocaUsers - If SROA reported that it can promote the specified
 /// allocation, but only if cleaned up, perform the cleanups required.
-void SROA::CleanupAllocaUsers(AllocationInst *AI) {
+void SROA::CleanupAllocaUsers(AllocaInst *AI) {
   // At this point, we know that the end result will be SROA'd and promoted, so
   // we can insert ugly code if required so long as sroa+mem2reg will clean it
   // up.
@@ -1853,7 +1852,7 @@ static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
 /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
 /// modified by a copy from a constant global. If we can prove this, we can
 /// replace any uses of the alloca with uses of the global directly.
-Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
+Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI) {
   Instruction *TheCopy = 0;
   if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
     return TheCopy;
@@ -129,7 +129,7 @@ bool TailDup::shouldEliminateUnconditionalBranch(TerminatorInst *TI,
     if (isa<CallInst>(I) || isa<InvokeInst>(I)) return false;

     // Also alloca and malloc.
-    if (isa<AllocationInst>(I)) return false;
+    if (isa<AllocaInst>(I)) return false;

     // Some vector instructions can expand into a number of instructions.
     if (isa<ShuffleVectorInst>(I) || isa<ExtractElementInst>(I) ||
@@ -1966,7 +1966,7 @@ void AssemblyWriter::printInstruction(const Instruction &I) {
     Out << " unwind ";
     writeOperand(II->getUnwindDest(), true);

-  } else if (const AllocationInst *AI = dyn_cast<AllocationInst>(&I)) {
+  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(&I)) {
     Out << ' ';
     TypePrinter.print(AI->getType()->getElementType(), Out);
     if (!AI->getArraySize() || AI->isArrayAllocation()) {
@@ -426,7 +426,7 @@ bool Instruction::isSafeToSpeculativelyExecute() const {
   case Load: {
     if (cast<LoadInst>(this)->isVolatile())
       return false;
-    if (isa<AllocationInst>(getOperand(0)) || isMalloc(getOperand(0)))
+    if (isa<AllocaInst>(getOperand(0)) || isMalloc(getOperand(0)))
       return true;
    if (GlobalVariable *GV = dyn_cast<GlobalVariable>(getOperand(0)))
       return !GV->hasExternalWeakLinkage();
@@ -838,7 +838,7 @@ void BranchInst::setSuccessorV(unsigned idx, BasicBlock *B) {


 //===----------------------------------------------------------------------===//
-//                        AllocationInst Implementation
+//                           AllocaInst Implementation
 //===----------------------------------------------------------------------===//

 static Value *getAISize(LLVMContext &Context, Value *Amt) {
@@ -853,20 +853,54 @@ static Value *getAISize(LLVMContext &Context, Value *Amt) {
   return Amt;
 }

-AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
-                               unsigned Align, const Twine &Name,
-                               Instruction *InsertBefore)
-  : UnaryInstruction(PointerType::getUnqual(Ty), iTy,
+AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
+                       const Twine &Name, Instruction *InsertBefore)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
                      getAISize(Ty->getContext(), ArraySize), InsertBefore) {
-  setAlignment(Align);
+  setAlignment(0);
   assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
   setName(Name);
 }

+AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize,
+                       const Twine &Name, BasicBlock *InsertAtEnd)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+                     getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
+  setAlignment(0);
+  assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
+  setName(Name);
+}
+
+AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
+                       Instruction *InsertBefore)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+                     getAISize(Ty->getContext(), 0), InsertBefore) {
+  setAlignment(0);
+  assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
+  setName(Name);
+}
+
+AllocaInst::AllocaInst(const Type *Ty, const Twine &Name,
+                       BasicBlock *InsertAtEnd)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+                     getAISize(Ty->getContext(), 0), InsertAtEnd) {
+  setAlignment(0);
+  assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
+  setName(Name);
+}
+
+AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+                       const Twine &Name, Instruction *InsertBefore)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
+                     getAISize(Ty->getContext(), ArraySize), InsertBefore) {
+  setAlignment(Align);
+  assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
+  setName(Name);
+}
+
-AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
-                               unsigned Align, const Twine &Name,
-                               BasicBlock *InsertAtEnd)
-  : UnaryInstruction(PointerType::getUnqual(Ty), iTy,
+AllocaInst::AllocaInst(const Type *Ty, Value *ArraySize, unsigned Align,
+                       const Twine &Name, BasicBlock *InsertAtEnd)
+  : UnaryInstruction(PointerType::getUnqual(Ty), Alloca,
                      getAISize(Ty->getContext(), ArraySize), InsertAtEnd) {
   setAlignment(Align);
   assert(Ty != Type::getVoidTy(Ty->getContext()) && "Cannot allocate void!");
@@ -874,22 +908,22 @@ AllocationInst::AllocationInst(const Type *Ty, Value *ArraySize, unsigned iTy,
 }

 // Out of line virtual method, so the vtable, etc has a home.
-AllocationInst::~AllocationInst() {
+AllocaInst::~AllocaInst() {
 }

-void AllocationInst::setAlignment(unsigned Align) {
+void AllocaInst::setAlignment(unsigned Align) {
   assert((Align & (Align-1)) == 0 && "Alignment is not a power of 2!");
   SubclassData = Log2_32(Align) + 1;
   assert(getAlignment() == Align && "Alignment representation error!");
 }

-bool AllocationInst::isArrayAllocation() const {
+bool AllocaInst::isArrayAllocation() const {
   if (ConstantInt *CI = dyn_cast<ConstantInt>(getOperand(0)))
     return CI->getZExtValue() != 1;
   return true;
 }

-const Type *AllocationInst::getAllocatedType() const {
+const Type *AllocaInst::getAllocatedType() const {
   return getType()->getElementType();
 }

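The accessors kept above store the alignment compactly: setAlignment writes Log2_32(Align) + 1 into SubclassData, reserving zero for "no alignment", and getAlignment inverts that with (1u << SubclassData) >> 1. A standalone round-trip check of the encoding; the helper names are illustrative, not LLVM's:

```cpp
#include <cassert>
#include <initializer_list>

// Mirrors setAlignment(): store log2(Align) + 1, with 0 meaning "none".
static unsigned encodeAlign(unsigned Align) {
  assert((Align & (Align - 1)) == 0 && "Alignment is not a power of 2!");
  if (Align == 0) return 0;  // LLVM's Log2_32(0) + 1 also yields 0
  unsigned Log2 = 0;
  while ((1u << (Log2 + 1)) <= Align) ++Log2;  // Log2_32 of a power of two
  return Log2 + 1;
}

// Mirrors getAlignment(): exactly the (1u << SubclassData) >> 1 formula.
static unsigned decodeAlign(unsigned SubclassData) {
  return (1u << SubclassData) >> 1;
}

int main() {
  for (unsigned A : {0u, 1u, 2u, 16u, 4096u})
    assert(decodeAlign(encodeAlign(A)) == A);  // encoding round-trips
  return 0;
}
```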
@@ -321,7 +321,7 @@ namespace {
     void visitUserOp1(Instruction &I);
     void visitUserOp2(Instruction &I) { visitUserOp1(I); }
     void visitIntrinsicFunctionCall(Intrinsic::ID ID, CallInst &CI);
-    void visitAllocationInst(AllocationInst &AI);
+    void visitAllocaInst(AllocaInst &AI);
     void visitExtractValueInst(ExtractValueInst &EVI);
     void visitInsertValueInst(InsertValueInst &IVI);

@@ -1282,7 +1282,7 @@ void Verifier::visitStoreInst(StoreInst &SI) {
   visitInstruction(SI);
 }

-void Verifier::visitAllocationInst(AllocationInst &AI) {
+void Verifier::visitAllocaInst(AllocaInst &AI) {
   const PointerType *PTy = AI.getType();
   Assert1(PTy->getAddressSpace() == 0,
           "Allocation instruction pointer not in the generic address space!",