diff --git a/include/llvm/Transforms/Utils/PromoteMemToReg.h b/include/llvm/Transforms/Utils/PromoteMemToReg.h
index 2f28f3333f9..22f46e5fc96 100644
--- a/include/llvm/Transforms/Utils/PromoteMemToReg.h
+++ b/include/llvm/Transforms/Utils/PromoteMemToReg.h
@@ -20,7 +20,6 @@ namespace llvm {
 
 class AllocaInst;
-class DataLayout;
 class DominatorTree;
 class AliasSetTracker;
 
@@ -30,7 +29,7 @@ class AliasSetTracker;
 /// (transitively) using this alloca. This also enforces that there is only
 /// ever one layer of bitcasts or GEPs between the alloca and the lifetime
 /// markers.
-bool isAllocaPromotable(const AllocaInst *AI, const DataLayout *DL);
+bool isAllocaPromotable(const AllocaInst *AI);
 
 /// \brief Promote the specified list of alloca instructions into scalar
 /// registers, inserting PHI nodes as appropriate.
@@ -42,7 +41,7 @@ bool isAllocaPromotable(const AllocaInst *AI, const DataLayout *DL);
 /// If AST is specified, the specified tracker is updated to reflect changes
 /// made to the IR.
 void PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
-                     const DataLayout *DL, AliasSetTracker *AST = 0);
+                     AliasSetTracker *AST = 0);
 
 } // End llvm namespace
 
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index be3ef6f1711..da441dc00a6 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -197,18 +197,6 @@ public:
   /// \brief Construct the slices of a particular alloca.
   AllocaSlices(const DataLayout &DL, AllocaInst &AI);
 
-  /// \brief Whether we determined during the trivial analysis of the alloca
-  /// that it was immediately promotable with mem2reg.
-  bool isAllocaPromotable() const { return IsAllocaPromotable; }
-
-  /// \brief A list of directly stored values when \c isAllocaPromotable is
-  /// true.
-  ///
-  /// The contents are undefined if the alloca is not trivially promotable.
-  /// This is used to detect other allocas which should be iterated on when
-  /// doing direct promotion.
-  ArrayRef<Value *> getStoredValues() const { return StoredValues; }
-
   /// \brief Test whether a pointer to the allocation escapes our analysis.
   ///
   /// If this is true, the slices are never fully built and should be
@@ -265,20 +253,10 @@ private:
   class SliceBuilder;
   friend class AllocaSlices::SliceBuilder;
 
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
   /// \brief Handle to alloca instruction to simplify method interfaces.
   AllocaInst &AI;
-
-  /// \brief A flag indicating if the alloca is trivially promotable.
-  ///
-  /// While walking the alloca's uses we track when the uses exceed what
-  /// mem2reg can trivially handle. This essentially should match the logic in
-  /// \c isAllocaPromotable but re-using the existing walk of the pointer uses.
-  bool IsAllocaPromotable;
-
-  /// \brief Storage for stored values.
-  ///
-  /// Only used while the alloca is trivially promotable.
-  SmallVector<Value *, 8> StoredValues;
+#endif
 
   /// \brief The instruction responsible for this alloca not having a known set
   /// of slices.
@@ -347,9 +325,9 @@ class AllocaSlices::SliceBuilder : public PtrUseVisitor<SliceBuilder> {
   SmallPtrSet<Instruction *, 4> VisitedDeadInsts;
 
 public:
-  SliceBuilder(const DataLayout &DL, AllocaSlices &S)
+  SliceBuilder(const DataLayout &DL, AllocaInst &AI, AllocaSlices &S)
       : PtrUseVisitor<SliceBuilder>(DL),
-        AllocSize(DL.getTypeAllocSize(S.AI.getAllocatedType())), S(S) {}
+        AllocSize(DL.getTypeAllocSize(AI.getAllocatedType())), S(S) {}
 
 private:
   void markAsDead(Instruction &I) {
@@ -402,15 +380,6 @@ private:
     if (GEPI.use_empty())
       return markAsDead(GEPI);
 
-    // FIXME: mem2reg shouldn't care about the nature of the GEP, but instead
-    // the offsets of the loads. Until then, we short-circuit here for the
-    // promotable case.
-    if (GEPI.hasAllZeroIndices())
-      return Base::enqueueUsers(GEPI);
-
-    // Otherwise, there is something in the GEP, so we disable mem2reg and
-    // accumulate it.
-    S.IsAllocaPromotable = false;
     return Base::visitGetElementPtrInst(GEPI);
   }
 
@@ -427,13 +396,6 @@ private:
     bool IsSplittable = Ty->isIntegerTy() && !IsVolatile && Offset == 0 &&
                         Size >= AllocSize;
 
-    // mem2reg can only promote non-volatile loads and stores which exactly
-    // load the alloca (no offset and the right type).
-    if (IsVolatile || Offset != 0 || Ty != S.AI.getAllocatedType())
-      S.IsAllocaPromotable = false;
-    if (S.IsAllocaPromotable)
-      assert(Offset == 0);
-
     insertUse(I, Offset, Size, IsSplittable);
   }
 
@@ -474,9 +436,6 @@ private:
       return markAsDead(SI);
     }
 
-    if (S.IsAllocaPromotable)
-      S.StoredValues.push_back(ValOp);
-
     assert((!SI.isSimple() || ValOp->getType()->isSingleValueType()) &&
            "All simple FCA stores should have been pre-split");
     handleLoadOrStore(ValOp->getType(), SI, Offset, Size, SI.isVolatile());
@@ -494,8 +453,6 @@ private:
     if (!IsOffsetKnown)
       return PI.setAborted(&II);
 
-    S.IsAllocaPromotable = false;
-
     insertUse(II, Offset,
               Length ? Length->getLimitedValue()
                      : AllocSize - Offset.getLimitedValue(),
@@ -512,8 +469,6 @@ private:
     if (!IsOffsetKnown)
       return PI.setAborted(&II);
 
-    S.IsAllocaPromotable = false;
-
     uint64_t RawOffset = Offset.getLimitedValue();
     uint64_t Size = Length ? Length->getLimitedValue()
                            : AllocSize - RawOffset;
@@ -574,8 +529,6 @@ private:
       return;
     }
 
-    S.IsAllocaPromotable = false;
-
     Base::visitIntrinsicInst(II);
   }
 
@@ -650,8 +603,6 @@ private:
      return;
    }
 
-    S.IsAllocaPromotable = false;
-
    insertUse(PN, Offset, PHISize);
  }
 
@@ -659,18 +610,14 @@ private:
    if (SI.use_empty())
      return markAsDead(SI);
    if (Value *Result = foldSelectInst(SI)) {
-      if (Result == *U) {
+      if (Result == *U)
        // If the result of the constant fold will be the pointer, recurse
        // through the select as if we had RAUW'ed it.
        enqueueUsers(SI);
-
-        // FIXME: mem2reg should support this pattern, but it doesn't.
-        S.IsAllocaPromotable = false;
-      } else {
+      else
        // Otherwise the operand to the select is dead, and we can replace it
        // with undef.
        S.DeadOperands.push_back(U);
-      }
 
      return;
    }
@@ -697,8 +644,6 @@ private:
      return;
    }
 
-    S.IsAllocaPromotable = false;
-
    insertUse(SI, Offset, SelectSize);
  }
 
@@ -709,8 +654,12 @@ private:
 };
 
 AllocaSlices::AllocaSlices(const DataLayout &DL, AllocaInst &AI)
-    : AI(AI), IsAllocaPromotable(true), PointerEscapingInstr(0) {
-  SliceBuilder PB(DL, *this);
+    :
+#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
+      AI(AI),
+#endif
+      PointerEscapingInstr(0) {
+  SliceBuilder PB(DL, AI, *this);
   SliceBuilder::PtrInfo PtrI = PB.visitPtr(AI);
   if (PtrI.isEscaped() || PtrI.isAborted()) {
     // FIXME: We should sink the escape vs. abort info into the caller nicely,
@@ -3390,24 +3339,6 @@ bool SROA::runOnAlloca(AllocaInst &AI) {
   if (S.begin() == S.end())
     return Changed;
 
-  // Trivially promotable, don't go through the splitting and rewriting.
-  if (S.isAllocaPromotable()) {
-    DEBUG(dbgs() << "  Directly promoting alloca: " << AI << "\n");
-    PromotableAllocas.push_back(&AI);
-
-    // Walk through the stored values quickly here to handle directly
-    // promotable allocas that require iterating on other allocas.
-    ArrayRef<Value *> StoredValues = S.getStoredValues();
-    for (ArrayRef<Value *>::iterator SVI = StoredValues.begin(),
-                                     SVE = StoredValues.end();
-         SVI != SVE; ++SVI)
-      if ((*SVI)->getType()->isPointerTy())
-        if (AllocaInst *SAI =
-                dyn_cast<AllocaInst>((*SVI)->stripInBoundsOffsets()))
-          PostPromotionWorklist.insert(SAI);
-    return true;
-  }
-
   Changed |= splitAlloca(AI, S);
 
   DEBUG(dbgs() << "  Speculating PHIs\n");
@@ -3478,7 +3409,7 @@ bool SROA::promoteAllocas(Function &F) {
   if (DT && !ForceSSAUpdater) {
     DEBUG(dbgs() << "Promoting allocas with mem2reg...\n");
-    PromoteMemToReg(PromotableAllocas, *DT, DL);
+    PromoteMemToReg(PromotableAllocas, *DT);
     PromotableAllocas.clear();
     return true;
   }
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index 73b2edfac2c..33bbe151630 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -1426,7 +1426,7 @@ bool SROA::performPromotion(Function &F) {
     if (Allocas.empty()) break;
 
     if (HasDomTree)
-      PromoteMemToReg(Allocas, *DT, TD);
+      PromoteMemToReg(Allocas, *DT);
     else {
       SSAUpdater SSA;
       for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
diff --git a/lib/Transforms/Utils/Mem2Reg.cpp b/lib/Transforms/Utils/Mem2Reg.cpp
index ebd7db6210c..61b3965d8f1 100644
--- a/lib/Transforms/Utils/Mem2Reg.cpp
+++ b/lib/Transforms/Utils/Mem2Reg.cpp
@@ -16,7 +16,6 @@
 #include "llvm/Transforms/Scalar.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/Dominators.h"
-#include "llvm/IR/DataLayout.h"
 #include "llvm/IR/Function.h"
 #include "llvm/IR/Instructions.h"
 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
@@ -28,7 +27,6 @@ STATISTIC(NumPromoted, "Number of alloca's promoted");
 namespace {
   struct PromotePass : public FunctionPass {
     static char ID; // Pass identification, replacement for typeid
-
     PromotePass() : FunctionPass(ID) {
       initializePromotePassPass(*PassRegistry::getPassRegistry());
     }
@@ -64,7 +62,6 @@ bool PromotePass::runOnFunction(Function &F) {
   bool Changed = false;
 
   DominatorTree &DT = getAnalysis<DominatorTree>();
-  const DataLayout *DL = getAnalysisIfAvailable<DataLayout>();
 
   while (1) {
     Allocas.clear();
@@ -73,12 +70,12 @@ bool PromotePass::runOnFunction(Function &F) {
       // the entry node
       for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
         if (AllocaInst *AI = dyn_cast<AllocaInst>(I)) // Is it an alloca?
-          if (isAllocaPromotable(AI, DL))
+          if (isAllocaPromotable(AI))
             Allocas.push_back(AI);
 
       if (Allocas.empty()) break;
 
-      PromoteMemToReg(Allocas, DT, DL);
+      PromoteMemToReg(Allocas, DT);
       NumPromoted += Allocas.size();
       Changed = true;
     }
diff --git a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
index 69101809970..1b51255251a 100644
--- a/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
+++ b/lib/Transforms/Utils/PromoteMemoryToRegister.cpp
@@ -30,7 +30,6 @@
 #include "llvm/ADT/ArrayRef.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/STLExtras.h"
-#include "llvm/ADT/SetVector.h"
 #include "llvm/ADT/SmallPtrSet.h"
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/ADT/Statistic.h"
@@ -46,7 +45,6 @@
 #include "llvm/IR/Instructions.h"
 #include "llvm/IR/IntrinsicInst.h"
 #include "llvm/IR/Metadata.h"
-#include "llvm/InstVisitor.h"
 #include "llvm/Support/CFG.h"
 #include "llvm/Transforms/Utils/Local.h"
 #include <algorithm>
@@ -58,16 +56,56 @@ STATISTIC(NumSingleStore, "Number of alloca's promoted with a single store");
 STATISTIC(NumDeadAlloca, "Number of dead alloca's removed");
 STATISTIC(NumPHIInsert, "Number of PHI nodes inserted");
 
+bool llvm::isAllocaPromotable(const AllocaInst *AI) {
+  // FIXME: If the memory unit is of pointer or integer type, we can permit
+  // assignments to subsections of the memory unit.
+
+  // Only allow direct and non-volatile loads and stores...
+  for (Value::const_use_iterator UI = AI->use_begin(), UE = AI->use_end();
+       UI != UE; ++UI) { // Loop over all of the uses of the alloca
+    const User *U = *UI;
+    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
+      // Note that atomic loads can be transformed; atomic semantics do
+      // not have any meaning for a local alloca.
+      if (LI->isVolatile())
+        return false;
+    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
+      if (SI->getOperand(0) == AI)
+        return false; // Don't allow a store OF the AI, only INTO the AI.
+      // Note that atomic stores can be transformed; atomic semantics do
+      // not have any meaning for a local alloca.
+      if (SI->isVolatile())
+        return false;
+    } else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
+      if (II->getIntrinsicID() != Intrinsic::lifetime_start &&
+          II->getIntrinsicID() != Intrinsic::lifetime_end)
+        return false;
+    } else if (const BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
+      if (BCI->getType() != Type::getInt8PtrTy(U->getContext()))
+        return false;
+      if (!onlyUsedByLifetimeMarkers(BCI))
+        return false;
+    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
+      if (GEPI->getType() != Type::getInt8PtrTy(U->getContext()))
+        return false;
+      if (!GEPI->hasAllZeroIndices())
+        return false;
+      if (!onlyUsedByLifetimeMarkers(GEPI))
+        return false;
+    } else {
+      return false;
+    }
+  }
+
+  return true;
+}
+
 namespace {
 
-struct AllocaInfo : private InstVisitor<AllocaInfo, bool> {
-  const DataLayout *DL;
-
+struct AllocaInfo {
   SmallVector<BasicBlock *, 32> DefiningBlocks;
   SmallVector<BasicBlock *, 32> UsingBlocks;
-  SmallVector<Instruction *, 8> DeadInsts;
 
-  Type *AllocaTy;
   StoreInst *OnlyStore;
   BasicBlock *OnlyBlock;
   bool OnlyUsedInOneBlock;
@@ -75,13 +113,9 @@ struct AllocaInfo : private InstVisitor<AllocaInfo, bool> {
   Value *AllocaPointerVal;
   DbgDeclareInst *DbgDeclare;
 
-  AllocaInfo(const DataLayout *DL) : DL(DL) {}
-
   void clear() {
     DefiningBlocks.clear();
     UsingBlocks.clear();
-    DeadInsts.clear();
-    AllocaTy = 0;
     OnlyStore = 0;
     OnlyBlock = 0;
     OnlyUsedInOneBlock = true;
@@ -91,116 +125,39 @@
 
   /// Scan the uses of the specified alloca, filling in the AllocaInfo used
   /// by the rest of the pass to reason about the uses of this alloca.
-  bool analyzeAlloca(AllocaInst &AI) {
+  void AnalyzeAlloca(AllocaInst *AI) {
     clear();
 
-    AllocaTy = AI.getAllocatedType();
-    enqueueUsers(AI);
+    // As we scan the uses of the alloca instruction, keep track of stores,
+    // and decide whether all of the loads and stores to the alloca are within
+    // the same basic block.
+    for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
+         UI != E;) {
+      Instruction *User = cast<Instruction>(*UI++);
 
-    // Walk queued up uses in the worklist to handle nested uses.
-    while (!UseWorklist.empty()) {
-      U = UseWorklist.pop_back_val();
-      Instruction &I = *cast<Instruction>(U->getUser());
-      if (!visit(I))
-        return false; // Propagate failure to promote up.
+      if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
+        // Remember the basic blocks which define new values for the alloca
+        DefiningBlocks.push_back(SI->getParent());
+        AllocaPointerVal = SI->getOperand(0);
+        OnlyStore = SI;
+      } else {
+        LoadInst *LI = cast<LoadInst>(User);
+        // Otherwise it must be a load instruction, keep track of variable
+        // reads.
+        UsingBlocks.push_back(LI->getParent());
+        AllocaPointerVal = LI;
+      }
 
       if (OnlyUsedInOneBlock) {
         if (OnlyBlock == 0)
-          OnlyBlock = I.getParent();
-        else if (OnlyBlock != I.getParent())
+          OnlyBlock = User->getParent();
+        else if (OnlyBlock != User->getParent())
           OnlyUsedInOneBlock = false;
       }
     }
 
-    DbgDeclare = FindAllocaDbgDeclare(&AI);
-    return true;
+    DbgDeclare = FindAllocaDbgDeclare(AI);
   }
-
-private:
-  // Befriend the base class so it can call through private visitor methods.
-  friend class InstVisitor<AllocaInfo, bool>;
-
-  /// \brief A use pointer that is non-null when visiting uses.
-  Use *U;
-
-  /// \brief A worklist for recursively visiting all uses of an alloca.
-  SmallVector<Use *, 8> UseWorklist;
-
-  /// \brief A set for preventing cyclic visitation.
-  SmallPtrSet<Use *, 8> VisitedUses;
-
-  void enqueueUsers(Instruction &I) {
-    for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE;
-         ++UI)
-      if (VisitedUses.insert(&UI.getUse()))
-        UseWorklist.push_back(&UI.getUse());
-  }
-
-  bool visitLoadInst(LoadInst &LI) {
-    if (LI.isVolatile() || LI.getType() != AllocaTy)
-      return false;
-
-    // Keep track of variable reads.
-    UsingBlocks.push_back(LI.getParent());
-    AllocaPointerVal = &LI;
-    return true;
-  }
-
-  bool visitStoreInst(StoreInst &SI) {
-    if (SI.isVolatile() || SI.getValueOperand() == U->get() ||
-        SI.getValueOperand()->getType() != AllocaTy)
-      return false;
-
-    // Remember the basic blocks which define new values for the alloca
-    DefiningBlocks.push_back(SI.getParent());
-    AllocaPointerVal = SI.getOperand(0);
-    OnlyStore = &SI;
-    return true;
-  }
-
-  bool visitBitCastInst(BitCastInst &BC) {
-    if (BC.use_empty())
-      DeadInsts.push_back(&BC);
-    else
-      enqueueUsers(BC);
-    return true;
-  }
-
-  bool visitGetElementPtrInst(GetElementPtrInst &GEPI) {
-    if (GEPI.use_empty()) {
-      DeadInsts.push_back(&GEPI);
-      return true;
-    }
-
-    enqueueUsers(GEPI);
-
-    return GEPI.hasAllZeroIndices();
-  }
-
-  // We can promote through debug info intrinsics as they don't alter the
-  // value stored in memory.
-  bool visitDbgInfoIntrinsic(DbgInfoIntrinsic &I) {
-    DeadInsts.push_back(&I);
-    return true;
-  }
-
-  bool visitIntrinsicInst(IntrinsicInst &II) {
-    switch (II.getIntrinsicID()) {
-    default:
-      return false;
-
-    // Lifetime intrinsics don't preclude promoting the memory to a register.
-    // FIXME: We should use these to promote to undef when outside of a valid
-    // lifetime.
-    case Intrinsic::lifetime_start:
-    case Intrinsic::lifetime_end:
-      DeadInsts.push_back(&II);
-      return true;
-    }
-  }
-
-  // The fallback is that the alloca cannot be promoted.
-  bool visitInstruction(Instruction &I) { return false; }
 };
 
 // Data package used by RenamePass()
@@ -278,7 +235,6 @@ struct PromoteMem2Reg {
   std::vector<AllocaInst *> Allocas;
   DominatorTree &DT;
   DIBuilder DIB;
-  const DataLayout *DL;
 
   /// An AliasSetTracker object to update. If null, don't update it.
   AliasSetTracker *AST;
@@ -324,9 +280,9 @@ struct PromoteMem2Reg {
 
 public:
   PromoteMem2Reg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
-                 const DataLayout *DL, AliasSetTracker *AST)
+                 AliasSetTracker *AST)
       : Allocas(Allocas.begin(), Allocas.end()), DT(DT),
-        DIB(*DT.getRoot()->getParent()->getParent()), DL(DL), AST(AST) {}
+        DIB(*DT.getRoot()->getParent()->getParent()), AST(AST) {}
 
   void run();
 
@@ -357,39 +313,27 @@ private:
 
 } // end of anonymous namespace
 
-/// \brief Walk a small vector of dead instructions and recursively remove them
-/// and subsequently dead instructions.
-///
-/// This is only valid to call on dead instructions using an alloca which is
-/// promotable, as we leverage that assumption to delete them faster.
-static void removeDeadInstructions(AllocaInst *AI,
-                                   SmallVectorImpl<Instruction *> &DeadInsts) {
-  while (!DeadInsts.empty()) {
-    Instruction *I = DeadInsts.pop_back_val();
+static void removeLifetimeIntrinsicUsers(AllocaInst *AI) {
+  // Knowing that this alloca is promotable, we know that it's safe to kill all
+  // instructions except for load and store.
 
-    // Don't delete the alloca itself.
-    if (I == AI)
+  for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
+       UI != UE;) {
+    Instruction *I = cast<Instruction>(*UI);
+    ++UI;
+    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      continue;
 
-    // Note that we open code the deletion algorithm here because we know
-    // apriori that all of the instructions using an alloca that reaches here
-    // are trivially dead when their use list becomes empty (The only risk are
-    // lifetime markers which we specifically want to nuke). By coding it here
-    // we can skip the triviality test and be more efficient.
-    //
-    // Null out all of the instruction's operands to see if any operand becomes
-    // dead as we go.
-    for (User::op_iterator OI = I->op_begin(), OE = I->op_end(); OI != OE;
-         ++OI) {
-      Instruction *Op = dyn_cast<Instruction>(*OI);
-      if (!Op)
-        continue;
-
-      OI->set(0);
-      if (!Op->use_empty())
-        continue;
-
-      DeadInsts.push_back(Op);
+    if (!I->getType()->isVoidTy()) {
+      // The only users of this bitcast/GEP instruction are lifetime intrinsics.
+      // Follow the use/def chain to erase them now instead of leaving it for
+      // dead code elimination later.
+      for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
+           UI != UE;) {
+        Instruction *Inst = cast<Instruction>(*UI);
+        ++UI;
+        Inst->eraseFromParent();
+      }
     }
     I->eraseFromParent();
   }
@@ -590,23 +534,17 @@ void PromoteMem2Reg::run() {
   PointerAllocaValues.resize(Allocas.size());
   AllocaDbgDeclares.resize(Allocas.size());
 
-  AllocaInfo Info(DL);
+  AllocaInfo Info;
   LargeBlockInfo LBI;
 
   for (unsigned AllocaNum = 0; AllocaNum != Allocas.size(); ++AllocaNum) {
     AllocaInst *AI = Allocas[AllocaNum];
 
+    assert(isAllocaPromotable(AI) && "Cannot promote non-promotable alloca!");
     assert(AI->getParent()->getParent() == &F &&
            "All allocas should be in the same function, which is same as DF!");
 
-    // Calculate the set of read and write-locations for each alloca. This is
-    // analogous to finding the 'uses' and 'definitions' of each variable.
-    bool Good = Info.analyzeAlloca(*AI);
-    (void)Good;
-    assert(Good && "Cannot promote non-promotable alloca!");
-
-    // Nuke all of the dead instructions.
-    removeDeadInstructions(AI, Info.DeadInsts);
+    removeLifetimeIntrinsicUsers(AI);
 
     if (AI->use_empty()) {
       // If there are no uses of the alloca, just delete it now.
@@ -620,6 +558,10 @@ void PromoteMem2Reg::run() {
       continue;
     }
 
+    // Calculate the set of read and write-locations for each alloca. This is
+    // analogous to finding the 'uses' and 'definitions' of each variable.
+    Info.AnalyzeAlloca(AI);
+
     // If there is only a single store to this value, replace any loads of
     // it that are directly dominated by the definition with the value stored.
     if (Info.DefiningBlocks.size() == 1) {
@@ -1145,19 +1087,11 @@ NextIteration:
   goto NextIteration;
 }
 
-bool llvm::isAllocaPromotable(const AllocaInst *AI, const DataLayout *DL) {
-  // We cast away constness because we re-use the non-const analysis that the
-  // actual promotion routine uses. While it is non-const, it doesn't actually
-  // mutate anything at this phase, and we discard the non-const results that
-  // promotion uses to mutate the alloca.
-  return AllocaInfo(DL).analyzeAlloca(*const_cast<AllocaInst *>(AI));
-}
-
 void llvm::PromoteMemToReg(ArrayRef<AllocaInst *> Allocas, DominatorTree &DT,
-                           const DataLayout *DL, AliasSetTracker *AST) {
+                           AliasSetTracker *AST) {
   // If there is nothing to do, bail out...
   if (Allocas.empty())
     return;
-  PromoteMem2Reg(Allocas, DT, DL, AST).run();
+  PromoteMem2Reg(Allocas, DT, AST).run();
 }
diff --git a/test/Transforms/Mem2Reg/ignore-lifetime.ll b/test/Transforms/Mem2Reg/ignore-lifetime.ll
new file mode 100644
index 00000000000..3d5368c1d7c
--- /dev/null
+++ b/test/Transforms/Mem2Reg/ignore-lifetime.ll
@@ -0,0 +1,26 @@
+; RUN: opt -mem2reg -S -o - < %s | FileCheck %s
+
+declare void @llvm.lifetime.start(i64 %size, i8* nocapture %ptr)
+declare void @llvm.lifetime.end(i64 %size, i8* nocapture %ptr)
+
+define void @test1() {
+; CHECK: test1
+; CHECK-NOT: alloca
+  %A = alloca i32
+  %B = bitcast i32* %A to i8*
+  call void @llvm.lifetime.start(i64 2, i8* %B)
+  store i32 1, i32* %A
+  call void @llvm.lifetime.end(i64 2, i8* %B)
+  ret void
+}
+
+define void @test2() {
+; CHECK: test2
+; CHECK-NOT: alloca
+  %A = alloca {i8, i16}
+  %B = getelementptr {i8, i16}* %A, i32 0, i32 0
+  call void @llvm.lifetime.start(i64 2, i8* %B)
+  store {i8, i16} zeroinitializer, {i8, i16}* %A
+  call void @llvm.lifetime.end(i64 2, i8* %B)
+  ret void
+}
diff --git a/test/Transforms/Mem2Reg/use-analysis.ll b/test/Transforms/Mem2Reg/use-analysis.ll
deleted file mode 100644
index b08b1f191b8..00000000000
--- a/test/Transforms/Mem2Reg/use-analysis.ll
+++ /dev/null
@@ -1,70 +0,0 @@
-; RUN: opt -mem2reg -S -o - < %s | FileCheck %s
-
"e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-n8:16:32:64" - -declare void @llvm.lifetime.start(i64 %size, i8* nocapture %ptr) -declare void @llvm.lifetime.end(i64 %size, i8* nocapture %ptr) - -define void @test1() { -; Ensure we can look through a bitcast to i8* and the addition of lifetime -; markers. -; -; CHECK-LABEL: @test1( -; CHECK-NOT: alloca -; CHECK: ret void - - %A = alloca i32 - %B = bitcast i32* %A to i8* - call void @llvm.lifetime.start(i64 2, i8* %B) - store i32 1, i32* %A - call void @llvm.lifetime.end(i64 2, i8* %B) - ret void -} - -define void @test2() { -; Ensure we can look through a GEP to i8* and the addition of lifetime -; markers. -; -; CHECK-LABEL: @test2( -; CHECK-NOT: alloca -; CHECK: ret void - - %A = alloca {i8, i16} - %B = getelementptr {i8, i16}* %A, i32 0, i32 0 - call void @llvm.lifetime.start(i64 2, i8* %B) - store {i8, i16} zeroinitializer, {i8, i16}* %A - call void @llvm.lifetime.end(i64 2, i8* %B) - ret void -} - -define i32 @test3(i32 %x) { -; CHECK-LABEL: @test3( -; -; Check that we recursively walk the uses of the alloca and thus can see -; through round trip bitcasts, dead bitcasts, GEPs, multiple GEPs, and lifetime -; markers. -entry: - %a = alloca i32 -; CHECK-NOT: alloca - - %b = bitcast i32* %a to i8* - %b2 = getelementptr inbounds i8* %b, i32 0 - %b3 = getelementptr inbounds i8* %b2, i32 0 - call void @llvm.lifetime.start(i64 -1, i8* %b3) -; CHECK-NOT: call void @llvm.lifetime.start - - store i32 %x, i32* %a -; CHECK-NOT: store - - %dead = bitcast i32* %a to i4096* - %dead1 = bitcast i4096* %dead to i42* - %dead2 = getelementptr inbounds i32* %a, i32 %x -; CHECK-NOT: bitcast -; CHECK-NOT: getelementptr - - %ret = load i32* %a -; CHECK-NOT: load - - ret i32 %ret -; CHECK: ret i32 %x -}