Make MemoryBuiltins aware of TargetLibraryInfo.

This disables malloc-specific optimization when -fno-builtin (or -ffreestanding)
is specified. This has been a problem for a long time but became more severe
with the recent memory builtin improvements.

Since the memory builtin functions are used everywhere, this required passing
TLI in many places. This means that functions that now have an optional TLI
argument, like RecursivelyDeleteTriviallyDeadInstructions, won't remove dead
mallocs anymore if the TLI argument is missing. I've updated most passes to do
the right thing.

Fixes PR13694 and probably others.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@162841 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
Benjamin Kramer 2012-08-29 15:32:21 +00:00
parent fd49821c35
commit 8e0d1c03ca
31 changed files with 361 additions and 184 deletions

View File

@ -46,6 +46,7 @@ class LoadInst;
class StoreInst; class StoreInst;
class VAArgInst; class VAArgInst;
class TargetData; class TargetData;
class TargetLibraryInfo;
class Pass; class Pass;
class AnalysisUsage; class AnalysisUsage;
class MemTransferInst; class MemTransferInst;
@ -55,6 +56,7 @@ class DominatorTree;
class AliasAnalysis { class AliasAnalysis {
protected: protected:
const TargetData *TD; const TargetData *TD;
const TargetLibraryInfo *TLI;
private: private:
AliasAnalysis *AA; // Previous Alias Analysis to chain to. AliasAnalysis *AA; // Previous Alias Analysis to chain to.
@ -73,7 +75,7 @@ protected:
public: public:
static char ID; // Class identification, replacement for typeinfo static char ID; // Class identification, replacement for typeinfo
AliasAnalysis() : TD(0), AA(0) {} AliasAnalysis() : TD(0), TLI(0), AA(0) {}
virtual ~AliasAnalysis(); // We want to be subclassed virtual ~AliasAnalysis(); // We want to be subclassed
/// UnknownSize - This is a special value which can be used with the /// UnknownSize - This is a special value which can be used with the
@ -86,6 +88,11 @@ public:
/// ///
const TargetData *getTargetData() const { return TD; } const TargetData *getTargetData() const { return TD; }
/// getTargetLibraryInfo - Return a pointer to the current TargetLibraryInfo
/// object, or null if no TargetLibraryInfo object is available.
///
const TargetLibraryInfo *getTargetLibraryInfo() const { return TLI; }
/// getTypeStoreSize - Return the TargetData store size for the given type, /// getTypeStoreSize - Return the TargetData store size for the given type,
/// if known, or a conservative value otherwise. /// if known, or a conservative value otherwise.
/// ///

View File

@ -28,6 +28,7 @@ namespace llvm {
class CallInst; class CallInst;
class PointerType; class PointerType;
class TargetData; class TargetData;
class TargetLibraryInfo;
class Type; class Type;
class Value; class Value;
@ -35,27 +36,33 @@ class Value;
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like). /// like).
bool isAllocationFn(const Value *V, bool LookThroughBitCast = false); bool isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a function that returns a /// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool isNoAliasFn(const Value *V, bool LookThroughBitCast = false); bool isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc). /// allocates uninitialized memory (such as malloc).
bool isMallocLikeFn(const Value *V, bool LookThroughBitCast = false); bool isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc). /// allocates zero-filled memory (such as calloc).
bool isCallocLikeFn(const Value *V, bool LookThroughBitCast = false); bool isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like). /// allocates memory (either malloc, calloc, or strdup like).
bool isAllocLikeFn(const Value *V, bool LookThroughBitCast = false); bool isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc). /// reallocates memory (such as realloc).
bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false); bool isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
@ -65,29 +72,31 @@ bool isReallocLikeFn(const Value *V, bool LookThroughBitCast = false);
/// extractMallocCall - Returns the corresponding CallInst if the instruction /// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we /// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here. /// ignore InvokeInst here.
const CallInst *extractMallocCall(const Value *I); const CallInst *extractMallocCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *extractMallocCall(Value *I) { static inline CallInst *extractMallocCall(Value *I,
return const_cast<CallInst*>(extractMallocCall((const Value*)I)); const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(extractMallocCall((const Value*)I, TLI));
} }
/// isArrayMalloc - Returns the corresponding CallInst if the instruction /// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size /// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL. /// is not constant 1. Otherwise, return NULL.
const CallInst *isArrayMalloc(const Value *I, const TargetData *TD); const CallInst *isArrayMalloc(const Value *I, const TargetData *TD,
const TargetLibraryInfo *TLI);
/// getMallocType - Returns the PointerType resulting from the malloc call. /// getMallocType - Returns the PointerType resulting from the malloc call.
/// The PointerType depends on the number of bitcast uses of the malloc call: /// The PointerType depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc calls' return type. /// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type. /// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL. /// >1: Unique PointerType cannot be determined, return NULL.
PointerType *getMallocType(const CallInst *CI); PointerType *getMallocType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocAllocatedType - Returns the Type allocated by malloc call. /// getMallocAllocatedType - Returns the Type allocated by malloc call.
/// The Type depends on the number of bitcast uses of the malloc call: /// The Type depends on the number of bitcast uses of the malloc call:
/// 0: PointerType is the malloc calls' return type. /// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type. /// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL. /// >1: Unique PointerType cannot be determined, return NULL.
Type *getMallocAllocatedType(const CallInst *CI); Type *getMallocAllocatedType(const CallInst *CI, const TargetLibraryInfo *TLI);
/// getMallocArraySize - Returns the array size of a malloc call. If the /// getMallocArraySize - Returns the array size of a malloc call. If the
/// argument passed to malloc is a multiple of the size of the malloced type, /// argument passed to malloc is a multiple of the size of the malloced type,
@ -95,6 +104,7 @@ Type *getMallocAllocatedType(const CallInst *CI);
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined. /// determined.
Value *getMallocArraySize(CallInst *CI, const TargetData *TD, Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false); bool LookThroughSExt = false);
@ -104,9 +114,10 @@ Value *getMallocArraySize(CallInst *CI, const TargetData *TD,
/// extractCallocCall - Returns the corresponding CallInst if the instruction /// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call. /// is a calloc call.
const CallInst *extractCallocCall(const Value *I); const CallInst *extractCallocCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *extractCallocCall(Value *I) { static inline CallInst *extractCallocCall(Value *I,
return const_cast<CallInst*>(extractCallocCall((const Value*)I)); const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(extractCallocCall((const Value*)I, TLI));
} }
@ -115,10 +126,10 @@ static inline CallInst *extractCallocCall(Value *I) {
// //
/// isFreeCall - Returns non-null if the value is a call to the builtin free() /// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *isFreeCall(const Value *I); const CallInst *isFreeCall(const Value *I, const TargetLibraryInfo *TLI);
static inline CallInst *isFreeCall(Value *I) { static inline CallInst *isFreeCall(Value *I, const TargetLibraryInfo *TLI) {
return const_cast<CallInst*>(isFreeCall((const Value*)I)); return const_cast<CallInst*>(isFreeCall((const Value*)I, TLI));
} }
@ -131,7 +142,7 @@ static inline CallInst *isFreeCall(Value *I) {
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables. /// byval arguments, and global variables.
bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD, bool getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
bool RoundToAlign = false); const TargetLibraryInfo *TLI, bool RoundToAlign = false);
@ -143,6 +154,7 @@ class ObjectSizeOffsetVisitor
: public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> { : public InstVisitor<ObjectSizeOffsetVisitor, SizeOffsetType> {
const TargetData *TD; const TargetData *TD;
const TargetLibraryInfo *TLI;
bool RoundToAlign; bool RoundToAlign;
unsigned IntTyBits; unsigned IntTyBits;
APInt Zero; APInt Zero;
@ -155,8 +167,8 @@ class ObjectSizeOffsetVisitor
} }
public: public:
ObjectSizeOffsetVisitor(const TargetData *TD, LLVMContext &Context, ObjectSizeOffsetVisitor(const TargetData *TD, const TargetLibraryInfo *TLI,
bool RoundToAlign = false); LLVMContext &Context, bool RoundToAlign = false);
SizeOffsetType compute(Value *V); SizeOffsetType compute(Value *V);
@ -202,6 +214,7 @@ class ObjectSizeOffsetEvaluator
typedef SmallPtrSet<const Value*, 8> PtrSetTy; typedef SmallPtrSet<const Value*, 8> PtrSetTy;
const TargetData *TD; const TargetData *TD;
const TargetLibraryInfo *TLI;
LLVMContext &Context; LLVMContext &Context;
BuilderTy Builder; BuilderTy Builder;
IntegerType *IntTy; IntegerType *IntTy;
@ -215,7 +228,8 @@ class ObjectSizeOffsetEvaluator
SizeOffsetEvalType compute_(Value *V); SizeOffsetEvalType compute_(Value *V);
public: public:
ObjectSizeOffsetEvaluator(const TargetData *TD, LLVMContext &Context); ObjectSizeOffsetEvaluator(const TargetData *TD, const TargetLibraryInfo *TLI,
LLVMContext &Context);
SizeOffsetEvalType compute(Value *V); SizeOffsetEvalType compute(Value *V);
bool knownSize(SizeOffsetEvalType SizeOffset) { bool knownSize(SizeOffsetEvalType SizeOffset) {

View File

@ -18,6 +18,26 @@ namespace llvm {
namespace LibFunc { namespace LibFunc {
enum Func { enum Func {
/// void operator delete[](void*);
ZdaPv,
/// void operator delete(void*);
ZdlPv,
/// void *new[](unsigned int);
Znaj,
/// void *new[](unsigned int, nothrow);
ZnajRKSt9nothrow_t,
/// void *new[](unsigned long);
Znam,
/// void *new[](unsigned long, nothrow);
ZnamRKSt9nothrow_t,
/// void *new(unsigned int);
Znwj,
/// void *new(unsigned int, nothrow);
ZnwjRKSt9nothrow_t,
/// void *new(unsigned long);
Znwm,
/// void *new(unsigned long, nothrow);
ZnwmRKSt9nothrow_t,
/// int __cxa_atexit(void (*f)(void *), void *p, void *d); /// int __cxa_atexit(void (*f)(void *), void *p, void *d);
cxa_atexit, cxa_atexit,
/// void __cxa_guard_abort(guard_t *guard); /// void __cxa_guard_abort(guard_t *guard);
@ -71,6 +91,8 @@ namespace llvm {
atanhl, atanhl,
/// long double atanl(long double x); /// long double atanl(long double x);
atanl, atanl,
/// void *calloc(size_t count, size_t size);
calloc,
/// double cbrt(double x); /// double cbrt(double x);
cbrt, cbrt,
/// float cbrtf(float x); /// float cbrtf(float x);
@ -149,6 +171,8 @@ namespace llvm {
fputc, fputc,
/// int fputs(const char *s, FILE *stream); /// int fputs(const char *s, FILE *stream);
fputs, fputs,
/// void free(void *ptr);
free,
/// size_t fwrite(const void *ptr, size_t size, size_t nitems, /// size_t fwrite(const void *ptr, size_t size, size_t nitems,
/// FILE *stream); /// FILE *stream);
fwrite, fwrite,
@ -184,6 +208,8 @@ namespace llvm {
logf, logf,
/// long double logl(long double x); /// long double logl(long double x);
logl, logl,
/// void *malloc(size_t size);
malloc,
/// void *memchr(const void *s, int c, size_t n); /// void *memchr(const void *s, int c, size_t n);
memchr, memchr,
/// int memcmp(const void *s1, const void *s2, size_t n); /// int memcmp(const void *s1, const void *s2, size_t n);
@ -202,6 +228,8 @@ namespace llvm {
nearbyintf, nearbyintf,
/// long double nearbyintl(long double x); /// long double nearbyintl(long double x);
nearbyintl, nearbyintl,
/// int posix_memalign(void **memptr, size_t alignment, size_t size);
posix_memalign,
/// double pow(double x, double y); /// double pow(double x, double y);
pow, pow,
/// float powf(float x, float y); /// float powf(float x, float y);
@ -212,6 +240,10 @@ namespace llvm {
putchar, putchar,
/// int puts(const char *s); /// int puts(const char *s);
puts, puts,
/// void *realloc(void *ptr, size_t size);
realloc,
/// void *reallocf(void *ptr, size_t size);
reallocf,
/// double rint(double x); /// double rint(double x);
rint, rint,
/// float rintf(float x); /// float rintf(float x);
@ -250,6 +282,8 @@ namespace llvm {
strchr, strchr,
/// char *strcpy(char *s1, const char *s2); /// char *strcpy(char *s1, const char *s2);
strcpy, strcpy,
/// char *strdup(const char *s1);
strdup,
/// size_t strlen(const char *s); /// size_t strlen(const char *s);
strlen, strlen,
/// char *strncat(char *s1, const char *s2, size_t n); /// char *strncat(char *s1, const char *s2, size_t n);
@ -258,6 +292,8 @@ namespace llvm {
strncmp, strncmp,
/// char *strncpy(char *s1, const char *s2, size_t n); /// char *strncpy(char *s1, const char *s2, size_t n);
strncpy, strncpy,
/// char *strndup(const char *s1, size_t n);
strndup,
/// size_t strnlen(const char *s, size_t maxlen); /// size_t strnlen(const char *s, size_t maxlen);
strnlen, strnlen,
/// double tan(double x); /// double tan(double x);
@ -278,6 +314,8 @@ namespace llvm {
truncf, truncf,
/// long double truncl(long double x); /// long double truncl(long double x);
truncl, truncl,
/// void *valloc(size_t size);
valloc,
NumLibFuncs NumLibFuncs
}; };

View File

@ -27,6 +27,7 @@ class AliasAnalysis;
class Instruction; class Instruction;
class Pass; class Pass;
class ReturnInst; class ReturnInst;
class TargetLibraryInfo;
/// DeleteDeadBlock - Delete the specified block, which must have no /// DeleteDeadBlock - Delete the specified block, which must have no
/// predecessors. /// predecessors.
@ -44,7 +45,7 @@ void FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P = 0);
/// a result. This includes tracing the def-use list from the PHI to see if /// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. Return true /// it is ultimately unused or if it reaches an unused cycle. Return true
/// if any PHIs were deleted. /// if any PHIs were deleted.
bool DeleteDeadPHIs(BasicBlock *BB); bool DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI = 0);
/// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor, /// MergeBlockIntoPredecessor - Attempts to merge a block into its predecessor,
/// if possible. The return value indicates success or failure. /// if possible. The return value indicates success or failure.

View File

@ -36,6 +36,7 @@ class PHINode;
class AllocaInst; class AllocaInst;
class ConstantExpr; class ConstantExpr;
class TargetData; class TargetData;
class TargetLibraryInfo;
class DIBuilder; class DIBuilder;
template<typename T> class SmallVectorImpl; template<typename T> class SmallVectorImpl;
@ -51,7 +52,8 @@ template<typename T> class SmallVectorImpl;
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if /// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true. /// DeleteDeadConditions is true.
bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false); bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false,
const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Local dead code elimination. // Local dead code elimination.
@ -60,20 +62,21 @@ bool ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions = false);
/// isInstructionTriviallyDead - Return true if the result produced by the /// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects. /// instruction is not used, and the instruction has no side effects.
/// ///
bool isInstructionTriviallyDead(Instruction *I); bool isInstructionTriviallyDead(Instruction *I, const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a /// RecursivelyDeleteTriviallyDeadInstructions - If the specified value is a
/// trivially dead instruction, delete it. If that makes any of its operands /// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any /// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted. /// instructions were deleted.
bool RecursivelyDeleteTriviallyDeadInstructions(Value *V); bool RecursivelyDeleteTriviallyDeadInstructions(Value *V,
const TargetLibraryInfo *TLI=0);
/// RecursivelyDeleteDeadPHINode - If the specified value is an effectively /// RecursivelyDeleteDeadPHINode - If the specified value is an effectively
/// dead PHI node, due to being a def-use chain of single-use nodes that /// dead PHI node, due to being a def-use chain of single-use nodes that
/// either forms a cycle or is terminated by a trivially dead instruction, /// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them /// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made. /// too, recursively. Return true if a change was made.
bool RecursivelyDeleteDeadPHINode(PHINode *PN); bool RecursivelyDeleteDeadPHINode(PHINode *PN, const TargetLibraryInfo *TLI=0);
/// SimplifyInstructionsInBlock - Scan the specified basic block and try to /// SimplifyInstructionsInBlock - Scan the specified basic block and try to
@ -81,7 +84,8 @@ bool RecursivelyDeleteDeadPHINode(PHINode *PN);
/// ///
/// This returns true if it changed the code, note that it can delete /// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block. /// instructions in other blocks as well in this block.
bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0); bool SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD = 0,
const TargetLibraryInfo *TLI = 0);
//===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===//
// Control Flow Graph Restructuring. // Control Flow Graph Restructuring.

View File

@ -36,6 +36,7 @@
#include "llvm/LLVMContext.h" #include "llvm/LLVMContext.h"
#include "llvm/Type.h" #include "llvm/Type.h"
#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
using namespace llvm; using namespace llvm;
// Register the AliasAnalysis interface, providing a nice name to refer to. // Register the AliasAnalysis interface, providing a nice name to refer to.
@ -452,6 +453,7 @@ AliasAnalysis::~AliasAnalysis() {}
/// ///
void AliasAnalysis::InitializeAliasAnalysis(Pass *P) { void AliasAnalysis::InitializeAliasAnalysis(Pass *P) {
TD = P->getAnalysisIfAvailable<TargetData>(); TD = P->getAnalysisIfAvailable<TargetData>();
TLI = P->getAnalysisIfAvailable<TargetLibraryInfo>();
AA = &P->getAnalysis<AliasAnalysis>(); AA = &P->getAnalysis<AliasAnalysis>();
} }

View File

@ -85,9 +85,10 @@ static bool isEscapeSource(const Value *V) {
/// getObjectSize - Return the size of the object specified by V, or /// getObjectSize - Return the size of the object specified by V, or
/// UnknownSize if unknown. /// UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const TargetData &TD, static uint64_t getObjectSize(const Value *V, const TargetData &TD,
const TargetLibraryInfo &TLI,
bool RoundToAlign = false) { bool RoundToAlign = false) {
uint64_t Size; uint64_t Size;
if (getObjectSize(V, Size, &TD, RoundToAlign)) if (getObjectSize(V, Size, &TD, &TLI, RoundToAlign))
return Size; return Size;
return AliasAnalysis::UnknownSize; return AliasAnalysis::UnknownSize;
} }
@ -95,10 +96,11 @@ static uint64_t getObjectSize(const Value *V, const TargetData &TD,
/// isObjectSmallerThan - Return true if we can prove that the object specified /// isObjectSmallerThan - Return true if we can prove that the object specified
/// by V is smaller than Size. /// by V is smaller than Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size, static bool isObjectSmallerThan(const Value *V, uint64_t Size,
const TargetData &TD) { const TargetData &TD,
const TargetLibraryInfo &TLI) {
// This function needs to use the aligned object size because we allow // This function needs to use the aligned object size because we allow
// reads a bit past the end given sufficient alignment. // reads a bit past the end given sufficient alignment.
uint64_t ObjectSize = getObjectSize(V, TD, /*RoundToAlign*/true); uint64_t ObjectSize = getObjectSize(V, TD, TLI, /*RoundToAlign*/true);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size; return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize < Size;
} }
@ -106,8 +108,8 @@ static bool isObjectSmallerThan(const Value *V, uint64_t Size,
/// isObjectSize - Return true if we can prove that the object specified /// isObjectSize - Return true if we can prove that the object specified
/// by V has size Size. /// by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, static bool isObjectSize(const Value *V, uint64_t Size,
const TargetData &TD) { const TargetData &TD, const TargetLibraryInfo &TLI) {
uint64_t ObjectSize = getObjectSize(V, TD); uint64_t ObjectSize = getObjectSize(V, TD, TLI);
return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size; return ObjectSize != AliasAnalysis::UnknownSize && ObjectSize == Size;
} }
@ -1133,8 +1135,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// If the size of one access is larger than the entire object on the other // If the size of one access is larger than the entire object on the other
// side, then we know such behavior is undefined and can assume no alias. // side, then we know such behavior is undefined and can assume no alias.
if (TD) if (TD)
if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD)) || if ((V1Size != UnknownSize && isObjectSmallerThan(O2, V1Size, *TD, *TLI)) ||
(V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD))) (V2Size != UnknownSize && isObjectSmallerThan(O1, V2Size, *TD, *TLI)))
return NoAlias; return NoAlias;
// Check the cache before climbing up use-def chains. This also terminates // Check the cache before climbing up use-def chains. This also terminates
@ -1184,8 +1186,8 @@ BasicAliasAnalysis::aliasCheck(const Value *V1, uint64_t V1Size,
// accesses is accessing the entire object, then the accesses must // accesses is accessing the entire object, then the accesses must
// overlap in some way. // overlap in some way.
if (TD && O1 == O2) if (TD && O1 == O2)
if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD)) || if ((V1Size != UnknownSize && isObjectSize(O1, V1Size, *TD, *TLI)) ||
(V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD))) (V2Size != UnknownSize && isObjectSize(O2, V2Size, *TD, *TLI)))
return AliasCache[Locs] = PartialAlias; return AliasCache[Locs] = PartialAlias;
AliasResult Result = AliasResult Result =

View File

@ -263,7 +263,7 @@ bool GlobalsModRef::AnalyzeUsesOfPointer(Value *V,
} else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) { } else if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest)) if (AnalyzeUsesOfPointer(BCI, Readers, Writers, OkayStoreDest))
return true; return true;
} else if (isFreeCall(U)) { } else if (isFreeCall(U, TLI)) {
Writers.push_back(cast<Instruction>(U)->getParent()->getParent()); Writers.push_back(cast<Instruction>(U)->getParent()->getParent());
} else if (CallInst *CI = dyn_cast<CallInst>(U)) { } else if (CallInst *CI = dyn_cast<CallInst>(U)) {
// Make sure that this is just the function being called, not that it is // Make sure that this is just the function being called, not that it is
@ -329,7 +329,7 @@ bool GlobalsModRef::AnalyzeIndirectGlobalMemory(GlobalValue *GV) {
// Check the value being stored. // Check the value being stored.
Value *Ptr = GetUnderlyingObject(SI->getOperand(0)); Value *Ptr = GetUnderlyingObject(SI->getOperand(0));
if (!isAllocLikeFn(Ptr)) if (!isAllocLikeFn(Ptr, TLI))
return false; // Too hard to analyze. return false; // Too hard to analyze.
// Analyze all uses of the allocation. If any of them are used in a // Analyze all uses of the allocation. If any of them are used in a
@ -458,7 +458,7 @@ void GlobalsModRef::AnalyzeCallGraph(CallGraph &CG, Module &M) {
if (SI->isVolatile()) if (SI->isVolatile())
// Treat volatile stores as reading memory somewhere. // Treat volatile stores as reading memory somewhere.
FunctionEffect |= Ref; FunctionEffect |= Ref;
} else if (isAllocationFn(&*II) || isFreeCall(&*II)) { } else if (isAllocationFn(&*II, TLI) || isFreeCall(&*II, TLI)) {
FunctionEffect |= ModRef; FunctionEffect |= ModRef;
} else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) { } else if (IntrinsicInst *Intrinsic = dyn_cast<IntrinsicInst>(&*II)) {
// The callgraph doesn't include intrinsic calls. // The callgraph doesn't include intrinsic calls.

View File

@ -26,6 +26,7 @@
#include "llvm/Support/MathExtras.h" #include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/Local.h"
using namespace llvm; using namespace llvm;
@ -39,7 +40,7 @@ enum AllocType {
}; };
struct AllocFnsTy { struct AllocFnsTy {
const char *Name; LibFunc::Func Func;
AllocType AllocTy; AllocType AllocTy;
unsigned char NumParams; unsigned char NumParams;
// First and Second size parameters (or -1 if unused) // First and Second size parameters (or -1 if unused)
@ -49,22 +50,22 @@ struct AllocFnsTy {
// FIXME: certain users need more information. E.g., SimplifyLibCalls needs to // FIXME: certain users need more information. E.g., SimplifyLibCalls needs to
// know which functions are nounwind, noalias, nocapture parameters, etc. // know which functions are nounwind, noalias, nocapture parameters, etc.
static const AllocFnsTy AllocationFnData[] = { static const AllocFnsTy AllocationFnData[] = {
{"malloc", MallocLike, 1, 0, -1}, {LibFunc::malloc, MallocLike, 1, 0, -1},
{"valloc", MallocLike, 1, 0, -1}, {LibFunc::valloc, MallocLike, 1, 0, -1},
{"_Znwj", MallocLike, 1, 0, -1}, // new(unsigned int) {LibFunc::Znwj, MallocLike, 1, 0, -1}, // new(unsigned int)
{"_ZnwjRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned int, nothrow) {LibFunc::ZnwjRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned int, nothrow)
{"_Znwm", MallocLike, 1, 0, -1}, // new(unsigned long) {LibFunc::Znwm, MallocLike, 1, 0, -1}, // new(unsigned long)
{"_ZnwmRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new(unsigned long, nothrow) {LibFunc::ZnwmRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new(unsigned long, nothrow)
{"_Znaj", MallocLike, 1, 0, -1}, // new[](unsigned int) {LibFunc::Znaj, MallocLike, 1, 0, -1}, // new[](unsigned int)
{"_ZnajRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow) {LibFunc::ZnajRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned int, nothrow)
{"_Znam", MallocLike, 1, 0, -1}, // new[](unsigned long) {LibFunc::Znam, MallocLike, 1, 0, -1}, // new[](unsigned long)
{"_ZnamRKSt9nothrow_t", MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow) {LibFunc::ZnamRKSt9nothrow_t, MallocLike, 2, 0, -1}, // new[](unsigned long, nothrow)
{"posix_memalign", MallocLike, 3, 2, -1}, {LibFunc::posix_memalign, MallocLike, 3, 2, -1},
{"calloc", CallocLike, 2, 0, 1}, {LibFunc::calloc, CallocLike, 2, 0, 1},
{"realloc", ReallocLike, 2, 1, -1}, {LibFunc::realloc, ReallocLike, 2, 1, -1},
{"reallocf", ReallocLike, 2, 1, -1}, {LibFunc::reallocf, ReallocLike, 2, 1, -1},
{"strdup", StrDupLike, 1, -1, -1}, {LibFunc::strdup, StrDupLike, 1, -1, -1},
{"strndup", StrDupLike, 2, 1, -1} {LibFunc::strndup, StrDupLike, 2, 1, -1}
}; };
@ -85,15 +86,22 @@ static Function *getCalledFunction(const Value *V, bool LookThroughBitCast) {
/// \brief Returns the allocation data for the given value if it is a call to a /// \brief Returns the allocation data for the given value if it is a call to a
/// known allocation function, and NULL otherwise. /// known allocation function, and NULL otherwise.
static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy, static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
const TargetLibraryInfo *TLI,
bool LookThroughBitCast = false) { bool LookThroughBitCast = false) {
Function *Callee = getCalledFunction(V, LookThroughBitCast); Function *Callee = getCalledFunction(V, LookThroughBitCast);
if (!Callee) if (!Callee)
return 0; return 0;
// Make sure that the function is available.
StringRef FnName = Callee->getName();
LibFunc::Func TLIFn;
if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
return 0;
unsigned i = 0; unsigned i = 0;
bool found = false; bool found = false;
for ( ; i < array_lengthof(AllocationFnData); ++i) { for ( ; i < array_lengthof(AllocationFnData); ++i) {
if (Callee->getName() == AllocationFnData[i].Name) { if (AllocationFnData[i].Func == TLIFn) {
found = true; found = true;
break; break;
} }
@ -106,7 +114,6 @@ static const AllocFnsTy *getAllocationData(const Value *V, AllocType AllocTy,
return 0; return 0;
// Check function prototype. // Check function prototype.
// FIXME: Check the nobuiltin metadata?? (PR5130)
int FstParam = FnData->FstParam; int FstParam = FnData->FstParam;
int SndParam = FnData->SndParam; int SndParam = FnData->SndParam;
FunctionType *FTy = Callee->getFunctionType(); FunctionType *FTy = Callee->getFunctionType();
@ -132,57 +139,65 @@ static bool hasNoAliasAttr(const Value *V, bool LookThroughBitCast) {
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates or reallocates memory (either malloc, calloc, realloc, or strdup /// allocates or reallocates memory (either malloc, calloc, realloc, or strdup
/// like). /// like).
bool llvm::isAllocationFn(const Value *V, bool LookThroughBitCast) { bool llvm::isAllocationFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, AnyAlloc, LookThroughBitCast); bool LookThroughBitCast) {
return getAllocationData(V, AnyAlloc, TLI, LookThroughBitCast);
} }
/// \brief Tests if a value is a call or invoke to a function that returns a /// \brief Tests if a value is a call or invoke to a function that returns a
/// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions). /// NoAlias pointer (including malloc/calloc/realloc/strdup-like functions).
bool llvm::isNoAliasFn(const Value *V, bool LookThroughBitCast) { bool llvm::isNoAliasFn(const Value *V, const TargetLibraryInfo *TLI,
bool LookThroughBitCast) {
// it's safe to consider realloc as noalias since accessing the original // it's safe to consider realloc as noalias since accessing the original
// pointer is undefined behavior // pointer is undefined behavior
return isAllocationFn(V, LookThroughBitCast) || return isAllocationFn(V, TLI, LookThroughBitCast) ||
hasNoAliasAttr(V, LookThroughBitCast); hasNoAliasAttr(V, LookThroughBitCast);
} }
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates uninitialized memory (such as malloc). /// allocates uninitialized memory (such as malloc).
bool llvm::isMallocLikeFn(const Value *V, bool LookThroughBitCast) { bool llvm::isMallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, MallocLike, LookThroughBitCast); bool LookThroughBitCast) {
return getAllocationData(V, MallocLike, TLI, LookThroughBitCast);
} }
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates zero-filled memory (such as calloc). /// allocates zero-filled memory (such as calloc).
bool llvm::isCallocLikeFn(const Value *V, bool LookThroughBitCast) { bool llvm::isCallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, CallocLike, LookThroughBitCast); bool LookThroughBitCast) {
return getAllocationData(V, CallocLike, TLI, LookThroughBitCast);
} }
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// allocates memory (either malloc, calloc, or strdup like). /// allocates memory (either malloc, calloc, or strdup like).
bool llvm::isAllocLikeFn(const Value *V, bool LookThroughBitCast) { bool llvm::isAllocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, AllocLike, LookThroughBitCast); bool LookThroughBitCast) {
return getAllocationData(V, AllocLike, TLI, LookThroughBitCast);
} }
/// \brief Tests if a value is a call or invoke to a library function that /// \brief Tests if a value is a call or invoke to a library function that
/// reallocates memory (such as realloc). /// reallocates memory (such as realloc).
bool llvm::isReallocLikeFn(const Value *V, bool LookThroughBitCast) { bool llvm::isReallocLikeFn(const Value *V, const TargetLibraryInfo *TLI,
return getAllocationData(V, ReallocLike, LookThroughBitCast); bool LookThroughBitCast) {
return getAllocationData(V, ReallocLike, TLI, LookThroughBitCast);
} }
/// extractMallocCall - Returns the corresponding CallInst if the instruction /// extractMallocCall - Returns the corresponding CallInst if the instruction
/// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we /// is a malloc call. Since CallInst::CreateMalloc() only creates calls, we
/// ignore InvokeInst here. /// ignore InvokeInst here.
const CallInst *llvm::extractMallocCall(const Value *I) { const CallInst *llvm::extractMallocCall(const Value *I,
return isMallocLikeFn(I) ? dyn_cast<CallInst>(I) : 0; const TargetLibraryInfo *TLI) {
return isMallocLikeFn(I, TLI) ? dyn_cast<CallInst>(I) : 0;
} }
static Value *computeArraySize(const CallInst *CI, const TargetData *TD, static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
const TargetLibraryInfo *TLI,
bool LookThroughSExt = false) { bool LookThroughSExt = false) {
if (!CI) if (!CI)
return NULL; return NULL;
// The size of the malloc's result type must be known to determine array size. // The size of the malloc's result type must be known to determine array size.
Type *T = getMallocAllocatedType(CI); Type *T = getMallocAllocatedType(CI, TLI);
if (!T || !T->isSized() || !TD) if (!T || !T->isSized() || !TD)
return NULL; return NULL;
@ -204,9 +219,11 @@ static Value *computeArraySize(const CallInst *CI, const TargetData *TD,
/// isArrayMalloc - Returns the corresponding CallInst if the instruction /// isArrayMalloc - Returns the corresponding CallInst if the instruction
/// is a call to malloc whose array size can be determined and the array size /// is a call to malloc whose array size can be determined and the array size
/// is not constant 1. Otherwise, return NULL. /// is not constant 1. Otherwise, return NULL.
const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) { const CallInst *llvm::isArrayMalloc(const Value *I,
const CallInst *CI = extractMallocCall(I); const TargetData *TD,
Value *ArraySize = computeArraySize(CI, TD); const TargetLibraryInfo *TLI) {
const CallInst *CI = extractMallocCall(I, TLI);
Value *ArraySize = computeArraySize(CI, TD, TLI);
if (ArraySize && if (ArraySize &&
ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1)) ArraySize != ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
@ -221,8 +238,9 @@ const CallInst *llvm::isArrayMalloc(const Value *I, const TargetData *TD) {
/// 0: PointerType is the calls' return type. /// 0: PointerType is the calls' return type.
/// 1: PointerType is the bitcast's result type. /// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL. /// >1: Unique PointerType cannot be determined, return NULL.
PointerType *llvm::getMallocType(const CallInst *CI) { PointerType *llvm::getMallocType(const CallInst *CI,
assert(isMallocLikeFn(CI) && "getMallocType and not malloc call"); const TargetLibraryInfo *TLI) {
assert(isMallocLikeFn(CI, TLI) && "getMallocType and not malloc call");
PointerType *MallocType = NULL; PointerType *MallocType = NULL;
unsigned NumOfBitCastUses = 0; unsigned NumOfBitCastUses = 0;
@ -252,8 +270,9 @@ PointerType *llvm::getMallocType(const CallInst *CI) {
/// 0: PointerType is the malloc calls' return type. /// 0: PointerType is the malloc calls' return type.
/// 1: PointerType is the bitcast's result type. /// 1: PointerType is the bitcast's result type.
/// >1: Unique PointerType cannot be determined, return NULL. /// >1: Unique PointerType cannot be determined, return NULL.
Type *llvm::getMallocAllocatedType(const CallInst *CI) { Type *llvm::getMallocAllocatedType(const CallInst *CI,
PointerType *PT = getMallocType(CI); const TargetLibraryInfo *TLI) {
PointerType *PT = getMallocType(CI, TLI);
return PT ? PT->getElementType() : NULL; return PT ? PT->getElementType() : NULL;
} }
@ -263,21 +282,23 @@ Type *llvm::getMallocAllocatedType(const CallInst *CI) {
/// constant 1. Otherwise, return NULL for mallocs whose array size cannot be /// constant 1. Otherwise, return NULL for mallocs whose array size cannot be
/// determined. /// determined.
Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD, Value *llvm::getMallocArraySize(CallInst *CI, const TargetData *TD,
const TargetLibraryInfo *TLI,
bool LookThroughSExt) { bool LookThroughSExt) {
assert(isMallocLikeFn(CI) && "getMallocArraySize and not malloc call"); assert(isMallocLikeFn(CI, TLI) && "getMallocArraySize and not malloc call");
return computeArraySize(CI, TD, LookThroughSExt); return computeArraySize(CI, TD, TLI, LookThroughSExt);
} }
/// extractCallocCall - Returns the corresponding CallInst if the instruction /// extractCallocCall - Returns the corresponding CallInst if the instruction
/// is a calloc call. /// is a calloc call.
const CallInst *llvm::extractCallocCall(const Value *I) { const CallInst *llvm::extractCallocCall(const Value *I,
return isCallocLikeFn(I) ? cast<CallInst>(I) : 0; const TargetLibraryInfo *TLI) {
return isCallocLikeFn(I, TLI) ? cast<CallInst>(I) : 0;
} }
/// isFreeCall - Returns non-null if the value is a call to the builtin free() /// isFreeCall - Returns non-null if the value is a call to the builtin free()
const CallInst *llvm::isFreeCall(const Value *I) { const CallInst *llvm::isFreeCall(const Value *I, const TargetLibraryInfo *TLI) {
const CallInst *CI = dyn_cast<CallInst>(I); const CallInst *CI = dyn_cast<CallInst>(I);
if (!CI) if (!CI)
return 0; return 0;
@ -285,9 +306,14 @@ const CallInst *llvm::isFreeCall(const Value *I) {
if (Callee == 0 || !Callee->isDeclaration()) if (Callee == 0 || !Callee->isDeclaration())
return 0; return 0;
if (Callee->getName() != "free" && StringRef FnName = Callee->getName();
Callee->getName() != "_ZdlPv" && // operator delete(void*) LibFunc::Func TLIFn;
Callee->getName() != "_ZdaPv") // operator delete[](void*) if (!TLI || !TLI->getLibFunc(FnName, TLIFn) || !TLI->has(TLIFn))
return 0;
if (TLIFn != LibFunc::free &&
TLIFn != LibFunc::ZdlPv && // operator delete(void*)
TLIFn != LibFunc::ZdaPv) // operator delete[](void*)
return 0; return 0;
// Check free prototype. // Check free prototype.
@ -316,11 +342,11 @@ const CallInst *llvm::isFreeCall(const Value *I) {
/// If RoundToAlign is true, then Size is rounded up to the aligment of allocas, /// If RoundToAlign is true, then Size is rounded up to the aligment of allocas,
/// byval arguments, and global variables. /// byval arguments, and global variables.
bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD, bool llvm::getObjectSize(const Value *Ptr, uint64_t &Size, const TargetData *TD,
bool RoundToAlign) { const TargetLibraryInfo *TLI, bool RoundToAlign) {
if (!TD) if (!TD)
return false; return false;
ObjectSizeOffsetVisitor Visitor(TD, Ptr->getContext(), RoundToAlign); ObjectSizeOffsetVisitor Visitor(TD, TLI, Ptr->getContext(), RoundToAlign);
SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr)); SizeOffsetType Data = Visitor.compute(const_cast<Value*>(Ptr));
if (!Visitor.bothKnown(Data)) if (!Visitor.bothKnown(Data))
return false; return false;
@ -348,9 +374,10 @@ APInt ObjectSizeOffsetVisitor::align(APInt Size, uint64_t Align) {
} }
ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD, ObjectSizeOffsetVisitor::ObjectSizeOffsetVisitor(const TargetData *TD,
const TargetLibraryInfo *TLI,
LLVMContext &Context, LLVMContext &Context,
bool RoundToAlign) bool RoundToAlign)
: TD(TD), RoundToAlign(RoundToAlign) { : TD(TD), TLI(TLI), RoundToAlign(RoundToAlign) {
IntegerType *IntTy = TD->getIntPtrType(Context); IntegerType *IntTy = TD->getIntPtrType(Context);
IntTyBits = IntTy->getBitWidth(); IntTyBits = IntTy->getBitWidth();
Zero = APInt::getNullValue(IntTyBits); Zero = APInt::getNullValue(IntTyBits);
@ -416,7 +443,8 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitArgument(Argument &A) {
} }
SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) { SizeOffsetType ObjectSizeOffsetVisitor::visitCallSite(CallSite CS) {
const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc); const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
TLI);
if (!FnData) if (!FnData)
return unknown(); return unknown();
@ -532,8 +560,9 @@ SizeOffsetType ObjectSizeOffsetVisitor::visitInstruction(Instruction &I) {
ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD, ObjectSizeOffsetEvaluator::ObjectSizeOffsetEvaluator(const TargetData *TD,
const TargetLibraryInfo *TLI,
LLVMContext &Context) LLVMContext &Context)
: TD(TD), Context(Context), Builder(Context, TargetFolder(TD)) { : TD(TD), TLI(TLI), Context(Context), Builder(Context, TargetFolder(TD)) {
IntTy = TD->getIntPtrType(Context); IntTy = TD->getIntPtrType(Context);
Zero = ConstantInt::get(IntTy, 0); Zero = ConstantInt::get(IntTy, 0);
} }
@ -558,7 +587,7 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute(Value *V) {
} }
SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) { SizeOffsetEvalType ObjectSizeOffsetEvaluator::compute_(Value *V) {
ObjectSizeOffsetVisitor Visitor(TD, Context); ObjectSizeOffsetVisitor Visitor(TD, TLI, Context);
SizeOffsetType Const = Visitor.compute(V); SizeOffsetType Const = Visitor.compute(V);
if (Visitor.bothKnown(Const)) if (Visitor.bothKnown(Const))
return std::make_pair(ConstantInt::get(Context, Const.first), return std::make_pair(ConstantInt::get(Context, Const.first),
@ -621,7 +650,8 @@ SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitAllocaInst(AllocaInst &I) {
} }
SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) { SizeOffsetEvalType ObjectSizeOffsetEvaluator::visitCallSite(CallSite CS) {
const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc); const AllocFnsTy *FnData = getAllocationData(CS.getInstruction(), AnyAlloc,
TLI);
if (!FnData) if (!FnData)
return unknown(); return unknown();

View File

@ -148,7 +148,7 @@ AliasAnalysis::ModRefResult GetLocation(const Instruction *Inst,
return AliasAnalysis::ModRef; return AliasAnalysis::ModRef;
} }
if (const CallInst *CI = isFreeCall(Inst)) { if (const CallInst *CI = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
// calls to free() deallocate the entire structure // calls to free() deallocate the entire structure
Loc = AliasAnalysis::Location(CI->getArgOperand(0)); Loc = AliasAnalysis::Location(CI->getArgOperand(0));
return AliasAnalysis::Mod; return AliasAnalysis::Mod;
@ -479,7 +479,7 @@ getPointerDependencyFrom(const AliasAnalysis::Location &MemLoc, bool isLoad,
// a subsequent bitcast of the malloc call result. There can be stores to // a subsequent bitcast of the malloc call result. There can be stores to
// the malloced memory between the malloc call and its bitcast uses, and we // the malloced memory between the malloc call and its bitcast uses, and we
// need to continue scanning until the malloc call. // need to continue scanning until the malloc call.
if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst)) { if (isa<AllocaInst>(Inst) || isNoAliasFn(Inst, AA->getTargetLibraryInfo())){
const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD); const Value *AccessPtr = GetUnderlyingObject(MemLoc.Ptr, TD);
if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr)) if (AccessPtr == Inst || AA->isMustAlias(Inst, AccessPtr))

View File

@ -24,6 +24,16 @@ void TargetLibraryInfo::anchor() { }
const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] = const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
{ {
"_ZdaPv",
"_ZdlPv",
"_Znaj",
"_ZnajRKSt9nothrow_t",
"_Znam",
"_ZnamRKSt9nothrow_t",
"_Znwj",
"_ZnwjRKSt9nothrow_t",
"_Znwm",
"_ZnwmRKSt9nothrow_t",
"__cxa_atexit", "__cxa_atexit",
"__cxa_guard_abort", "__cxa_guard_abort",
"__cxa_guard_acquire", "__cxa_guard_acquire",
@ -50,6 +60,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"atanhf", "atanhf",
"atanhl", "atanhl",
"atanl", "atanl",
"calloc",
"cbrt", "cbrt",
"cbrtf", "cbrtf",
"cbrtl", "cbrtl",
@ -89,6 +100,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"fmodl", "fmodl",
"fputc", "fputc",
"fputs", "fputs",
"free",
"fwrite", "fwrite",
"iprintf", "iprintf",
"log", "log",
@ -106,6 +118,7 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"logbl", "logbl",
"logf", "logf",
"logl", "logl",
"malloc",
"memchr", "memchr",
"memcmp", "memcmp",
"memcpy", "memcpy",
@ -115,11 +128,14 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"nearbyint", "nearbyint",
"nearbyintf", "nearbyintf",
"nearbyintl", "nearbyintl",
"posix_memalign",
"pow", "pow",
"powf", "powf",
"powl", "powl",
"putchar", "putchar",
"puts", "puts",
"realloc",
"reallocf",
"rint", "rint",
"rintf", "rintf",
"rintl", "rintl",
@ -139,10 +155,12 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"strcat", "strcat",
"strchr", "strchr",
"strcpy", "strcpy",
"strdup",
"strlen", "strlen",
"strncat", "strncat",
"strncmp", "strncmp",
"strncpy", "strncpy",
"strndup",
"strnlen", "strnlen",
"tan", "tan",
"tanf", "tanf",
@ -152,7 +170,8 @@ const char* TargetLibraryInfo::StandardNames[LibFunc::NumLibFuncs] =
"tanl", "tanl",
"trunc", "trunc",
"truncf", "truncf",
"truncl" "truncl",
"valloc"
}; };
/// initialize - Initialize the set of available library functions based on the /// initialize - Initialize the set of available library functions based on the

View File

@ -346,7 +346,7 @@ static bool isLeakCheckerRoot(GlobalVariable *GV) {
/// Given a value that is stored to a global but never read, determine whether /// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the /// it's safe to remove the store and the chain of computation that feeds the
/// store. /// store.
static bool IsSafeComputationToRemove(Value *V) { static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
do { do {
if (isa<Constant>(V)) if (isa<Constant>(V))
return true; return true;
@ -355,7 +355,7 @@ static bool IsSafeComputationToRemove(Value *V) {
if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) || if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
isa<GlobalValue>(V)) isa<GlobalValue>(V))
return false; return false;
if (isAllocationFn(V)) if (isAllocationFn(V, TLI))
return true; return true;
Instruction *I = cast<Instruction>(V); Instruction *I = cast<Instruction>(V);
@ -376,7 +376,8 @@ static bool IsSafeComputationToRemove(Value *V) {
/// of the global and clean up any that obviously don't assign the global a /// of the global and clean up any that obviously don't assign the global a
/// value that isn't dynamically allocated. /// value that isn't dynamically allocated.
/// ///
static bool CleanupPointerRootUsers(GlobalVariable *GV) { static bool CleanupPointerRootUsers(GlobalVariable *GV,
const TargetLibraryInfo *TLI) {
// A brief explanation of leak checkers. The goal is to find bugs where // A brief explanation of leak checkers. The goal is to find bugs where
// pointers are forgotten, causing an accumulating growth in memory // pointers are forgotten, causing an accumulating growth in memory
// usage over time. The common strategy for leak checkers is to whitelist the // usage over time. The common strategy for leak checkers is to whitelist the
@ -432,18 +433,18 @@ static bool CleanupPointerRootUsers(GlobalVariable *GV) {
C->destroyConstant(); C->destroyConstant();
// This could have invalidated UI, start over from scratch. // This could have invalidated UI, start over from scratch.
Dead.clear(); Dead.clear();
CleanupPointerRootUsers(GV); CleanupPointerRootUsers(GV, TLI);
return true; return true;
} }
} }
} }
for (int i = 0, e = Dead.size(); i != e; ++i) { for (int i = 0, e = Dead.size(); i != e; ++i) {
if (IsSafeComputationToRemove(Dead[i].first)) { if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
Dead[i].second->eraseFromParent(); Dead[i].second->eraseFromParent();
Instruction *I = Dead[i].first; Instruction *I = Dead[i].first;
do { do {
if (isAllocationFn(I)) if (isAllocationFn(I, TLI))
break; break;
Instruction *J = dyn_cast<Instruction>(I->getOperand(0)); Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
if (!J) if (!J)
@ -975,7 +976,7 @@ static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
// nor is the global. // nor is the global.
if (AllNonStoreUsesGone) { if (AllNonStoreUsesGone) {
if (isLeakCheckerRoot(GV)) { if (isLeakCheckerRoot(GV)) {
Changed |= CleanupPointerRootUsers(GV); Changed |= CleanupPointerRootUsers(GV, TLI);
} else { } else {
Changed = true; Changed = true;
CleanupConstantGlobalUsers(GV, 0, TD, TLI); CleanupConstantGlobalUsers(GV, 0, TD, TLI);
@ -1465,9 +1466,10 @@ static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break /// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields. /// it up into multiple allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI, static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
Value *NElems, TargetData *TD) { Value *NElems, TargetData *TD,
const TargetLibraryInfo *TLI) {
DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n'); DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << " MALLOC = " << *CI << '\n');
Type *MAT = getMallocAllocatedType(CI); Type *MAT = getMallocAllocatedType(CI, TLI);
StructType *STy = cast<StructType>(MAT); StructType *STy = cast<StructType>(MAT);
// There is guaranteed to be at least one use of the malloc (storing // There is guaranteed to be at least one use of the malloc (storing
@ -1688,7 +1690,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// This eliminates dynamic allocation, avoids an indirection accessing the // This eliminates dynamic allocation, avoids an indirection accessing the
// data, and exposes the resultant global to further GlobalOpt. // data, and exposes the resultant global to further GlobalOpt.
// We cannot optimize the malloc if we cannot determine malloc array size. // We cannot optimize the malloc if we cannot determine malloc array size.
Value *NElems = getMallocArraySize(CI, TD, true); Value *NElems = getMallocArraySize(CI, TD, TLI, true);
if (!NElems) if (!NElems)
return false; return false;
@ -1725,7 +1727,7 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
// If this is a fixed size array, transform the Malloc to be an alloc of // If this is a fixed size array, transform the Malloc to be an alloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100 // structs. malloc [100 x struct],1 -> malloc struct, 100
if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) { if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
Type *IntPtrTy = TD->getIntPtrType(CI->getContext()); Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes(); unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize); Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
@ -1742,7 +1744,8 @@ static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
CI = cast<CallInst>(Malloc); CI = cast<CallInst>(Malloc);
} }
GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD); GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, TLI, true),
TD, TLI);
return true; return true;
} }
@ -1771,8 +1774,8 @@ static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
// Optimize away any trapping uses of the loaded value. // Optimize away any trapping uses of the loaded value.
if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI)) if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, TD, TLI))
return true; return true;
} else if (CallInst *CI = extractMallocCall(StoredOnceVal)) { } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
Type *MallocType = getMallocAllocatedType(CI); Type *MallocType = getMallocAllocatedType(CI, TLI);
if (MallocType && if (MallocType &&
TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI, TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType, Ordering, GVI,
TD, TLI)) TD, TLI))
@ -1964,7 +1967,7 @@ bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
bool Changed; bool Changed;
if (isLeakCheckerRoot(GV)) { if (isLeakCheckerRoot(GV)) {
// Delete any constant stores to the global. // Delete any constant stores to the global.
Changed = CleanupPointerRootUsers(GV); Changed = CleanupPointerRootUsers(GV, TLI);
} else { } else {
// Delete any stores we can find to the global. We may not be able to // Delete any stores we can find to the global. We may not be able to
// make it completely dead though. // make it completely dead though.

View File

@ -20,6 +20,7 @@
#include "llvm/Analysis/CallGraph.h" #include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h" #include "llvm/Analysis/InlineCost.h"
#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/IPO/InlinerPass.h" #include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/Transforms/Utils/Cloning.h" #include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h" #include "llvm/Transforms/Utils/Local.h"
@ -339,6 +340,7 @@ static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
bool Inliner::runOnSCC(CallGraphSCC &SCC) { bool Inliner::runOnSCC(CallGraphSCC &SCC) {
CallGraph &CG = getAnalysis<CallGraph>(); CallGraph &CG = getAnalysis<CallGraph>();
const TargetData *TD = getAnalysisIfAvailable<TargetData>(); const TargetData *TD = getAnalysisIfAvailable<TargetData>();
const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
SmallPtrSet<Function*, 8> SCCFunctions; SmallPtrSet<Function*, 8> SCCFunctions;
DEBUG(dbgs() << "Inliner visiting SCC:"); DEBUG(dbgs() << "Inliner visiting SCC:");
@ -417,7 +419,7 @@ bool Inliner::runOnSCC(CallGraphSCC &SCC) {
// just delete the call instead of trying to inline it, regardless of // just delete the call instead of trying to inline it, regardless of
// size. This happens because IPSCCP propagates the result out of the // size. This happens because IPSCCP propagates the result out of the
// call and then we're left with the dead call. // call and then we're left with the dead call.
if (isInstructionTriviallyDead(CS.getInstruction())) { if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
DEBUG(dbgs() << " -> Deleting dead call: " DEBUG(dbgs() << " -> Deleting dead call: "
<< *CS.getInstruction() << "\n"); << *CS.getInstruction() << "\n");
// Update the call graph by deleting the edge from Callee to Caller. // Update the call graph by deleting the edge from Callee to Caller.

View File

@ -168,7 +168,7 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
/// the heavy lifting. /// the heavy lifting.
/// ///
Instruction *InstCombiner::visitCallInst(CallInst &CI) { Instruction *InstCombiner::visitCallInst(CallInst &CI) {
if (isFreeCall(&CI)) if (isFreeCall(&CI, TLI))
return visitFree(CI); return visitFree(CI);
// If the caller function is nounwind, mark the call as nounwind, even if the // If the caller function is nounwind, mark the call as nounwind, even if the
@ -243,7 +243,7 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
default: break; default: break;
case Intrinsic::objectsize: { case Intrinsic::objectsize: {
uint64_t Size; uint64_t Size;
if (getObjectSize(II->getArgOperand(0), Size, TD)) if (getObjectSize(II->getArgOperand(0), Size, TD, TLI))
return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size)); return ReplaceInstUsesWith(CI, ConstantInt::get(CI.getType(), Size));
return 0; return 0;
} }
@ -877,7 +877,7 @@ static IntrinsicInst *FindInitTrampoline(Value *Callee) {
// visitCallSite - Improvements for call and invoke instructions. // visitCallSite - Improvements for call and invoke instructions.
// //
Instruction *InstCombiner::visitCallSite(CallSite CS) { Instruction *InstCombiner::visitCallSite(CallSite CS) {
if (isAllocLikeFn(CS.getInstruction())) if (isAllocLikeFn(CS.getInstruction(), TLI))
return visitAllocSite(*CS.getInstruction()); return visitAllocSite(*CS.getInstruction());
bool Changed = false; bool Changed = false;

View File

@ -1068,7 +1068,7 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
// If the bitcast is of an allocation, and the allocation will be // If the bitcast is of an allocation, and the allocation will be
// converted to match the type of the cast, don't touch this. // converted to match the type of the cast, don't touch this.
if (isa<AllocaInst>(BCI->getOperand(0)) || if (isa<AllocaInst>(BCI->getOperand(0)) ||
isAllocationFn(BCI->getOperand(0))) { isAllocationFn(BCI->getOperand(0), TLI)) {
// See if the bitcast simplifies, if so, don't nuke this GEP yet. // See if the bitcast simplifies, if so, don't nuke this GEP yet.
if (Instruction *I = visitBitCast(*BCI)) { if (Instruction *I = visitBitCast(*BCI)) {
if (I != BCI) { if (I != BCI) {
@ -1107,7 +1107,8 @@ Instruction *InstCombiner::visitGetElementPtrInst(GetElementPtrInst &GEP) {
static bool static bool
isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) { isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users,
const TargetLibraryInfo *TLI) {
SmallVector<Instruction*, 4> Worklist; SmallVector<Instruction*, 4> Worklist;
Worklist.push_back(AI); Worklist.push_back(AI);
@ -1163,7 +1164,7 @@ isAllocSiteRemovable(Instruction *AI, SmallVectorImpl<WeakVH> &Users) {
} }
} }
if (isFreeCall(I)) { if (isFreeCall(I, TLI)) {
Users.push_back(I); Users.push_back(I);
continue; continue;
} }
@ -1188,7 +1189,7 @@ Instruction *InstCombiner::visitAllocSite(Instruction &MI) {
// to null and free calls, delete the calls and replace the comparisons with // to null and free calls, delete the calls and replace the comparisons with
// true or false as appropriate. // true or false as appropriate.
SmallVector<WeakVH, 64> Users; SmallVector<WeakVH, 64> Users;
if (isAllocSiteRemovable(&MI, Users)) { if (isAllocSiteRemovable(&MI, Users, TLI)) {
for (unsigned i = 0, e = Users.size(); i != e; ++i) { for (unsigned i = 0, e = Users.size(); i != e; ++i) {
Instruction *I = cast_or_null<Instruction>(&*Users[i]); Instruction *I = cast_or_null<Instruction>(&*Users[i]);
if (!I) continue; if (!I) continue;
@ -1872,7 +1873,7 @@ static bool AddReachableCodeToWorklist(BasicBlock *BB,
Instruction *Inst = BBI++; Instruction *Inst = BBI++;
// DCE instruction if trivially dead. // DCE instruction if trivially dead.
if (isInstructionTriviallyDead(Inst)) { if (isInstructionTriviallyDead(Inst, TLI)) {
++NumDeadInst; ++NumDeadInst;
DEBUG(errs() << "IC: DCE: " << *Inst << '\n'); DEBUG(errs() << "IC: DCE: " << *Inst << '\n');
Inst->eraseFromParent(); Inst->eraseFromParent();
@ -2002,7 +2003,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
if (I == 0) continue; // skip null values. if (I == 0) continue; // skip null values.
// Check to see if we can DCE the instruction. // Check to see if we can DCE the instruction.
if (isInstructionTriviallyDead(I)) { if (isInstructionTriviallyDead(I, TLI)) {
DEBUG(errs() << "IC: DCE: " << *I << '\n'); DEBUG(errs() << "IC: DCE: " << *I << '\n');
EraseInstFromFunction(*I); EraseInstFromFunction(*I);
++NumDeadInst; ++NumDeadInst;
@ -2102,7 +2103,7 @@ bool InstCombiner::DoOneIteration(Function &F, unsigned Iteration) {
// If the instruction was modified, it's possible that it is now dead. // If the instruction was modified, it's possible that it is now dead.
// if so, remove it. // if so, remove it.
if (isInstructionTriviallyDead(I)) { if (isInstructionTriviallyDead(I, TLI)) {
EraseInstFromFunction(*I); EraseInstFromFunction(*I);
} else { } else {
Worklist.Add(I); Worklist.Add(I);

View File

@ -24,6 +24,7 @@
#include "llvm/Support/TargetFolder.h" #include "llvm/Support/TargetFolder.h"
#include "llvm/Support/raw_ostream.h" #include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Instrumentation.h" #include "llvm/Transforms/Instrumentation.h"
using namespace llvm; using namespace llvm;
@ -48,10 +49,12 @@ namespace {
virtual void getAnalysisUsage(AnalysisUsage &AU) const { virtual void getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<TargetData>(); AU.addRequired<TargetData>();
AU.addRequired<TargetLibraryInfo>();
} }
private: private:
const TargetData *TD; const TargetData *TD;
const TargetLibraryInfo *TLI;
ObjectSizeOffsetEvaluator *ObjSizeEval; ObjectSizeOffsetEvaluator *ObjSizeEval;
BuilderTy *Builder; BuilderTy *Builder;
Instruction *Inst; Instruction *Inst;
@ -166,11 +169,12 @@ bool BoundsChecking::instrument(Value *Ptr, Value *InstVal) {
bool BoundsChecking::runOnFunction(Function &F) { bool BoundsChecking::runOnFunction(Function &F) {
TD = &getAnalysis<TargetData>(); TD = &getAnalysis<TargetData>();
TLI = &getAnalysis<TargetLibraryInfo>();
TrapBB = 0; TrapBB = 0;
BuilderTy TheBuilder(F.getContext(), TargetFolder(TD)); BuilderTy TheBuilder(F.getContext(), TargetFolder(TD));
Builder = &TheBuilder; Builder = &TheBuilder;
ObjectSizeOffsetEvaluator TheObjSizeEval(TD, F.getContext()); ObjectSizeOffsetEvaluator TheObjSizeEval(TD, TLI, F.getContext());
ObjSizeEval = &TheObjSizeEval; ObjSizeEval = &TheObjSizeEval;
// check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory // check HANDLE_MEMORY_INST in include/llvm/Instruction.def for memory

View File

@ -988,7 +988,7 @@ bool CodeGenPrepare::OptimizeMemoryInst(Instruction *MemoryInst, Value *Addr,
WeakVH IterHandle(CurInstIterator); WeakVH IterHandle(CurInstIterator);
BasicBlock *BB = CurInstIterator->getParent(); BasicBlock *BB = CurInstIterator->getParent();
RecursivelyDeleteTriviallyDeadInstructions(Repl); RecursivelyDeleteTriviallyDeadInstructions(Repl, TLInfo);
if (IterHandle != CurInstIterator) { if (IterHandle != CurInstIterator) {
// If the iterator instruction was recursively deleted, start over at the // If the iterator instruction was recursively deleted, start over at the

View File

@ -22,6 +22,7 @@
#include "llvm/Instruction.h" #include "llvm/Instruction.h"
#include "llvm/Pass.h" #include "llvm/Pass.h"
#include "llvm/Support/InstIterator.h" #include "llvm/Support/InstIterator.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/Statistic.h" #include "llvm/ADT/Statistic.h"
using namespace llvm; using namespace llvm;
@ -38,10 +39,11 @@ namespace {
initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry()); initializeDeadInstEliminationPass(*PassRegistry::getPassRegistry());
} }
virtual bool runOnBasicBlock(BasicBlock &BB) { virtual bool runOnBasicBlock(BasicBlock &BB) {
TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
bool Changed = false; bool Changed = false;
for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) { for (BasicBlock::iterator DI = BB.begin(); DI != BB.end(); ) {
Instruction *Inst = DI++; Instruction *Inst = DI++;
if (isInstructionTriviallyDead(Inst)) { if (isInstructionTriviallyDead(Inst, TLI)) {
Inst->eraseFromParent(); Inst->eraseFromParent();
Changed = true; Changed = true;
++DIEEliminated; ++DIEEliminated;
@ -87,6 +89,8 @@ char DCE::ID = 0;
INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false) INITIALIZE_PASS(DCE, "dce", "Dead Code Elimination", false, false)
bool DCE::runOnFunction(Function &F) { bool DCE::runOnFunction(Function &F) {
TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
// Start out with all of the instructions in the worklist... // Start out with all of the instructions in the worklist...
std::vector<Instruction*> WorkList; std::vector<Instruction*> WorkList;
for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i) for (inst_iterator i = inst_begin(F), e = inst_end(F); i != e; ++i)
@ -101,7 +105,7 @@ bool DCE::runOnFunction(Function &F) {
Instruction *I = WorkList.back(); Instruction *I = WorkList.back();
WorkList.pop_back(); WorkList.pop_back();
if (isInstructionTriviallyDead(I)) { // If the instruction is dead. if (isInstructionTriviallyDead(I, TLI)) { // If the instruction is dead.
// Loop over all of the values that the instruction uses, if there are // Loop over all of the values that the instruction uses, if there are
// instructions being used, add them to the worklist, because they might // instructions being used, add them to the worklist, because they might
// go dead after this one is removed. // go dead after this one is removed.

View File

@ -106,6 +106,7 @@ FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
/// ///
static void DeleteDeadInstruction(Instruction *I, static void DeleteDeadInstruction(Instruction *I,
MemoryDependenceAnalysis &MD, MemoryDependenceAnalysis &MD,
const TargetLibraryInfo *TLI,
SmallSetVector<Value*, 16> *ValueSet = 0) { SmallSetVector<Value*, 16> *ValueSet = 0) {
SmallVector<Instruction*, 32> NowDeadInsts; SmallVector<Instruction*, 32> NowDeadInsts;
@ -130,7 +131,7 @@ static void DeleteDeadInstruction(Instruction *I,
if (!Op->use_empty()) continue; if (!Op->use_empty()) continue;
if (Instruction *OpI = dyn_cast<Instruction>(Op)) if (Instruction *OpI = dyn_cast<Instruction>(Op))
if (isInstructionTriviallyDead(OpI)) if (isInstructionTriviallyDead(OpI, TLI))
NowDeadInsts.push_back(OpI); NowDeadInsts.push_back(OpI);
} }
@ -276,7 +277,7 @@ static Value *getStoredPointerOperand(Instruction *I) {
static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) { static uint64_t getPointerSize(const Value *V, AliasAnalysis &AA) {
uint64_t Size; uint64_t Size;
if (getObjectSize(V, Size, AA.getTargetData())) if (getObjectSize(V, Size, AA.getTargetData(), AA.getTargetLibraryInfo()))
return Size; return Size;
return AliasAnalysis::UnknownSize; return AliasAnalysis::UnknownSize;
} }
@ -454,7 +455,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
Instruction *Inst = BBI++; Instruction *Inst = BBI++;
// Handle 'free' calls specially. // Handle 'free' calls specially.
if (CallInst *F = isFreeCall(Inst)) { if (CallInst *F = isFreeCall(Inst, AA->getTargetLibraryInfo())) {
MadeChange |= HandleFree(F); MadeChange |= HandleFree(F);
continue; continue;
} }
@ -483,7 +484,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
// in case we need it. // in case we need it.
WeakVH NextInst(BBI); WeakVH NextInst(BBI);
DeleteDeadInstruction(SI, *MD); DeleteDeadInstruction(SI, *MD, AA->getTargetLibraryInfo());
if (NextInst == 0) // Next instruction deleted. if (NextInst == 0) // Next instruction deleted.
BBI = BB.begin(); BBI = BB.begin();
@ -530,7 +531,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) {
<< *DepWrite << "\n KILLER: " << *Inst << '\n'); << *DepWrite << "\n KILLER: " << *Inst << '\n');
// Delete the store and now-dead instructions that feed it. // Delete the store and now-dead instructions that feed it.
DeleteDeadInstruction(DepWrite, *MD); DeleteDeadInstruction(DepWrite, *MD, AA->getTargetLibraryInfo());
++NumFastStores; ++NumFastStores;
MadeChange = true; MadeChange = true;
@ -640,7 +641,7 @@ bool DSE::HandleFree(CallInst *F) {
Instruction *Next = llvm::next(BasicBlock::iterator(Dependency)); Instruction *Next = llvm::next(BasicBlock::iterator(Dependency));
// DCE instructions only used to calculate that store // DCE instructions only used to calculate that store
DeleteDeadInstruction(Dependency, *MD); DeleteDeadInstruction(Dependency, *MD, AA->getTargetLibraryInfo());
++NumFastStores; ++NumFastStores;
MadeChange = true; MadeChange = true;
@ -680,7 +681,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
// Okay, so these are dead heap objects, but if the pointer never escapes // Okay, so these are dead heap objects, but if the pointer never escapes
// then it's leaked by this function anyways. // then it's leaked by this function anyways.
else if (isAllocLikeFn(I) && !PointerMayBeCaptured(I, true, true)) else if (isAllocLikeFn(I, AA->getTargetLibraryInfo()) &&
!PointerMayBeCaptured(I, true, true))
DeadStackObjects.insert(I); DeadStackObjects.insert(I);
} }
@ -724,7 +726,8 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
dbgs() << '\n'); dbgs() << '\n');
// DCE instructions only used to calculate that store. // DCE instructions only used to calculate that store.
DeleteDeadInstruction(Dead, *MD, &DeadStackObjects); DeleteDeadInstruction(Dead, *MD, AA->getTargetLibraryInfo(),
&DeadStackObjects);
++NumFastStores; ++NumFastStores;
MadeChange = true; MadeChange = true;
continue; continue;
@ -732,9 +735,10 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
} }
// Remove any dead non-memory-mutating instructions. // Remove any dead non-memory-mutating instructions.
if (isInstructionTriviallyDead(BBI)) { if (isInstructionTriviallyDead(BBI, AA->getTargetLibraryInfo())) {
Instruction *Inst = BBI++; Instruction *Inst = BBI++;
DeleteDeadInstruction(Inst, *MD, &DeadStackObjects); DeleteDeadInstruction(Inst, *MD, AA->getTargetLibraryInfo(),
&DeadStackObjects);
++NumFastOther; ++NumFastOther;
MadeChange = true; MadeChange = true;
continue; continue;
@ -750,7 +754,7 @@ bool DSE::handleEndBlock(BasicBlock &BB) {
if (CallSite CS = cast<Value>(BBI)) { if (CallSite CS = cast<Value>(BBI)) {
// Remove allocation function calls from the list of dead stack objects; // Remove allocation function calls from the list of dead stack objects;
// there can't be any references before the definition. // there can't be any references before the definition.
if (isAllocLikeFn(BBI)) if (isAllocLikeFn(BBI, AA->getTargetLibraryInfo()))
DeadStackObjects.remove(BBI); DeadStackObjects.remove(BBI);
// If this call does not access memory, it can't be loading any of our // If this call does not access memory, it can't be loading any of our

View File

@ -374,7 +374,7 @@ bool EarlyCSE::processNode(DomTreeNode *Node) {
Instruction *Inst = I++; Instruction *Inst = I++;
// Dead instructions should just be removed. // Dead instructions should just be removed.
if (isInstructionTriviallyDead(Inst)) { if (isInstructionTriviallyDead(Inst, TLI)) {
DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n'); DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
Inst->eraseFromParent(); Inst->eraseFromParent();
Changed = true; Changed = true;

View File

@ -1436,7 +1436,7 @@ bool GVN::processNonLocalLoad(LoadInst *LI) {
Instruction *DepInst = DepInfo.getInst(); Instruction *DepInst = DepInfo.getInst();
// Loading the allocation -> undef. // Loading the allocation -> undef.
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst) || if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI) ||
// Loading immediately after lifetime begin -> undef. // Loading immediately after lifetime begin -> undef.
isLifetimeStart(DepInst)) { isLifetimeStart(DepInst)) {
ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB, ValuesPerBlock.push_back(AvailableValueInBlock::get(DepBB,
@ -1951,7 +1951,7 @@ bool GVN::processLoad(LoadInst *L) {
// If this load really doesn't depend on anything, then we must be loading an // If this load really doesn't depend on anything, then we must be loading an
// undef value. This can happen when loading for a fresh allocation with no // undef value. This can happen when loading for a fresh allocation with no
// intervening stores, for example. // intervening stores, for example.
if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst)) { if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
L->replaceAllUsesWith(UndefValue::get(L->getType())); L->replaceAllUsesWith(UndefValue::get(L->getType()));
markInstructionForDeletion(L); markInstructionForDeletion(L);
++NumGVNLoad; ++NumGVNLoad;

View File

@ -44,6 +44,7 @@
#include "llvm/Transforms/Utils/BasicBlockUtils.h" #include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h" #include "llvm/Transforms/Utils/SimplifyIndVar.h"
#include "llvm/Target/TargetData.h" #include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/ADT/DenseMap.h" #include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h" #include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h" #include "llvm/ADT/Statistic.h"
@ -68,6 +69,7 @@ namespace {
ScalarEvolution *SE; ScalarEvolution *SE;
DominatorTree *DT; DominatorTree *DT;
TargetData *TD; TargetData *TD;
TargetLibraryInfo *TLI;
SmallVector<WeakVH, 16> DeadInsts; SmallVector<WeakVH, 16> DeadInsts;
bool Changed; bool Changed;
@ -414,11 +416,11 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
// new comparison. // new comparison.
NewCompare->takeName(Compare); NewCompare->takeName(Compare);
Compare->replaceAllUsesWith(NewCompare); Compare->replaceAllUsesWith(NewCompare);
RecursivelyDeleteTriviallyDeadInstructions(Compare); RecursivelyDeleteTriviallyDeadInstructions(Compare, TLI);
// Delete the old floating point increment. // Delete the old floating point increment.
Incr->replaceAllUsesWith(UndefValue::get(Incr->getType())); Incr->replaceAllUsesWith(UndefValue::get(Incr->getType()));
RecursivelyDeleteTriviallyDeadInstructions(Incr); RecursivelyDeleteTriviallyDeadInstructions(Incr, TLI);
// If the FP induction variable still has uses, this is because something else // If the FP induction variable still has uses, this is because something else
// in the loop uses its value. In order to canonicalize the induction // in the loop uses its value. In order to canonicalize the induction
@ -431,7 +433,7 @@ void IndVarSimplify::HandleFloatingPointIV(Loop *L, PHINode *PN) {
Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv", Value *Conv = new SIToFPInst(NewPHI, PN->getType(), "indvar.conv",
PN->getParent()->getFirstInsertionPt()); PN->getParent()->getFirstInsertionPt());
PN->replaceAllUsesWith(Conv); PN->replaceAllUsesWith(Conv);
RecursivelyDeleteTriviallyDeadInstructions(PN); RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
} }
Changed = true; Changed = true;
} }
@ -550,14 +552,14 @@ void IndVarSimplify::RewriteLoopExitValues(Loop *L, SCEVExpander &Rewriter) {
PN->setIncomingValue(i, ExitVal); PN->setIncomingValue(i, ExitVal);
// If this instruction is dead now, delete it. // If this instruction is dead now, delete it.
RecursivelyDeleteTriviallyDeadInstructions(Inst); RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
if (NumPreds == 1) { if (NumPreds == 1) {
// Completely replace a single-pred PHI. This is safe, because the // Completely replace a single-pred PHI. This is safe, because the
// NewVal won't be variant in the loop, so we don't need an LCSSA phi // NewVal won't be variant in the loop, so we don't need an LCSSA phi
// node anymore. // node anymore.
PN->replaceAllUsesWith(ExitVal); PN->replaceAllUsesWith(ExitVal);
RecursivelyDeleteTriviallyDeadInstructions(PN); RecursivelyDeleteTriviallyDeadInstructions(PN, TLI);
} }
} }
if (NumPreds != 1) { if (NumPreds != 1) {
@ -1697,6 +1699,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
SE = &getAnalysis<ScalarEvolution>(); SE = &getAnalysis<ScalarEvolution>();
DT = &getAnalysis<DominatorTree>(); DT = &getAnalysis<DominatorTree>();
TD = getAnalysisIfAvailable<TargetData>(); TD = getAnalysisIfAvailable<TargetData>();
TLI = getAnalysisIfAvailable<TargetLibraryInfo>();
DeadInsts.clear(); DeadInsts.clear();
Changed = false; Changed = false;
@ -1763,7 +1766,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
while (!DeadInsts.empty()) while (!DeadInsts.empty())
if (Instruction *Inst = if (Instruction *Inst =
dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val())) dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
RecursivelyDeleteTriviallyDeadInstructions(Inst); RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
// The Rewriter may not be used from this point on. // The Rewriter may not be used from this point on.
@ -1772,7 +1775,7 @@ bool IndVarSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
SinkUnusedInvariants(L); SinkUnusedInvariants(L);
// Clean up dead instructions. // Clean up dead instructions.
Changed |= DeleteDeadPHIs(L->getHeader()); Changed |= DeleteDeadPHIs(L->getHeader(), TLI);
// Check a post-condition. // Check a post-condition.
assert(L->isLCSSAForm(*DT) && assert(L->isLCSSAForm(*DT) &&
"Indvars did not leave the loop in lcssa form!"); "Indvars did not leave the loop in lcssa form!");

View File

@ -1455,7 +1455,7 @@ bool JumpThreading::ThreadEdge(BasicBlock *BB,
// At this point, the IR is fully up to date and consistent. Do a quick scan // At this point, the IR is fully up to date and consistent. Do a quick scan
// over the new instructions and zap any that are constants or dead. This // over the new instructions and zap any that are constants or dead. This
// frequently happens because of phi translation. // frequently happens because of phi translation.
SimplifyInstructionsInBlock(NewBB, TD); SimplifyInstructionsInBlock(NewBB, TD, TLI);
// Threaded an edge! // Threaded an edge!
++NumThreads; ++NumThreads;

View File

@ -307,7 +307,7 @@ void LICM::SinkRegion(DomTreeNode *N) {
// If the instruction is dead, we would try to sink it because it isn't used // If the instruction is dead, we would try to sink it because it isn't used
// in the loop, instead, just delete it. // in the loop, instead, just delete it.
if (isInstructionTriviallyDead(&I)) { if (isInstructionTriviallyDead(&I, TLI)) {
DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n'); DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
++II; ++II;
CurAST->deleteValue(&I); CurAST->deleteValue(&I);

View File

@ -132,7 +132,8 @@ Pass *llvm::createLoopIdiomPass() { return new LoopIdiomRecognize(); }
/// and zero out all the operands of this instruction. If any of them become /// and zero out all the operands of this instruction. If any of them become
/// dead, delete them and the computation tree that feeds them. /// dead, delete them and the computation tree that feeds them.
/// ///
static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) { static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE,
const TargetLibraryInfo *TLI) {
SmallVector<Instruction*, 32> NowDeadInsts; SmallVector<Instruction*, 32> NowDeadInsts;
NowDeadInsts.push_back(I); NowDeadInsts.push_back(I);
@ -153,7 +154,7 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
if (!Op->use_empty()) continue; if (!Op->use_empty()) continue;
if (Instruction *OpI = dyn_cast<Instruction>(Op)) if (Instruction *OpI = dyn_cast<Instruction>(Op))
if (isInstructionTriviallyDead(OpI)) if (isInstructionTriviallyDead(OpI, TLI))
NowDeadInsts.push_back(OpI); NowDeadInsts.push_back(OpI);
} }
@ -164,10 +165,11 @@ static void deleteDeadInstruction(Instruction *I, ScalarEvolution &SE) {
/// deleteIfDeadInstruction - If the specified value is a dead instruction, /// deleteIfDeadInstruction - If the specified value is a dead instruction,
/// delete it and any recursively used instructions. /// delete it and any recursively used instructions.
static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE) { static void deleteIfDeadInstruction(Value *V, ScalarEvolution &SE,
const TargetLibraryInfo *TLI) {
if (Instruction *I = dyn_cast<Instruction>(V)) if (Instruction *I = dyn_cast<Instruction>(V))
if (isInstructionTriviallyDead(I)) if (isInstructionTriviallyDead(I, TLI))
deleteDeadInstruction(I, SE); deleteDeadInstruction(I, SE, TLI);
} }
bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) { bool LoopIdiomRecognize::runOnLoop(Loop *L, LPPassManager &LPM) {
@ -490,7 +492,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), TheStore)){ StoreSize, getAnalysis<AliasAnalysis>(), TheStore)){
Expander.clear(); Expander.clear();
// If we generated new code for the base pointer, clean up. // If we generated new code for the base pointer, clean up.
deleteIfDeadInstruction(BasePtr, *SE); deleteIfDeadInstruction(BasePtr, *SE, TLI);
return false; return false;
} }
@ -538,7 +540,7 @@ processLoopStridedStore(Value *DestPtr, unsigned StoreSize,
// Okay, the memset has been formed. Zap the original store and anything that // Okay, the memset has been formed. Zap the original store and anything that
// feeds into it. // feeds into it.
deleteDeadInstruction(TheStore, *SE); deleteDeadInstruction(TheStore, *SE, TLI);
++NumMemSet; ++NumMemSet;
return true; return true;
} }
@ -579,7 +581,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
getAnalysis<AliasAnalysis>(), SI)) { getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear(); Expander.clear();
// If we generated new code for the base pointer, clean up. // If we generated new code for the base pointer, clean up.
deleteIfDeadInstruction(StoreBasePtr, *SE); deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
return false; return false;
} }
@ -594,8 +596,8 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
StoreSize, getAnalysis<AliasAnalysis>(), SI)) { StoreSize, getAnalysis<AliasAnalysis>(), SI)) {
Expander.clear(); Expander.clear();
// If we generated new code for the base pointer, clean up. // If we generated new code for the base pointer, clean up.
deleteIfDeadInstruction(LoadBasePtr, *SE); deleteIfDeadInstruction(LoadBasePtr, *SE, TLI);
deleteIfDeadInstruction(StoreBasePtr, *SE); deleteIfDeadInstruction(StoreBasePtr, *SE, TLI);
return false; return false;
} }
@ -628,7 +630,7 @@ processLoopStoreOfLoopLoad(StoreInst *SI, unsigned StoreSize,
// Okay, the memset has been formed. Zap the original store and anything that // Okay, the memset has been formed. Zap the original store and anything that
// feeds into it. // feeds into it.
deleteDeadInstruction(SI, *SE); deleteDeadInstruction(SI, *SE, TLI);
++NumMemCpy; ++NumMemCpy;
return true; return true;
} }

View File

@ -120,7 +120,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
++NumSimplified; ++NumSimplified;
} }
} }
LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I); LocalChanged |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
if (IsSubloopHeader && !isa<PHINode>(I)) if (IsSubloopHeader && !isa<PHINode>(I))
break; break;

View File

@ -94,7 +94,7 @@ void llvm::FoldSingleEntryPHINodes(BasicBlock *BB, Pass *P) {
/// is dead. Also recursively delete any operands that become dead as /// is dead. Also recursively delete any operands that become dead as
/// a result. This includes tracing the def-use list from the PHI to see if /// a result. This includes tracing the def-use list from the PHI to see if
/// it is ultimately unused or if it reaches an unused cycle. /// it is ultimately unused or if it reaches an unused cycle.
bool llvm::DeleteDeadPHIs(BasicBlock *BB) { bool llvm::DeleteDeadPHIs(BasicBlock *BB, const TargetLibraryInfo *TLI) {
// Recursively deleting a PHI may cause multiple PHIs to be deleted // Recursively deleting a PHI may cause multiple PHIs to be deleted
// or RAUW'd undef, so use an array of WeakVH for the PHIs to delete. // or RAUW'd undef, so use an array of WeakVH for the PHIs to delete.
SmallVector<WeakVH, 8> PHIs; SmallVector<WeakVH, 8> PHIs;
@ -105,7 +105,7 @@ bool llvm::DeleteDeadPHIs(BasicBlock *BB) {
bool Changed = false; bool Changed = false;
for (unsigned i = 0, e = PHIs.size(); i != e; ++i) for (unsigned i = 0, e = PHIs.size(); i != e; ++i)
if (PHINode *PN = dyn_cast_or_null<PHINode>(PHIs[i].operator Value*())) if (PHINode *PN = dyn_cast_or_null<PHINode>(PHIs[i].operator Value*()))
Changed |= RecursivelyDeleteDeadPHINode(PN); Changed |= RecursivelyDeleteDeadPHINode(PN, TLI);
return Changed; return Changed;
} }

View File

@ -52,7 +52,8 @@ using namespace llvm;
/// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch /// Also calls RecursivelyDeleteTriviallyDeadInstructions() on any branch/switch
/// conditions and indirectbr addresses this might make dead if /// conditions and indirectbr addresses this might make dead if
/// DeleteDeadConditions is true. /// DeleteDeadConditions is true.
bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) { bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions,
const TargetLibraryInfo *TLI) {
TerminatorInst *T = BB->getTerminator(); TerminatorInst *T = BB->getTerminator();
IRBuilder<> Builder(T); IRBuilder<> Builder(T);
@ -96,7 +97,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Cond = BI->getCondition(); Value *Cond = BI->getCondition();
BI->eraseFromParent(); BI->eraseFromParent();
if (DeleteDeadConditions) if (DeleteDeadConditions)
RecursivelyDeleteTriviallyDeadInstructions(Cond); RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
return true; return true;
} }
return false; return false;
@ -161,7 +162,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Cond = SI->getCondition(); Value *Cond = SI->getCondition();
SI->eraseFromParent(); SI->eraseFromParent();
if (DeleteDeadConditions) if (DeleteDeadConditions)
RecursivelyDeleteTriviallyDeadInstructions(Cond); RecursivelyDeleteTriviallyDeadInstructions(Cond, TLI);
return true; return true;
} }
@ -205,7 +206,7 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
Value *Address = IBI->getAddress(); Value *Address = IBI->getAddress();
IBI->eraseFromParent(); IBI->eraseFromParent();
if (DeleteDeadConditions) if (DeleteDeadConditions)
RecursivelyDeleteTriviallyDeadInstructions(Address); RecursivelyDeleteTriviallyDeadInstructions(Address, TLI);
// If we didn't find our destination in the IBI successor list, then we // If we didn't find our destination in the IBI successor list, then we
// have undefined behavior. Replace the unconditional branch with an // have undefined behavior. Replace the unconditional branch with an
@ -230,7 +231,8 @@ bool llvm::ConstantFoldTerminator(BasicBlock *BB, bool DeleteDeadConditions) {
/// isInstructionTriviallyDead - Return true if the result produced by the /// isInstructionTriviallyDead - Return true if the result produced by the
/// instruction is not used, and the instruction has no side effects. /// instruction is not used, and the instruction has no side effects.
/// ///
bool llvm::isInstructionTriviallyDead(Instruction *I) { bool llvm::isInstructionTriviallyDead(Instruction *I,
const TargetLibraryInfo *TLI) {
if (!I->use_empty() || isa<TerminatorInst>(I)) return false; if (!I->use_empty() || isa<TerminatorInst>(I)) return false;
// We don't want the landingpad instruction removed by anything this general. // We don't want the landingpad instruction removed by anything this general.
@ -265,9 +267,9 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
return isa<UndefValue>(II->getArgOperand(1)); return isa<UndefValue>(II->getArgOperand(1));
} }
if (isAllocLikeFn(I)) return true; if (isAllocLikeFn(I, TLI)) return true;
if (CallInst *CI = isFreeCall(I)) if (CallInst *CI = isFreeCall(I, TLI))
if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0))) if (Constant *C = dyn_cast<Constant>(CI->getArgOperand(0)))
return C->isNullValue() || isa<UndefValue>(C); return C->isNullValue() || isa<UndefValue>(C);
@ -278,9 +280,11 @@ bool llvm::isInstructionTriviallyDead(Instruction *I) {
/// trivially dead instruction, delete it. If that makes any of its operands /// trivially dead instruction, delete it. If that makes any of its operands
/// trivially dead, delete them too, recursively. Return true if any /// trivially dead, delete them too, recursively. Return true if any
/// instructions were deleted. /// instructions were deleted.
bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) { bool
llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V,
const TargetLibraryInfo *TLI) {
Instruction *I = dyn_cast<Instruction>(V); Instruction *I = dyn_cast<Instruction>(V);
if (!I || !I->use_empty() || !isInstructionTriviallyDead(I)) if (!I || !I->use_empty() || !isInstructionTriviallyDead(I, TLI))
return false; return false;
SmallVector<Instruction*, 16> DeadInsts; SmallVector<Instruction*, 16> DeadInsts;
@ -301,7 +305,7 @@ bool llvm::RecursivelyDeleteTriviallyDeadInstructions(Value *V) {
// operand, and if it is 'trivially' dead, delete it in a future loop // operand, and if it is 'trivially' dead, delete it in a future loop
// iteration. // iteration.
if (Instruction *OpI = dyn_cast<Instruction>(OpV)) if (Instruction *OpI = dyn_cast<Instruction>(OpV))
if (isInstructionTriviallyDead(OpI)) if (isInstructionTriviallyDead(OpI, TLI))
DeadInsts.push_back(OpI); DeadInsts.push_back(OpI);
} }
@ -334,19 +338,20 @@ static bool areAllUsesEqual(Instruction *I) {
/// either forms a cycle or is terminated by a trivially dead instruction, /// either forms a cycle or is terminated by a trivially dead instruction,
/// delete it. If that makes any of its operands trivially dead, delete them /// delete it. If that makes any of its operands trivially dead, delete them
/// too, recursively. Return true if a change was made. /// too, recursively. Return true if a change was made.
bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) { bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN,
const TargetLibraryInfo *TLI) {
SmallPtrSet<Instruction*, 4> Visited; SmallPtrSet<Instruction*, 4> Visited;
for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects(); for (Instruction *I = PN; areAllUsesEqual(I) && !I->mayHaveSideEffects();
I = cast<Instruction>(*I->use_begin())) { I = cast<Instruction>(*I->use_begin())) {
if (I->use_empty()) if (I->use_empty())
return RecursivelyDeleteTriviallyDeadInstructions(I); return RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
// If we find an instruction more than once, we're on a cycle that // If we find an instruction more than once, we're on a cycle that
// won't prove fruitful. // won't prove fruitful.
if (!Visited.insert(I)) { if (!Visited.insert(I)) {
// Break the cycle and delete the instruction and its operands. // Break the cycle and delete the instruction and its operands.
I->replaceAllUsesWith(UndefValue::get(I->getType())); I->replaceAllUsesWith(UndefValue::get(I->getType()));
(void)RecursivelyDeleteTriviallyDeadInstructions(I); (void)RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
return true; return true;
} }
} }
@ -358,7 +363,8 @@ bool llvm::RecursivelyDeleteDeadPHINode(PHINode *PN) {
/// ///
/// This returns true if it changed the code, note that it can delete /// This returns true if it changed the code, note that it can delete
/// instructions in other blocks as well in this block. /// instructions in other blocks as well in this block.
bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) { bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD,
const TargetLibraryInfo *TLI) {
bool MadeChange = false; bool MadeChange = false;
#ifndef NDEBUG #ifndef NDEBUG
@ -381,7 +387,7 @@ bool llvm::SimplifyInstructionsInBlock(BasicBlock *BB, const TargetData *TD) {
continue; continue;
} }
MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst); MadeChange |= RecursivelyDeleteTriviallyDeadInstructions(Inst, TLI);
if (BIHandle != BI) if (BIHandle != BI)
BI = BB->begin(); BI = BB->begin();
} }

View File

@ -72,7 +72,7 @@ namespace {
++NumSimplified; ++NumSimplified;
Changed = true; Changed = true;
} }
Changed |= RecursivelyDeleteTriviallyDeadInstructions(I); Changed |= RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
} }
// Place the list of instructions to simplify on the next loop iteration // Place the list of instructions to simplify on the next loop iteration

View File

@ -601,7 +601,7 @@ namespace {
// It is important to cleanup here so that future iterations of this // It is important to cleanup here so that future iterations of this
// function have less work to do. // function have less work to do.
(void) SimplifyInstructionsInBlock(&BB, TD); (void) SimplifyInstructionsInBlock(&BB, TD, AA->getTargetLibraryInfo());
return true; return true;
} }

View File

@ -0,0 +1,31 @@
; RUN: opt -S -basicaa -gvn < %s | FileCheck %s
; RUN: opt -S -basicaa -gvn -disable-simplify-libcalls < %s | FileCheck %s -check-prefix=CHECK_NO_LIBCALLS
; PR13694
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.8.0"
declare i8* @malloc(i64) nounwind
define noalias i8* @test() nounwind uwtable ssp {
entry:
%call = tail call i8* @malloc(i64 100) nounwind
%0 = load i8* %call, align 1
%tobool = icmp eq i8 %0, 0
br i1 %tobool, label %if.end, label %if.then
if.then: ; preds = %entry
store i8 0, i8* %call, align 1
br label %if.end
if.end: ; preds = %if.then, %entry
ret i8* %call
; CHECK: @test
; CHECK-NOT: load
; CHECK-NOT: icmp
; CHECK_NO_LIBCALLS: @test
; CHECK_NO_LIBCALLS: load
; CHECK_NO_LIBCALLS: icmp
}