Revert my previous patches that introduced an additional parameter to the objectsize intrinsic.

After a lot of discussion, we realized it's not the best option for run-time bounds checking.

llvm-svn: 157255
This commit is contained in:
parent 151b044b75
commit 944814b41a
@@ -8467,8 +8467,8 @@ LLVM</a>.</p>
 
 <h5>Syntax:</h5>
 <pre>
-declare i32 @llvm.objectsize.i32(i8* <object>, i1 <min>, i32 <runtime>)
-declare i64 @llvm.objectsize.i64(i8* <object>, i1 <min>, i32 <runtime>)
+declare i32 @llvm.objectsize.i32(i8* <object>, i1 <min>)
+declare i64 @llvm.objectsize.i64(i8* <object>, i1 <min>)
 </pre>
 
 <h5>Overview:</h5>
@@ -8479,21 +8479,17 @@ LLVM</a>.</p>
 an allocation of a specific class, structure, array, or other object.</p>
 
 <h5>Arguments:</h5>
-<p>The <tt>llvm.objectsize</tt> intrinsic takes three arguments. The first
+<p>The <tt>llvm.objectsize</tt> intrinsic takes two arguments. The first
 argument is a pointer to or into the <tt>object</tt>. The second argument
-is a boolean and determines whether <tt>llvm.objectsize</tt> returns 0 (if true)
-or -1 (if false) when the object size is unknown.
-The third argument, <tt>runtime</tt>, indicates whether the compiler is allowed
-to return a non-constant value. The higher the value, the higher the potential
-run-time performance impact.
-The second and third arguments only accepts constants.</p>
+is a boolean and determines whether <tt>llvm.objectsize</tt> returns 0 (if
+true) or -1 (if false) when the object size is unknown.
+The second argument only accepts constants.</p>
 
 <h5>Semantics:</h5>
 <p>The <tt>llvm.objectsize</tt> intrinsic is lowered to a constant representing
 the size of the object concerned. If the size cannot be determined at compile
-time, <tt>llvm.objectsize</tt> either returns <tt>i32/i64 -1 or 0</tt>
-(depending on the <tt>min</tt> argument) if <tt>runtime</tt> is 0, or a run-time
-value (if <tt>runtime</tt> > 0 and an expression could be generated).</p>
+time, <tt>llvm.objectsize</tt> returns <tt>i32/i64 -1 or 0</tt>
+(depending on the <tt>min</tt> argument).</p>
 
 </div>
 
 <!-- _______________________________________________________________________ -->
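For reference, a minimal LLVM IR sketch of the restored two-argument form documented above (the function and buffer names are illustrative and not part of this patch): given target data, instcombine can fold the call on a 32-byte alloca to a constant 32; when the size cannot be determined, the intrinsic lowers to -1 (or 0 when <min> is true).

declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readnone

define i32 @objsize_example() nounwind {
  ; 32-byte stack object; the intrinsic call below folds to i32 32
  %buf = alloca [32 x i8], align 1
  %p = getelementptr inbounds [32 x i8]* %buf, i32 0, i32 0
  %size = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
  ret i32 %size
}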
@@ -272,8 +272,7 @@ def int_sigsetjmp : Intrinsic<[llvm_i32_ty] , [llvm_ptr_ty, llvm_i32_ty]>;
 def int_siglongjmp : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty]>;
 
 // Internal interface for object size checking
-def int_objectsize : Intrinsic<[llvm_anyint_ty],
-[llvm_ptr_ty, llvm_i1_ty, llvm_i32_ty],
+def int_objectsize : Intrinsic<[llvm_anyint_ty], [llvm_ptr_ty, llvm_i1_ty],
 [IntrNoMem]>,
 GCCBuiltin<"__builtin_object_size">;
 
@@ -165,73 +165,6 @@ Instruction *InstCombiner::SimplifyMemSet(MemSetInst *MI) {
 return 0;
 }
 
-/// computeAllocSize - compute the object size allocated by an allocation
-/// site. Returns 0 if the size is not constant (in SizeValue), 1 if the size
-/// is constant (in Size), and 2 if the size could not be determined within the
-/// given maximum Penalty that the computation would incurr at run-time.
-static int computeAllocSize(Value *Alloc, uint64_t &Size, Value* &SizeValue,
-uint64_t Penalty, TargetData *TD,
-InstCombiner::BuilderTy *Builder) {
-if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Alloc)) {
-if (GV->hasDefinitiveInitializer()) {
-Constant *C = GV->getInitializer();
-Size = TD->getTypeAllocSize(C->getType());
-return 1;
-}
-// Can't determine size of the GV.
-return 2;
-
-} else if (AllocaInst *AI = dyn_cast<AllocaInst>(Alloc)) {
-if (!AI->getAllocatedType()->isSized())
-return 2;
-
-Size = TD->getTypeAllocSize(AI->getAllocatedType());
-if (!AI->isArrayAllocation())
-return 1; // we are done
-
-Value *ArraySize = AI->getArraySize();
-if (const ConstantInt *C = dyn_cast<ConstantInt>(ArraySize)) {
-Size *= C->getZExtValue();
-return 1;
-}
-
-if (Penalty < 2)
-return 2;
-
-SizeValue = ConstantInt::get(ArraySize->getType(), Size);
-SizeValue = Builder->CreateMul(SizeValue, ArraySize);
-return 0;
-
-} else if (CallInst *MI = extractMallocCall(Alloc)) {
-SizeValue = MI->getArgOperand(0);
-if (ConstantInt *CI = dyn_cast<ConstantInt>(SizeValue)) {
-Size = CI->getZExtValue();
-return 1;
-}
-return Penalty >= 2 ? 0 : 2;
-
-} else if (CallInst *MI = extractCallocCall(Alloc)) {
-Value *Arg1 = MI->getArgOperand(0);
-Value *Arg2 = MI->getArgOperand(1);
-if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1)) {
-if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2)) {
-Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
-return 1;
-}
-}
-
-if (Penalty < 2)
-return 2;
-
-SizeValue = Builder->CreateMul(Arg1, Arg2);
-return 0;
-}
-
-DEBUG(errs() << "computeAllocSize failed:\n");
-DEBUG(Alloc->dump());
-return 2;
-}
-
 /// visitCallInst - CallInst simplification. This mostly only handles folding
 /// of intrinsic instructions. For normal calls, it allows visitCallSite to do
 /// the heavy lifting.
@@ -317,60 +250,81 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
 if (!TD) return 0;
 
 Type *ReturnTy = CI.getType();
-uint64_t Penalty = cast<ConstantInt>(II->getArgOperand(2))->getZExtValue();
+uint64_t DontKnow = II->getArgOperand(1) == Builder->getTrue() ? 0 : -1ULL;
 
 // Get to the real allocated thing and offset as fast as possible.
 Value *Op1 = II->getArgOperand(0)->stripPointerCasts();
-GEPOperator *GEP;
 
-if ((GEP = dyn_cast<GEPOperator>(Op1))) {
-// check if we will be able to get the offset
-if (!GEP->hasAllConstantIndices() && Penalty < 2)
+uint64_t Offset = 0;
+uint64_t Size = -1ULL;
+
+// Try to look through constant GEPs.
+if (GEPOperator *GEP = dyn_cast<GEPOperator>(Op1)) {
+if (!GEP->hasAllConstantIndices()) return 0;
+
+// Get the current byte offset into the thing. Use the original
+// operand in case we're looking through a bitcast.
+SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
+if (!GEP->getPointerOperandType()->isPointerTy())
 return 0;
+Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
+
 Op1 = GEP->getPointerOperand()->stripPointerCasts();
+
+// Make sure we're not a constant offset from an external
+// global.
+if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1))
+if (!GV->hasDefinitiveInitializer()) return 0;
 }
 
-uint64_t Size;
-Value *SizeValue;
-int ConstAlloc = computeAllocSize(Op1, Size, SizeValue, Penalty, TD,
-Builder);
+// If we've stripped down to a single global variable that we
+// can know the size of then just return that.
+if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Op1)) {
+if (GV->hasDefinitiveInitializer()) {
+Constant *C = GV->getInitializer();
+Size = TD->getTypeAllocSize(C->getType());
+} else {
+// Can't determine size of the GV.
+Constant *RetVal = ConstantInt::get(ReturnTy, DontKnow);
+return ReplaceInstUsesWith(CI, RetVal);
+}
+} else if (AllocaInst *AI = dyn_cast<AllocaInst>(Op1)) {
+// Get alloca size.
+if (AI->getAllocatedType()->isSized()) {
+Size = TD->getTypeAllocSize(AI->getAllocatedType());
+if (AI->isArrayAllocation()) {
+const ConstantInt *C = dyn_cast<ConstantInt>(AI->getArraySize());
+if (!C) return 0;
+Size *= C->getZExtValue();
+}
+}
+} else if (CallInst *MI = extractMallocCall(Op1)) {
+// Get allocation size.
+Value *Arg = MI->getArgOperand(0);
+if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg))
+Size = CI->getZExtValue();
+
+} else if (CallInst *MI = extractCallocCall(Op1)) {
+// Get allocation size.
+Value *Arg1 = MI->getArgOperand(0);
+Value *Arg2 = MI->getArgOperand(1);
+if (ConstantInt *CI1 = dyn_cast<ConstantInt>(Arg1))
+if (ConstantInt *CI2 = dyn_cast<ConstantInt>(Arg2))
+Size = (CI1->getValue() * CI2->getValue()).getZExtValue();
+}
 
 // Do not return "I don't know" here. Later optimization passes could
 // make it possible to evaluate objectsize to a constant.
-if (ConstAlloc == 2)
+if (Size == -1ULL)
 return 0;
 
-uint64_t Offset = 0;
-Value *OffsetValue = 0;
-
-if (GEP) {
-if (GEP->hasAllConstantIndices()) {
-SmallVector<Value*, 8> Ops(GEP->idx_begin(), GEP->idx_end());
-assert(GEP->getPointerOperandType()->isPointerTy());
-Offset = TD->getIndexedOffset(GEP->getPointerOperandType(), Ops);
-} else
-OffsetValue = EmitGEPOffset(GEP, true /*NoNUW*/);
-}
-
-if (!OffsetValue && ConstAlloc) {
 if (Size < Offset) {
-// Out of bounds
-return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, 0));
+// Out of bound reference? Negative index normalized to large
+// index? Just return "I don't know".
+return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, DontKnow));
 }
 return ReplaceInstUsesWith(CI, ConstantInt::get(ReturnTy, Size-Offset));
-}
-
-if (!OffsetValue)
-OffsetValue = ConstantInt::get(ReturnTy, Offset);
-if (ConstAlloc)
-SizeValue = ConstantInt::get(ReturnTy, Size);
-
-Value *Val = Builder->CreateSub(SizeValue, OffsetValue);
-// return 0 if there's an overflow
-Value *Cmp = Builder->CreateICmpULT(SizeValue, OffsetValue);
-Val = Builder->CreateSelect(Cmp, ConstantInt::get(ReturnTy, 0), Val);
-return ReplaceInstUsesWith(CI, Val);
 }
 case Intrinsic::bswap:
 // bswap(bswap(x)) -> x
 if (IntrinsicInst *Operand = dyn_cast<IntrinsicInst>(II->getArgOperand(0)))
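As an illustration of the constant folding performed by the restored path above (hypothetical global and function names, assuming target data is available): the pointer is 10 bytes into a 60-byte global, so the call folds to Size - Offset = 50, while an offset past the end of the object instead folds to the "don't know" value selected by the <min> argument.

@buf = internal global [60 x i8] zeroinitializer

declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readnone

define i32 @fold_example() nounwind {
  %p = getelementptr inbounds [60 x i8]* @buf, i32 0, i32 10
  ; instcombine rewrites the next two lines to "ret i32 50"
  %n = call i32 @llvm.objectsize.i32(i8* %p, i1 false)
  ret i32 %n
}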
@@ -52,20 +52,6 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
 }
 break;
 }
-case 'o': {
-// FIXME: remove in LLVM 3.3
-if (Name.startswith("objectsize.") && F->arg_size() == 2) {
-Type *Tys[] = {F->getReturnType(),
-F->arg_begin()->getType(),
-Type::getInt1Ty(F->getContext()),
-Type::getInt32Ty(F->getContext())};
-NewFn = Intrinsic::getDeclaration(F->getParent(), Intrinsic::objectsize,
-Tys);
-NewFn->takeName(F);
-return true;
-}
-break;
-}
 case 'x': {
 if (Name.startswith("x86.sse2.pcmpeq.") ||
 Name.startswith("x86.sse2.pcmpgt.") ||
@@ -209,7 +195,7 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
 llvm_unreachable("Unknown function for CallInst upgrade.");
 
 case Intrinsic::ctlz:
-case Intrinsic::cttz: {
+case Intrinsic::cttz:
 assert(CI->getNumArgOperands() == 1 &&
 "Mismatch between function args and call args");
 StringRef Name = CI->getName();
@@ -219,16 +205,6 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
 CI->eraseFromParent();
 return;
 }
-case Intrinsic::objectsize: {
-StringRef Name = CI->getName();
-CI->setName(Name + ".old");
-CI->replaceAllUsesWith(Builder.CreateCall3(NewFn, CI->getArgOperand(0),
-CI->getArgOperand(1),
-Builder.getInt32(0), Name));
-CI->eraseFromParent();
-return;
-}
-}
 }
 
 // This tests each Function to determine if it needs upgrading. When we find
@@ -43,7 +43,7 @@ bb:
 %3 = load i32* @tabsize, align 4
 %4 = srem i32 %cols, %3
 %5 = sdiv i32 %cols, %3
-%6 = tail call i32 @llvm.objectsize.i32(i8* null, i1 false, i32 0)
+%6 = tail call i32 @llvm.objectsize.i32(i8* null, i1 false)
 %7 = tail call i8* @__memset_chk(i8* null, i32 9, i32 %5, i32 %6) nounwind
 br label %bb1
 
@@ -54,5 +54,5 @@ bb1:
 ret void
 }
 
-declare i32 @llvm.objectsize.i32(i8*, i1, i32) nounwind readnone
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readnone
 declare i8* @__memset_chk(i8*, i32, i32, i32) nounwind
@@ -23,7 +23,7 @@ bb32: ; preds = %bb6
 %3 = load double* %1, align 4
 %4 = load double* %0, align 4
 call void @Parse_Vector(double* %0) nounwind
-%5 = call i32 @llvm.objectsize.i32(i8* undef, i1 false, i32 0)
+%5 = call i32 @llvm.objectsize.i32(i8* undef, i1 false)
 %6 = icmp eq i32 %5, -1
 br i1 %6, label %bb34, label %bb33
 
@@ -36,7 +36,7 @@ unreachable
 }
 
 declare void @Parse_Vector(double*)
-declare i32 @llvm.objectsize.i32(i8*, i1, i32)
+declare i32 @llvm.objectsize.i32(i8*, i1)
 
 
 ; PR9578
@@ -1,16 +0,0 @@
-; RUN: opt < %s -verify -S | FileCheck %s
-; check automatic upgrade of objectsize. To be removed in LLVM 3.3.
-
-target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f32:32:32-f64:32:64-v64:64:64-v128:128:128-a0:0:64-f80:128:128"
-
-define i32 @foo() nounwind {
-; CHECK: @foo
-%1 = alloca i8, align 4
-%2 = getelementptr inbounds i8* %1, i32 0
-; CHECK: llvm.objectsize.i32(i8* %2, i1 false, i32 0)
-%3 = call i32 @llvm.objectsize.i32(i8* %2, i1 0)
-ret i32 %3
-}
-
-; CHECK: @llvm.objectsize.i32(i8*, i1, i32)
-declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
@@ -8,7 +8,7 @@ target triple = "x86_64-apple-macosx10.6.0"
 
 @aux_temp = external global %struct.dfa, align 8
 
-declare i64 @llvm.objectsize.i64(i8*, i1, i32) nounwind readnone
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
 
 declare void @__memset_chk() nounwind
 
@@ -21,12 +21,12 @@ if.end.i: ; preds = %entry
 br i1 undef, label %land.end.thread.i, label %land.end.i
 
 land.end.thread.i: ; preds = %if.end.i
-%0 = call i64 @llvm.objectsize.i64(i8* undef, i1 false, i32 0) nounwind
+%0 = call i64 @llvm.objectsize.i64(i8* undef, i1 false) nounwind
 %cmp1710.i = icmp eq i64 %0, -1
 br i1 %cmp1710.i, label %cond.false156.i, label %cond.true138.i
 
 land.end.i: ; preds = %if.end.i
-%1 = call i64 @llvm.objectsize.i64(i8* undef, i1 false, i32 0) nounwind
+%1 = call i64 @llvm.objectsize.i64(i8* undef, i1 false) nounwind
 %cmp17.i = icmp eq i64 %1, -1
 br i1 %cmp17.i, label %cond.false156.i, label %cond.true138.i
 
@@ -203,7 +203,7 @@ entry:
 ; <rdar://problem/9187792>
 define fastcc void @func_61() nounwind sspreq {
 entry:
-%t1 = tail call i64 @llvm.objectsize.i64(i8* undef, i1 false, i32 0)
+%t1 = tail call i64 @llvm.objectsize.i64(i8* undef, i1 false)
 %t2 = icmp eq i64 %t1, -1
 br i1 %t2, label %bb2, label %bb1
 
@@ -214,7 +214,7 @@ bb2:
 ret void
 }
 
-declare i64 @llvm.objectsize.i64(i8*, i1, i32) nounwind readnone
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
 
 ; PR10277
 ; This test has dead code elimination caused by remat during spilling.
@@ -10,7 +10,7 @@ target triple = "x86_64-apple-darwin10.0"
 define void @bar() nounwind ssp {
 entry:
 %tmp = load i8** @p ; <i8*> [#uses=1]
-%0 = call i64 @llvm.objectsize.i64(i8* %tmp, i1 0, i32 0) ; <i64> [#uses=1]
+%0 = call i64 @llvm.objectsize.i64(i8* %tmp, i1 0) ; <i64> [#uses=1]
 %cmp = icmp ne i64 %0, -1 ; <i1> [#uses=1]
 ; X64: movabsq $-1, [[RAX:%r..]]
 ; X64: cmpq $-1, [[RAX]]
@@ -19,7 +19,7 @@ entry:
 cond.true: ; preds = %entry
 %tmp1 = load i8** @p ; <i8*> [#uses=1]
 %tmp2 = load i8** @p ; <i8*> [#uses=1]
-%1 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1, i32 0) ; <i64> [#uses=1]
+%1 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1) ; <i64> [#uses=1]
 %call = call i8* @__strcpy_chk(i8* %tmp1, i8* getelementptr inbounds ([3 x i8]* @.str, i32 0, i32 0), i64 %1) ssp ; <i8*> [#uses=1]
 br label %cond.end
 
@@ -33,7 +33,7 @@ cond.end: ; preds = %cond.false, %cond.t
 ret void
 }
 
-declare i64 @llvm.objectsize.i64(i8*, i1, i32) nounwind readonly
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readonly
 
 declare i8* @__strcpy_chk(i8*, i8*, i64) ssp
 
@@ -47,7 +47,7 @@ entry:
 %tmp = load i8** %__dest.addr ; <i8*> [#uses=1]
 %tmp1 = load i8** %__src.addr ; <i8*> [#uses=1]
 %tmp2 = load i8** %__dest.addr ; <i8*> [#uses=1]
-%0 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1, i32 0) ; <i64> [#uses=1]
+%0 = call i64 @llvm.objectsize.i64(i8* %tmp2, i1 1) ; <i64> [#uses=1]
 %call = call i8* @__strcpy_chk(i8* %tmp, i8* %tmp1, i64 %0) ssp ; <i8*> [#uses=1]
 store i8* %call, i8** %retval
 %1 = load i8** %retval ; <i8*> [#uses=1]
@@ -2,13 +2,13 @@
 target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
 target triple = "x86_64-apple-darwin10.0.0"
 
-declare i64 @llvm.objectsize.i64(i8*, i1, i32) nounwind readnone
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
 
 define void @test5() nounwind optsize noinline ssp {
 entry:
 ; CHECK: movq ___stack_chk_guard@GOTPCREL(%rip)
 %buf = alloca [64 x i8], align 16
-%0 = call i64 @llvm.objectsize.i64(i8* undef, i1 false, i32 0)
+%0 = call i64 @llvm.objectsize.i64(i8* undef, i1 false)
 br i1 false, label %if.end, label %if.then
 
 if.then: ; preds = %entry
@@ -2,7 +2,7 @@
 
 declare void @llvm.dbg.declare(metadata, metadata) nounwind readnone
 
-declare i64 @llvm.objectsize.i64(i8*, i1, i32) nounwind readnone
+declare i64 @llvm.objectsize.i64(i8*, i1) nounwind readnone
 
 declare i8* @foo(i8*, i32, i64, i64) nounwind
 
@@ -23,7 +23,7 @@ entry:
 %tmp1 = load i32* %__val.addr, align 4, !dbg !21, !tbaa !17
 %tmp2 = load i64* %__len.addr, align 8, !dbg !21, !tbaa !19
 %tmp3 = load i8** %__dest.addr, align 8, !dbg !21, !tbaa !13
-%0 = call i64 @llvm.objectsize.i64(i8* %tmp3, i1 false, i32 0), !dbg !21
+%0 = call i64 @llvm.objectsize.i64(i8* %tmp3, i1 false), !dbg !21
 %call = call i8* @foo(i8* %tmp, i32 %tmp1, i64 %tmp2, i64 %0), !dbg !21
 ret i8* %call, !dbg !21
 }
@@ -9,7 +9,7 @@ target datalayout = "e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:32:64-f3
 define i32 @foo() nounwind {
 ; CHECK: @foo
 ; CHECK-NEXT: ret i32 60
-%1 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false, i32 0)
+%1 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
 ret i32 %1
 }
 
@@ -17,7 +17,7 @@ define i8* @bar() nounwind {
 ; CHECK: @bar
 entry:
 %retval = alloca i8*
-%0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false, i32 0)
+%0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([60 x i8]* @a, i32 0, i32 0), i1 false)
 %cmp = icmp ne i32 %0, -1
 ; CHECK: br i1 true
 br i1 %cmp, label %cond.true, label %cond.false
@@ -34,7 +34,7 @@ cond.false:
 define i32 @f() nounwind {
 ; CHECK: @f
 ; CHECK-NEXT: ret i32 0
-%1 = call i32 @llvm.objectsize.i32(i8* getelementptr ([60 x i8]* @a, i32 1, i32 0), i1 false, i32 0)
+%1 = call i32 @llvm.objectsize.i32(i8* getelementptr ([60 x i8]* @a, i32 1, i32 0), i1 false)
 ret i32 %1
 }
 
@@ -42,8 +42,8 @@ define i32 @f() nounwind {
 
 define i1 @baz() nounwind {
 ; CHECK: @baz
-; CHECK-NEXT: objectsize
-%1 = tail call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 0), i1 false, i32 0)
+; CHECK-NEXT: ret i1 true
+%1 = tail call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 0), i1 false)
 %2 = icmp eq i32 %1, -1
 ret i1 %2
 }
@@ -52,7 +52,7 @@ define void @test1(i8* %q, i32 %x) nounwind noinline {
 ; CHECK: @test1
 ; CHECK: objectsize.i32
 entry:
-%0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 10), i1 false, i32 0) ; <i64> [#uses=1]
+%0 = call i32 @llvm.objectsize.i32(i8* getelementptr inbounds ([0 x i8]* @window, i32 0, i32 10), i1 false) ; <i64> [#uses=1]
 %1 = icmp eq i32 %0, -1 ; <i1> [#uses=1]
 br i1 %1, label %"47", label %"46"
 
@@ -68,7 +68,7 @@ entry:
 define i32 @test2() nounwind {
 ; CHECK: @test2
 ; CHECK-NEXT: ret i32 34
-%1 = call i32 @llvm.objectsize.i32(i8* getelementptr (i8* bitcast ([9 x i32]* @.str5 to i8*), i32 2), i1 false, i32 0)
+%1 = call i32 @llvm.objectsize.i32(i8* getelementptr (i8* bitcast ([9 x i32]* @.str5 to i8*), i32 2), i1 false)
 ret i32 %1
 }
 
@@ -77,7 +77,7 @@ define i32 @test2() nounwind {
 
 declare i8* @__memcpy_chk(i8*, i8*, i32, i32) nounwind
 
-declare i32 @llvm.objectsize.i32(i8*, i1, i32) nounwind readonly
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
 
 declare i8* @__inline_memcpy_chk(i8*, i8*, i32) nounwind inlinehint
 
@@ -89,7 +89,7 @@ entry:
 bb11:
 %0 = getelementptr inbounds float* getelementptr inbounds ([480 x float]* @array, i32 0, i32 128), i32 -127 ; <float*> [#uses=1]
 %1 = bitcast float* %0 to i8* ; <i8*> [#uses=1]
-%2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false, i32 0) ; <i32> [#uses=1]
+%2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false) ; <i32> [#uses=1]
 %3 = call i8* @__memcpy_chk(i8* undef, i8* undef, i32 512, i32 %2) nounwind ; <i8*> [#uses=0]
 ; CHECK: unreachable
 unreachable
@@ -111,7 +111,7 @@ define i32 @test4() nounwind ssp {
 entry:
 %0 = alloca %struct.data, align 8
 %1 = bitcast %struct.data* %0 to i8*
-%2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false, i32 0) nounwind
+%2 = call i32 @llvm.objectsize.i32(i8* %1, i1 false) nounwind
 ; CHECK-NOT: @llvm.objectsize
 ; CHECK: @llvm.memset.p0i8.i32(i8* %1, i8 0, i32 1824, i32 8, i1 false)
 %3 = call i8* @__memset_chk(i8* %1, i32 0, i32 1824, i32 %2) nounwind
@@ -125,7 +125,7 @@ define void @test5(i32 %n) nounwind ssp {
 ; CHECK: @test5
 entry:
 %0 = tail call noalias i8* @malloc(i32 20) nounwind
-%1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false, i32 0)
+%1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false)
 %2 = load i8** @s, align 8
 ; CHECK-NOT: @llvm.objectsize
 ; CHECK: @llvm.memcpy.p0i8.p0i8.i32(i8* %0, i8* %1, i32 10, i32 1, i1 false)
@@ -137,7 +137,7 @@ define void @test6(i32 %n) nounwind ssp {
 ; CHECK: @test6
 entry:
 %0 = tail call noalias i8* @malloc(i32 20) nounwind
-%1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false, i32 0)
+%1 = tail call i32 @llvm.objectsize.i32(i8* %0, i1 false)
 %2 = load i8** @s, align 8
 ; CHECK-NOT: @llvm.objectsize
 ; CHECK: @__memcpy_chk(i8* %0, i8* %1, i32 30, i32 20)
@@ -153,7 +153,7 @@ define i32 @test7() {
 ; CHECK: @test7
 %alloc = call noalias i8* @malloc(i32 48) nounwind
 %gep = getelementptr inbounds i8* %alloc, i32 16
-%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false, i32 0) nounwind readonly
+%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false) nounwind readonly
 ; CHECK-NEXT: ret i32 32
 ret i32 %objsize
 }
@@ -164,87 +164,7 @@ define i32 @test8() {
 ; CHECK: @test8
 %alloc = call noalias i8* @calloc(i32 5, i32 7) nounwind
 %gep = getelementptr inbounds i8* %alloc, i32 5
-%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false, i32 0) nounwind readonly
+%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false) nounwind readonly
 ; CHECK-NEXT: ret i32 30
 ret i32 %objsize
 }
-
-; CHECK: @test9
-define i32 @test9(i32 %x, i32 %y) nounwind {
-%a = alloca [3 x [4 x double]], align 8
-%1 = getelementptr inbounds [3 x [4 x double]]* %a, i32 0, i32 %x
-%2 = getelementptr inbounds [4 x double]* %1, i32 0, i32 %y
-%3 = bitcast double* %2 to i8*
-%objsize = call i32 @llvm.objectsize.i32(i8* %3, i1 false, i32 2)
-ret i32 %objsize
-; CHECK-NEXT: shl i32 %x, 5
-; CHECK-NEXT: shl i32 %y, 3
-; CHECK-NEXT: add i32
-; CHECK-NEXT: sub i32 96,
-; CHECK-NEXT: icmp ugt i32 {{.*}}, 96
-; CHECK-NEXT: select i1 {{.*}}, i32 0,
-}
-
-; CHECK: @test10
-define i32 @test10(i32 %x, i32 %y) nounwind {
-%alloc = call noalias i8* @calloc(i32 %x, i32 %y) nounwind
-%gep = getelementptr inbounds i8* %alloc, i32 5
-%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false, i32 2)
-ret i32 %objsize
-; CHECK-NEXT: mul i32
-; CHECK-NEXT: add i32 {{.*}}, -5
-; CHECK-NEXT: icmp ult i32 {{.*}}, 5
-; CHECK-NEXT: select i1
-; CHECK-NEXT: ret
-}
-
-; CHECK: @test11
-define i32 @test11(i32 %x, i32 %y) nounwind {
-%alloc = call i8* @malloc(i32 %x)
-%allocd = bitcast i8* %alloc to double*
-%gep = getelementptr double* %allocd, i32 %y
-%gepi8 = bitcast double* %gep to i8*
-%objsize = call i32 @llvm.objectsize.i32(i8* %gepi8, i1 false, i32 2)
-ret i32 %objsize
-; CHECK-NEXT: shl i32
-; CHECK-NEXT: sub i32
-; CHECK-NEXT: icmp ugt i32
-; CHECK-NEXT: select i1
-; CHECK-NEXT: ret
-}
-
-; CHECK: @test12
-define i32 @test12(i32 %x) nounwind {
-%alloc = alloca i32, i32 %x, align 16
-%gep = getelementptr i32* %alloc, i32 7
-%gepi8 = bitcast i32* %gep to i8*
-%objsize = call i32 @llvm.objectsize.i32(i8* %gepi8, i1 false, i32 2)
-ret i32 %objsize
-; CHECK-NEXT: shl i32
-; CHECK-NEXT: add i32 {{.*}}, -28
-; CHECK-NEXT: icmp ult i32 {{.*}}, 28
-; CHECK-NEXT: select i1
-; CHECK-NEXT: ret
-}
-
-; CHECK: @test13
-define i32 @test13(i32 %x, i32 %y) nounwind {
-%alloc = call i8* @calloc(i32 %x, i32 %y)
-%alloc2 = call i8* @malloc(i32 %x)
-%objsize = call i32 @llvm.objectsize.i32(i8* %alloc, i1 false, i32 1)
-%objsize2 = call i32 @llvm.objectsize.i32(i8* %alloc2, i1 false, i32 1)
-%add = add i32 %objsize, %objsize2
-ret i32 %add
-; CHECK: objectsize
-; CHECK: objectsize
-; CHECK: add
-}
-
-; CHECK: @overflow
-define i32 @overflow() {
-%alloc = call noalias i8* @malloc(i32 21) nounwind
-%gep = getelementptr inbounds i8* %alloc, i32 50
-%objsize = call i32 @llvm.objectsize.i32(i8* %gep, i1 false, i32 0) nounwind readonly
-; CHECK-NEXT: ret i32 0
-ret i32 %objsize
-}
@@ -11,7 +11,7 @@ declare i8* @stpcpy(i8*, i8*)
 
 declare i8* @__stpcpy_chk(i8*, i8*, i32) nounwind
 
-declare i32 @llvm.objectsize.i32(i8*, i1, i32) nounwind readonly
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
 
 define i32 @t1() {
 ; CHECK: @t1
@@ -28,7 +28,7 @@ define i32 @t2() {
 %target = alloca [1024 x i8]
 %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
 %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-%tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false, i32 0)
+%tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false)
 %rslt1 = call i8* @__stpcpy_chk(i8* %arg1, i8* %arg2, i32 %tmp1)
 ; CHECK: @__memcpy_chk
 ret i32 0
@@ -11,7 +11,7 @@ declare i8* @strcpy(i8*, i8*)
 
 declare i8* @__strcpy_chk(i8*, i8*, i32) nounwind
 
-declare i32 @llvm.objectsize.i32(i8*, i1, i32) nounwind readonly
+declare i32 @llvm.objectsize.i32(i8*, i1) nounwind readonly
 
 ; rdar://6839935
 
@@ -30,7 +30,7 @@ define i32 @t2() {
 %target = alloca [1024 x i8]
 %arg1 = getelementptr [1024 x i8]* %target, i32 0, i32 0
 %arg2 = getelementptr [6 x i8]* @hello, i32 0, i32 0
-%tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false, i32 0)
+%tmp1 = call i32 @llvm.objectsize.i32(i8* %arg1, i1 false)
 %rslt1 = call i8* @__strcpy_chk(i8* %arg1, i8* %arg2, i32 %tmp1)
 ; CHECK: @__memcpy_chk
 ret i32 0