mirror of
https://github.com/RPCSX/llvm.git
synced 2025-04-18 08:00:02 +00:00
[asan] Don't run stack malloc on functions containing inline assembly.
It makes LLVM run out of registers even on 64-bit platforms. For example, the following test case fails on darwin. clang -cc1 -O0 -triple x86_64-apple-macosx10.10.0 -emit-obj -fsanitize=address -mstackrealign -o ~/tmp/ex.o -x c ex.c error: inline assembly requires more registers than available void TestInlineAssembly(const unsigned char *S, unsigned int pS, unsigned char *D, unsigned int pD, unsigned int h) { unsigned int sr = 4, pDiffD = pD - 5; unsigned int pDiffS = (pS << 1) - 5; char flagSA = ((pS & 15) == 0), flagDA = ((pD & 15) == 0); asm volatile ( "mov %0, %%"PTR_REG("si")"\n" "mov %2, %%"PTR_REG("cx")"\n" "mov %1, %%"PTR_REG("di")"\n" "mov %8, %%"PTR_REG("ax")"\n" : : "m" (S), "m" (D), "m" (pS), "m" (pDiffS), "m" (pDiffD), "m" (sr), "m" (flagSA), "m" (flagDA), "m" (h) : "%"PTR_REG("si"), "%"PTR_REG("di"), "%"PTR_REG("ax"), "%"PTR_REG("cx"), "%"PTR_REG("dx"), "memory" ); } http://reviews.llvm.org/D10719 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@240722 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in:
parent
a29c6d9ea6
commit
f291e057a4
@@ -1753,11 +1753,10 @@ void FunctionStackPoisoner::poisonStack() {
|
|||||||
uint64_t LocalStackSize = L.FrameSize;
|
uint64_t LocalStackSize = L.FrameSize;
|
||||||
bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
|
bool DoStackMalloc = ClUseAfterReturn && !ASan.CompileKernel &&
|
||||||
LocalStackSize <= kMaxStackMallocSize;
|
LocalStackSize <= kMaxStackMallocSize;
|
||||||
// Don't do dynamic alloca in presence of inline asm: too often it makes
|
// Don't do dynamic alloca or stack malloc in presence of inline asm:
|
||||||
// assumptions on which registers are available. Don't do stack malloc in the
|
// too often it makes assumptions on which registers are available.
|
||||||
// presence of inline asm on 32-bit platforms for the same reason.
|
|
||||||
bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
|
bool DoDynamicAlloca = ClDynamicAllocaStack && !HasNonEmptyInlineAsm;
|
||||||
DoStackMalloc &= !HasNonEmptyInlineAsm || ASan.LongSize != 32;
|
DoStackMalloc &= !HasNonEmptyInlineAsm;
|
||||||
|
|
||||||
Value *StaticAlloca =
|
Value *StaticAlloca =
|
||||||
DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
|
DoDynamicAlloca ? nullptr : createAllocaForLayout(IRB, L, false);
|
||||||
|
@@ -0,0 +1,56 @@
|
|||||||
|
; RUN: opt < %s -asan -S -o %t.ll
|
||||||
|
; RUN: FileCheck %s < %t.ll
|
||||||
|
|
||||||
|
; Don't do stack malloc on functions containing inline assembly on 64-bit
|
||||||
|
; platforms. It makes LLVM run out of registers.
|
||||||
|
|
||||||
|
; CHECK-LABEL: define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h)
|
||||||
|
; CHECK: %MyAlloca
|
||||||
|
; CHECK-NOT: call {{.*}} @__asan_stack_malloc
|
||||||
|
|
||||||
|
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
|
||||||
|
target triple = "x86_64-apple-macosx10.10.0"
|
||||||
|
|
||||||
|
define void @TestAbsenceOfStackMalloc(i8* %S, i32 %pS, i8* %D, i32 %pD, i32 %h) #0 {
|
||||||
|
entry:
|
||||||
|
%S.addr = alloca i8*, align 8
|
||||||
|
%pS.addr = alloca i32, align 4
|
||||||
|
%D.addr = alloca i8*, align 8
|
||||||
|
%pD.addr = alloca i32, align 4
|
||||||
|
%h.addr = alloca i32, align 4
|
||||||
|
%sr = alloca i32, align 4
|
||||||
|
%pDiffD = alloca i32, align 4
|
||||||
|
%pDiffS = alloca i32, align 4
|
||||||
|
%flagSA = alloca i8, align 1
|
||||||
|
%flagDA = alloca i8, align 1
|
||||||
|
store i8* %S, i8** %S.addr, align 8
|
||||||
|
store i32 %pS, i32* %pS.addr, align 4
|
||||||
|
store i8* %D, i8** %D.addr, align 8
|
||||||
|
store i32 %pD, i32* %pD.addr, align 4
|
||||||
|
store i32 %h, i32* %h.addr, align 4
|
||||||
|
store i32 4, i32* %sr, align 4
|
||||||
|
%0 = load i32, i32* %pD.addr, align 4
|
||||||
|
%sub = sub i32 %0, 5
|
||||||
|
store i32 %sub, i32* %pDiffD, align 4
|
||||||
|
%1 = load i32, i32* %pS.addr, align 4
|
||||||
|
%shl = shl i32 %1, 1
|
||||||
|
%sub1 = sub i32 %shl, 5
|
||||||
|
store i32 %sub1, i32* %pDiffS, align 4
|
||||||
|
%2 = load i32, i32* %pS.addr, align 4
|
||||||
|
%and = and i32 %2, 15
|
||||||
|
%cmp = icmp eq i32 %and, 0
|
||||||
|
%conv = zext i1 %cmp to i32
|
||||||
|
%conv2 = trunc i32 %conv to i8
|
||||||
|
store i8 %conv2, i8* %flagSA, align 1
|
||||||
|
%3 = load i32, i32* %pD.addr, align 4
|
||||||
|
%and3 = and i32 %3, 15
|
||||||
|
%cmp4 = icmp eq i32 %and3, 0
|
||||||
|
%conv5 = zext i1 %cmp4 to i32
|
||||||
|
%conv6 = trunc i32 %conv5 to i8
|
||||||
|
store i8 %conv6, i8* %flagDA, align 1
|
||||||
|
call void asm sideeffect "mov\09\09\09$0,\09\09\09\09\09\09\09\09\09\09%rsi\0Amov\09\09\09$2,\09\09\09\09\09\09\09\09\09\09%rcx\0Amov\09\09\09$1,\09\09\09\09\09\09\09\09\09\09%rdi\0Amov\09\09\09$8,\09\09\09\09\09\09\09\09\09\09%rax\0A", "*m,*m,*m,*m,*m,*m,*m,*m,*m,~{rsi},~{rdi},~{rax},~{rcx},~{rdx},~{memory},~{dirflag},~{fpsr},~{flags}"(i8** %S.addr, i8** %D.addr, i32* %pS.addr, i32* %pDiffS, i32* %pDiffD, i32* %sr, i8* %flagSA, i8* %flagDA, i32* %h.addr) #1
|
||||||
|
ret void
|
||||||
|
}
|
||||||
|
|
||||||
|
attributes #0 = { nounwind sanitize_address }
|
||||||
|
attributes #1 = { nounwind }
|
Loading…
x
Reference in New Issue
Block a user