[asan] properly instrument memory accesses that have small alignment (smaller than min(8,size)) by making two checks instead of one. This may slow down some cases, e.g. long long on 32-bit or wide loads produced after loop unrolling. The benefit is higher sensitivity.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@209508 91177308-0d34-0410-b5e6-96231b3b80d8
Kostya Serebryany 2014-05-23 11:52:07 +00:00
parent 4f22c980f4
commit f2938bf8da
4 changed files with 44 additions and 18 deletions
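In effect, the pass now emits a single shadow check only when the access has one of the usual power-of-two sizes and its alignment guarantees it cannot straddle a shadow granule; otherwise it checks the first and the last byte of the access separately and reports via __asan_report_*_n. The following is a minimal standalone sketch of that decision, not code from this commit: the function name and the default granularity of 8 bytes (Mapping.Scale == 3) are assumptions for illustration.

#include <cstdint>
#include <cstdio>

// Sketch of the condition added to instrumentMop, assuming the default
// shadow granularity of 8 bytes. Alignment == 0 means the IR did not
// specify an alignment, so the natural/ABI alignment applies.
static bool SingleCheckSuffices(uint32_t TypeSizeBits, unsigned AlignmentBytes,
                                unsigned Granularity = 8) {
  bool UsualSize = TypeSizeBits == 8 || TypeSizeBits == 16 ||
                   TypeSizeBits == 32 || TypeSizeBits == 64 ||
                   TypeSizeBits == 128;
  bool AlignedEnough = AlignmentBytes == 0 || AlignmentBytes >= Granularity ||
                       AlignmentBytes >= TypeSizeBits / 8;
  return UsualSize && AlignedEnough;
}

int main() {
  // i32 load, align 4: alignment equals the access size, one check.
  printf("i32 align 4 -> %d check(s)\n", SingleCheckSuffices(32, 4) ? 1 : 2);
  // i64 store, align 4 (e.g. long long on 32-bit): may straddle a granule,
  // so the first and the last byte are checked separately.
  printf("i64 align 4 -> %d check(s)\n", SingleCheckSuffices(64, 4) ? 1 : 2);
  return 0;
}

This matches the new i64test_align1 test below, where an i64 store with align 1 is expected to produce two __asan_report_store_n checks instead of one.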

View File

@@ -623,26 +623,31 @@ void AddressSanitizer::instrumentMemIntrinsic(MemIntrinsic *MI) {
 }
 
 // If I is an interesting memory access, return the PointerOperand
-// and set IsWrite. Otherwise return NULL.
-static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite) {
+// and set IsWrite/Alignment. Otherwise return NULL.
+static Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
+                                        unsigned *Alignment) {
   if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
     if (!ClInstrumentReads) return nullptr;
     *IsWrite = false;
+    *Alignment = LI->getAlignment();
     return LI->getPointerOperand();
   }
   if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
     if (!ClInstrumentWrites) return nullptr;
     *IsWrite = true;
+    *Alignment = SI->getAlignment();
     return SI->getPointerOperand();
   }
   if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *Alignment = 0;
     return RMW->getPointerOperand();
   }
   if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
     if (!ClInstrumentAtomics) return nullptr;
     *IsWrite = true;
+    *Alignment = 0;
     return XCHG->getPointerOperand();
   }
   return nullptr;
@@ -692,7 +697,8 @@ AddressSanitizer::instrumentPointerComparisonOrSubtraction(Instruction *I) {
 void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
   bool IsWrite = false;
-  Value *Addr = isInterestingMemoryAccess(I, &IsWrite);
+  unsigned Alignment = 0;
+  Value *Addr = isInterestingMemoryAccess(I, &IsWrite, &Alignment);
   assert(Addr);
   if (ClOpt && ClOptGlobals) {
     if (GlobalVariable *G = dyn_cast<GlobalVariable>(Addr)) {
@@ -727,11 +733,14 @@ void AddressSanitizer::instrumentMop(Instruction *I, bool UseCalls) {
   else
     NumInstrumentedReads++;
 
-  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check.
-  if (TypeSize == 8  || TypeSize == 16 ||
-      TypeSize == 32 || TypeSize == 64 || TypeSize == 128)
+  unsigned Granularity = 1 << Mapping.Scale;
+  // Instrument a 1-, 2-, 4-, 8-, or 16- byte access with one check
+  // if the data is properly aligned.
+  if ((TypeSize == 8 || TypeSize == 16 || TypeSize == 32 || TypeSize == 64 ||
+       TypeSize == 128) &&
+      (Alignment >= Granularity || Alignment == 0 || Alignment >= TypeSize / 8))
     return instrumentAddress(I, I, Addr, TypeSize, IsWrite, nullptr, UseCalls);
-  // Instrument unusual size (but still multiple of 8).
+  // Instrument unusual size or unusual alignment.
   // We can not do it with a single check, so we do 1-byte check for the first
   // and the last bytes. We call __asan_report_*_n(addr, real_size) to be able
   // to report the actual access size.
@@ -1328,6 +1337,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
   SmallVector<Instruction*, 16> PointerComparisonsOrSubtracts;
   int NumAllocas = 0;
   bool IsWrite;
+  unsigned Alignment;
 
   // Fill the set of memory operations to instrument.
   for (Function::iterator FI = F.begin(), FE = F.end();
@@ -1338,7 +1348,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
          BI != BE; ++BI) {
       if (LooksLikeCodeInBug11395(BI)) return false;
-      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite)) {
+      if (Value *Addr = isInterestingMemoryAccess(BI, &IsWrite, &Alignment)) {
         if (ClOpt && ClOptSameTemp) {
           if (!TempsToInstrument.insert(Addr))
             continue;  // We've seen this temp in the current BB.
@@ -1390,7 +1400,7 @@ bool AddressSanitizer::runOnFunction(Function &F) {
     Instruction *Inst = ToInstrument[i];
     if (ClDebugMin < 0 || ClDebugMax < 0 ||
         (NumInstrumented >= ClDebugMin && NumInstrumented <= ClDebugMax)) {
-      if (isInterestingMemoryAccess(Inst, &IsWrite))
+      if (isInterestingMemoryAccess(Inst, &IsWrite, &Alignment))
         instrumentMop(Inst, UseCalls);
       else
         instrumentMemIntrinsic(cast<MemIntrinsic>(Inst));

View File

@@ -34,7 +34,7 @@ define i32 @test_load(i32* %a) sanitize_address {
 entry:
-  %tmp1 = load i32* %a
+  %tmp1 = load i32* %a, align 4
   ret i32 %tmp1
 }
@@ -66,7 +66,7 @@ define void @test_store(i32* %a) sanitize_address {
 ;
 entry:
-  store i32 42, i32* %a
+  store i32 42, i32* %a, align 4
   ret void
 }
@@ -115,6 +115,18 @@ define void @i40test(i40* %a, i40* %b) nounwind uwtable sanitize_address {
 ; CHECK: __asan_report_store_n{{.*}}, i64 5)
 ; CHECK: ret void
 
+define void @i64test_align1(i64* %b) nounwind uwtable sanitize_address {
+  entry:
+  store i64 0, i64* %b, align 1
+  ret void
+}
+; CHECK-LABEL: i64test_align1
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: __asan_report_store_n{{.*}}, i64 8)
+; CHECK: ret void
+
 define void @i80test(i80* %a, i80* %b) nounwind uwtable sanitize_address {
   entry:
   %t = load i80* %a

View File

@@ -20,10 +20,10 @@ entry:
 ; CHECK-CUSTOM-PREFIX: call void @__foo_load8
 ; CHECK-CUSTOM-PREFIX: call void @__foo_loadN
 ; CHECK-INLINE-NOT: call void @__asan_load
-  %tmp1 = load i32* %a
-  %tmp2 = load i64* %b
-  %tmp3 = load i512* %c
-  %tmp4 = load i80* %d
+  %tmp1 = load i32* %a, align 4
+  %tmp2 = load i64* %b, align 8
+  %tmp3 = load i512* %c, align 32
+  %tmp4 = load i80* %d, align 8
   ret void
 }

View File

@@ -6,7 +6,7 @@ entry:
   %tmp1 = load i32* %a, align 4
   ret i32 %tmp1
 }
-; CHECK: @read_4_bytes
+; CHECK-LABEL: @read_4_bytes
 ; CHECK-NOT: ret
 ; CHECK: lshr {{.*}} 3
 ; Check for ASAN's Offset for 64-bit (7fff8000)
@@ -19,8 +19,10 @@ entry:
   ret void
 }
-; CHECK: @example_atomicrmw
+; CHECK-LABEL: @example_atomicrmw
 ; CHECK: lshr {{.*}} 3
+; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
 ; CHECK: atomicrmw
 ; CHECK: ret
@@ -30,7 +32,9 @@ entry:
   ret void
 }
-; CHECK: @example_cmpxchg
+; CHECK-LABEL: @example_cmpxchg
 ; CHECK: lshr {{.*}} 3
+; CHECK: __asan_report_store8
+; CHECK-NOT: __asan_report
 ; CHECK: cmpxchg
 ; CHECK: ret