Don't use the internal symbolizer if we are in the process of reporting an out-of-memory condition.

Reviewed by eugenis offline, as reviews.llvm.org is down.

llvm-svn: 282805
Vitaly Buka 2016-09-29 23:00:54 +00:00
parent 27e610f986
commit 0ec5a2830d
10 changed files with 60 additions and 31 deletions
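
At a glance: allocator failure paths are split into ReturnNullOrDieOnBadRequest() (impossible requests, e.g. calloc size overflow) and ReturnNullOrDieOnOOM() (memory is genuinely exhausted). The OOM path sets a process-wide flag before reporting, and the symbolizer consults that flag so it does not try to allocate while the report is being produced. A minimal standalone sketch of the mechanism (plain C++ with std::atomic standing in for the sanitizer atomics; all names here are illustrative, not the runtime's code):

    #include <atomic>
    #include <cstdio>

    static std::atomic<bool> reporting_oom{false};

    void ReportOutOfMemory() {
      // One-way flag; relaxed ordering suffices because it only gates
      // best-effort behavior and never synchronizes other data.
      reporting_oom.store(true, std::memory_order_relaxed);
      std::fprintf(stderr, "allocator is terminating the process\n");
    }

    bool CanUseInternalSymbolizer() {
      // While an OOM report is in flight, skip the allocation-heavy
      // internal symbolizer and fall back to lighter tools.
      return !reporting_oom.load(std::memory_order_relaxed);
    }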

lib/asan/asan_allocator.cc

@@ -390,7 +390,7 @@ struct Allocator {
    if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
      Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
             (void*)size);
-      return allocator.ReturnNullOrDie();
+      return allocator.ReturnNullOrDieOnBadRequest();
    }
    AsanThread *t = GetCurrentThread();
@@ -407,8 +407,7 @@ struct Allocator {
          allocator.Allocate(cache, needed_size, 8, false, check_rss_limit);
    }
-    if (!allocated)
-      return allocator.ReturnNullOrDie();
+    if (!allocated) return allocator.ReturnNullOrDieOnOOM();
    if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
      // Heap poisoning is enabled, but the allocator provides an unpoisoned
@@ -597,7 +596,7 @@ struct Allocator {
  void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
    if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-      return allocator.ReturnNullOrDie();
+      return allocator.ReturnNullOrDieOnBadRequest();
    void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
    // If the memory comes from the secondary allocator no need to clear it
    // as it comes directly from mmap.
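
For context, CallocShouldReturnNullDueToOverflow() guards the nmemb * size multiplication above. A hedged restatement of the check as a standalone program (the helper name and main() are illustrative, not the sanitizer's code):

    #include <cstdint>
    #include <cstdio>

    using std::uintptr_t;

    // n * size overflows exactly when n > max / size (integer division),
    // assuming size != 0; a zero size can never overflow.
    bool CallocOverflows(uintptr_t n, uintptr_t size) {
      if (size == 0) return false;
      const uintptr_t max = ~uintptr_t(0);
      return (max / size) < n;
    }

    int main() {
      // On 64-bit targets, (1 << 32) * (1 << 32) wraps around.
      std::printf("%d\n", int(CallocOverflows(uintptr_t(1) << 32,
                                              uintptr_t(1) << 32)));
      return 0;
    }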

lib/msan/msan_allocator.cc

@@ -121,7 +121,7 @@ static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
  if (size > kMaxAllowedMallocSize) {
    Report("WARNING: MemorySanitizer failed to allocate %p bytes\n",
           (void *)size);
-    return allocator.ReturnNullOrDie();
+    return allocator.ReturnNullOrDieOnBadRequest();
  }
  MsanThread *t = GetCurrentThread();
  void *allocated;
@@ -179,7 +179,7 @@ void MsanDeallocate(StackTrace *stack, void *p) {
void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
  if (CallocShouldReturnNullDueToOverflow(size, nmemb))
-    return allocator.ReturnNullOrDie();
+    return allocator.ReturnNullOrDieOnBadRequest();
  return MsanReallocate(stack, nullptr, nmemb * size, sizeof(u64), true);
}

lib/sanitizer_common/sanitizer_allocator.cc

@@ -13,7 +13,9 @@
//===----------------------------------------------------------------------===//

#include "sanitizer_allocator.h"
+
#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
#include "sanitizer_common.h"

namespace __sanitizer {
@@ -159,7 +161,7 @@ void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
  if (CallocShouldReturnNullDueToOverflow(count, size))
-    return internal_allocator()->ReturnNullOrDie();
+    return internal_allocator()->ReturnNullOrDieOnBadRequest();
  void *p = InternalAlloc(count * size, cache);
  if (p) internal_memset(p, 0, count * size);
  return p;
@@ -206,7 +208,12 @@ bool CallocShouldReturnNullDueToOverflow(uptr size, uptr n) {
  return (max / size) < n;
}

-void NORETURN ReportAllocatorCannotReturnNull() {
+static atomic_uint8_t reporting_out_of_memory = {0};
+
+bool IsReportingOOM() { return atomic_load_relaxed(&reporting_out_of_memory); }
+
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory) {
+  if (out_of_memory) atomic_store_relaxed(&reporting_out_of_memory, 1);
  Report("%s's allocator is terminating the process instead of returning 0\n",
         SanitizerToolName);
  Report("If you don't like this behavior set allocator_may_return_null=1\n");

lib/sanitizer_common/sanitizer_allocator.h

@@ -24,8 +24,13 @@
namespace __sanitizer {

+// Returns true if ReportAllocatorCannotReturnNull(true) was called.
+// Can be used to avoid memory-hungry operations.
+bool IsReportingOOM();
+
// Prints error message and kills the program.
-void NORETURN ReportAllocatorCannotReturnNull();
+void NORETURN ReportAllocatorCannotReturnNull(bool out_of_memory);

// Allocators call these callbacks on mmap/munmap.
struct NoOpMapUnmapCallback {
  void OnMap(uptr p, uptr size) const { }

lib/sanitizer_common/sanitizer_allocator_combined.h

@@ -46,10 +46,8 @@ class CombinedAllocator {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
-    if (size + alignment < size)
-      return ReturnNullOrDie();
-    if (check_rss_limit && RssLimitIsExceeded())
-      return ReturnNullOrDie();
+    if (size + alignment < size) return ReturnNullOrDieOnBadRequest();
+    if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM();
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    void *res;
@@ -69,10 +67,15 @@ class CombinedAllocator {
    return atomic_load(&may_return_null_, memory_order_acquire);
  }

-  void *ReturnNullOrDie() {
+  void *ReturnNullOrDieOnBadRequest() {
    if (MayReturnNull())
      return nullptr;
-    ReportAllocatorCannotReturnNull();
+    ReportAllocatorCannotReturnNull(false);
+  }
+
+  void *ReturnNullOrDieOnOOM() {
+    if (MayReturnNull()) return nullptr;
+    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {
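
The call-site rule throughout this patch: parameter-validation failures take ReturnNullOrDieOnBadRequest(), while genuine allocation failures (including the RSS limit) take ReturnNullOrDieOnOOM(), so only the latter arms the OOM flag. A hypothetical caller, just to make the split concrete (the stub allocator and MyAlloc are assumptions, not code from this commit):

    #include <cstddef>
    #include <cstdlib>

    // Minimal stand-in for the combined allocator; alignment is ignored
    // here, which the real allocator of course does not do.
    struct StubAllocator {
      void *ReturnNullOrDieOnBadRequest() { return nullptr; }  // or die
      void *ReturnNullOrDieOnOOM() { return nullptr; }  // arms OOM flag first
      void *Allocate(std::size_t size, std::size_t) { return std::malloc(size); }
    } allocator;

    void *MyAlloc(std::size_t size, std::size_t alignment) {
      if (size + alignment < size)        // overflow: an impossible request
        return allocator.ReturnNullOrDieOnBadRequest();
      void *p = allocator.Allocate(size, alignment);
      if (!p)                             // memory is genuinely exhausted
        return allocator.ReturnNullOrDieOnOOM();
      return p;
    }

    int main() { return MyAlloc(64, 8) ? 0 : 1; }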

lib/sanitizer_common/sanitizer_allocator_secondary.h

@@ -36,8 +36,7 @@ class LargeMmapAllocator {
    if (alignment > page_size_)
      map_size += alignment;
    // Overflow.
-    if (map_size < size)
-      return ReturnNullOrDie();
+    if (map_size < size) return ReturnNullOrDieOnBadRequest();
    uptr map_beg = reinterpret_cast<uptr>(
        MmapOrDie(map_size, "LargeMmapAllocator"));
    CHECK(IsAligned(map_beg, page_size_));
@@ -73,10 +72,18 @@ class LargeMmapAllocator {
    return reinterpret_cast<void*>(res);
  }

-  void *ReturnNullOrDie() {
-    if (atomic_load(&may_return_null_, memory_order_acquire))
-      return nullptr;
-    ReportAllocatorCannotReturnNull();
+  bool MayReturnNull() const {
+    return atomic_load(&may_return_null_, memory_order_acquire);
+  }
+
+  void *ReturnNullOrDieOnBadRequest() {
+    if (MayReturnNull()) return nullptr;
+    ReportAllocatorCannotReturnNull(false);
+  }
+
+  void *ReturnNullOrDieOnOOM() {
+    if (MayReturnNull()) return nullptr;
+    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool may_return_null) {

lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc

@@ -463,7 +463,9 @@ static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
    VReport(2, "Symbolizer is disabled.\n");
    return;
  }
-  if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+  if (IsReportingOOM()) {
+    VReport(2, "Cannot use internal symbolizer: out of memory\n");
+  } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
    VReport(2, "Using internal symbolizer.\n");
    list->push_back(tool);
    return;
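
After this change, ChooseSymbolizerTools() effectively selects in this order (a simplified outline; the branches after the internal symbolizer are untouched by this commit): (1) if symbolization is disabled, register no tools; (2) if IsReportingOOM(), skip the internal symbolizer, since initializing it would allocate in the middle of the OOM report; (3) otherwise, if InternalSymbolizer::get(allocator) succeeds, use it and return; (4) otherwise, fall through to the external symbolizer tools.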

lib/scudo/scudo_allocator.cpp

@@ -328,20 +328,20 @@ struct Allocator {
      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
    }
    if (Alignment > MaxAlignment)
-      return BackendAllocator.ReturnNullOrDie();
+      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    if (Alignment < MinAlignment)
      Alignment = MinAlignment;
    if (Size == 0)
      Size = 1;
    if (Size >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDie();
+      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
    uptr ExtraBytes = ChunkHeaderSize;
    if (Alignment > MinAlignment)
      ExtraBytes += Alignment;
    uptr NeededSize = RoundedSize + ExtraBytes;
    if (NeededSize >= MaxAllowedMallocSize)
-      return BackendAllocator.ReturnNullOrDie();
+      return BackendAllocator.ReturnNullOrDieOnBadRequest();

    void *Ptr;
    if (LIKELY(!ThreadTornDown)) {
@@ -352,7 +352,7 @@ struct Allocator {
                                       MinAlignment);
    }
    if (!Ptr)
-      return BackendAllocator.ReturnNullOrDie();
+      return BackendAllocator.ReturnNullOrDieOnOOM();

    // If requested, we will zero out the entire contents of the returned chunk.
    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
@@ -514,7 +514,7 @@ struct Allocator {
      initThread();
    uptr Total = NMemB * Size;
    if (Size != 0 && Total / Size != NMemB)  // Overflow check
-      return BackendAllocator.ReturnNullOrDie();
+      return BackendAllocator.ReturnNullOrDieOnBadRequest();
    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
    // If ZeroContents, the content of the chunk has already been zero'd out.
    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))

lib/scudo/scudo_allocator_secondary.h

@@ -51,10 +51,16 @@ class ScudoLargeMmapAllocator {
    return reinterpret_cast<void *>(Ptr);
  }

-  void *ReturnNullOrDie() {
+  void *ReturnNullOrDieOnBadRequest() {
    if (atomic_load(&MayReturnNull, memory_order_acquire))
      return nullptr;
-    ReportAllocatorCannotReturnNull();
+    ReportAllocatorCannotReturnNull(false);
+  }
+
+  void *ReturnNullOrDieOnOOM() {
+    if (atomic_load(&MayReturnNull, memory_order_acquire))
+      return nullptr;
+    ReportAllocatorCannotReturnNull(true);
  }

  void SetMayReturnNull(bool AllocatorMayReturnNull) {

lib/tsan/rtl/tsan_mman.cc

@@ -148,7 +148,7 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
  if ((sz >= (1ull << 40)) || (align >= (1ull << 40)))
-    return allocator()->ReturnNullOrDie();
+    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
  if (p == 0)
    return 0;
@@ -161,7 +161,7 @@ void *user_alloc(ThreadState *thr, uptr pc, uptr sz, uptr align, bool signal) {
void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
  if (CallocShouldReturnNullDueToOverflow(size, n))
-    return allocator()->ReturnNullOrDie();
+    return allocator()->ReturnNullOrDieOnBadRequest();
  void *p = user_alloc(thr, pc, n * size);
  if (p)
    internal_memset(p, 0, n * size);
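
As the report text above notes, allocator_may_return_null=1 turns all of these die paths into nullptr returns. One hypothetical way to observe the difference under ASan (run with ASAN_OPTIONS=allocator_may_return_null=1; this program is illustrative, not part of the commit):

    #include <cstddef>
    #include <cstdio>
    #include <cstdlib>

    int main() {
      // A bad request: no allocator can satisfy SIZE_MAX bytes. With
      // allocator_may_return_null=1 this returns nullptr; with the
      // default (0), the sanitizer terminates the process instead.
      void *p = std::malloc((std::size_t)-1);
      std::printf("malloc returned %p\n", p);
      std::free(p);
      return 0;
    }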