Bug 1582741 - Balance the frees to the allocations in native allocation tracking; r=njn,gerald

This patch creates a HashSet that tracks the allocations that have been sampled
by the profiler. This way, we only collect markers for deallocations that have a
matching allocation. A follow-up commit makes it so that all of the markers are
collected on the main thread, but for now this is still done on a per-thread basis.
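
In rough outline, the idea is the following (a hypothetical standalone sketch;
the patch itself uses a mozilla::HashSet behind a custom mutex, as the diff
below shows):

#include <cstdint>
#include <unordered_set>

// Hypothetical stand-ins for the patch's AllocationTracker and marker APIs.
static std::unordered_set<const void*> gTrackedAllocations;

void OnSampledAllocation(const void* aPtr) {
  // Only called when an allocation marker was actually recorded.
  gTrackedAllocations.insert(aPtr);
}

void OnFree(const void* aPtr, int64_t aSize) {
  // A deallocation marker is emitted only for a tracked pointer, so every
  // negative-size marker balances an earlier positive-size marker.
  if (gTrackedAllocations.erase(aPtr) > 0) {
    // record a marker with -aSize here
  }
}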

Differential Revision: https://phabricator.services.mozilla.com/D51935

--HG--
extra : moz-landing-system : lando
Greg Tatum 2019-11-13 16:17:38 +00:00
parent 2e113971f3
commit 6d997950d6
3 changed files with 176 additions and 8 deletions

View File

@@ -14,6 +14,7 @@
#include "mozilla/IntegerPrintfMacros.h"
#include "mozilla/JSONWriter.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/PlatformMutex.h"
#include "mozilla/ProfilerCounts.h"
#include "mozilla/ThreadLocal.h"
@@ -94,6 +95,155 @@ static void EnsureBernoulliIsInstalled() {
}
}
// This class provides infallible allocations (they abort on OOM) like
// mozalloc's InfallibleAllocPolicy, except that memory hooks are bypassed. This
// policy is used by the HashSet.
class InfallibleAllocWithoutHooksPolicy {
static void ExitOnFailure(const void* aP) {
if (!aP) {
MOZ_CRASH("Profiler memory hooks out of memory; aborting");
}
}
public:
template <typename T>
static T* maybe_pod_malloc(size_t aNumElems) {
if (aNumElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
return nullptr;
}
return (T*)gMallocTable.malloc(aNumElems * sizeof(T));
}
template <typename T>
static T* maybe_pod_calloc(size_t aNumElems) {
return (T*)gMallocTable.calloc(aNumElems, sizeof(T));
}
template <typename T>
static T* maybe_pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
if (aNewSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value) {
return nullptr;
}
return (T*)gMallocTable.realloc(aPtr, aNewSize * sizeof(T));
}
template <typename T>
static T* pod_malloc(size_t aNumElems) {
T* p = maybe_pod_malloc<T>(aNumElems);
ExitOnFailure(p);
return p;
}
template <typename T>
static T* pod_calloc(size_t aNumElems) {
T* p = maybe_pod_calloc<T>(aNumElems);
ExitOnFailure(p);
return p;
}
template <typename T>
static T* pod_realloc(T* aPtr, size_t aOldSize, size_t aNewSize) {
T* p = maybe_pod_realloc(aPtr, aOldSize, aNewSize);
ExitOnFailure(p);
return p;
}
template <typename T>
static void free_(T* aPtr, size_t aSize = 0) {
gMallocTable.free(aPtr);
}
static void reportAllocOverflow() { ExitOnFailure(nullptr); }
bool checkSimulatedOOM() const { return true; }
};
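A note on the overflow guard used by maybe_pod_malloc and maybe_pod_realloc
above: for a power-of-two element size 2^k, mozilla::tl::MulOverflowMask sets
the top k bits of size_t, so any element count that would make the
multiplication wrap is rejected up front. A standalone illustration (the
hand-rolled constant below assumes 8-byte elements on a 64-bit platform and is
only illustrative):

#include <cstddef>
#include <cstdint>

// For 8-byte elements, any count with one of the top three bits set would
// overflow count * 8; this mirrors what MulOverflowMask<8>::value evaluates to.
constexpr size_t kOverflowMaskFor8ByteElems = ~(SIZE_MAX >> 3);

constexpr bool WouldOverflow(size_t aNumElems) {
  return (aNumElems & kOverflowMaskFor8ByteElems) != 0;
}

static_assert(!WouldOverflow(1024), "small counts pass");
static_assert(WouldOverflow(SIZE_MAX / 4), "huge counts are rejected");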
// We can't use mozilla::Mutex because it causes re-entry into the memory hooks.
// Define a custom implementation here.
class Mutex : private ::mozilla::detail::MutexImpl {
public:
Mutex()
: ::mozilla::detail::MutexImpl(
::mozilla::recordreplay::Behavior::DontPreserve) {}
void Lock() { ::mozilla::detail::MutexImpl::lock(); }
void Unlock() { ::mozilla::detail::MutexImpl::unlock(); }
};
class MutexAutoLock {
MutexAutoLock(const MutexAutoLock&) = delete;
void operator=(const MutexAutoLock&) = delete;
Mutex& mMutex;
public:
explicit MutexAutoLock(Mutex& aMutex) : mMutex(aMutex) { mMutex.Lock(); }
~MutexAutoLock() { mMutex.Unlock(); }
};
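For reference, the RAII usage pattern these two classes provide looks like this
(the mutex and function below are hypothetical examples, not part of the patch):

static Mutex gExampleMutex;

void TouchSharedState() {
  MutexAutoLock lock(gExampleMutex);  // Lock() is called here
  // ... touch state shared between the hooked threads ...
}  // ~MutexAutoLock() unlocks, even on early return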
//---------------------------------------------------------------------------
// Tracked allocations
//---------------------------------------------------------------------------
// The allocation tracker is shared between multiple threads and coordinates
// the knowledge of which allocations have been tracked. Its mutable internal
// state is protected by a mutex and is only accessed through its methods.
//
// The tracker knows about all the allocations that we have added to the
// profiler. This way, whenever any given piece of memory is freed, we can see
// if it was previously tracked, and we can track its deallocation.
class AllocationTracker {
// This type tracks all of the allocations that we have captured, so that we
// can check whether a given deallocation matches a previously tracked
// allocation. The goal is to provide a balanced view of allocations and
// deallocations.
typedef mozilla::HashSet<const void*, mozilla::DefaultHasher<const void*>,
InfallibleAllocWithoutHooksPolicy>
AllocationSet;
public:
AllocationTracker() : mAllocations(), mMutex() {}
void AddMemoryAddress(const void* memoryAddress) {
MutexAutoLock lock(mMutex);
if (!mAllocations.put(memoryAddress)) {
MOZ_CRASH("Out of memory while tracking native allocations.");
}
}
void Reset() {
MutexAutoLock lock(mMutex);
mAllocations.clearAndCompact();
}
// Returns true when the memory address is found and removed; returns false
// when the memory address was not being tracked.
bool RemoveMemoryAddressIfFound(const void* memoryAddress) {
MutexAutoLock lock(mMutex);
auto ptr = mAllocations.lookup(memoryAddress);
if (ptr) {
// The memory was present. It no longer needs to be tracked.
mAllocations.remove(ptr);
return true;
}
return false;
}
private:
AllocationSet mAllocations;
Mutex mMutex;
};
static AllocationTracker* gAllocationTracker;
static void EnsureAllocationTrackerIsInstalled() {
if (!gAllocationTracker) {
// This is only installed once.
gAllocationTracker = new AllocationTracker();
}
}
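Taken together, these pieces guarantee that a deallocation marker can only ever
be recorded for a pointer whose allocation marker was recorded first. A sketch
of the intended call sequence (aPtr and aSize are placeholders):

// Allocation side (see AllocCallback below): track the address only after
// the allocation marker has been accepted by the profiler.
gAllocationTracker->AddMemoryAddress(aPtr);

// Deallocation side (see FreeCallback below): an untracked pointer returns
// false, so no unbalanced deallocation marker is emitted.
if (gAllocationTracker->RemoveMemoryAddressIfFound(aPtr)) {
  profiler_add_native_allocation_marker(-static_cast<int64_t>(aSize));
}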
//---------------------------------------------------------------------------
// Per-thread blocking of intercepts
//---------------------------------------------------------------------------
@@ -228,8 +378,17 @@ static void AllocCallback(void* aPtr, size_t aReqSize) {
// larger allocations are weighted heavier than smaller allocations.
MOZ_ASSERT(gBernoulli,
"gBernoulli must be properly installed for the memory hooks.");
if (gBernoulli->trial(actualSize)) {
profiler_add_native_allocation_marker((int64_t)actualSize);
if (
// First perform the Bernoulli trial.
gBernoulli->trial(actualSize) &&
// Second, attempt to add a marker if the Bernoulli trial passed.
profiler_add_native_allocation_marker(static_cast<int64_t>(actualSize))) {
MOZ_ASSERT(gAllocationTracker,
"gAllocationTracker must be properly installed for the memory "
"hooks.");
// Only track the memory if the allocation marker was actually added to the
// profiler.
gAllocationTracker->AddMemoryAddress(aPtr);
}
// We're ignoring aReqSize here
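The size-weighted trial above makes larger allocations proportionally more
likely to be sampled. A rough standalone sketch of the underlying idea
(mozilla's FastBernoulliTrial implements this far more efficiently with
precomputed skip counts; the per-byte probability here is an assumed
parameter):

#include <cmath>
#include <cstddef>
#include <cstdlib>

// Treat each byte as an independent trial with probability p: an allocation
// of aSize bytes is then sampled with probability 1 - (1 - p)^aSize.
bool SampleBySize(size_t aSize, double aPerByteProbability) {
  double pSkipAll = std::pow(1.0 - aPerByteProbability, double(aSize));
  return (double(std::rand()) / double(RAND_MAX)) >= pSkipAll;
}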
@@ -260,9 +419,11 @@ static void FreeCallback(void* aPtr) {
// Perform a bernoulli trial, which will return true or false based on its
// configured probability. It takes into account the byte size so that
// larger allocations are weighted heavier than smaller allocations.
MOZ_ASSERT(gBernoulli,
"gBernoulli must be properly installed for the memory hooks.");
if (gBernoulli->trial(unsignedSize)) {
MOZ_ASSERT(
gAllocationTracker,
"gAllocationTracker must be properly installed for the memory hooks.");
if (gAllocationTracker->RemoveMemoryAddressIfFound(aPtr)) {
// This size here is negative, indicating a deallocation.
profiler_add_native_allocation_marker(signedSize);
}
}
@@ -399,7 +560,7 @@ void install_memory_hooks() {
void remove_memory_hooks() { jemalloc_replace_dynamic(nullptr); }
void enable_native_allocations() {
// The bloat log tracks allocations and de-allocations. This can conflict
// The bloat log tracks allocations and deallocations. This can conflict
// with the memory hook machinery, as the bloat log creates its own
// allocations. This means we can re-enter inside the bloat log machinery. At
// this time, the bloat log does not know about and cannot handle the native
@@ -423,6 +584,7 @@ void enable_native_allocations() {
// ...
if (!PR_GetEnv("XPCOM_MEM_BLOAT_LOG")) {
EnsureBernoulliIsInstalled();
EnsureAllocationTrackerIsInstalled();
ThreadIntercept::EnableAllocationFeature();
}
}
@@ -430,6 +592,9 @@ void enable_native_allocations() {
// This is safe to call even if native allocations hasn't been enabled.
void disable_native_allocations() {
ThreadIntercept::DisableAllocationFeature();
if (gAllocationTracker) {
gAllocationTracker->Reset();
}
}
} // namespace profiler

View File

@@ -4643,7 +4643,7 @@ bool profiler_is_locked_on_current_thread() {
return gPSMutex.IsLockedOnCurrentThread();
}
void profiler_add_native_allocation_marker(const int64_t aSize) {
bool profiler_add_native_allocation_marker(const int64_t aSize) {
if (!profiler_can_accept_markers()) {
return false;
}

View File

@@ -775,7 +775,10 @@ void profiler_add_marker(const char* aMarkerName,
void profiler_add_js_marker(const char* aMarkerName);
void profiler_add_js_allocation_marker(JS::RecordAllocationInfo&& info);
void profiler_add_native_allocation_marker(int64_t aSize);
// Returns true or false depending on whether the marker was actually added
// or not.
bool profiler_add_native_allocation_marker(int64_t aSize);
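A caller-side sketch of the new contract (this mirrors how AllocCallback above
consumes the return value; aPtr and aSize are placeholders):

if (profiler_add_native_allocation_marker(static_cast<int64_t>(aSize))) {
  // The marker was accepted, so remember aPtr; its eventual free() can then
  // be matched and reported as a balanced deallocation.
}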
// Returns true if the profiler lock is currently held *on the current thread*.
// This may be used by re-entrant code that may call profiler functions while