diff --git a/mozglue/baseprofiler/core/platform.cpp b/mozglue/baseprofiler/core/platform.cpp
index e66137fbc95f..9f9b134733b7 100644
--- a/mozglue/baseprofiler/core/platform.cpp
+++ b/mozglue/baseprofiler/core/platform.cpp
@@ -3535,59 +3535,7 @@ double profiler_time() {
   return delta.ToMilliseconds();
 }
 
-static void locked_profiler_fill_backtrace(PSLockRef aLock,
-                                           RegisteredThread& aRegisteredThread,
-                                           ProfileBuffer& aProfileBuffer) {
-  Registers regs;
-#if defined(HAVE_NATIVE_UNWIND)
-  regs.SyncPopulate();
-#else
-  regs.Clear();
-#endif
-
-  DoSyncSample(aLock, aRegisteredThread, TimeStamp::NowUnfuzzed(), regs,
-               aProfileBuffer);
-}
-
-static UniqueProfilerBacktrace locked_profiler_get_backtrace(PSLockRef aLock) {
-  MOZ_RELEASE_ASSERT(CorePS::Exists());
-
-  if (!ActivePS::Exists(aLock)) {
-    return nullptr;
-  }
-
-  RegisteredThread* registeredThread =
-      TLSRegisteredThread::RegisteredThread(aLock);
-  if (!registeredThread) {
-    MOZ_ASSERT(registeredThread);
-    return nullptr;
-  }
-
-  auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
-      ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
-      MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
-  ProfileBuffer buffer(*bufferManager);
-
-  locked_profiler_fill_backtrace(aLock, *registeredThread, buffer);
-
-  return UniqueProfilerBacktrace(
-      new ProfilerBacktrace("SyncProfile", registeredThread->Info()->ThreadId(),
-                            std::move(bufferManager)));
-}
-
-UniqueProfilerBacktrace profiler_get_backtrace() {
-  MOZ_RELEASE_ASSERT(CorePS::Exists());
-
-  PSAutoLock lock;
-
-  return locked_profiler_get_backtrace(lock);
-}
-
-void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
-  delete aBacktrace;
-}
-
-bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
+bool profiler_capture_backtrace_into(ProfileChunkedBuffer& aChunkedBuffer) {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock;
@@ -3605,11 +3553,53 @@ bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
 
   ProfileBuffer profileBuffer(aChunkedBuffer);
 
-  locked_profiler_fill_backtrace(lock, *registeredThread, profileBuffer);
+  Registers regs;
+#if defined(HAVE_NATIVE_UNWIND)
+  regs.SyncPopulate();
+#else
+  regs.Clear();
+#endif
+
+  DoSyncSample(lock, *registeredThread, TimeStamp::NowUnfuzzed(), regs,
+               profileBuffer);
 
   return true;
 }
 
+UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
+  MOZ_RELEASE_ASSERT(CorePS::Exists());
+
+  // Quick is-active check before allocating a buffer.
+  if (!profiler_is_active()) {
+    return nullptr;
+  }
+
+  auto buffer = MakeUnique<ProfileChunkedBuffer>(
+      ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
+      MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
+
+  if (!profiler_capture_backtrace_into(*buffer)) {
+    return nullptr;
+  }
+
+  return buffer;
+}
+
+UniqueProfilerBacktrace profiler_get_backtrace() {
+  UniquePtr<ProfileChunkedBuffer> buffer = profiler_capture_backtrace();
+
+  if (!buffer) {
+    return nullptr;
+  }
+
+  return UniqueProfilerBacktrace(new ProfilerBacktrace(
+      "SyncProfile", profiler_current_thread_id(), std::move(buffer)));
+}
+
+void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
+  delete aBacktrace;
+}
+
 bool profiler_is_locked_on_current_thread() {
   // This function is used to help users avoid calling `profiler_...` functions
   // when the profiler may already have a lock in place, which would prevent a
diff --git a/mozglue/baseprofiler/public/BaseProfiler.h b/mozglue/baseprofiler/public/BaseProfiler.h
index d88fad539446..1a83f176ffb4 100644
--- a/mozglue/baseprofiler/public/BaseProfiler.h
+++ b/mozglue/baseprofiler/public/BaseProfiler.h
@@ -33,8 +33,8 @@
 
 // This file can be #included unconditionally. However, everything within this
 // file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the
-// following macros, which encapsulate the most common operations and thus
-// avoid the need for many #ifdefs.
+// following macros and functions, which encapsulate the most common operations
+// and thus avoid the need for many #ifdefs.
 
 #  define AUTO_BASE_PROFILER_INIT
 
@@ -66,20 +66,30 @@
 
 #  define AUTO_PROFILER_STATS(name)
 
+// Function stubs for when MOZ_GECKO_PROFILER is not defined.
+
 namespace mozilla {
 class ProfileChunkedBuffer;
 namespace baseprofiler {
 struct ProfilerBacktrace {};
 using UniqueProfilerBacktrace = UniquePtr<ProfilerBacktrace>;
+
+// Get/Capture-backtrace functions can return nullptr or false; the result
+// should be fed to another empty macro or stub anyway.
+
 static inline UniqueProfilerBacktrace profiler_get_backtrace() {
   return nullptr;
 }
-static inline bool profiler_capture_backtrace(
+static inline bool profiler_capture_backtrace_into(
     ProfileChunkedBuffer& aChunkedBuffer) {
   return false;
 }
+
+static inline UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
+  return nullptr;
+}
 }  // namespace baseprofiler
 }  // namespace mozilla
 
@@ -539,10 +549,22 @@ struct ProfilerBacktraceDestructor {
 using UniqueProfilerBacktrace =
     UniquePtr<ProfilerBacktrace, ProfilerBacktraceDestructor>;
 
-// Immediately capture the current thread's call stack and return it. A no-op
-// if the profiler is inactive.
+// Immediately capture the current thread's call stack and store it in the provided
+// buffer (usually to avoid allocations if you can construct the buffer on the
+// stack). Returns false if unsuccessful, or if the profiler is inactive.
+MFBT_API bool profiler_capture_backtrace_into(
+    ProfileChunkedBuffer& aChunkedBuffer);
+
+// Immediately capture the current thread's call stack, and return it in a
+// ProfileChunkedBuffer (usually for later use in MarkerStack::TakeBacktrace()).
+// May be null if unsuccessful, or if the profiler is inactive.
+MFBT_API UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace();
+
+// Immediately capture the current thread's call stack, and return it in a
+// ProfilerBacktrace (usually for later use in marker functions that take a
+// ProfilerBacktrace). May be null if unsuccessful, or if the profiler is
+// inactive.
 MFBT_API UniqueProfilerBacktrace profiler_get_backtrace();
-MFBT_API bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer);
 
 struct ProfilerStats {
   unsigned n = 0;
diff --git a/tools/profiler/core/platform.cpp b/tools/profiler/core/platform.cpp
index 10957c045cbb..37b751e21fd2 100644
--- a/tools/profiler/core/platform.cpp
+++ b/tools/profiler/core/platform.cpp
@@ -5279,65 +5279,7 @@ double profiler_time() {
   return delta.ToMilliseconds();
 }
 
-static void locked_profiler_fill_backtrace(PSLockRef aLock,
-                                           RegisteredThread& aRegisteredThread,
-                                           ProfileBuffer& aProfileBuffer) {
-  Registers regs;
-#if defined(HAVE_NATIVE_UNWIND)
-  regs.SyncPopulate();
-#else
-  regs.Clear();
-#endif
-
-  DoSyncSample(aLock, aRegisteredThread, TimeStamp::NowUnfuzzed(), regs,
-               aProfileBuffer);
-}
-
-static UniqueProfilerBacktrace locked_profiler_get_backtrace(PSLockRef aLock) {
-  if (!ActivePS::Exists(aLock)) {
-    return nullptr;
-  }
-
-  RegisteredThread* registeredThread =
-      TLSRegisteredThread::RegisteredThread(aLock);
-  if (!registeredThread) {
-    // If this was called from a non-registered thread, return a nullptr
-    // and do no more work. This can happen from a memory hook. Before
-    // the allocation tracking there was a MOZ_ASSERT() here checking
-    // for the existence of a registeredThread.
-    return nullptr;
-  }
-
-  auto bufferManager = MakeUnique<ProfileChunkedBuffer>(
-      ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
-      MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
-  ProfileBuffer buffer(*bufferManager);
-
-  locked_profiler_fill_backtrace(aLock, *registeredThread, buffer);
-
-  return UniqueProfilerBacktrace(
-      new ProfilerBacktrace("SyncProfile", registeredThread->Info()->ThreadId(),
-                            std::move(bufferManager)));
-}
-
-UniqueProfilerBacktrace profiler_get_backtrace() {
-  MOZ_RELEASE_ASSERT(CorePS::Exists());
-
-  // Fast racy early return.
-  if (!profiler_is_active()) {
-    return nullptr;
-  }
-
-  PSAutoLock lock(gPSMutex);
-
-  return locked_profiler_get_backtrace(lock);
-}
-
-void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
-  delete aBacktrace;
-}
-
-bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
+bool profiler_capture_backtrace_into(ProfileChunkedBuffer& aChunkedBuffer) {
   MOZ_RELEASE_ASSERT(CorePS::Exists());
 
   PSAutoLock lock(gPSMutex);
@@ -5349,17 +5291,62 @@ bool profiler_capture_backtrace(ProfileChunkedBuffer& aChunkedBuffer) {
   RegisteredThread* registeredThread =
       TLSRegisteredThread::RegisteredThread(lock);
   if (!registeredThread) {
-    MOZ_ASSERT(registeredThread);
+    // If this was called from a non-registered thread, return false and do no
+    // more work. This can happen from a memory hook. Before the allocation
+    // tracking there was a MOZ_ASSERT() here checking for the existence of a
+    // registeredThread.
     return false;
   }
 
   ProfileBuffer profileBuffer(aChunkedBuffer);
 
-  locked_profiler_fill_backtrace(lock, *registeredThread, profileBuffer);
+  Registers regs;
+#if defined(HAVE_NATIVE_UNWIND)
+  regs.SyncPopulate();
+#else
+  regs.Clear();
+#endif
+
+  DoSyncSample(lock, *registeredThread, TimeStamp::NowUnfuzzed(), regs,
+               profileBuffer);
 
   return true;
 }
 
+UniquePtr<ProfileChunkedBuffer> profiler_capture_backtrace() {
+  MOZ_RELEASE_ASSERT(CorePS::Exists());
+
+  // Quick is-active check before allocating a buffer.
+  if (!profiler_is_active()) {
+    return nullptr;
+  }
+
+  auto buffer = MakeUnique<ProfileChunkedBuffer>(
+      ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
+      MakeUnique<ProfileBufferChunkManagerSingle>(scExpectedMaximumStackSize));
+
+  if (!profiler_capture_backtrace_into(*buffer)) {
+    return nullptr;
+  }
+
+  return buffer;
+}
+
+UniqueProfilerBacktrace profiler_get_backtrace() {
+  UniquePtr<ProfileChunkedBuffer> buffer = profiler_capture_backtrace();
+
+  if (!buffer) {
+    return nullptr;
+  }
+
+  return UniqueProfilerBacktrace(new ProfilerBacktrace(
+      "SyncProfile", profiler_current_thread_id(), std::move(buffer)));
+}
+
+void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
+  delete aBacktrace;
+}
+
 static void racy_profiler_add_marker(const char* aMarkerName,
                                      JS::ProfilingCategoryPair aCategoryPair,
                                      const ProfilerMarkerPayload* aPayload) {
@@ -5532,21 +5519,20 @@ bool profiler_add_native_allocation_marker(int aMainThreadId, int64_t aSize,
   // locking the profiler mutex here could end up causing a deadlock if another
   // mutex is taken, which the profiler may indirectly need elsewhere.
   // See bug 1642726 for such a scenario.
-  // So instead we only try to lock, and bail out if the mutex is already
-  // locked. Native allocations are statistically sampled anyway, so missing a
-  // few because of this is acceptable.
-  PSAutoTryLock tryLock(gPSMutex);
-  if (!tryLock.IsLocked()) {
+  // So instead we bail out if the mutex is already locked. Native allocations
+  // are statistically sampled anyway, so missing a few because of this is
+  // acceptable.
+  if (gPSMutex.IsLockedOnCurrentThread()) {
     return false;
   }
 
   AUTO_PROFILER_STATS(add_marker_with_NativeAllocationMarkerPayload);
   maybelocked_profiler_add_marker_for_thread(
       aMainThreadId, JS::ProfilingCategoryPair::OTHER, "Native allocation",
-      NativeAllocationMarkerPayload(
-          TimeStamp::Now(), aSize, aMemoryAddress, profiler_current_thread_id(),
-          locked_profiler_get_backtrace(tryLock.LockRef())),
-      &tryLock.LockRef());
+      NativeAllocationMarkerPayload(TimeStamp::Now(), aSize, aMemoryAddress,
+                                    profiler_current_thread_id(),
+                                    profiler_get_backtrace()),
+      nullptr);
   return true;
 }
 
diff --git a/tools/profiler/public/GeckoProfiler.h b/tools/profiler/public/GeckoProfiler.h
index a27ed0e5f7f3..0b9d5ab7e656 100644
--- a/tools/profiler/public/GeckoProfiler.h
+++ b/tools/profiler/public/GeckoProfiler.h
@@ -31,8 +31,8 @@
 // This file can be #included unconditionally. However, everything within this
 // file must be guarded by a #ifdef MOZ_GECKO_PROFILER, *except* for the
-// following macros, which encapsulate the most common operations and thus
-// avoid the need for many #ifdefs.
+// following macros and functions, which encapsulate the most common operations
+// and thus avoid the need for many #ifdefs.
 
 #  define AUTO_PROFILER_INIT
 #  define AUTO_PROFILER_INIT2
 
@@ -82,8 +82,14 @@
 #  define AUTO_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE( \
       markerName, text, categoryPair, docShell, cause)
 
+// Function stubs for when MOZ_GECKO_PROFILER is not defined.
+
 struct ProfilerBacktrace {};
 using UniqueProfilerBacktrace = mozilla::UniquePtr<ProfilerBacktrace>;
+
+// Get/Capture-backtrace functions can return nullptr or false; the result
+// should be fed to another empty macro or stub anyway.
+
 static inline UniqueProfilerBacktrace profiler_get_backtrace() {
   return nullptr;
 }
@@ -91,10 +97,14 @@ static inline UniqueProfilerBacktrace profiler_get_backtrace() {
 namespace mozilla {
 class ProfileChunkedBuffer;
 }  // namespace mozilla
-static inline bool profiler_capture_backtrace(
+static inline bool profiler_capture_backtrace_into(
     mozilla::ProfileChunkedBuffer& aChunkedBuffer) {
   return false;
 }
+static inline mozilla::UniquePtr<mozilla::ProfileChunkedBuffer>
+profiler_capture_backtrace() {
+  return nullptr;
+}
 
 #else  // !MOZ_GECKO_PROFILER
 
@@ -688,10 +698,22 @@ struct ProfilerBacktraceDestructor {
 using UniqueProfilerBacktrace =
     mozilla::UniquePtr<ProfilerBacktrace, ProfilerBacktraceDestructor>;
 
-// Immediately capture the current thread's call stack and return it. A no-op
-// if the profiler is inactive.
+// Immediately capture the current thread's call stack and store it in the provided
+// buffer (usually to avoid allocations if you can construct the buffer on the
+// stack). Returns false if unsuccessful, or if the profiler is inactive.
+bool profiler_capture_backtrace_into(
+    mozilla::ProfileChunkedBuffer& aChunkedBuffer);
+
+// Immediately capture the current thread's call stack, and return it in a
+// ProfileChunkedBuffer (usually for later use in MarkerStack::TakeBacktrace()).
+// May be null if unsuccessful, or if the profiler is inactive.
+mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> profiler_capture_backtrace();
+
+// Immediately capture the current thread's call stack, and return it in a
+// ProfilerBacktrace (usually for later use in marker functions that take a
+// ProfilerBacktrace). May be null if unsuccessful, or if the profiler is
+// inactive.
 UniqueProfilerBacktrace profiler_get_backtrace();
-bool profiler_capture_backtrace(mozilla::ProfileChunkedBuffer& aChunkedBuffer);
 
 struct ProfilerStats {
   unsigned n = 0;
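
For reviewers, a sketch of how the reworked API is meant to be called, assuming a MOZ_GECKO_PROFILER build: the include paths and the kExpectedMaximumStackSize constant below are illustrative assumptions (the patch itself uses an internal scExpectedMaximumStackSize), and the caller-owned-buffer setup simply mirrors what profiler_capture_backtrace() does internally above.

```cpp
// Usage sketch only; not part of the patch.
#include "GeckoProfiler.h"
#include "mozilla/ProfileChunkedBuffer.h"             // assumed header path
#include "mozilla/ProfileBufferChunkManagerSingle.h"  // assumed header path

#include <cstddef>

// Hypothetical caller-chosen capacity; the patch uses the internal
// scExpectedMaximumStackSize constant for the same purpose.
static constexpr size_t kExpectedMaximumStackSize = 64 * 1024;

void ExampleCaptureUses() {
  // 1) One-call form: allocates its own buffer; returns nullptr when the
  //    profiler is inactive, the thread is unregistered, or sampling fails.
  if (mozilla::UniquePtr<mozilla::ProfileChunkedBuffer> stack =
          profiler_capture_backtrace()) {
    // `stack` holds the serialized sample, e.g. for a marker backtrace.
  }

  // 2) Caller-owned-buffer form: mirrors the buffer setup performed inside
  //    profiler_capture_backtrace(), but the caller controls the allocation;
  //    returns false instead of nullptr on failure.
  auto buffer = mozilla::MakeUnique<mozilla::ProfileChunkedBuffer>(
      mozilla::ProfileChunkedBuffer::ThreadSafety::WithoutMutex,
      mozilla::MakeUnique<mozilla::ProfileBufferChunkManagerSingle>(
          kExpectedMaximumStackSize));
  if (profiler_capture_backtrace_into(*buffer)) {
    // *buffer now holds one synchronous stack sample.
  }

  // 3) Wrapped form, for marker payloads that take a ProfilerBacktrace
  //    (e.g. NativeAllocationMarkerPayload in this patch).
  if (UniqueProfilerBacktrace backtrace = profiler_get_backtrace()) {
    // ... move the backtrace into a marker payload.
  }
}
```

The layering this patch establishes: profiler_get_backtrace() is a thin wrapper over profiler_capture_backtrace(), which delegates to profiler_capture_backtrace_into(), so all three entry points share a single synchronous sampling path.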