Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-24 05:11:16 +00:00)

Backed out 8 changesets (bug 1753192) for causing Gtest failures on GeckoProfiler.cpp. CLOSED TREE

Backed out changeset 2f0c24b1f049 (bug 1753192)
Backed out changeset 1a71d954b83f (bug 1753192)
Backed out changeset 07a8ffa8d12f (bug 1753192)
Backed out changeset f79cd543e537 (bug 1753192)
Backed out changeset c2f22d8a8fc8 (bug 1753192)
Backed out changeset c8a0d3c41d2f (bug 1753192)
Backed out changeset 1d5d69d92db9 (bug 1753192)
Backed out changeset 5bc8bd033453 (bug 1753192)

parent f65b9ea3f5
commit bb43a35448
@@ -183,16 +183,6 @@ void PrintToConsole(const char* aFmt, ...) {
  va_end(args);
}

ProfileChunkedBuffer& profiler_get_core_buffer() {
  // This needs its own mutex, because it is used concurrently from functions
  // guarded by gPSMutex as well as others without safety (e.g.,
  // profiler_add_marker). It is *not* used inside the critical section of the
  // sampler, because mutexes cannot be used there.
  static ProfileChunkedBuffer sProfileChunkedBuffer{
      ProfileChunkedBuffer::ThreadSafety::WithMutex};
  return sProfileChunkedBuffer;
}

Atomic<int, MemoryOrdering::Relaxed> gSkipSampling;

constexpr static bool ValidateFeatures() {
@@ -326,7 +316,12 @@ typedef const PSAutoLock& PSLockRef;
class CorePS {
 private:
  CorePS()
      : mProcessStartTime(TimeStamp::ProcessCreation())
      : mProcessStartTime(TimeStamp::ProcessCreation()),
        // This needs its own mutex, because it is used concurrently from
        // functions guarded by gPSMutex as well as others without safety (e.g.,
        // profiler_add_marker). It is *not* used inside the critical section of
        // the sampler, because mutexes cannot be used there.
        mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
#ifdef USE_LUL_STACKWALK
        ,
        mLul(nullptr)
@@ -385,6 +380,9 @@ class CorePS {
  // No PSLockRef is needed for this field because it's immutable.
  PS_GET_LOCKLESS(const TimeStamp&, ProcessStartTime)

  // No PSLockRef is needed for this field because it's thread-safe.
  PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)

  PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)

  static void AppendRegisteredThread(
@@ -490,6 +488,17 @@ class CorePS {
  // The time that the process started.
  const TimeStamp mProcessStartTime;

  // The thread-safe blocks-oriented buffer into which all profiling data is
  // recorded.
  // ActivePS controls the lifetime of the underlying contents buffer: When
  // ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
  // see ActivePS for further details.
  // Note: This needs to live here outside of ActivePS, because some producers
  // are indirectly controlled (e.g., by atomic flags) and therefore may still
  // attempt to write some data shortly after ActivePS has shutdown and deleted
  // the underlying buffer in memory.
  ProfileChunkedBuffer mCoreBuffer;

  // Info on all the registered threads.
  // ThreadIds in mRegisteredThreads are unique.
  Vector<UniquePtr<RegisteredThread>> mRegisteredThreads;
@@ -515,6 +524,11 @@ class CorePS {

CorePS* CorePS::sInstance = nullptr;

ProfileChunkedBuffer& profiler_get_core_buffer() {
  MOZ_ASSERT(CorePS::Exists());
  return CorePS::CoreBuffer();
}

class SamplerThread;

static SamplerThread* NewSamplerThread(PSLockRef aLock, uint32_t aGeneration,
@@ -612,14 +626,11 @@ class ActivePS {
        mInterval(aInterval),
        mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
        mProfileBufferChunkManager(
            MakeUnique<ProfileBufferChunkManagerWithLocalLimit>(
                size_t(ClampToAllowedEntries(aCapacity.Value())) *
                    scBytesPerEntry,
                ChunkSizeForEntries(aCapacity.Value()))),
            size_t(ClampToAllowedEntries(aCapacity.Value())) * scBytesPerEntry,
            ChunkSizeForEntries(aCapacity.Value())),
        mProfileBuffer([this]() -> ProfileChunkedBuffer& {
          ProfileChunkedBuffer& buffer = profiler_get_core_buffer();
          buffer.SetChunkManager(*mProfileBufferChunkManager);
          return buffer;
          CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
          return CorePS::CoreBuffer();
        }()),
        // The new sampler thread doesn't start sampling immediately because the
        // main loop within Run() is blocked until this function's caller
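The mProfileBuffer initializer in the hunk above uses an immediately-invoked lambda so that the shared core buffer can be given its chunk manager before the member reference is bound. A minimal standalone sketch of that idiom follows; every name in it is illustrative and not taken from this patch.

// Sketch of the immediately-invoked-lambda member-initialization idiom.
// All names here are illustrative, not from the patch above.
#include <iostream>

struct Buffer {
  void Configure(int aChunkSize) { mChunkSize = aChunkSize; }
  int mChunkSize = 0;
};

Buffer& GlobalBuffer() {
  // Stands in for the shared, long-lived core buffer.
  static Buffer sBuffer;
  return sBuffer;
}

class Session {
 public:
  Session()
      : mChunkSize(1024),
        // The lambda runs during member initialization, so it can configure
        // the shared buffer using already-initialized members and then hand
        // back a reference for this member to bind to.
        mBuffer([this]() -> Buffer& {
          Buffer& buffer = GlobalBuffer();
          buffer.Configure(mChunkSize);
          return buffer;
        }()) {}

 private:
  int mChunkSize;
  Buffer& mBuffer;
};

int main() {
  Session session;
  std::cout << GlobalBuffer().mChunkSize << '\n';  // prints 1024
}

Because members are initialized in declaration order, the lambda can safely read mChunkSize, which is declared (and therefore initialized) before mBuffer.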
@@ -639,12 +650,7 @@ class ActivePS {
    }
  }

  ~ActivePS() {
    if (mProfileBufferChunkManager) {
      // We still control the chunk manager, remove it from the core buffer.
      profiler_get_core_buffer().ResetChunkManager();
    }
  }
  ~ActivePS() { CorePS::CoreBuffer().ResetChunkManager(); }

  bool ThreadSelected(const char* aThreadName) {
    if (mFiltersLowered.empty()) {
@@ -730,12 +736,6 @@ class ActivePS {
    return n;
  }

  static UniquePtr<ProfileBufferChunkManagerWithLocalLimit>
  ExtractBaseProfilerChunkManager(PSLockRef) {
    MOZ_ASSERT(sInstance);
    return std::move(sInstance->mProfileBufferChunkManager);
  }

  static bool ShouldProfileThread(PSLockRef aLock, ThreadInfo* aInfo) {
    MOZ_ASSERT(sInstance);
    return sInstance->ThreadSelected(aInfo->Name());
@@ -766,9 +766,7 @@ class ActivePS {

  static void FulfillChunkRequests(PSLockRef) {
    MOZ_ASSERT(sInstance);
    if (sInstance->mProfileBufferChunkManager) {
      sInstance->mProfileBufferChunkManager->FulfillChunkRequests();
    }
    sInstance->mProfileBufferChunkManager.FulfillChunkRequests();
  }

  static ProfileBuffer& Buffer(PSLockRef) {
@@ -1018,8 +1016,7 @@ class ActivePS {
  Vector<std::string> mFiltersLowered;

  // The chunk manager used by `mProfileBuffer` below.
  // May become null if it gets transferred to the Gecko Profiler.
  UniquePtr<ProfileBufferChunkManagerWithLocalLimit> mProfileBufferChunkManager;
  ProfileBufferChunkManagerWithLocalLimit mProfileBufferChunkManager;

  // The buffer into which all samples are recorded.
  ProfileBuffer mProfileBuffer;
@@ -1063,19 +1060,6 @@ uint32_t ActivePS::sNextGeneration = 0;
#undef PS_GET_LOCKLESS
#undef PS_GET_AND_SET

namespace detail {

[[nodiscard]] MFBT_API UniquePtr<ProfileBufferChunkManagerWithLocalLimit>
ExtractBaseProfilerChunkManager() {
  PSAutoLock lock;
  if (MOZ_UNLIKELY(!ActivePS::Exists(lock))) {
    return nullptr;
  }
  return ActivePS::ExtractBaseProfilerChunkManager(lock);
}

} // namespace detail

Atomic<uint32_t, MemoryOrdering::Relaxed> RacyFeatures::sActiveAndFeatures(0);

/* static */
@@ -1191,26 +1175,6 @@ ProfilingStack* AutoProfilerLabel::GetProfilingStack() {
// constraints. TLSRegisteredThread is responsible for updating it.
MOZ_THREAD_LOCAL(ProfilingStack*) AutoProfilerLabel::sProfilingStack;

namespace detail {

[[nodiscard]] MFBT_API TimeStamp GetThreadRegistrationTime() {
  if (!CorePS::Exists()) {
    return {};
  }

  PSAutoLock lock;

  RegisteredThread* registeredThread =
      TLSRegisteredThread::RegisteredThread(lock);
  if (!registeredThread) {
    return {};
  }

  return registeredThread->Info()->RegisterTime();
}

} // namespace detail

// The name of the main thread.
static const char* const kMainThreadName = "GeckoMain";

@@ -2374,11 +2338,11 @@ void SamplerThread::Run() {
        LOG("Stack sample too big for local storage, needed %u bytes",
            unsigned(state.mRangeEnd - previousState.mRangeEnd));
      } else if (state.mRangeEnd - previousState.mRangeEnd >=
                 *profiler_get_core_buffer().BufferLength()) {
                 *CorePS::CoreBuffer().BufferLength()) {
        LOG("Stack sample too big for profiler storage, needed %u bytes",
            unsigned(state.mRangeEnd - previousState.mRangeEnd));
      } else {
        profiler_get_core_buffer().AppendContents(localBuffer);
        CorePS::CoreBuffer().AppendContents(localBuffer);
      }

      // Clean up for the next run.
@@ -3687,7 +3651,7 @@ bool profiler_is_locked_on_current_thread() {
  // - The buffer mutex, used directly in some functions without locking the
  // main mutex, e.g., marker-related functions.
  return PSAutoLock::IsLockedOnCurrentThread() ||
         profiler_get_core_buffer().IsThreadSafeAndLockedOnCurrentThread();
         CorePS::CoreBuffer().IsThreadSafeAndLockedOnCurrentThread();
}

// This is a simplified version of profiler_add_marker that can be easily passed
@@ -104,7 +104,6 @@ EXPORTS.mozilla += [
    "public/ProfileBufferEntrySerialization.h",
    "public/ProfileBufferIndex.h",
    "public/ProfileChunkedBuffer.h",
    "public/ProfileChunkedBufferDetail.h",
    "public/ProgressLogger.h",
    "public/ProportionValue.h",
]
@@ -15,26 +15,9 @@

#include "mozilla/BaseProfilerUtils.h"
#include "mozilla/Span.h"
#include "mozilla/TimeStamp.h"
#include "mozilla/Types.h"
#include "mozilla/UniquePtr.h"

namespace mozilla {

class ProfileBufferChunkManagerWithLocalLimit;

namespace baseprofiler::detail {

[[nodiscard]] MFBT_API UniquePtr<ProfileBufferChunkManagerWithLocalLimit>
ExtractBaseProfilerChunkManager();

// If the current thread is registered, returns its registration time, otherwise
// a null timestamp.
[[nodiscard]] MFBT_API TimeStamp GetThreadRegistrationTime();

} // namespace baseprofiler::detail

namespace profiler::detail {
namespace mozilla::profiler::detail {

// True if the filter is exactly "pid:<aPid>".
[[nodiscard]] MFBT_API bool FilterHasPid(
@@ -55,8 +38,6 @@ namespace profiler::detail {
    baseprofiler::BaseProfilerProcessId aPid =
        baseprofiler::profiler_current_process_id());

} // namespace profiler::detail

} // namespace mozilla
} // namespace mozilla::profiler::detail

#endif // BaseAndGeckoProfilerDetail_h
@@ -98,7 +98,7 @@ ProfileBufferBlockIndex AddMarker(
    return {};
  }
  return ::mozilla::baseprofiler::AddMarkerToBuffer(
      ::mozilla::baseprofiler::profiler_get_core_buffer(), aName, aCategory,
      base_profiler_markers_detail::CachedBaseCoreBuffer(), aName, aCategory,
      std::move(aOptions), aMarkerType, aPayloadArguments...);
#endif
}
@@ -31,6 +31,14 @@ MFBT_API ProfileChunkedBuffer& profiler_get_core_buffer();

namespace mozilla::base_profiler_markers_detail {

// Get the core buffer from the profiler, and cache it in a
// non-templated-function static reference.
inline ProfileChunkedBuffer& CachedBaseCoreBuffer() {
  static ProfileChunkedBuffer& coreBuffer =
      baseprofiler::profiler_get_core_buffer();
  return coreBuffer;
}

struct Streaming {
  // A `MarkerDataDeserializer` is a free function that can read a serialized
  // payload from an `EntryReader` and streams it as JSON object properties.
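CachedBaseCoreBuffer() above caches the result of profiler_get_core_buffer() in a function-local static reference, so the lookup runs only once and later calls return the cached reference. A minimal sketch of that caching pattern, with illustrative names that are not part of the patch:

// Sketch of caching an accessor's result in a function-local static
// reference. All names are illustrative, not from the patch above.
#include <cstdio>

struct CoreBuffer {
  int mValue = 42;
};

CoreBuffer& ExpensiveLookup() {
  // Stands in for a call that crosses a library boundary and is therefore
  // worth doing only once.
  static CoreBuffer sBuffer;
  std::puts("ExpensiveLookup called");
  return sBuffer;
}

inline CoreBuffer& CachedBuffer() {
  // The static reference is bound on first use only; later calls return it
  // without re-running ExpensiveLookup(). Because this function is not a
  // template, there is exactly one cached reference in the program.
  static CoreBuffer& sCached = ExpensiveLookup();
  return sCached;
}

int main() {
  std::printf("%d\n", CachedBuffer().mValue);  // lookup happens here
  std::printf("%d\n", CachedBuffer().mValue);  // cached reference reused
}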
@ -7,25 +7,406 @@
|
||||
#ifndef ProfileChunkedBuffer_h
|
||||
#define ProfileChunkedBuffer_h
|
||||
|
||||
#include "mozilla/Attributes.h"
|
||||
#include "mozilla/BaseProfilerDetail.h"
|
||||
#include "mozilla/NotNull.h"
|
||||
#include "mozilla/ProfileBufferChunkManager.h"
|
||||
#include "mozilla/ProfileBufferChunkManagerSingle.h"
|
||||
#include "mozilla/ProfileBufferEntrySerialization.h"
|
||||
#include "mozilla/ProfileChunkedBufferDetail.h"
|
||||
#include "mozilla/RefCounted.h"
|
||||
#include "mozilla/RefPtr.h"
|
||||
#include "mozilla/ScopeExit.h"
|
||||
#include "mozilla/Unused.h"
|
||||
|
||||
#include <cstdio>
|
||||
#include <utility>
|
||||
|
||||
#ifdef DEBUG
|
||||
# include <cstdio>
|
||||
#endif
|
||||
|
||||
namespace mozilla {
|
||||
|
||||
namespace detail {
|
||||
|
||||
// Internal accessor pointing at a position inside a chunk.
|
||||
// It can handle two groups of chunks (typically the extant chunks stored in
|
||||
// the store manager, and the current chunk).
|
||||
// The main operations are:
|
||||
// - ReadEntrySize() to read an entry size, 0 means failure.
|
||||
// - operator+=(Length) to skip a number of bytes.
|
||||
// - EntryReader() creates an entry reader at the current position for a given
|
||||
// size (it may fail with an empty reader), and skips the entry.
|
||||
// Note that there is no "past-the-end" position -- as soon as InChunkPointer
|
||||
// reaches the end, it becomes effectively null.
|
||||
class InChunkPointer {
|
||||
public:
|
||||
using Byte = ProfileBufferChunk::Byte;
|
||||
using Length = ProfileBufferChunk::Length;
|
||||
|
||||
// Nullptr-like InChunkPointer, may be used as end iterator.
|
||||
InChunkPointer()
|
||||
: mChunk(nullptr), mNextChunkGroup(nullptr), mOffsetInChunk(0) {}
|
||||
|
||||
// InChunkPointer over one or two chunk groups, pointing at the given
|
||||
// block index (if still in range).
|
||||
// This constructor should only be used with *trusted* block index values!
|
||||
InChunkPointer(const ProfileBufferChunk* aChunk,
|
||||
const ProfileBufferChunk* aNextChunkGroup,
|
||||
ProfileBufferBlockIndex aBlockIndex)
|
||||
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
|
||||
if (mChunk) {
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else if (mNextChunkGroup) {
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else {
|
||||
mOffsetInChunk = 0;
|
||||
}
|
||||
|
||||
// Try to advance to given position.
|
||||
if (!AdvanceToGlobalRangePosition(aBlockIndex)) {
|
||||
// Block does not exist anymore (or block doesn't look valid), reset the
|
||||
// in-chunk pointer.
|
||||
mChunk = nullptr;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// InChunkPointer over one or two chunk groups, will start at the first
|
||||
// block (if any). This may be slow, so avoid using it too much.
|
||||
InChunkPointer(const ProfileBufferChunk* aChunk,
|
||||
const ProfileBufferChunk* aNextChunkGroup,
|
||||
ProfileBufferIndex aIndex = ProfileBufferIndex(0))
|
||||
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
|
||||
if (mChunk) {
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else if (mNextChunkGroup) {
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else {
|
||||
mOffsetInChunk = 0;
|
||||
}
|
||||
|
||||
// Try to advance to given position.
|
||||
if (!AdvanceToGlobalRangePosition(aIndex)) {
|
||||
// Block does not exist anymore, reset the in-chunk pointer.
|
||||
mChunk = nullptr;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the current position in the global range.
|
||||
// 0 if null (including if we're reached the end).
|
||||
[[nodiscard]] ProfileBufferIndex GlobalRangePosition() const {
|
||||
if (IsNull()) {
|
||||
return 0;
|
||||
}
|
||||
return mChunk->RangeStart() + mOffsetInChunk;
|
||||
}
|
||||
|
||||
// Move InChunkPointer forward to the block at the given global block
|
||||
// position, which is assumed to be valid exactly -- but it may be obsolete.
|
||||
// 0 stays where it is (if valid already).
|
||||
// MOZ_ASSERTs if the index is invalid.
|
||||
[[nodiscard]] bool AdvanceToGlobalRangePosition(
|
||||
ProfileBufferBlockIndex aBlockIndex) {
|
||||
if (IsNull()) {
|
||||
// Pointer is null already. (Not asserting because it's acceptable.)
|
||||
return false;
|
||||
}
|
||||
if (!aBlockIndex) {
|
||||
// Special null position, just stay where we are.
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
|
||||
// Past the requested position, stay where we are (assuming the current
|
||||
// position was valid).
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
for (;;) {
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() <
|
||||
mChunk->RangeStart() + mChunk->OffsetPastLastBlock()) {
|
||||
// Target position is in this chunk's written space, move to it.
|
||||
mOffsetInChunk =
|
||||
aBlockIndex.ConvertToProfileBufferIndex() - mChunk->RangeStart();
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
// Position is after this chunk, try next chunk.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return false;
|
||||
}
|
||||
// Skip whatever block tail there is, we don't allow pointing in the
|
||||
// middle of a block.
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
|
||||
// Past the requested position, meaning that the given position was in-
|
||||
// between blocks -> Failure.
|
||||
MOZ_ASSERT(false, "AdvanceToGlobalRangePosition - In-between blocks");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move InChunkPointer forward to the block at or after the given global
|
||||
// range position.
|
||||
// 0 stays where it is (if valid already).
|
||||
[[nodiscard]] bool AdvanceToGlobalRangePosition(
|
||||
ProfileBufferIndex aPosition) {
|
||||
if (aPosition == 0) {
|
||||
// Special position '0', just stay where we are.
|
||||
// Success if this position is already valid.
|
||||
return !IsNull();
|
||||
}
|
||||
for (;;) {
|
||||
ProfileBufferIndex currentPosition = GlobalRangePosition();
|
||||
if (currentPosition == 0) {
|
||||
// Pointer is null.
|
||||
return false;
|
||||
}
|
||||
if (aPosition <= currentPosition) {
|
||||
// At or past the requested position, stay where we are.
|
||||
return true;
|
||||
}
|
||||
if (aPosition < mChunk->RangeStart() + mChunk->OffsetPastLastBlock()) {
|
||||
// Target position is in this chunk's written space, move to it.
|
||||
for (;;) {
|
||||
// Skip the current block.
|
||||
mOffsetInChunk += ReadEntrySize();
|
||||
if (mOffsetInChunk >= mChunk->OffsetPastLastBlock()) {
|
||||
// Reached the end of the chunk, this can happen for the last
|
||||
// block, let's just continue to the next chunk.
|
||||
break;
|
||||
}
|
||||
if (aPosition <= mChunk->RangeStart() + mOffsetInChunk) {
|
||||
// We're at or after the position, return at this block position.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Position is after this chunk, try next chunk.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return false;
|
||||
}
|
||||
// Skip whatever block tail there is, we don't allow pointing in the
|
||||
// middle of a block.
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
}
|
||||
}
|
||||
|
||||
[[nodiscard]] Byte ReadByte() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
Byte byte = mChunk->ByteAt(mOffsetInChunk);
|
||||
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
|
||||
Adjust();
|
||||
}
|
||||
return byte;
|
||||
}
|
||||
|
||||
// Read and skip a ULEB128-encoded size.
|
||||
// 0 means failure (0-byte entries are not allowed.)
|
||||
// Note that this doesn't guarantee that there are actually that many bytes
|
||||
// available to read! (EntryReader() below may gracefully fail.)
|
||||
[[nodiscard]] Length ReadEntrySize() {
|
||||
ULEB128Reader<Length> reader;
|
||||
if (IsNull()) {
|
||||
return 0;
|
||||
}
|
||||
for (;;) {
|
||||
const bool isComplete = reader.FeedByteIsComplete(ReadByte());
|
||||
if (MOZ_UNLIKELY(IsNull())) {
|
||||
// End of chunks, so there's no actual entry after this anyway.
|
||||
return 0;
|
||||
}
|
||||
if (MOZ_LIKELY(isComplete)) {
|
||||
if (MOZ_UNLIKELY(reader.Value() > mChunk->BufferBytes())) {
|
||||
// Don't allow entries larger than a chunk.
|
||||
return 0;
|
||||
}
|
||||
return reader.Value();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
InChunkPointer& operator+=(Length aLength) {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
mOffsetInChunk += aLength;
|
||||
Adjust();
|
||||
return *this;
|
||||
}
|
||||
|
||||
[[nodiscard]] ProfileBufferEntryReader EntryReader(Length aLength) {
|
||||
if (IsNull() || aLength == 0) {
|
||||
return ProfileBufferEntryReader();
|
||||
}
|
||||
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
|
||||
// We should be pointing at the entry, past the entry size.
|
||||
const ProfileBufferIndex entryIndex = GlobalRangePosition();
|
||||
// Verify that there's enough space before for the size (starting at index
|
||||
// 1 at least).
|
||||
MOZ_ASSERT(entryIndex >= 1u + ULEB128Size(aLength));
|
||||
|
||||
const Length remaining = mChunk->OffsetPastLastBlock() - mOffsetInChunk;
|
||||
Span<const Byte> mem0 = mChunk->BufferSpan();
|
||||
mem0 = mem0.From(mOffsetInChunk);
|
||||
if (aLength <= remaining) {
|
||||
// Move to the end of this block, which could make this null if we have
|
||||
// reached the end of all buffers.
|
||||
*this += aLength;
|
||||
return ProfileBufferEntryReader(
|
||||
mem0.To(aLength),
|
||||
// Block starts before the entry size.
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
entryIndex - ULEB128Size(aLength)),
|
||||
// Block ends right after the entry (could be null for last entry).
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
GlobalRangePosition()));
|
||||
}
|
||||
|
||||
// We need to go to the next chunk for the 2nd part of this block.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return ProfileBufferEntryReader();
|
||||
}
|
||||
|
||||
Span<const Byte> mem1 = mChunk->BufferSpan();
|
||||
const Length tail = aLength - remaining;
|
||||
MOZ_ASSERT(tail <= mChunk->BufferBytes());
|
||||
MOZ_ASSERT(tail == mChunk->OffsetFirstBlock());
|
||||
// We are in the correct chunk, move the offset to the end of the block.
|
||||
mOffsetInChunk = tail;
|
||||
// And adjust as needed, which could make this null if we have reached the
|
||||
// end of all buffers.
|
||||
Adjust();
|
||||
return ProfileBufferEntryReader(
|
||||
mem0, mem1.To(tail),
|
||||
// Block starts before the entry size.
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
entryIndex - ULEB128Size(aLength)),
|
||||
// Block ends right after the entry (could be null for last entry).
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
GlobalRangePosition()));
|
||||
}
|
||||
|
||||
[[nodiscard]] bool IsNull() const { return !mChunk; }
|
||||
|
||||
[[nodiscard]] bool operator==(const InChunkPointer& aOther) const {
|
||||
if (IsNull() || aOther.IsNull()) {
|
||||
return IsNull() && aOther.IsNull();
|
||||
}
|
||||
return mChunk == aOther.mChunk && mOffsetInChunk == aOther.mOffsetInChunk;
|
||||
}
|
||||
|
||||
[[nodiscard]] bool operator!=(const InChunkPointer& aOther) const {
|
||||
return !(*this == aOther);
|
||||
}
|
||||
|
||||
[[nodiscard]] Byte operator*() const {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
return mChunk->ByteAt(mOffsetInChunk);
|
||||
}
|
||||
|
||||
InChunkPointer& operator++() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
|
||||
mOffsetInChunk = 0;
|
||||
GoToNextChunk();
|
||||
Adjust();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
void GoToNextChunk() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
const ProfileBufferIndex expectedNextRangeStart =
|
||||
mChunk->RangeStart() + mChunk->BufferBytes();
|
||||
|
||||
mChunk = mChunk->GetNext();
|
||||
if (!mChunk) {
|
||||
// Reached the end of the current chunk group, try the next one (which
|
||||
// may be null too, especially on the 2nd try).
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
|
||||
if (mChunk && mChunk->RangeStart() == 0) {
|
||||
// Reached a chunk without a valid (non-null) range start, assume there
|
||||
// are only unused chunks from here on.
|
||||
mChunk = nullptr;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(!mChunk || mChunk->RangeStart() == expectedNextRangeStart,
|
||||
"We don't handle discontinuous buffers (yet)");
|
||||
// Non-DEBUG fallback: Stop reading past discontinuities.
|
||||
// (They should be rare, only happening on temporary OOMs.)
|
||||
// TODO: Handle discontinuities (by skipping over incomplete blocks).
|
||||
if (mChunk && mChunk->RangeStart() != expectedNextRangeStart) {
|
||||
mChunk = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// We want `InChunkPointer` to always point at a valid byte (or be null).
|
||||
// After some operations, `mOffsetInChunk` may point past the end of the
|
||||
// current `mChunk`, in which case we need to adjust our position to be inside
|
||||
// the appropriate chunk. E.g., if we're 10 bytes after the end of the current
|
||||
// chunk, we should end up at offset 10 in the next chunk.
|
||||
// Note that we may "fall off" the last chunk and make this `InChunkPointer`
|
||||
// effectively null.
|
||||
void Adjust() {
|
||||
while (mChunk && mOffsetInChunk >= mChunk->OffsetPastLastBlock()) {
|
||||
// TODO: Try to adjust offset between chunks relative to mRangeStart
|
||||
// differences. But we don't handle discontinuities yet.
|
||||
if (mOffsetInChunk < mChunk->BufferBytes()) {
|
||||
mOffsetInChunk -= mChunk->BufferBytes();
|
||||
} else {
|
||||
mOffsetInChunk -= mChunk->OffsetPastLastBlock();
|
||||
}
|
||||
GoToNextChunk();
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the current position is likely to point at a valid block.
|
||||
// (Size should be reasonable, and block should fully fit inside buffer.)
|
||||
// MOZ_ASSERTs on failure, to catch incorrect uses of block indices (which
|
||||
// should only point at valid blocks if still in range). Non-asserting build
|
||||
// fallback should still be handled.
|
||||
[[nodiscard]] bool ShouldPointAtValidBlock() const {
|
||||
if (IsNull()) {
|
||||
// Pointer is null, no blocks here.
|
||||
MOZ_ASSERT(false, "ShouldPointAtValidBlock - null pointer");
|
||||
return false;
|
||||
}
|
||||
// Use a copy, so we don't modify `*this`.
|
||||
InChunkPointer pointer = *this;
|
||||
// Try to read the entry size.
|
||||
Length entrySize = pointer.ReadEntrySize();
|
||||
if (entrySize == 0) {
|
||||
// Entry size of zero means we read 0 or a way-too-big value.
|
||||
MOZ_ASSERT(false, "ShouldPointAtValidBlock - invalid size");
|
||||
return false;
|
||||
}
|
||||
// See if the last byte of the entry is still inside the buffer.
|
||||
pointer += entrySize - 1;
|
||||
MOZ_ASSERT(!IsNull(), "ShouldPointAtValidBlock - past end of buffer");
|
||||
return !IsNull();
|
||||
}
|
||||
|
||||
const ProfileBufferChunk* mChunk;
|
||||
const ProfileBufferChunk* mNextChunkGroup;
|
||||
Length mOffsetInChunk;
|
||||
};
|
||||
|
||||
} // namespace detail
|
||||
|
||||
// Thread-safe buffer that can store blocks of different sizes during defined
|
||||
// sessions, using Chunks (from a ChunkManager) as storage.
|
||||
//
|
||||
@ -137,16 +518,10 @@ class ProfileChunkedBuffer {
|
||||
}
|
||||
}
|
||||
|
||||
// Set the current chunk manager, except if it's already the one provided.
|
||||
// The caller is responsible for keeping the chunk manager alive as along as
|
||||
// it's used here (until the next (Re)SetChunkManager, or
|
||||
// ~ProfileChunkedBuffer).
|
||||
void SetChunkManagerIfDifferent(ProfileBufferChunkManager& aChunkManager) {
|
||||
// Stop using the current chunk manager, and return it if owned here.
|
||||
[[nodiscard]] UniquePtr<ProfileBufferChunkManager> ExtractChunkManager() {
|
||||
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
|
||||
if (!mChunkManager || mChunkManager != &aChunkManager) {
|
||||
Unused << ResetChunkManager(lock);
|
||||
SetChunkManager(aChunkManager, lock);
|
||||
}
|
||||
return ResetChunkManager(lock);
|
||||
}
|
||||
|
||||
// Clear the contents of this buffer, ready to receive new chunks.
|
||||
@ -487,7 +862,7 @@ class ProfileChunkedBuffer {
|
||||
mBuffer->mMutex.AssertCurrentThreadOwns();
|
||||
}
|
||||
|
||||
profiler::detail::InChunkPointer mNextBlockPointer;
|
||||
detail::InChunkPointer mNextBlockPointer;
|
||||
|
||||
ProfileBufferBlockIndex mCurrentBlockIndex;
|
||||
|
||||
@ -645,7 +1020,7 @@ class ProfileChunkedBuffer {
|
||||
ProfileBufferBlockIndex>,
|
||||
"ReadEach callback must take ProfileBufferEntryReader& and "
|
||||
"optionally a ProfileBufferBlockIndex");
|
||||
profiler::detail::InChunkPointer p{aChunks0, aChunks1};
|
||||
detail::InChunkPointer p{aChunks0, aChunks1};
|
||||
while (!p.IsNull()) {
|
||||
// The position right before an entry size *is* a block index.
|
||||
const ProfileBufferBlockIndex blockIndex =
|
||||
@ -703,7 +1078,7 @@ class ProfileChunkedBuffer {
|
||||
std::is_invocable_v<Callback, Maybe<ProfileBufferEntryReader>&&>,
|
||||
"ReadAt callback must take a Maybe<ProfileBufferEntryReader>&&");
|
||||
Maybe<ProfileBufferEntryReader> maybeEntryReader;
|
||||
if (profiler::detail::InChunkPointer p{aChunks0, aChunks1}; !p.IsNull()) {
|
||||
if (detail::InChunkPointer p{aChunks0, aChunks1}; !p.IsNull()) {
|
||||
// If the pointer position is before the given position, try to advance.
|
||||
if (p.GlobalRangePosition() >=
|
||||
aMinimumBlockIndex.ConvertToProfileBufferIndex() ||
|
||||
@ -1211,8 +1586,11 @@ class ProfileChunkedBuffer {
|
||||
// asynchronously, and either side may be destroyed during the request.
|
||||
// It cannot use the `ProfileChunkedBuffer` mutex, because that buffer and its
|
||||
// mutex could be destroyed during the request.
|
||||
class RequestedChunkRefCountedHolder {
|
||||
class RequestedChunkRefCountedHolder
|
||||
: public external::AtomicRefCounted<RequestedChunkRefCountedHolder> {
|
||||
public:
|
||||
MOZ_DECLARE_REFCOUNTED_TYPENAME(RequestedChunkRefCountedHolder)
|
||||
|
||||
enum class State { Unused, Requested, Fulfilled };
|
||||
|
||||
// Get the current state. Note that it may change after the function
|
||||
@ -1258,32 +1636,9 @@ class ProfileChunkedBuffer {
|
||||
return maybeChunk;
|
||||
}
|
||||
|
||||
// Ref-counting implementation. Hand-rolled, because mozilla::RefCounted
|
||||
// logs AddRefs and Releases in xpcom, but this object could be AddRef'd
|
||||
// by the Base Profiler before xpcom starts, then Release'd by the Gecko
|
||||
// Profiler in xpcom, leading to apparent negative leaks.
|
||||
|
||||
void AddRef() {
|
||||
baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
|
||||
++mRefCount;
|
||||
}
|
||||
|
||||
void Release() {
|
||||
{
|
||||
baseprofiler::detail::BaseProfilerAutoLock lock(mRequestMutex);
|
||||
if (--mRefCount > 0) {
|
||||
return;
|
||||
}
|
||||
}
|
||||
delete this;
|
||||
}
|
||||
|
||||
private:
|
||||
~RequestedChunkRefCountedHolder() = default;
|
||||
|
||||
// Mutex guarding the following members.
|
||||
mutable baseprofiler::detail::BaseProfilerMutex mRequestMutex;
|
||||
int mRefCount = 0;
|
||||
State mState = State::Unused;
|
||||
UniquePtr<ProfileBufferChunk> mRequestedChunk;
|
||||
};
|
||||
|
@ -1,400 +0,0 @@
|
||||
/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
|
||||
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
|
||||
/* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ProfileChunkedBufferDetail_h
|
||||
#define ProfileChunkedBufferDetail_h
|
||||
|
||||
#include "mozilla/Assertions.h"
|
||||
#include "mozilla/Likely.h"
|
||||
#include "mozilla/ProfileBufferChunk.h"
|
||||
#include "mozilla/ProfileBufferEntrySerialization.h"
|
||||
|
||||
namespace mozilla::profiler::detail {
|
||||
|
||||
// Internal accessor pointing at a position inside a chunk.
|
||||
// It can handle two groups of chunks (typically the extant chunks stored in
|
||||
// the store manager, and the current chunk).
|
||||
// The main operations are:
|
||||
// - ReadEntrySize() to read an entry size, 0 means failure.
|
||||
// - operator+=(Length) to skip a number of bytes.
|
||||
// - EntryReader() creates an entry reader at the current position for a given
|
||||
// size (it may fail with an empty reader), and skips the entry.
|
||||
// Note that there is no "past-the-end" position -- as soon as InChunkPointer
|
||||
// reaches the end, it becomes effectively null.
|
||||
class InChunkPointer {
|
||||
public:
|
||||
using Byte = ProfileBufferChunk::Byte;
|
||||
using Length = ProfileBufferChunk::Length;
|
||||
|
||||
// Nullptr-like InChunkPointer, may be used as end iterator.
|
||||
InChunkPointer()
|
||||
: mChunk(nullptr), mNextChunkGroup(nullptr), mOffsetInChunk(0) {}
|
||||
|
||||
// InChunkPointer over one or two chunk groups, pointing at the given
|
||||
// block index (if still in range).
|
||||
// This constructor should only be used with *trusted* block index values!
|
||||
InChunkPointer(const ProfileBufferChunk* aChunk,
|
||||
const ProfileBufferChunk* aNextChunkGroup,
|
||||
ProfileBufferBlockIndex aBlockIndex)
|
||||
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
|
||||
if (mChunk) {
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else if (mNextChunkGroup) {
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else {
|
||||
mOffsetInChunk = 0;
|
||||
}
|
||||
|
||||
// Try to advance to given position.
|
||||
if (!AdvanceToGlobalRangePosition(aBlockIndex)) {
|
||||
// Block does not exist anymore (or block doesn't look valid), reset the
|
||||
// in-chunk pointer.
|
||||
mChunk = nullptr;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// InChunkPointer over one or two chunk groups, will start at the first
|
||||
// block (if any). This may be slow, so avoid using it too much.
|
||||
InChunkPointer(const ProfileBufferChunk* aChunk,
|
||||
const ProfileBufferChunk* aNextChunkGroup,
|
||||
ProfileBufferIndex aIndex = ProfileBufferIndex(0))
|
||||
: mChunk(aChunk), mNextChunkGroup(aNextChunkGroup) {
|
||||
if (mChunk) {
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else if (mNextChunkGroup) {
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
Adjust();
|
||||
} else {
|
||||
mOffsetInChunk = 0;
|
||||
}
|
||||
|
||||
// Try to advance to given position.
|
||||
if (!AdvanceToGlobalRangePosition(aIndex)) {
|
||||
// Block does not exist anymore, reset the in-chunk pointer.
|
||||
mChunk = nullptr;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// Compute the current position in the global range.
|
||||
// 0 if null (including if we're reached the end).
|
||||
[[nodiscard]] ProfileBufferIndex GlobalRangePosition() const {
|
||||
if (IsNull()) {
|
||||
return 0;
|
||||
}
|
||||
return mChunk->RangeStart() + mOffsetInChunk;
|
||||
}
|
||||
|
||||
// Move InChunkPointer forward to the block at the given global block
|
||||
// position, which is assumed to be valid exactly -- but it may be obsolete.
|
||||
// 0 stays where it is (if valid already).
|
||||
// MOZ_ASSERTs if the index is invalid.
|
||||
[[nodiscard]] bool AdvanceToGlobalRangePosition(
|
||||
ProfileBufferBlockIndex aBlockIndex) {
|
||||
if (IsNull()) {
|
||||
// Pointer is null already. (Not asserting because it's acceptable.)
|
||||
return false;
|
||||
}
|
||||
if (!aBlockIndex) {
|
||||
// Special null position, just stay where we are.
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
|
||||
// Past the requested position, stay where we are (assuming the current
|
||||
// position was valid).
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
for (;;) {
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() <
|
||||
mChunk->RangeStart() + mChunk->OffsetPastLastBlock()) {
|
||||
// Target position is in this chunk's written space, move to it.
|
||||
mOffsetInChunk =
|
||||
aBlockIndex.ConvertToProfileBufferIndex() - mChunk->RangeStart();
|
||||
return ShouldPointAtValidBlock();
|
||||
}
|
||||
// Position is after this chunk, try next chunk.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return false;
|
||||
}
|
||||
// Skip whatever block tail there is, we don't allow pointing in the
|
||||
// middle of a block.
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
if (aBlockIndex.ConvertToProfileBufferIndex() < GlobalRangePosition()) {
|
||||
// Past the requested position, meaning that the given position was in-
|
||||
// between blocks -> Failure.
|
||||
MOZ_ASSERT(false, "AdvanceToGlobalRangePosition - In-between blocks");
|
||||
return false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Move InChunkPointer forward to the block at or after the given global
|
||||
// range position.
|
||||
// 0 stays where it is (if valid already).
|
||||
[[nodiscard]] bool AdvanceToGlobalRangePosition(
|
||||
ProfileBufferIndex aPosition) {
|
||||
if (aPosition == 0) {
|
||||
// Special position '0', just stay where we are.
|
||||
// Success if this position is already valid.
|
||||
return !IsNull();
|
||||
}
|
||||
for (;;) {
|
||||
ProfileBufferIndex currentPosition = GlobalRangePosition();
|
||||
if (currentPosition == 0) {
|
||||
// Pointer is null.
|
||||
return false;
|
||||
}
|
||||
if (aPosition <= currentPosition) {
|
||||
// At or past the requested position, stay where we are.
|
||||
return true;
|
||||
}
|
||||
if (aPosition < mChunk->RangeStart() + mChunk->OffsetPastLastBlock()) {
|
||||
// Target position is in this chunk's written space, move to it.
|
||||
for (;;) {
|
||||
// Skip the current block.
|
||||
mOffsetInChunk += ReadEntrySize();
|
||||
if (mOffsetInChunk >= mChunk->OffsetPastLastBlock()) {
|
||||
// Reached the end of the chunk, this can happen for the last
|
||||
// block, let's just continue to the next chunk.
|
||||
break;
|
||||
}
|
||||
if (aPosition <= mChunk->RangeStart() + mOffsetInChunk) {
|
||||
// We're at or after the position, return at this block position.
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
// Position is after this chunk, try next chunk.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return false;
|
||||
}
|
||||
// Skip whatever block tail there is, we don't allow pointing in the
|
||||
// middle of a block.
|
||||
mOffsetInChunk = mChunk->OffsetFirstBlock();
|
||||
}
|
||||
}
|
||||
|
||||
[[nodiscard]] Byte ReadByte() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
Byte byte = mChunk->ByteAt(mOffsetInChunk);
|
||||
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
|
||||
Adjust();
|
||||
}
|
||||
return byte;
|
||||
}
|
||||
|
||||
// Read and skip a ULEB128-encoded size.
|
||||
// 0 means failure (0-byte entries are not allowed.)
|
||||
// Note that this doesn't guarantee that there are actually that many bytes
|
||||
// available to read! (EntryReader() below may gracefully fail.)
|
||||
[[nodiscard]] Length ReadEntrySize() {
|
||||
ULEB128Reader<Length> reader;
|
||||
if (IsNull()) {
|
||||
return 0;
|
||||
}
|
||||
for (;;) {
|
||||
const bool isComplete = reader.FeedByteIsComplete(ReadByte());
|
||||
if (MOZ_UNLIKELY(IsNull())) {
|
||||
// End of chunks, so there's no actual entry after this anyway.
|
||||
return 0;
|
||||
}
|
||||
if (MOZ_LIKELY(isComplete)) {
|
||||
if (MOZ_UNLIKELY(reader.Value() > mChunk->BufferBytes())) {
|
||||
// Don't allow entries larger than a chunk.
|
||||
return 0;
|
||||
}
|
||||
return reader.Value();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
InChunkPointer& operator+=(Length aLength) {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
mOffsetInChunk += aLength;
|
||||
Adjust();
|
||||
return *this;
|
||||
}
|
||||
|
||||
[[nodiscard]] ProfileBufferEntryReader EntryReader(Length aLength) {
|
||||
if (IsNull() || aLength == 0) {
|
||||
return ProfileBufferEntryReader();
|
||||
}
|
||||
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
|
||||
// We should be pointing at the entry, past the entry size.
|
||||
const ProfileBufferIndex entryIndex = GlobalRangePosition();
|
||||
// Verify that there's enough space before for the size (starting at index
|
||||
// 1 at least).
|
||||
MOZ_ASSERT(entryIndex >= 1u + ULEB128Size(aLength));
|
||||
|
||||
const Length remaining = mChunk->OffsetPastLastBlock() - mOffsetInChunk;
|
||||
Span<const Byte> mem0 = mChunk->BufferSpan();
|
||||
mem0 = mem0.From(mOffsetInChunk);
|
||||
if (aLength <= remaining) {
|
||||
// Move to the end of this block, which could make this null if we have
|
||||
// reached the end of all buffers.
|
||||
*this += aLength;
|
||||
return ProfileBufferEntryReader(
|
||||
mem0.To(aLength),
|
||||
// Block starts before the entry size.
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
entryIndex - ULEB128Size(aLength)),
|
||||
// Block ends right after the entry (could be null for last entry).
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
GlobalRangePosition()));
|
||||
}
|
||||
|
||||
// We need to go to the next chunk for the 2nd part of this block.
|
||||
GoToNextChunk();
|
||||
if (IsNull()) {
|
||||
return ProfileBufferEntryReader();
|
||||
}
|
||||
|
||||
Span<const Byte> mem1 = mChunk->BufferSpan();
|
||||
const Length tail = aLength - remaining;
|
||||
MOZ_ASSERT(tail <= mChunk->BufferBytes());
|
||||
MOZ_ASSERT(tail == mChunk->OffsetFirstBlock());
|
||||
// We are in the correct chunk, move the offset to the end of the block.
|
||||
mOffsetInChunk = tail;
|
||||
// And adjust as needed, which could make this null if we have reached the
|
||||
// end of all buffers.
|
||||
Adjust();
|
||||
return ProfileBufferEntryReader(
|
||||
mem0, mem1.To(tail),
|
||||
// Block starts before the entry size.
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
entryIndex - ULEB128Size(aLength)),
|
||||
// Block ends right after the entry (could be null for last entry).
|
||||
ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
|
||||
GlobalRangePosition()));
|
||||
}
|
||||
|
||||
[[nodiscard]] bool IsNull() const { return !mChunk; }
|
||||
|
||||
[[nodiscard]] bool operator==(const InChunkPointer& aOther) const {
|
||||
if (IsNull() || aOther.IsNull()) {
|
||||
return IsNull() && aOther.IsNull();
|
||||
}
|
||||
return mChunk == aOther.mChunk && mOffsetInChunk == aOther.mOffsetInChunk;
|
||||
}
|
||||
|
||||
[[nodiscard]] bool operator!=(const InChunkPointer& aOther) const {
|
||||
return !(*this == aOther);
|
||||
}
|
||||
|
||||
[[nodiscard]] Byte operator*() const {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
return mChunk->ByteAt(mOffsetInChunk);
|
||||
}
|
||||
|
||||
InChunkPointer& operator++() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
MOZ_ASSERT(mOffsetInChunk < mChunk->OffsetPastLastBlock());
|
||||
if (MOZ_UNLIKELY(++mOffsetInChunk == mChunk->OffsetPastLastBlock())) {
|
||||
mOffsetInChunk = 0;
|
||||
GoToNextChunk();
|
||||
Adjust();
|
||||
}
|
||||
return *this;
|
||||
}
|
||||
|
||||
private:
|
||||
void GoToNextChunk() {
|
||||
MOZ_ASSERT(!IsNull());
|
||||
const ProfileBufferIndex expectedNextRangeStart =
|
||||
mChunk->RangeStart() + mChunk->BufferBytes();
|
||||
|
||||
mChunk = mChunk->GetNext();
|
||||
if (!mChunk) {
|
||||
// Reached the end of the current chunk group, try the next one (which
|
||||
// may be null too, especially on the 2nd try).
|
||||
mChunk = mNextChunkGroup;
|
||||
mNextChunkGroup = nullptr;
|
||||
}
|
||||
|
||||
if (mChunk && mChunk->RangeStart() == 0) {
|
||||
// Reached a chunk without a valid (non-null) range start, assume there
|
||||
// are only unused chunks from here on.
|
||||
mChunk = nullptr;
|
||||
}
|
||||
|
||||
MOZ_ASSERT(!mChunk || mChunk->RangeStart() == expectedNextRangeStart,
|
||||
"We don't handle discontinuous buffers (yet)");
|
||||
// Non-DEBUG fallback: Stop reading past discontinuities.
|
||||
// (They should be rare, only happening on temporary OOMs.)
|
||||
// TODO: Handle discontinuities (by skipping over incomplete blocks).
|
||||
if (mChunk && mChunk->RangeStart() != expectedNextRangeStart) {
|
||||
mChunk = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
// We want `InChunkPointer` to always point at a valid byte (or be null).
|
||||
// After some operations, `mOffsetInChunk` may point past the end of the
|
||||
// current `mChunk`, in which case we need to adjust our position to be inside
|
||||
// the appropriate chunk. E.g., if we're 10 bytes after the end of the current
|
||||
// chunk, we should end up at offset 10 in the next chunk.
|
||||
// Note that we may "fall off" the last chunk and make this `InChunkPointer`
|
||||
// effectively null.
|
||||
void Adjust() {
|
||||
while (mChunk && mOffsetInChunk >= mChunk->OffsetPastLastBlock()) {
|
||||
// TODO: Try to adjust offset between chunks relative to mRangeStart
|
||||
// differences. But we don't handle discontinuities yet.
|
||||
if (mOffsetInChunk < mChunk->BufferBytes()) {
|
||||
mOffsetInChunk -= mChunk->BufferBytes();
|
||||
} else {
|
||||
mOffsetInChunk -= mChunk->OffsetPastLastBlock();
|
||||
}
|
||||
GoToNextChunk();
|
||||
}
|
||||
}
|
||||
|
||||
// Check if the current position is likely to point at a valid block.
|
||||
// (Size should be reasonable, and block should fully fit inside buffer.)
|
||||
// MOZ_ASSERTs on failure, to catch incorrect uses of block indices (which
|
||||
// should only point at valid blocks if still in range). Non-asserting build
|
||||
// fallback should still be handled.
|
||||
[[nodiscard]] bool ShouldPointAtValidBlock() const {
|
||||
if (IsNull()) {
|
||||
// Pointer is null, no blocks here.
|
||||
MOZ_ASSERT(false, "ShouldPointAtValidBlock - null pointer");
|
||||
return false;
|
||||
}
|
||||
// Use a copy, so we don't modify `*this`.
|
||||
InChunkPointer pointer = *this;
|
||||
// Try to read the entry size.
|
||||
Length entrySize = pointer.ReadEntrySize();
|
||||
if (entrySize == 0) {
|
||||
// Entry size of zero means we read 0 or a way-too-big value.
|
||||
MOZ_ASSERT(false, "ShouldPointAtValidBlock - invalid size");
|
||||
return false;
|
||||
}
|
||||
// See if the last byte of the entry is still inside the buffer.
|
||||
pointer += entrySize - 1;
|
||||
MOZ_ASSERT(!IsNull(), "ShouldPointAtValidBlock - past end of buffer");
|
||||
return !IsNull();
|
||||
}
|
||||
|
||||
const ProfileBufferChunk* mChunk;
|
||||
const ProfileBufferChunk* mNextChunkGroup;
|
||||
Length mOffsetInChunk;
|
||||
};
|
||||
|
||||
} // namespace mozilla::profiler::detail
|
||||
|
||||
#endif // ProfileChunkedBufferDetail_h
|
@@ -341,7 +341,7 @@ void gecko_profiler_add_marker(
    markerOptions.Set(mozilla::MarkerThreadId::CurrentThread());
  }

  auto& buffer = profiler_get_core_buffer();
  auto& buffer = profiler_markers_detail::CachedCoreBuffer();
  mozilla::Span payload(aPayload, aPayloadSize);

  mozilla::StackCaptureOptions captureOptions =
@@ -196,14 +196,6 @@ using ThreadRegistry = mozilla::profiler::ThreadRegistry;

LazyLogModule gProfilerLog("prof");

ProfileChunkedBuffer& profiler_get_core_buffer() {
  // Defer to the Base Profiler in mozglue to create the core buffer if needed,
  // and keep a reference here, for quick access in xul.
  static ProfileChunkedBuffer& sProfileChunkedBuffer =
      baseprofiler::profiler_get_core_buffer();
  return sProfileChunkedBuffer;
}

mozilla::Atomic<int, mozilla::MemoryOrdering::Relaxed> gSkipSampling;

#if defined(GP_OS_android)
@@ -378,7 +370,12 @@ using JsFrameBuffer = mozilla::profiler::ThreadRegistrationData::JsFrameBuffer;
class CorePS {
 private:
  CorePS()
      : mProcessStartTime(TimeStamp::ProcessCreation())
      : mProcessStartTime(TimeStamp::ProcessCreation()),
        // This needs its own mutex, because it is used concurrently from
        // functions guarded by gPSMutex as well as others without safety (e.g.,
        // profiler_add_marker). It is *not* used inside the critical section of
        // the sampler, because mutexes cannot be used there.
        mCoreBuffer(ProfileChunkedBuffer::ThreadSafety::WithMutex)
#ifdef USE_LUL_STACKWALK
        ,
        mLul(nullptr)
@@ -439,6 +436,9 @@ class CorePS {
  // No PSLockRef is needed for this field because it's immutable.
  PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)

  // No PSLockRef is needed for this field because it's thread-safe.
  PS_GET_LOCKLESS(ProfileChunkedBuffer&, CoreBuffer)

  PS_GET(JsFrameBuffer&, JsFrames)

  PS_GET(Vector<RefPtr<PageInformation>>&, RegisteredPages)
@@ -527,6 +527,17 @@ class CorePS {
  // The time that the process started.
  const TimeStamp mProcessStartTime;

  // The thread-safe blocks-oriented buffer into which all profiling data is
  // recorded.
  // ActivePS controls the lifetime of the underlying contents buffer: When
  // ActivePS does not exist, mCoreBuffer is empty and rejects all reads&writes;
  // see ActivePS for further details.
  // Note: This needs to live here outside of ActivePS, because some producers
  // are indirectly controlled (e.g., by atomic flags) and therefore may still
  // attempt to write some data shortly after ActivePS has shutdown and deleted
  // the underlying buffer in memory.
  ProfileChunkedBuffer mCoreBuffer;

  // Info on all the registered pages.
  // InnerWindowIDs in mRegisteredPages are unique.
  Vector<RefPtr<PageInformation>> mRegisteredPages;
@@ -557,6 +568,11 @@ class CorePS {

CorePS* CorePS::sInstance = nullptr;

ProfileChunkedBuffer& profiler_get_core_buffer() {
  MOZ_ASSERT(CorePS::Exists());
  return CorePS::CoreBuffer();
}

void locked_profiler_add_sampled_counter(PSLockRef aLock,
                                         BaseProfilerCount* aCounter) {
  CorePS::AppendCounter(aLock, aCounter);
@@ -662,11 +678,9 @@ class ActivePS {
    return aFeatures;
  }

  ActivePS(
      PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
      uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount,
      uint64_t aActiveTabID, const Maybe<double>& aDuration,
      UniquePtr<ProfileBufferChunkManagerWithLocalLimit> aChunkManagerOrNull)
  ActivePS(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
           uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount,
           uint64_t aActiveTabID, const Maybe<double>& aDuration)
      : mGeneration(sNextGeneration++),
        mCapacity(aCapacity),
        mDuration(aDuration),
@@ -674,16 +688,11 @@ class ActivePS {
        mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
        mActiveTabID(aActiveTabID),
        mProfileBufferChunkManager(
            aChunkManagerOrNull
                ? std::move(aChunkManagerOrNull)
                : MakeUnique<ProfileBufferChunkManagerWithLocalLimit>(
                      size_t(ClampToAllowedEntries(aCapacity.Value())) *
                          scBytesPerEntry,
                      ChunkSizeForEntries(aCapacity.Value()))),
            size_t(ClampToAllowedEntries(aCapacity.Value())) * scBytesPerEntry,
            ChunkSizeForEntries(aCapacity.Value())),
        mProfileBuffer([this]() -> ProfileChunkedBuffer& {
          ProfileChunkedBuffer& coreBuffer = profiler_get_core_buffer();
          coreBuffer.SetChunkManagerIfDifferent(*mProfileBufferChunkManager);
          return coreBuffer;
          CorePS::CoreBuffer().SetChunkManager(mProfileBufferChunkManager);
          return CorePS::CoreBuffer();
        }()),
        mMaybeProcessCPUCounter(ProfilerFeature::HasProcessCPU(aFeatures)
                                    ? new ProcessCPUCounter(aLock)
@@ -751,10 +760,7 @@ class ActivePS {
      }
    }
#endif
    if (mProfileBufferChunkManager) {
      // We still control the chunk manager, remove it from the core buffer.
      profiler_get_core_buffer().ResetChunkManager();
    }
    CorePS::CoreBuffer().ResetChunkManager();
  }

  bool ThreadSelected(const char* aThreadName) {
@@ -785,15 +791,13 @@ class ActivePS {
  }

 public:
  static void Create(
      PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
      uint32_t aFeatures, const char** aFilters, uint32_t aFilterCount,
      uint64_t aActiveTabID, const Maybe<double>& aDuration,
      UniquePtr<ProfileBufferChunkManagerWithLocalLimit> aChunkManagerOrNull) {
  static void Create(PSLockRef aLock, PowerOfTwo32 aCapacity, double aInterval,
                     uint32_t aFeatures, const char** aFilters,
                     uint32_t aFilterCount, uint64_t aActiveTabID,
                     const Maybe<double>& aDuration) {
    MOZ_ASSERT(!sInstance);
    sInstance = new ActivePS(aLock, aCapacity, aInterval, aFeatures, aFilters,
                             aFilterCount, aActiveTabID, aDuration,
                             std::move(aChunkManagerOrNull));
                             aFilterCount, aActiveTabID, aDuration);
  }

  [[nodiscard]] static SamplerThread* Destroy(PSLockRef aLock) {
@@ -972,15 +976,12 @@ class ActivePS {
  static ProfileBufferChunkManagerWithLocalLimit& ControlledChunkManager(
      PSLockRef) {
    MOZ_ASSERT(sInstance);
    MOZ_ASSERT(sInstance->mProfileBufferChunkManager);
    return *sInstance->mProfileBufferChunkManager;
    return sInstance->mProfileBufferChunkManager;
  }

  static void FulfillChunkRequests(PSLockRef) {
    MOZ_ASSERT(sInstance);
    if (sInstance->mProfileBufferChunkManager) {
      sInstance->mProfileBufferChunkManager->FulfillChunkRequests();
    }
    sInstance->mProfileBufferChunkManager.FulfillChunkRequests();
  }

  static ProfileBuffer& Buffer(PSLockRef) {
@@ -1197,7 +1198,7 @@ class ActivePS {
    if (sInstance->mBaseProfileThreads &&
        sInstance->mGeckoIndexWhenBaseProfileAdded
                .ConvertToProfileBufferIndex() <
            profiler_get_core_buffer().GetState().mRangeStart) {
            CorePS::CoreBuffer().GetState().mRangeStart) {
      DEBUG_LOG("ClearExpiredExitProfiles() - Discarding base profile %p",
                sInstance->mBaseProfileThreads.get());
      sInstance->mBaseProfileThreads.reset();
@@ -1215,7 +1216,7 @@ class ActivePS {
    sInstance->mBaseProfileThreads = std::move(aBaseProfileThreads);
    sInstance->mGeckoIndexWhenBaseProfileAdded =
        ProfileBufferBlockIndex::CreateFromProfileBufferIndex(
            profiler_get_core_buffer().GetState().mRangeEnd);
            CorePS::CoreBuffer().GetState().mRangeEnd);
  }

  static UniquePtr<char[]> MoveBaseProfileThreads(PSLockRef aLock) {
@@ -1312,8 +1313,7 @@ class ActivePS {
  const uint64_t mActiveTabID;

  // The chunk manager used by `mProfileBuffer` below.
  // May become null if it gets transferred ouf of the Gecko Profiler.
  UniquePtr<ProfileBufferChunkManagerWithLocalLimit> mProfileBufferChunkManager;
  ProfileBufferChunkManagerWithLocalLimit mProfileBufferChunkManager;

  // The buffer into which all samples are recorded.
  ProfileBuffer mProfileBuffer;
@ -3745,9 +3745,9 @@ void SamplerThread::Run() {
|
||||
const bool cpuUtilization = ProfilerFeature::HasCPUUtilization(features);
|
||||
|
||||
// Use local ProfileBuffer and underlying buffer to capture the stack.
|
||||
// (This is to avoid touching the core buffer lock while a thread is
|
||||
// suspended, because that thread could be working with the core buffer as
|
||||
// well.
|
||||
// (This is to avoid touching the CorePS::CoreBuffer lock while a thread is
|
||||
// suspended, because that thread could be working with the CorePS::CoreBuffer
|
||||
// as well.)
|
||||
mozilla::ProfileBufferChunkManagerSingle localChunkManager(
|
||||
ProfileBufferChunkManager::scExpectedMaximumStackSize);
|
||||
ProfileChunkedBuffer localBuffer(
|
||||
@ -3955,7 +3955,7 @@ void SamplerThread::Run() {
|
||||
// Note: It is not stored inside the CompactStack so that it doesn't
|
||||
// get incorrectly duplicated when the thread is sleeping.
|
||||
if (!runningTimesDiff.IsEmpty()) {
|
||||
profiler_get_core_buffer().PutObjects(
|
||||
CorePS::CoreBuffer().PutObjects(
|
||||
ProfileBufferEntry::Kind::RunningTimes, runningTimesDiff);
|
||||
}
|
||||
|
||||
@ -4171,7 +4171,7 @@ void SamplerThread::Run() {
|
||||
// Note: It is not stored inside the CompactStack so that it
|
||||
// doesn't get incorrectly duplicated when the thread is sleeping.
|
||||
if (unresponsiveDuration_ms.isSome()) {
|
||||
profiler_get_core_buffer().PutObjects(
|
||||
CorePS::CoreBuffer().PutObjects(
|
||||
ProfileBufferEntry::Kind::UnresponsiveDurationMs,
|
||||
*unresponsiveDuration_ms);
|
||||
}
|
||||
@ -4192,20 +4192,20 @@ void SamplerThread::Run() {
|
||||
previousState.mFailedPutBytes));
|
||||
// There *must* be a CompactStack after a TimeBeforeCompactStack,
|
||||
// even an empty one.
|
||||
profiler_get_core_buffer().PutObjects(
|
||||
CorePS::CoreBuffer().PutObjects(
|
||||
ProfileBufferEntry::Kind::CompactStack,
|
||||
UniquePtr<ProfileChunkedBuffer>(nullptr));
|
||||
} else if (state.mRangeEnd - previousState.mRangeEnd >=
|
||||
*profiler_get_core_buffer().BufferLength()) {
|
||||
*CorePS::CoreBuffer().BufferLength()) {
|
||||
LOG("Stack sample too big for profiler storage, needed %u bytes",
|
||||
unsigned(state.mRangeEnd - previousState.mRangeEnd));
|
||||
// There *must* be a CompactStack after a TimeBeforeCompactStack,
|
||||
// even an empty one.
|
||||
profiler_get_core_buffer().PutObjects(
|
||||
CorePS::CoreBuffer().PutObjects(
|
||||
ProfileBufferEntry::Kind::CompactStack,
|
||||
UniquePtr<ProfileChunkedBuffer>(nullptr));
|
||||
} else {
|
||||
profiler_get_core_buffer().PutObjects(
|
||||
CorePS::CoreBuffer().PutObjects(
|
||||
ProfileBufferEntry::Kind::CompactStack, localBuffer);
|
||||
}
|
||||
|
||||
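The hunks above implement a two-phase capture: while the sampled thread is suspended, its stack is written into a small buffer private to the sampler (so no shared lock is touched), and only after the thread resumes is that data copied into the core buffer, where a TimeBeforeCompactStack entry must always be followed by a CompactStack entry, even an empty one. The following is a minimal sketch of that pattern, not part of these changesets; it assumes the Gecko profiler headers, the profiler-internal ProfileBufferEntry type, and uses a caller-supplied callback in place of the real suspend-and-sample machinery.

#include "mozilla/ProfileBufferChunkManagerSingle.h"
#include "mozilla/ProfileChunkedBuffer.h"
#include "mozilla/UniquePtr.h"

// Sketch only: compiles against the Gecko tree, where ProfileBufferEntry is
// declared by the profiler's internal ProfileBufferEntry.h (included by
// platform.cpp). aCaptureStack stands in for the suspend-and-walk code.
static void SketchCaptureOneStack(
    mozilla::ProfileChunkedBuffer& aCoreBuffer,
    bool (*aCaptureStack)(mozilla::ProfileChunkedBuffer&)) {
  using namespace mozilla;

  // A single-chunk manager and an unsynchronized buffer, private to the
  // sampler thread, so nothing here can lock against the suspended thread.
  ProfileBufferChunkManagerSingle localChunkManager(
      ProfileBufferChunkManager::scExpectedMaximumStackSize);
  ProfileChunkedBuffer localBuffer(
      ProfileChunkedBuffer::ThreadSafety::WithoutMutex, localChunkManager);

  const bool captured = aCaptureStack(localBuffer);

  // A TimeBeforeCompactStack entry was already written to aCoreBuffer; it
  // must always be paired with a CompactStack, even an empty (nullptr) one.
  if (captured) {
    aCoreBuffer.PutObjects(ProfileBufferEntry::Kind::CompactStack, localBuffer);
  } else {
    aCoreBuffer.PutObjects(ProfileBufferEntry::Kind::CompactStack,
                           UniquePtr<ProfileChunkedBuffer>(nullptr));
  }
}
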
@ -5424,6 +5424,18 @@ static void TriggerPollJSSamplingOnMainThread() {
}
}

static bool HasMinimumLength(const char* aString, size_t aMinimumLength) {
if (!aString) {
return false;
}
for (size_t i = 0; i < aMinimumLength; ++i) {
if (aString[i] == '\0') {
return false;
}
}
return true;
}

static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
double aInterval, uint32_t aFeatures,
const char** aFilters, uint32_t aFilterCount,
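The HasMinimumLength() helper added above is a null-safe check that a C string holds at least aMinimumLength characters before its terminator. Later in this patch it is called with a minimum of 2, because an "empty" startup profile can still contain a single newline. A small self-contained illustration follows (hypothetical test code, not part of the changesets):

#include <cassert>
#include <cstddef>

// Same logic as the HasMinimumLength() helper in the hunk above.
static bool HasMinimumLength(const char* aString, size_t aMinimumLength) {
  if (!aString) {
    return false;
  }
  for (size_t i = 0; i < aMinimumLength; ++i) {
    if (aString[i] == '\0') {
      return false;
    }
  }
  return true;
}

int main() {
  assert(!HasMinimumLength(nullptr, 2));  // no string at all
  assert(!HasMinimumLength("", 2));       // empty string
  assert(!HasMinimumLength("\n", 2));     // an "empty" profile: just a newline
  assert(HasMinimumLength("{}", 2));      // two or more characters: keep it
  return 0;
}
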
@ -5456,24 +5468,15 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
// (if any) alive for our use.
mozilla::base_profiler_markers_detail::EnsureBufferForMainThreadAddMarker();

UniquePtr<ProfileBufferChunkManagerWithLocalLimit> baseChunkManager;
bool profilersHandOver = false;
UniquePtr<char[]> baseprofile;
if (baseprofiler::profiler_is_active()) {
// Note that we still hold the lock, so the sampler cannot run yet and
// interact negatively with the still-active BaseProfiler sampler.
// Assume that Base Profiler is active because of MOZ_PROFILER_STARTUP.

// Take ownership of the chunk manager from the Base Profiler, to extend its
// lifetime during the new Gecko Profiler session. Since we're using the
// same core buffer, all the base profiler data remains.
baseChunkManager = baseprofiler::detail::ExtractBaseProfilerChunkManager();

if (baseChunkManager) {
profilersHandOver = true;
BASE_PROFILER_MARKER_TEXT(
"Profilers handover", PROFILER, MarkerTiming::IntervalStart(),
"Transition from Base to Gecko Profiler, some data may be missing");
}
// Capture the Base Profiler startup profile threads (if any).
baseprofile = baseprofiler::profiler_get_profile(
/* aSinceTime */ 0, /* aIsShuttingDown */ false,
/* aOnlyThreads */ true);

// Now stop Base Profiler (BP), as further recording will be ignored anyway,
// and so that it won't clash with Gecko Profiler (GP) sampling starting
@ -5510,11 +5513,20 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
double interval = aInterval > 0 ? aInterval : PROFILER_DEFAULT_INTERVAL;

ActivePS::Create(aLock, capacity, interval, aFeatures, aFilters, aFilterCount,
aActiveTabID, duration, std::move(baseChunkManager));
aActiveTabID, duration);

// ActivePS::Create can only succeed or crash.
MOZ_ASSERT(ActivePS::Exists(aLock));

// An "empty" profile string may in fact contain 1 character (a newline), so
// we want at least 2 characters to register a profile.
if (HasMinimumLength(baseprofile.get(), 2)) {
// The BaseProfiler startup profile will be stored as a separate "process"
// in the Gecko Profiler profile, and shown as a new track under the
// corresponding Gecko Profiler thread.
ActivePS::AddBaseProfileThreads(aLock, std::move(baseprofile));
}

// Set up profiling for each registered thread, if appropriate.
#if defined(MOZ_REPLACE_MALLOC) && defined(MOZ_PROFILER_MEMORY)
bool isMainThreadBeingProfiled = false;
@ -5598,11 +5610,6 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,

// At the very end, set up RacyFeatures.
RacyFeatures::SetActive(ActivePS::Features(aLock));

if (profilersHandOver) {
PROFILER_MARKER_UNTYPED("Profilers handover", PROFILER,
MarkerTiming::IntervalEnd());
}
}

void profiler_start(PowerOfTwo32 aCapacity, double aInterval,
@ -6296,7 +6303,7 @@ bool profiler_is_locked_on_current_thread() {
return PSAutoLock::IsLockedOnCurrentThread() ||
ThreadRegistry::IsRegistryMutexLockedOnCurrentThread() ||
ThreadRegistration::IsDataMutexLockedOnCurrentThread() ||
profiler_get_core_buffer().IsThreadSafeAndLockedOnCurrentThread() ||
CorePS::CoreBuffer().IsThreadSafeAndLockedOnCurrentThread() ||
ProfilerParent::IsLockedOnCurrentThread() ||
ProfilerChild::IsLockedOnCurrentThread();
}

@ -150,8 +150,8 @@ mozilla::ProfileBufferBlockIndex profiler_add_marker(
aOptions.ThreadId().ThreadId())) {
return {};
}
return ::AddMarkerToBuffer(profiler_get_core_buffer(), aName, aCategory,
std::move(aOptions), aMarkerType,
return ::AddMarkerToBuffer(profiler_markers_detail::CachedCoreBuffer(), aName,
aCategory, std::move(aOptions), aMarkerType,
aPayloadArguments...);
#endif
}

@ -26,6 +26,17 @@
// Implemented in platform.cpp
mozilla::ProfileChunkedBuffer& profiler_get_core_buffer();

namespace profiler_markers_detail {

// Get the core buffer from the profiler, and cache it in a
// non-templated-function static reference.
inline mozilla::ProfileChunkedBuffer& CachedCoreBuffer() {
static mozilla::ProfileChunkedBuffer& coreBuffer = profiler_get_core_buffer();
return coreBuffer;
}

} // namespace profiler_markers_detail

#endif // MOZ_GECKO_PROFILER

#endif // ProfilerMarkersDetail_h

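The CachedCoreBuffer() helper being removed above resolves profiler_get_core_buffer() once and keeps the result in a function-local static reference; because the caching function is not itself a template, every instantiation of the templated profiler_add_marker() shares that single cached lookup instead of each instantiation initializing its own static. A self-contained sketch of the same idea with stand-in names (Registry, LookUpRegistry, AddEntry are hypothetical, not profiler code):

#include <cstdio>

struct Registry {};

// Stand-in for profiler_get_core_buffer(): pretend this lookup is non-trivial.
Registry& LookUpRegistry() {
  std::puts("LookUpRegistry() called");
  static Registry sRegistry;
  return sRegistry;
}

// Non-templated caching function: its local static reference is initialized
// exactly once, then shared by every caller regardless of instantiation.
inline Registry& CachedRegistry() {
  static Registry& sCached = LookUpRegistry();
  return sCached;
}

// Templated entry point, analogous to profiler_add_marker(): each
// instantiation routes through the same non-templated CachedRegistry().
template <typename T>
void AddEntry(const T& /*aValue*/) {
  Registry& reg = CachedRegistry();
  (void)reg;
}

int main() {
  AddEntry(1);       // "LookUpRegistry() called" is printed once, here
  AddEntry(2.5);     // different instantiation, same cached reference
  AddEntry("text");  // still no further lookups
  return 0;
}
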
@ -7,7 +7,6 @@
#ifndef ProfilerThreadRegistrationInfo_h
#define ProfilerThreadRegistrationInfo_h

#include "mozilla/BaseAndGeckoProfilerDetail.h"
#include "mozilla/ProfilerUtils.h"
#include "mozilla/TimeStamp.h"

@ -45,16 +44,8 @@ class ThreadRegistrationInfo {
[[nodiscard]] bool IsMainThread() const { return mIsMainThread; }

private:
static TimeStamp ExistingRegisterTimeOrNow() {
TimeStamp registerTime = baseprofiler::detail::GetThreadRegistrationTime();
if (!registerTime) {
registerTime = TimeStamp::Now();
}
return registerTime;
}

const std::string mName;
const TimeStamp mRegisterTime = ExistingRegisterTimeOrNow();
const TimeStamp mRegisterTime = TimeStamp::Now();
const ProfilerThreadId mThreadId = profiler_current_thread_id();
const bool mIsMainThread = profiler_is_main_thread();
};

@ -4076,7 +4076,7 @@ TEST(GeckoProfiler, BaseProfilerHandOff)
for (const Json::Value& thread : threads) {
ASSERT_TRUE(thread.isObject());
GET_JSON(name, thread["name"], String);
if (name.asString() == "GeckoMain") {
if (name.asString() == "GeckoMain (pre-xul)") {
found = true;
EXPECT_JSON_ARRAY_CONTAINS(thread["stringTable"], String,
"Marker from base profiler");