Backed out 21 changesets (bug 1578327, bug 1575448, bug 1580091, bug 1576554, bug 1581049, bug 1576551, bug 1578329, bug 1576555) for platform.cpp related bustage CLOSED TREE

Backed out changeset 27afea20c396 (bug 1581049)
Backed out changeset be1ec4577d37 (bug 1581049)
Backed out changeset b4b6bbb18cc1 (bug 1581049)
Backed out changeset b30942f9db54 (bug 1581049)
Backed out changeset 473c431866f7 (bug 1578329)
Backed out changeset ac492dc3df20 (bug 1578329)
Backed out changeset f51875652f6f (bug 1578327)
Backed out changeset 512b7cbd18f6 (bug 1576555)
Backed out changeset 2d63a9934c00 (bug 1576555)
Backed out changeset ff73f648ab6c (bug 1576555)
Backed out changeset 49f49079bbb5 (bug 1576555)
Backed out changeset e8323157c6f3 (bug 1575448)
Backed out changeset 3b0d726f2dd6 (bug 1575448)
Backed out changeset 5924790abc4b (bug 1576554)
Backed out changeset a14ac9bb5338 (bug 1576554)
Backed out changeset b6d73f5042a7 (bug 1576551)
Backed out changeset 366030bd2d84 (bug 1576551)
Backed out changeset d7ee4148aad9 (bug 1576551)
Backed out changeset ec72dfc7301e (bug 1576551)
Backed out changeset 79b29286f906 (bug 1580091)
Backed out changeset 6f34c2e57ccf (bug 1580091)
This commit is contained in:
Bogdan Tara 2019-09-17 10:16:14 +03:00
parent 9ee5b3bbef
commit 361c83c1a9
47 changed files with 2565 additions and 5095 deletions

View File

@ -142,7 +142,7 @@ void TimeoutManager::MoveIdleToActive() {
timeout->remove();
mTimeouts.InsertFront(timeout);
#if MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
if (num == 0) {
now = TimeStamp::Now();
}
@ -891,7 +891,7 @@ void TimeoutManager::RunTimeout(const TimeStamp& aNow,
// This timeout is good to run.
bool timeout_was_cleared = window->RunTimeoutHandler(timeout, scx);
#if MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
TimeDuration elapsed = now - timeout->SubmitTime();
TimeDuration target = timeout->When() - timeout->SubmitTime();
TimeDuration delta = now - timeout->When();

View File

@ -153,7 +153,7 @@ void nsDOMNavigationTiming::NotifyLoadEventEnd() {
if (IsTopLevelContentDocumentInContentProcess()) {
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers() || PAGELOAD_LOG_ENABLED()) {
if (profiler_is_active() || PAGELOAD_LOG_ENABLED()) {
TimeDuration elapsed = mLoadEventEnd - mNavigationStart;
TimeDuration duration = mLoadEventEnd - mLoadEventStart;
nsAutoCString spec;
@ -345,7 +345,7 @@ void nsDOMNavigationTiming::TTITimeout(nsITimer* aTimer) {
mTTITimer = nullptr;
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers() || PAGELOAD_LOG_ENABLED()) {
if (profiler_is_active() || PAGELOAD_LOG_ENABLED()) {
TimeDuration elapsed = mTTFI - mNavigationStart;
MOZ_ASSERT(elapsed.ToMilliseconds() > 0);
TimeDuration elapsedLongTask =
@ -428,7 +428,7 @@ void nsDOMNavigationTiming::NotifyContentfulPaintForRootContentDocument(
mContentfulPaint = aCompositeEndTime;
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers() || PAGELOAD_LOG_ENABLED()) {
if (profiler_is_active() || PAGELOAD_LOG_ENABLED()) {
TimeDuration elapsed = mContentfulPaint - mNavigationStart;
nsAutoCString spec;
if (mLoadedURI) {

View File

@ -5865,9 +5865,9 @@ bool nsGlobalWindowInner::RunTimeoutHandler(Timeout* aTimeout,
const char* reason = GetTimeoutReasonString(timeout);
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
nsCOMPtr<nsIDocShell> docShell = GetDocShell();
nsCString str;
nsCOMPtr<nsIDocShell> docShell = GetDocShell();
nsCString str;
if (profiler_is_active()) {
TimeDuration originalInterval = timeout->When() - timeout->SubmitTime();
str.Append(reason);
str.Append(" with interval ");
@ -5876,10 +5876,10 @@ bool nsGlobalWindowInner::RunTimeoutHandler(Timeout* aTimeout,
nsCString handlerDescription;
timeout->mScriptHandler->GetDescription(handlerDescription);
str.Append(handlerDescription);
AUTO_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE("setTimeout callback", str, JS,
docShell,
timeout->TakeProfilerBacktrace());
}
AUTO_PROFILER_TEXT_MARKER_DOCSHELL_CAUSE("setTimeout callback", str, JS,
docShell,
timeout->TakeProfilerBacktrace());
#endif
bool abortIntervalHandler;

View File

@ -54,32 +54,9 @@ class VideoFrameMarkerPayload : public ProfilerMarkerPayload {
mAudioPositionUs(aAudioPositionUs),
mVideoFrameTimeUs(aVideoFrameTimeUs) {}
BlocksRingBuffer::Length TagAndSerializationBytes() const override {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mAudioPositionUs, mVideoFrameTimeUs);
}
void SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const override {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mAudioPositionUs);
aEntryWriter.WriteObject(mVideoFrameTimeUs);
}
static UniquePtr<ProfilerMarkerPayload> Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto audioPositionUs = aEntryReader.ReadObject<int64_t>();
auto videoFrameTimeUs = aEntryReader.ReadObject<int64_t>();
return UniquePtr<ProfilerMarkerPayload>(new VideoFrameMarkerPayload(
std::move(props), audioPositionUs, videoFrameTimeUs));
}
void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const override {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("UpdateRenderVideoFrames", aWriter, aProcessStartTime,
aUniqueStacks);
aWriter.IntProperty("audio", mAudioPositionUs);
@ -87,12 +64,6 @@ class VideoFrameMarkerPayload : public ProfilerMarkerPayload {
}
private:
VideoFrameMarkerPayload(CommonProps&& aCommonProps, int64_t aAudioPositionUs,
int64_t aVideoFrameTimeUs)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mAudioPositionUs(aAudioPositionUs),
mVideoFrameTimeUs(aVideoFrameTimeUs) {}
int64_t mAudioPositionUs;
int64_t mVideoFrameTimeUs;
};

View File

@ -221,7 +221,7 @@ void Performance::Mark(const nsAString& aName, ErrorResult& aRv) {
InsertUserEntry(performanceMark);
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
nsCOMPtr<EventTarget> et = do_QueryInterface(GetOwner());
nsCOMPtr<nsIDocShell> docShell =
nsContentUtils::GetDocShellForEventTarget(et);
@ -305,7 +305,7 @@ void Performance::Measure(const nsAString& aName,
InsertUserEntry(performanceMeasure);
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
TimeStamp startTimeStamp =
CreationTimeStamp() + TimeDuration::FromMilliseconds(startTime);
TimeStamp endTimeStamp =

View File

@ -2310,7 +2310,7 @@ void RecordCompositionPayloadsPresented(
TimeStamp presented = TimeStamp::Now();
for (const CompositionPayload& payload : aPayloads) {
#if MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
nsPrintfCString marker(
"Payload Presented, type: %d latency: %dms\n",
int32_t(payload.mType),

View File

@ -94,7 +94,7 @@ void ProfilerScreenshots::SubmitScreenshot(
originalSize, scaledSize, timeStamp]() {
// Create a new surface that wraps backingSurface's data but has the
// correct size.
if (profiler_can_accept_markers()) {
{
DataSourceSurface::ScopedMap scopedMap(backingSurface,
DataSourceSurface::READ);
RefPtr<DataSourceSurface> surf =

View File

@ -2524,38 +2524,18 @@ int32_t RecordContentFrameTime(
int32_t fracLatencyNorm = lround(latencyNorm * 100.0);
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
class ContentFramePayload : public ProfilerMarkerPayload {
public:
ContentFramePayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
mozilla::BlocksRingBuffer::Length TagAndSerializationBytes()
const override {
return CommonPropsTagAndSerializationBytes();
}
void SerializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const override {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const override {
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) override {
StreamCommonProps("CONTENT_FRAME_TIME", aWriter, aProcessStartTime,
aUniqueStacks);
}
private:
explicit ContentFramePayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
static mozilla::UniquePtr<ProfilerMarkerPayload> Deserialize(
mozilla::BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new ContentFramePayload(std::move(props)));
}
};
AUTO_PROFILER_STATS(add_marker_with_ContentFramePayload);
profiler_add_marker_for_thread(

View File

@ -361,38 +361,18 @@ void ContentCompositorBridgeParent::ShadowLayersUpdated(
auto endTime = TimeStamp::Now();
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
class ContentBuildPayload : public ProfilerMarkerPayload {
public:
ContentBuildPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
mozilla::BlocksRingBuffer::Length TagAndSerializationBytes()
const override {
return CommonPropsTagAndSerializationBytes();
}
void SerializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const override {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const override {
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) override {
StreamCommonProps("CONTENT_FULL_PAINT_TIME", aWriter, aProcessStartTime,
aUniqueStacks);
}
private:
explicit ContentBuildPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
static mozilla::UniquePtr<ProfilerMarkerPayload> Deserialize(
mozilla::BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new ContentBuildPayload(std::move(props)));
}
};
AUTO_PROFILER_STATS(add_marker_with_ContentBuildPayload);
profiler_add_marker_for_thread(

View File

@ -205,40 +205,18 @@ class SceneBuiltNotification : public wr::NotificationHandler {
"SceneBuiltNotificationRunnable", [parent, epoch, startTime]() {
auto endTime = TimeStamp::Now();
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
class ContentFullPaintPayload : public ProfilerMarkerPayload {
public:
ContentFullPaintPayload(const mozilla::TimeStamp& aStartTime,
const mozilla::TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
mozilla::BlocksRingBuffer::Length TagAndSerializationBytes()
const override {
return CommonPropsTagAndSerializationBytes();
}
void SerializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter)
const override {
static const DeserializerTag tag =
TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const override {
UniqueStacks& aUniqueStacks) override {
StreamCommonProps("CONTENT_FULL_PAINT_TIME", aWriter,
aProcessStartTime, aUniqueStacks);
}
private:
explicit ContentFullPaintPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
static mozilla::UniquePtr<ProfilerMarkerPayload> Deserialize(
mozilla::BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new ContentFullPaintPayload(std::move(props)));
}
};
AUTO_PROFILER_STATS(add_marker_with_ContentFullPaintPayload);

View File

@ -1605,12 +1605,13 @@ void gfxFontFamily::FindFontForChar(GlobalFontMatch* aMatchData) {
}
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
nsCString charAndName =
nsPrintfCString("\\u%x %s", aMatchData->mCh, mName.get());
AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("gfxFontFamily::FindFontForChar",
LAYOUT, charAndName);
nsCString charAndName;
if (profiler_is_active()) {
charAndName = nsPrintfCString("\\u%x %s", aMatchData->mCh, mName.get());
}
AUTO_PROFILER_LABEL_DYNAMIC_NSCSTRING("gfxFontFamily::FindFontForChar",
LAYOUT, charAndName);
#endif
AutoTArray<gfxFontEntry*, 4> entries;

View File

@ -20,7 +20,7 @@ class MOZ_RAII AutoProfilerStyleMarker {
explicit AutoProfilerStyleMarker(UniqueProfilerBacktrace aCause,
const Maybe<nsID>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: mActive(profiler_can_accept_markers()),
: mActive(profiler_is_active()),
mStartTime(TimeStamp::Now()),
mCause(std::move(aCause)),
mDocShellId(aDocShellId),

View File

@ -10,69 +10,45 @@
# include "ProfileBuffer.h"
# include "ProfilerMarker.h"
# include "mozilla/MathAlgorithms.h"
namespace mozilla {
namespace baseprofiler {
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto DuplicationBufferBytes = MakePowerOfTwo32<65536>();
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity)
: mEntries(aBuffer),
mDuplicationBuffer(MakeUnique<BlocksRingBuffer::Byte[]>(
DuplicationBufferBytes.Value())) {
// Only ProfileBuffer should control this buffer, and it should be empty when
// there is no ProfileBuffer using it.
MOZ_ASSERT(mEntries.BufferLength().isNothing());
// Allocate the requested capacity.
mEntries.Set(aCapacity);
}
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer) : mEntries(aBuffer) {
// Assume the given buffer is not empty.
MOZ_ASSERT(mEntries.BufferLength().isSome());
}
ProfileBuffer::ProfileBuffer(PowerOfTwo32 aCapacity)
: mEntries(MakeUnique<ProfileBufferEntry[]>(aCapacity.Value())),
mEntryIndexMask(aCapacity.Mask()),
mRangeStart(0),
mRangeEnd(0) {}
ProfileBuffer::~ProfileBuffer() {
// Only ProfileBuffer controls this buffer, and it should be empty when there
// is no ProfileBuffer using it.
mEntries.Reset();
MOZ_ASSERT(mEntries.BufferLength().isNothing());
}
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
# define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
# undef SWITCH_KIND
default:
MOZ_ASSERT(false, "Unhandled baseprofiler::ProfilerBuffer entry KIND");
return BlockIndex{};
while (mStoredMarkers.peek()) {
delete mStoredMarkers.popHead();
}
}
// Called from signal, call only reentrant functions
uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
return AddEntry(mEntries, aEntry).ConvertToU64();
}
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
GetEntry(mRangeEnd++) = aEntry;
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
// The distance between mRangeStart and mRangeEnd must never exceed
// capacity, so advance mRangeStart if necessary.
if (mRangeEnd - mRangeStart > mEntryIndexMask.MaskValue() + 1) {
mRangeStart++;
}
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
return AddThreadIdEntry(mEntries, aThreadId).ConvertToU64();
uint64_t pos = mRangeEnd;
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
return pos;
}
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
aStoredMarker->SetPositionInBuffer(mRangeEnd);
mStoredMarkers.insert(aStoredMarker);
}
void ProfileBuffer::CollectCodeLocation(
@ -112,15 +88,27 @@ void ProfileBuffer::CollectCodeLocation(
}
}
size_t ProfileBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
return mEntries.SizeOfExcludingThis(aMallocSizeOf);
void ProfileBuffer::DeleteExpiredStoredMarkers() {
AUTO_PROFILER_STATS(base_ProfileBuffer_DeleteExpiredStoredMarkers);
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
delete mStoredMarkers.popHead();
}
}
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
size_t n = aMallocSizeOf(this);
n += aMallocSizeOf(mEntries.get());
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
// - mStoredMarkers
return n;
}
void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
@ -164,15 +152,9 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
}
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {BufferRangeStart(),
BufferRangeEnd(),
mEntries.BufferLength()->Value() / 8, // 8 bytes per entry.
mIntervalsNs,
mOverheadsNs,
mLockingsNs,
mCleaningsNs,
mCountersNs,
mThreadsNs};
return {mRangeStart, mRangeEnd, mEntryIndexMask.MaskValue() + 1,
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
}
/* ProfileBufferCollector */

View File

@ -7,43 +7,37 @@
#define MOZ_PROFILE_BUFFER_H
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
namespace mozilla {
namespace baseprofiler {
// Class storing most profiling data in a BlocksRingBuffer.
//
// A fixed-capacity circular buffer.
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
// really just means that an existing entry in the storage buffer gets
// overwritten and that mRangeStart gets incremented.
class ProfileBuffer final {
public:
// Opaque type containing a block index, which should not be modified outside
// of BlocksRingBuffer.
// TODO: Eventually, all uint64_t values should be replaced with BlockIndex,
// because external users should only store and compare them, but not do other
// arithmetic operations (that uint64_t supports).
using BlockIndex = BlocksRingBuffer::BlockIndex;
// ProfileBuffer constructor
// @param aBuffer The empty BlocksRingBuffer to use as buffer manager.
// @param aCapacity The capacity of the buffer in memory.
ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity);
// ProfileBuffer constructor
// @param aBuffer The pre-filled BlocksRingBuffer to use as buffer manager.
explicit ProfileBuffer(BlocksRingBuffer& aBuffer);
// @param aCapacity The capacity of the buffer.
explicit ProfileBuffer(PowerOfTwo32 aCapacity);
~ProfileBuffer();
bool IsThreadSafe() const { return mEntries.IsThreadSafe(); }
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
// Returns the position of the entry.
uint64_t AddEntry(const ProfileBufferEntry& aEntry);
void AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
@ -88,7 +82,16 @@ class ProfileBuffer final {
void DiscardSamplesBeforeTime(double aTime);
size_t SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const;
void AddStoredMarker(ProfilerMarker* aStoredMarker);
// The following method is not signal safe!
void DeleteExpiredStoredMarkers();
// Access an entry in the buffer.
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
return mEntries[aPosition & mEntryIndexMask];
}
size_t SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const;
void CollectOverheadStats(TimeDuration aSamplingTime, TimeDuration aLocking,
@ -98,47 +101,38 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// Add |aEntry| to the provider BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddEntry(BlocksRingBuffer& aBlocksRingBuffer,
const ProfileBufferEntry& aEntry);
// The storage that backs our buffer. Holds capacity entries.
// All accesses to entries in mEntries need to go through GetEntry(), which
// translates the given buffer position from the near-infinite uint64_t space
// into the entry storage space.
UniquePtr<ProfileBufferEntry[]> mEntries;
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddThreadIdEntry(BlocksRingBuffer& aBlocksRingBuffer,
int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
BlocksRingBuffer& mEntries;
// A mask such that pos & mEntryIndexMask == pos % capacity.
PowerOfTwoMask32 mEntryIndexMask;
public:
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
// corresponding to the first entry and past the last entry stored in
// `mEntries`.
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
// never wrap around. mRangeEnd is always greater than or equal to
// mRangeStart, but never gets more than capacity steps ahead of
// mRangeStart, because we can only store a fixed number of entries in the
// buffer. Once the entire buffer is in use, adding a new entry will evict an
// entry from the front of the buffer (and increase mRangeStart).
// In other words, the following conditions hold true at all times:
// (1) mRangeStart <= mRangeEnd
// (2) mRangeEnd - mRangeStart <= capacity
//
// The returned values are not guaranteed to be stable, because other threads
// may also be accessing the buffer concurrently. But they will always
// increase, and can therefore give an indication of how far these values have
// *at least* reached. In particular:
// - Entries whose index is strictly less that `BufferRangeStart()` have been
// discarded by now, so any related data may also be safely discarded.
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToU64();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToU64();
}
// If there are no live entries, then mRangeStart == mRangeEnd.
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
// the last live entry, and also the position at which the next entry will be
// added.
// (mRangeEnd - mRangeStart) always gives the number of live entries.
uint64_t mRangeStart;
uint64_t mRangeEnd;
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
private:
// Used when duplicating sleeping stacks (to avoid spurious mallocs).
const UniquePtr<BlocksRingBuffer::Byte[]> mDuplicationBuffer;
// Time from launch (ns) when first sampling was recorded.
double mFirstSamplingTimeNs = 0.0;
// Time from launch (ns) when last sampling was recorded.
@ -173,9 +167,7 @@ class ProfileBufferCollector final : public ProfilerStackCollector {
return Some(mSamplePositionInBuffer);
}
Maybe<uint64_t> BufferRangeStart() override {
return Some(mBuf.BufferRangeStart());
}
Maybe<uint64_t> BufferRangeStart() override { return Some(mBuf.mRangeStart); }
virtual void CollectNativeLeafAddr(void* aAddr) override;
virtual void CollectProfilingStackFrame(

File diff suppressed because it is too large. (Load Diff)

View File

@ -23,62 +23,42 @@
namespace mozilla {
namespace baseprofiler {
class ProfilerMarker;
// NOTE! If you add entries, you need to verify if they need to be added to the
// switch statement in DuplicateLastSample!
// This will evaluate the MACRO with (KIND, TYPE, SIZE)
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int, sizeof(int)) \
MACRO(CollectionStart, double, sizeof(double)) \
MACRO(CollectionEnd, double, sizeof(double)) \
MACRO(Label, const char*, sizeof(const char*)) \
MACRO(FrameFlags, uint64_t, sizeof(uint64_t)) \
MACRO(DynamicStringFragment, char*, ProfileBufferEntry::kNumChars) \
MACRO(JitReturnAddr, void*, sizeof(void*)) \
MACRO(LineNumber, int, sizeof(int)) \
MACRO(ColumnNumber, int, sizeof(int)) \
MACRO(NativeLeafAddr, void*, sizeof(void*)) \
MACRO(Pause, double, sizeof(double)) \
MACRO(Responsiveness, double, sizeof(double)) \
MACRO(Resume, double, sizeof(double)) \
MACRO(ThreadId, int, sizeof(int)) \
MACRO(Time, double, sizeof(double)) \
MACRO(CounterId, void*, sizeof(void*)) \
MACRO(CounterKey, uint64_t, sizeof(uint64_t)) \
MACRO(Number, uint64_t, sizeof(uint64_t)) \
MACRO(Count, int64_t, sizeof(int64_t)) \
MACRO(ProfilerOverheadTime, double, sizeof(double)) \
MACRO(ProfilerOverheadDuration, double, sizeof(double))
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int) \
MACRO(CollectionStart, double) \
MACRO(CollectionEnd, double) \
MACRO(Label, const char*) \
MACRO(FrameFlags, uint64_t) \
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
MACRO(JitReturnAddr, void*) \
MACRO(LineNumber, int) \
MACRO(ColumnNumber, int) \
MACRO(NativeLeafAddr, void*) \
MACRO(Marker, ProfilerMarker*) \
MACRO(Pause, double) \
MACRO(Responsiveness, double) \
MACRO(Resume, double) \
MACRO(ThreadId, int) \
MACRO(Time, double) \
MACRO(CounterId, void*) \
MACRO(CounterKey, uint64_t) \
MACRO(Number, uint64_t) \
MACRO(Count, int64_t) \
MACRO(ProfilerOverheadTime, double) \
MACRO(ProfilerOverheadDuration, double)
class ProfileBufferEntry {
public:
// The `Kind` is a single byte identifying the type of data that is actually
// stored in a `ProfileBufferEntry`, as per the list in
// `FOR_EACH_PROFILE_BUFFER_ENTRY_KIND`.
//
// This byte is also used to identify entries in BlocksRingBuffer blocks, for
// both "legacy" entries that do contain a `ProfileBufferEntry`, and for new
// types of entries that may carry more data of different types.
// TODO: Eventually each type of "legacy" entry should be replaced with newer,
// more efficient kinds of entries (e.g., stack frames could be stored in one
// bigger entry, instead of multiple `ProfileBufferEntry`s); then we could
// discard `ProfileBufferEntry` and move this enum to a more appropriate spot.
using KindUnderlyingType = uint8_t;
enum class Kind : KindUnderlyingType {
enum class Kind : uint8_t {
INVALID = 0,
#define KIND(KIND, TYPE, SIZE) KIND,
#define KIND(k, t) k,
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
#undef KIND
// Any value under `LEGACY_LIMIT` represents a `ProfileBufferEntry`.
LEGACY_LIMIT,
// Any value starting here does *not* represent a `ProfileBufferEntry` and
// requires separate decoding and handling.
// Marker data, including payload.
MarkerData = LEGACY_LIMIT,
MODERN_LIMIT
LIMIT
};
ProfileBufferEntry();
@ -92,23 +72,24 @@ class ProfileBufferEntry {
ProfileBufferEntry(Kind aKind, const char* aString);
ProfileBufferEntry(Kind aKind, char aChars[kNumChars]);
ProfileBufferEntry(Kind aKind, void* aPtr);
ProfileBufferEntry(Kind aKind, ProfilerMarker* aMarker);
ProfileBufferEntry(Kind aKind, double aDouble);
ProfileBufferEntry(Kind aKind, int64_t aInt64);
ProfileBufferEntry(Kind aKind, uint64_t aUint64);
ProfileBufferEntry(Kind aKind, int aInt);
public:
#define CTOR(KIND, TYPE, SIZE) \
static ProfileBufferEntry KIND(TYPE aVal) { \
return ProfileBufferEntry(Kind::KIND, aVal); \
#define CTOR(k, t) \
static ProfileBufferEntry k(t aVal) { \
return ProfileBufferEntry(Kind::k, aVal); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
#undef CTOR
Kind GetKind() const { return mKind; }
#define IS_KIND(KIND, TYPE, SIZE) \
bool Is##KIND() const { return mKind == Kind::KIND; }
#define IS_KIND(k, t) \
bool Is##k() const { return mKind == Kind::k; }
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
#undef IS_KIND
@ -125,6 +106,7 @@ class ProfileBufferEntry {
const char* GetString() const;
void* GetPtr() const;
ProfilerMarker* GetMarker() const;
double GetDouble() const;
int GetInt() const;
int64_t GetInt64() const;

View File

@ -18,23 +18,9 @@
namespace mozilla {
namespace baseprofiler {
ProfilerBacktrace::ProfilerBacktrace(
const char* aName, int aThreadId,
UniquePtr<BlocksRingBuffer> aBlocksRingBuffer,
UniquePtr<ProfileBuffer> aProfileBuffer)
: mName(strdup(aName)),
mThreadId(aThreadId),
mBlocksRingBuffer(std::move(aBlocksRingBuffer)),
mProfileBuffer(std::move(aProfileBuffer)) {
MOZ_ASSERT(
!!mBlocksRingBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<BlocksRingBuffer>");
MOZ_ASSERT(
!!mProfileBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<ProfileBuffer>");
MOZ_ASSERT(!mBlocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe BlocksRingBuffer");
}
ProfilerBacktrace::ProfilerBacktrace(const char* aName, int aThreadId,
UniquePtr<ProfileBuffer> aBuffer)
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {}
ProfilerBacktrace::~ProfilerBacktrace() {}
@ -42,10 +28,10 @@ void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
// Unlike ProfiledThreadData::StreamJSON, we don't need to call
// ProfileBuffer::AddJITInfoForRange because mProfileBuffer does not contain
// any JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// ProfileBuffer::AddJITInfoForRange because mBuffer does not contain any
// JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// at sample time.
StreamSamplesAndMarkers(mName.get(), mThreadId, *mProfileBuffer, aWriter, "",
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter, "",
aProcessStartTime,
/* aRegisterTime */ TimeStamp(),
/* aUnregisterTime */ TimeStamp(),
@ -53,28 +39,6 @@ void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
}
} // namespace baseprofiler
// static
template <typename Destructor>
UniquePtr<baseprofiler::ProfilerBacktrace, Destructor> BlocksRingBuffer::
Deserializer<UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>>::Read(
BlocksRingBuffer::EntryReader& aER) {
auto blocksRingBuffer = aER.ReadObject<UniquePtr<BlocksRingBuffer>>();
if (!blocksRingBuffer) {
return nullptr;
}
MOZ_ASSERT(!blocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only stores non-thread-safe BlocksRingBuffers");
int threadId = aER.ReadObject<int>();
std::string name = aER.ReadObject<std::string>();
auto profileBuffer =
MakeUnique<baseprofiler::ProfileBuffer>(*blocksRingBuffer);
return UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>{
new baseprofiler::ProfilerBacktrace(name.c_str(), threadId,
std::move(blocksRingBuffer),
std::move(profileBuffer))};
};
} // namespace mozilla
#endif // MOZ_BASE_PROFILER

View File

@ -11,7 +11,6 @@
namespace mozilla {
class BlocksRingBuffer;
class TimeStamp;
namespace baseprofiler {
@ -25,8 +24,7 @@ class UniqueStacks;
class ProfilerBacktrace {
public:
ProfilerBacktrace(const char* aName, int aThreadId,
UniquePtr<BlocksRingBuffer> aBlocksRingBuffer,
UniquePtr<ProfileBuffer> aProfileBuffer);
UniquePtr<ProfileBuffer> aBuffer);
~ProfilerBacktrace();
// ProfilerBacktraces' stacks are deduplicated in the context of the
@ -40,90 +38,15 @@ class ProfilerBacktrace {
UniqueStacks& aUniqueStacks);
private:
// Used to de/serialize a ProfilerBacktrace.
friend struct BlocksRingBuffer::Serializer<ProfilerBacktrace>;
friend struct BlocksRingBuffer::Deserializer<ProfilerBacktrace>;
ProfilerBacktrace(const ProfilerBacktrace&);
ProfilerBacktrace& operator=(const ProfilerBacktrace&);
UniqueFreePtr<char> mName;
int mThreadId;
// `BlocksRingBuffer` in which `mProfileBuffer` stores its data; must be
// located before `mProfileBuffer` so that it's destroyed after.
UniquePtr<BlocksRingBuffer> mBlocksRingBuffer;
UniquePtr<ProfileBuffer> mProfileBuffer;
UniquePtr<ProfileBuffer> mBuffer;
};
} // namespace baseprofiler
// Format: [ UniquePtr<BlockRingsBuffer> | threadId | name ]
// Initial len==0 marks a nullptr or empty backtrace.
// Serializer for a ProfilerBacktrace value.
// Wire format: [ UniquePtr<BlocksRingBuffer> | threadId | name ];
// a single ULEB128 0 denotes a null or empty backtrace.
// NOTE(review): Bytes() measures SumBytes(*mBlocksRingBuffer) (dereferenced)
// while Write() tests SumBytes(mBlocksRingBuffer) (the UniquePtr) — confirm
// the UniquePtr serializer makes these two sizes agree.
template <>
struct BlocksRingBuffer::Serializer<baseprofiler::ProfilerBacktrace> {
// Number of bytes Write() will produce for `aBacktrace`.
static Length Bytes(const baseprofiler::ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer) {
// No backtrace buffer.
return ULEB128Size<Length>(0);
}
auto bufferBytes = SumBytes(*aBacktrace.mBlocksRingBuffer);
if (bufferBytes == 0) {
// Empty backtrace buffer.
return ULEB128Size<Length>(0);
}
return bufferBytes +
SumBytes(aBacktrace.mThreadId,
WrapBlocksRingBufferUnownedCString(aBacktrace.mName.get()));
}
// Serialize `aBacktrace`; must write exactly Bytes() bytes.
static void Write(EntryWriter& aEW,
const baseprofiler::ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer ||
SumBytes(aBacktrace.mBlocksRingBuffer) == 0) {
// No backtrace buffer, or it is empty.
aEW.WriteULEB128<Length>(0);
return;
}
aEW.WriteObject(aBacktrace.mBlocksRingBuffer);
aEW.WriteObject(aBacktrace.mThreadId);
aEW.WriteObject(WrapBlocksRingBufferUnownedCString(aBacktrace.mName.get()));
}
};
// Serializer for an owning pointer to a ProfilerBacktrace; a null pointer is
// encoded the same way as an empty backtrace (single ULEB128 0).
template <typename Destructor>
struct BlocksRingBuffer::Serializer<
UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>> {
static Length Bytes(const UniquePtr<baseprofiler::ProfilerBacktrace,
Destructor>& aBacktrace) {
if (!aBacktrace) {
// Null backtrace pointer (treated like an empty backtrace).
return ULEB128Size<Length>(0);
}
// Delegate to the by-value serializer above.
return SumBytes(*aBacktrace);
}
static void Write(EntryWriter& aEW,
const UniquePtr<baseprofiler::ProfilerBacktrace,
Destructor>& aBacktrace) {
if (!aBacktrace) {
// Null backtrace pointer (treated like an empty backtrace).
aEW.WriteULEB128<Length>(0);
return;
}
aEW.WriteObject(*aBacktrace);
}
};
// Deserializer counterpart; Read() is defined out-of-line (it needs the full
// ProfilerBacktrace/ProfileBuffer definitions).
template <typename Destructor>
struct BlocksRingBuffer::Deserializer<
UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>> {
static void ReadInto(
EntryReader& aER,
UniquePtr<baseprofiler::ProfilerBacktrace, Destructor>& aBacktrace) {
aBacktrace = Read(aER);
}
static UniquePtr<baseprofiler::ProfilerBacktrace, Destructor> Read(
EntryReader& aER);
};
} // namespace mozilla
#endif // __PROFILER_BACKTRACE_H

View File

@ -0,0 +1,175 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerMarker_h
#define ProfilerMarker_h
#include "ProfileBufferEntry.h"
#include "BaseProfileJSONWriter.h"
#include "BaseProfilerMarkerPayload.h"
#include "mozilla/UniquePtrExtensions.h"
namespace mozilla {
namespace baseprofiler {
template <typename T>
class ProfilerLinkedList;
// A single profiler marker: a name, category, owning thread, optional payload,
// and a timestamp. Markers are chained into ProfilerLinkedList via mNext
// (hence the friend declaration). The name is copied with strdup and owned by
// mMarkerName.
class ProfilerMarker {
friend class ProfilerLinkedList<ProfilerMarker>;
public:
explicit ProfilerMarker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair, int aThreadId,
UniquePtr<ProfilerMarkerPayload> aPayload = nullptr,
double aTime = 0)
: mMarkerName(strdup(aMarkerName)),
mPayload(std::move(aPayload)),
mNext{nullptr},
mTime(aTime),
mPositionInBuffer{0},
mThreadId{aThreadId},
mCategoryPair{aCategoryPair} {}
// Record where this marker was stored in the profile buffer, so expiry can
// be checked against the buffer's moving range start.
void SetPositionInBuffer(uint64_t aPosition) {
mPositionInBuffer = aPosition;
}
// True if the buffer has wrapped past this marker's stored position.
bool HasExpired(uint64_t aBufferRangeStart) const {
return mPositionInBuffer < aBufferRangeStart;
}
double GetTime() const { return mTime; }
int GetThreadId() const { return mThreadId; }
// Stream this marker as a JSON array element: [name, time, category, data].
// The payload object is emitted only when present.
void StreamJSON(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
// Schema:
// [name, time, category, data]
aWriter.StartArrayElement();
{
aUniqueStacks.mUniqueStrings->WriteElement(aWriter, mMarkerName.get());
aWriter.DoubleElement(mTime);
const ProfilingCategoryPairInfo& info =
GetProfilingCategoryPairInfo(mCategoryPair);
aWriter.IntElement(unsigned(info.mCategory));
// TODO: Store the callsite for this marker if available:
// if have location data
// b.NameValue(marker, "location", ...);
if (mPayload) {
aWriter.StartObjectElement(SpliceableJSONWriter::SingleLineStyle);
{ mPayload->StreamPayload(aWriter, aProcessStartTime, aUniqueStacks); }
aWriter.EndObject();
}
}
aWriter.EndArray();
}
private:
UniqueFreePtr<char> mMarkerName;  // owned copy of the marker name
UniquePtr<ProfilerMarkerPayload> mPayload;  // optional extra data
ProfilerMarker* mNext;  // intrusive link, managed by ProfilerLinkedList
double mTime;
uint64_t mPositionInBuffer;  // position for HasExpired() checks
int mThreadId;
ProfilingCategoryPair mCategoryPair;
};
// Minimal singly-linked FIFO queue of intrusive nodes (T must expose an
// accessible `mNext` pointer, e.g. via friendship). The list links elements
// but does not delete them; callers own popped nodes.
template <typename T>
class ProfilerLinkedList {
 public:
  ProfilerLinkedList() : mHead(nullptr), mTail(nullptr) {}

  // Append `aElem` at the tail; it becomes the last element.
  void insert(T* aElem) {
    if (mTail) {
      mTail->mNext = aElem;
    } else {
      mHead = aElem;
    }
    mTail = aElem;
    aElem->mNext = nullptr;
  }

  // Detach and return the first element. Popping an empty list is a bug
  // (debug assert) and yields nullptr.
  T* popHead() {
    T* front = mHead;
    if (!front) {
      MOZ_ASSERT(false);
      return nullptr;
    }
    mHead = front->mNext;
    if (!mHead) {
      mTail = nullptr;
    }
    return front;
  }

  // Look at the first element without removing it; nullptr when empty.
  const T* peek() { return mHead; }

 private:
  T* mHead;
  T* mTail;
};
typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
// A ProfilerLinkedList that a signal handler can safely *read*: the owning
// thread flags mSignalLock around mutations, and accessList() refuses to hand
// out the list while that flag is set. The destructor deletes any remaining
// elements. NOTE(review): safety relies on the exact store ordering in
// insert() (flag set before mutation, cleared after) — do not reorder.
template <typename T>
class ProfilerSignalSafeLinkedList {
public:
ProfilerSignalSafeLinkedList() : mSignalLock(false) {}
~ProfilerSignalSafeLinkedList() {
if (mSignalLock) {
// Some thread is modifying the list. We should only be released on that
// thread.
abort();
}
// This list owns whatever is still queued; free it.
while (mList.peek()) {
delete mList.popHead();
}
}
// Insert an item into the list. Must only be called from the owning thread.
// Must not be called while the list from accessList() is being accessed.
// In the profiler, we ensure that by interrupting the profiled thread
// (which is the one that owns this list and calls insert() on it) until
// we're done reading the list from the signal handler.
void insert(T* aElement) {
MOZ_ASSERT(aElement);
mSignalLock = true;
mList.insert(aElement);
mSignalLock = false;
}
// Called within signal, from any thread, possibly while insert() is in the
// middle of modifying the list (on the owning thread). Will return null if
// that is the case.
// Function must be reentrant.
ProfilerLinkedList<T>* accessList() { return mSignalLock ? nullptr : &mList; }
private:
ProfilerLinkedList<T> mList;
// If this is set, then it's not safe to read the list because its contents
// are being changed.
Atomic<bool> mSignalLock;
};
} // namespace baseprofiler
} // namespace mozilla
#endif // ProfilerMarker_h

View File

@ -21,77 +21,6 @@
namespace mozilla {
namespace baseprofiler {
// Placeholder deserializer stored at tag 0: always yields a null payload.
static UniquePtr<ProfilerMarkerPayload> DeserializeNothing(
mozilla::BlocksRingBuffer::EntryReader&) {
return nullptr;
}
// Number of currently-registered deserializers.
// Starting at 1 for the initial `DeserializeNothing`.
// A transient value of 0 means a writer currently owns the array (see
// TagForDeserializer below).
// static
Atomic<ProfilerMarkerPayload::DeserializerTagAtomic, ReleaseAcquire,
recordreplay::Behavior::DontPreserve>
ProfilerMarkerPayload::sDeserializerCount{1};
// Initialize `sDeserializers` with `DeserializeNothing` at index 0, all others
// are nullptrs.
// static
ProfilerMarkerPayload::Deserializer
ProfilerMarkerPayload::sDeserializers[DeserializerMax] = {
DeserializeNothing};
// Return the tag for `aDeserializer`, registering it into `sDeserializers`
// on first use. Lock-free: a writer briefly publishes 0 in
// `sDeserializerCount` to claim exclusive ownership of the next array slot,
// then restores the real count. Null deserializers map to tag 0
// (DeserializeNothing).
// static
ProfilerMarkerPayload::DeserializerTag
ProfilerMarkerPayload::TagForDeserializer(
ProfilerMarkerPayload::Deserializer aDeserializer) {
if (!aDeserializer) {
return 0;
}
// Start first search at index 0.
DeserializerTagAtomic start = 0;
for (;;) {
// Read the current count of deserializers.
const DeserializerTagAtomic tagCount = sDeserializerCount;
if (tagCount == 0) {
// Someone else is currently writing into the array, loop around until we
// get a valid count.
continue;
}
for (DeserializerTagAtomic i = start; i < tagCount; ++i) {
if (sDeserializers[i] == aDeserializer) {
// Deserializer already registered, return its tag.
return static_cast<ProfilerMarkerPayload::DeserializerTag>(i);
}
}
// Not found yet, let's register this new deserializer.
// Make sure we haven't reached the limit yet.
MOZ_RELEASE_ASSERT(tagCount < DeserializerMax);
// Reserve `tagCount` as an index, if not already claimed:
// If `sDeserializerCount` is still at our previously-read `tagCount`,
// replace it with a special 0 value to indicate a write.
if (sDeserializerCount.compareExchange(tagCount, 0)) {
// Here we own the `tagCount` index, write the deserializer there.
sDeserializers[tagCount] = aDeserializer;
// And publish by writing the real new count (1 past our index).
sDeserializerCount = tagCount + 1;
return static_cast<ProfilerMarkerPayload::DeserializerTag>(tagCount);
}
// Someone else beat us to grab an index, and it could be for the same
// deserializer! So let's just try searching starting from our recorded
// `tagCount` (and maybe attempting again to register). It should be rare
// enough and quick enough that it won't impact performances.
start = tagCount;
}
}
// Reverse lookup of TagForDeserializer: return the deserializer registered
// for `aTag`. Release-asserts that the tag is within both the static array
// bound and the published registration count.
// static
ProfilerMarkerPayload::Deserializer ProfilerMarkerPayload::DeserializerForTag(
ProfilerMarkerPayload::DeserializerTag aTag) {
MOZ_RELEASE_ASSERT(aTag < DeserializerMax);
MOZ_RELEASE_ASSERT(aTag < sDeserializerCount);
return sDeserializers[aTag];
}
static void MOZ_ALWAYS_INLINE WriteTime(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
const TimeStamp& aTime,
@ -102,113 +31,33 @@ static void MOZ_ALWAYS_INLINE WriteTime(SpliceableJSONWriter& aWriter,
}
void ProfilerMarkerPayload::StreamType(const char* aMarkerType,
SpliceableJSONWriter& aWriter) const {
SpliceableJSONWriter& aWriter) {
MOZ_ASSERT(aMarkerType);
aWriter.StringProperty("type", aMarkerType);
}
// Size in bytes of the tag + common-props prefix that every payload
// serialization starts with. Must match SerializeTagAndCommonProps below.
BlocksRingBuffer::Length
ProfilerMarkerPayload::CommonPropsTagAndSerializationBytes() const {
return sizeof(DeserializerTag) +
BlocksRingBuffer::SumBytes(mCommonProps.mStartTime,
mCommonProps.mEndTime, mCommonProps.mStack,
mCommonProps.mDocShellId,
mCommonProps.mDocShellHistoryId);
}
// Write the deserializer tag followed by the common props. The field order
// here *is* the wire format — DeserializeCommonProps must read in the same
// order.
void ProfilerMarkerPayload::SerializeTagAndCommonProps(
DeserializerTag aDeserializerTag,
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
aEntryWriter.WriteObject(aDeserializerTag);
aEntryWriter.WriteObject(mCommonProps.mStartTime);
aEntryWriter.WriteObject(mCommonProps.mEndTime);
aEntryWriter.WriteObject(mCommonProps.mStack);
aEntryWriter.WriteObject(mCommonProps.mDocShellId);
aEntryWriter.WriteObject(mCommonProps.mDocShellHistoryId);
}
// Read the common props back, in the exact order SerializeTagAndCommonProps
// wrote them (the tag itself has already been consumed by the caller).
// static
ProfilerMarkerPayload::CommonProps
ProfilerMarkerPayload::DeserializeCommonProps(
BlocksRingBuffer::EntryReader& aEntryReader) {
CommonProps props;
aEntryReader.ReadIntoObject(props.mStartTime);
aEntryReader.ReadIntoObject(props.mEndTime);
aEntryReader.ReadIntoObject(props.mStack);
aEntryReader.ReadIntoObject(props.mDocShellId);
aEntryReader.ReadIntoObject(props.mDocShellHistoryId);
return props;
}
void ProfilerMarkerPayload::StreamCommonProps(
const char* aMarkerType, SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) const {
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) {
StreamType(aMarkerType, aWriter);
WriteTime(aWriter, aProcessStartTime, mCommonProps.mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mCommonProps.mEndTime, "endTime");
if (mCommonProps.mDocShellId) {
aWriter.StringProperty("docShellId", mCommonProps.mDocShellId->c_str());
WriteTime(aWriter, aProcessStartTime, mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mEndTime, "endTime");
if (mDocShellId) {
aWriter.StringProperty("docShellId", mDocShellId->c_str());
}
if (mCommonProps.mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId",
mCommonProps.mDocShellHistoryId.ref());
if (mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId", mDocShellHistoryId.ref());
}
if (mCommonProps.mStack) {
if (mStack) {
aWriter.StartObjectProperty("stack");
{
mCommonProps.mStack->StreamJSON(aWriter, aProcessStartTime,
aUniqueStacks);
}
{ mStack->StreamJSON(aWriter, aProcessStartTime, aUniqueStacks); }
aWriter.EndObject();
}
}
// Public constructor: a tracing marker with category/kind, optional docshell
// association, and an optional captured backtrace as the cause.
TracingMarkerPayload::TracingMarkerPayload(
const char* aCategory, TracingKind aKind,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId, UniqueProfilerBacktrace aCause)
: ProfilerMarkerPayload(aDocShellId, aDocShellHistoryId, std::move(aCause)),
mCategory(aCategory),
mKind(aKind) {}
// Deserialization constructor: rebuilds from already-read common props.
TracingMarkerPayload::TracingMarkerPayload(CommonProps&& aCommonProps,
const char* aCategory,
TracingKind aKind)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mCategory(aCategory),
mKind(aKind) {}
TracingMarkerPayload::~TracingMarkerPayload() = default;
// Serialized size: common prefix + raw category pointer + kind.
BlocksRingBuffer::Length TracingMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mCategory),
mKind);
}
// Write order must match Deserialize below.
void TracingMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mCategory));
aEntryWriter.WriteObject(mKind);
}
// Rebuild a TracingMarkerPayload from the reader; reads fields in the order
// SerializeTagAndPayload wrote them.
// static
UniquePtr<ProfilerMarkerPayload> TracingMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
const char* category = aEntryReader.ReadObject<const char*>();
TracingKind kind = aEntryReader.ReadObject<TracingKind>();
return UniquePtr<ProfilerMarkerPayload>(
new TracingMarkerPayload(std::move(props), category, kind));
}
void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("tracing", aWriter, aProcessStartTime, aUniqueStacks);
if (mCategory) {
@ -222,135 +71,20 @@ void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
// Public constructor: operation and filename strings are copied (strdup) and
// owned; aSource must be non-null and is stored as a raw (unowned) pointer.
FileIOMarkerPayload::FileIOMarkerPayload(const char* aOperation,
const char* aSource,
const char* aFilename,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
UniqueProfilerBacktrace aStack)
: ProfilerMarkerPayload(aStartTime, aEndTime, Nothing(), Nothing(),
std::move(aStack)),
mSource(aSource),
mOperation(aOperation ? strdup(aOperation) : nullptr),
mFilename(aFilename ? strdup(aFilename) : nullptr) {
MOZ_ASSERT(aSource);
}
// Deserialization constructor: takes ownership of already-read strings.
FileIOMarkerPayload::FileIOMarkerPayload(CommonProps&& aCommonProps,
const char* aSource,
UniqueFreePtr<char>&& aOperation,
UniqueFreePtr<char>&& aFilename)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mSource(aSource),
mOperation(std::move(aOperation)),
mFilename(std::move(aFilename)) {}
FileIOMarkerPayload::~FileIOMarkerPayload() = default;
// Serialized size: common prefix + raw source pointer + owned strings.
BlocksRingBuffer::Length FileIOMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mSource),
mOperation, mFilename);
}
// Write order must match Deserialize below.
void FileIOMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mSource));
aEntryWriter.WriteObject(mOperation);
aEntryWriter.WriteObject(mFilename);
}
// Rebuild from the reader, in the order SerializeTagAndPayload wrote.
// static
UniquePtr<ProfilerMarkerPayload> FileIOMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto source = aEntryReader.ReadObject<const char*>();
auto operation = aEntryReader.ReadObject<UniqueFreePtr<char>>();
auto filename = aEntryReader.ReadObject<UniqueFreePtr<char>>();
return UniquePtr<ProfilerMarkerPayload>(new FileIOMarkerPayload(
std::move(props), source, std::move(operation), std::move(filename)));
}
void FileIOMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("FileIO", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("operation", mOperation.get());
aWriter.StringProperty("source", mSource);
if (mFilename && *mFilename) {
if (mFilename) {
aWriter.StringProperty("filename", mFilename.get());
}
}
// "mark" constructor: a point-in-time user timing entry (start == end).
UserTimingMarkerPayload::UserTimingMarkerPayload(
const std::string& aName, const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mEntryType("mark"),
mName(aName) {}
// "measure" constructor: an interval with optional start/end mark names.
UserTimingMarkerPayload::UserTimingMarkerPayload(
const std::string& aName, const Maybe<std::string>& aStartMark,
const Maybe<std::string>& aEndMark, const TimeStamp& aStartTime,
const TimeStamp& aEndTime, const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
mEntryType("measure"),
mName(aName),
mStartMark(aStartMark),
mEndMark(aEndMark) {}
// Deserialization constructor: rebuilds from already-read fields.
UserTimingMarkerPayload::UserTimingMarkerPayload(
CommonProps&& aCommonProps, const char* aEntryType, std::string&& aName,
Maybe<std::string>&& aStartMark, Maybe<std::string>&& aEndMark)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mEntryType(aEntryType),
mName(std::move(aName)),
mStartMark(std::move(aStartMark)),
mEndMark(std::move(aEndMark)) {}
UserTimingMarkerPayload::~UserTimingMarkerPayload() = default;
// Serialized size: common prefix + raw entry-type pointer + name + marks.
BlocksRingBuffer::Length UserTimingMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mEntryType),
mName, mStartMark, mEndMark);
}
// Write order must match Deserialize below.
void UserTimingMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mEntryType));
aEntryWriter.WriteObject(mName);
aEntryWriter.WriteObject(mStartMark);
aEntryWriter.WriteObject(mEndMark);
}
// Rebuild from the reader, in the order SerializeTagAndPayload wrote.
// static
UniquePtr<ProfilerMarkerPayload> UserTimingMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto entryType = aEntryReader.ReadObject<const char*>();
auto name = aEntryReader.ReadObject<std::string>();
auto startMark = aEntryReader.ReadObject<Maybe<std::string>>();
auto endMark = aEntryReader.ReadObject<Maybe<std::string>>();
return UniquePtr<ProfilerMarkerPayload>(
new UserTimingMarkerPayload(std::move(props), entryType, std::move(name),
std::move(startMark), std::move(endMark)));
}
void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("UserTiming", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mName.c_str());
aWriter.StringProperty("entryType", mEntryType);
@ -367,181 +101,31 @@ void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
// Instant text marker (start == end).
TextMarkerPayload::TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime), mText(aText) {}
// Interval text marker.
TextMarkerPayload::TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime), mText(aText) {}
// Instant text marker associated with a docshell.
TextMarkerPayload::TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mText(aText) {}
// Interval text marker with docshell association and an optional cause
// backtrace.
TextMarkerPayload::TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace aCause)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)),
mText(aText) {}
// Deserialization constructor.
TextMarkerPayload::TextMarkerPayload(CommonProps&& aCommonProps,
std::string&& aText)
: ProfilerMarkerPayload(std::move(aCommonProps)), mText(std::move(aText)) {}
TextMarkerPayload::~TextMarkerPayload() = default;
// Serialized size: common prefix + the text string.
BlocksRingBuffer::Length TextMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mText);
}
// Write order must match Deserialize below.
void TextMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mText);
}
// Rebuild from the reader, in the order SerializeTagAndPayload wrote.
// static
UniquePtr<ProfilerMarkerPayload> TextMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto text = aEntryReader.ReadObject<std::string>();
return UniquePtr<ProfilerMarkerPayload>(
new TextMarkerPayload(std::move(props), std::move(text)));
}
void TextMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Text", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.c_str());
}
// Instant log marker (start == end) carrying the log module and message.
LogMarkerPayload::LogMarkerPayload(const char* aModule, const char* aText,
const TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime),
mModule(aModule),
mText(aText) {}
// Deserialization constructor.
LogMarkerPayload::LogMarkerPayload(CommonProps&& aCommonProps,
std::string&& aModule, std::string&& aText)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mModule(std::move(aModule)),
mText(std::move(aText)) {}
LogMarkerPayload::~LogMarkerPayload() = default;
// Serialized size: common prefix + module + text strings.
BlocksRingBuffer::Length LogMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mModule, mText);
}
// Write order must match Deserialize below.
void LogMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mModule);
aEntryWriter.WriteObject(mText);
}
// Rebuild from the reader, in the order SerializeTagAndPayload wrote.
// static
UniquePtr<ProfilerMarkerPayload> LogMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto module = aEntryReader.ReadObject<std::string>();
auto text = aEntryReader.ReadObject<std::string>();
return UniquePtr<ProfilerMarkerPayload>(new LogMarkerPayload(
std::move(props), std::move(module), std::move(text)));
}
void LogMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Log", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.c_str());
aWriter.StringProperty("module", mModule.c_str());
}
// Hang interval marker; carries no extra data beyond the common props.
HangMarkerPayload::HangMarkerPayload(const TimeStamp& aStartTime,
const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
// Deserialization constructor.
HangMarkerPayload::HangMarkerPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
HangMarkerPayload::~HangMarkerPayload() = default;
// Serialized size: common prefix only (no payload-specific fields).
BlocksRingBuffer::Length HangMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes();
}
void HangMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
// Rebuild from the reader; only common props were serialized.
// static
UniquePtr<ProfilerMarkerPayload> HangMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new HangMarkerPayload(std::move(props)));
}
void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("BHR-detected hang", aWriter, aProcessStartTime,
aUniqueStacks);
}
// Long-task interval marker; carries no extra data beyond the common props.
LongTaskMarkerPayload::LongTaskMarkerPayload(const TimeStamp& aStartTime,
const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
// Deserialization constructor.
LongTaskMarkerPayload::LongTaskMarkerPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
LongTaskMarkerPayload::~LongTaskMarkerPayload() = default;
// Serialized size: common prefix only (no payload-specific fields).
BlocksRingBuffer::Length LongTaskMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes();
}
void LongTaskMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
// Rebuild from the reader; only common props were serialized.
// static
UniquePtr<ProfilerMarkerPayload> LongTaskMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new LongTaskMarkerPayload(std::move(props)));
}
void LongTaskMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("MainThreadLongTask", aWriter, aProcessStartTime,
aUniqueStacks);
aWriter.StringProperty("category", "LongTask");

View File

@ -8,6 +8,8 @@
#define RegisteredThread_h
#include "platform.h"
#include "ProfilerMarker.h"
#include "BaseProfilerMarkerPayload.h"
#include "ThreadInfo.h"
#include "mozilla/UniquePtr.h"
@ -33,6 +35,25 @@ class RacyRegisteredThread final {
bool IsBeingProfiled() const { return mIsBeingProfiled; }
// Queue a new marker on this thread's pending-marker list. The marker is
// heap-allocated here and ownership passes to mPendingMarkers (drained
// elsewhere into the profile buffer).
void AddPendingMarker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair,
UniquePtr<ProfilerMarkerPayload> aPayload,
double aTime) {
// Note: We don't assert on mIsBeingProfiled, because it could have changed
// between the check in the caller and now.
ProfilerMarker* marker = new ProfilerMarker(
aMarkerName, aCategoryPair, mThreadId, std::move(aPayload), aTime);
mPendingMarkers.insert(marker);
}
// Called within signal. Function must be reentrant.
// Returns the pending-marker list, or null when the owning thread was
// interrupted mid-insert (see ProfilerSignalSafeLinkedList::accessList).
ProfilerMarkerLinkedList* GetPendingMarkers() {
// The profiled thread is interrupted, so we can access the list safely.
// Unless the profiled thread was in the middle of changing the list when
// we interrupted it - in that case, accessList() will return null.
return mPendingMarkers.accessList();
}
// This is called on every profiler restart. Put things that should happen at
// that time here.
void ReinitializeOnResume() {
@ -82,6 +103,9 @@ class RacyRegisteredThread final {
private:
class ProfilingStack mProfilingStack;
// A list of pending markers that must be moved to the circular buffer.
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
// mThreadId contains the thread ID of the current thread. It is safe to read
// this from multiple threads concurrently, as it will never be mutated.
const int mThreadId;

View File

@ -261,12 +261,7 @@ class CorePS {
private:
CorePS()
: mMainThreadId(profiler_current_thread_id()),
mProcessStartTime(TimeStamp::ProcessCreation()),
// This needs its own mutex, because it is used concurrently from
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBlocksRingBuffer(BlocksRingBuffer::ThreadSafety::WithMutex)
mProcessStartTime(TimeStamp::ProcessCreation())
# ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@ -324,9 +319,6 @@ class CorePS {
// No PSLockRef is needed for this field because it's immutable.
PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(BlocksRingBuffer&, CoreBlocksRingBuffer)
PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)
static void AppendRegisteredThread(
@ -414,17 +406,6 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented ring buffer into which all profiling data
// is recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBlocksRingBuffer is empty and rejects all
// reads&writes; see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shutdown and deleted
// the underlying buffer in memory.
BlocksRingBuffer mCoreBlocksRingBuffer;
// Info on all the registered threads.
// ThreadIds in mRegisteredThreads are unique.
Vector<UniquePtr<RegisteredThread>> mRegisteredThreads;
@ -487,13 +468,11 @@ class ActivePS {
mDuration(aDuration),
mInterval(aInterval),
mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
// 8 bytes per entry.
mProfileBuffer(
MakeUnique<ProfileBuffer>(CorePS::CoreBlocksRingBuffer(),
PowerOfTwo32(aCapacity.Value() * 8))),
mBuffer(MakeUnique<ProfileBuffer>(aCapacity))
// The new sampler thread doesn't start sampling immediately because the
// main loop within Run() is blocked until this function's caller
// unlocks gPSMutex.
,
mSamplerThread(NewSamplerThread(aLock, mGeneration, aInterval))
# undef HAS_FEATURE
,
@ -584,7 +563,7 @@ class ActivePS {
static size_t SizeOf(PSLockRef, MallocSizeOf aMallocSizeOf) {
size_t n = aMallocSizeOf(sInstance);
n += sInstance->mProfileBuffer->SizeOfIncludingThis(aMallocSizeOf);
n += sInstance->mBuffer->SizeOfIncludingThis(aMallocSizeOf);
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
@ -623,7 +602,7 @@ class ActivePS {
PS_GET(const Vector<std::string>&, Filters)
static ProfileBuffer& Buffer(PSLockRef) { return *sInstance->mProfileBuffer; }
static ProfileBuffer& Buffer(PSLockRef) { return *sInstance->mBuffer.get(); }
static const Vector<LiveProfiledThreadData>& LiveProfiledThreads(PSLockRef) {
return sInstance->mLiveProfiledThreads;
@ -711,7 +690,7 @@ class ActivePS {
LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
if (thread.mRegisteredThread == aRegisteredThread) {
thread.mProfiledThreadData->NotifyUnregistered(
sInstance->mProfileBuffer->BufferRangeEnd());
sInstance->mBuffer->mRangeEnd);
MOZ_RELEASE_ASSERT(sInstance->mDeadProfiledThreads.append(
std::move(thread.mProfiledThreadData)));
sInstance->mLiveProfiledThreads.erase(
@ -728,7 +707,7 @@ class ActivePS {
# endif
static void DiscardExpiredDeadProfiledThreads(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard any dead threads that were unregistered before bufferRangeStart.
sInstance->mDeadProfiledThreads.eraseIf(
[bufferRangeStart](
@ -747,7 +726,7 @@ class ActivePS {
for (size_t i = 0; i < registeredPages.length(); i++) {
RefPtr<PageInformation>& page = registeredPages[i];
if (page->DocShellId() == aRegisteredDocShellId) {
page->NotifyUnregistered(sInstance->mProfileBuffer->BufferRangeEnd());
page->NotifyUnregistered(sInstance->mBuffer->mRangeEnd);
MOZ_RELEASE_ASSERT(
sInstance->mDeadProfiledPages.append(std::move(page)));
registeredPages.erase(&registeredPages[i--]);
@ -756,7 +735,7 @@ class ActivePS {
}
static void DiscardExpiredPages(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard any dead pages that were unregistered before
// bufferRangeStart.
sInstance->mDeadProfiledPages.eraseIf(
@ -774,7 +753,7 @@ class ActivePS {
}
static void ClearExpiredExitProfiles(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard exit profiles that were gathered before our buffer RangeStart.
sInstance->mExitProfiles.eraseIf(
[bufferRangeStart](const ExitProfile& aExitProfile) {
@ -785,8 +764,8 @@ class ActivePS {
static void AddExitProfile(PSLockRef aLock, const std::string& aExitProfile) {
ClearExpiredExitProfiles(aLock);
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(ExitProfile{
aExitProfile, sInstance->mProfileBuffer->BufferRangeEnd()}));
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(
ExitProfile{aExitProfile, sInstance->mBuffer->mRangeEnd}));
}
static Vector<std::string> MoveExitProfiles(PSLockRef aLock) {
@ -826,10 +805,10 @@ class ActivePS {
const uint32_t mGeneration;
static uint32_t sNextGeneration;
// The maximum number of 8-byte entries in mProfileBuffer.
// The maximum number of entries in mBuffer.
const PowerOfTwo32 mCapacity;
// The maximum duration of entries in mProfileBuffer, in seconds.
// The maximum duration of entries in mBuffer, in seconds.
const Maybe<double> mDuration;
// The interval between samples, measured in milliseconds.
@ -843,7 +822,7 @@ class ActivePS {
// The buffer into which all samples are recorded. Always non-null. Always
// used in conjunction with CorePS::m{Live,Dead}Threads.
const UniquePtr<ProfileBuffer> mProfileBuffer;
const UniquePtr<ProfileBuffer> mBuffer;
// ProfiledThreadData objects for any threads that were profiled at any point
// during this run of the profiler:
@ -901,12 +880,6 @@ void RacyFeatures::SetInactive() { sActiveAndFeatures = 0; }
/* static */
bool RacyFeatures::IsActive() { return uint32_t(sActiveAndFeatures) & Active; }
/* static */
void RacyFeatures::SetPaused() { sActiveAndFeatures |= Paused; }
/* static */
void RacyFeatures::SetUnpaused() { sActiveAndFeatures &= ~Paused; }
/* static */
bool RacyFeatures::IsActiveWithFeature(uint32_t aFeature) {
uint32_t af = sActiveAndFeatures; // copy it first
@ -919,12 +892,6 @@ bool RacyFeatures::IsActiveWithoutPrivacy() {
return (af & Active) && !(af & ProfilerFeature::Privacy);
}
/* static */
bool RacyFeatures::IsActiveAndUnpausedWithoutPrivacy() {
uint32_t af = sActiveAndFeatures; // copy it first
return (af & Active) && !(af & (Paused | ProfilerFeature::Privacy));
}
// Each live thread has a RegisteredThread, and we store a reference to it in
// TLS. This class encapsulates that TLS.
class TLSRegisteredThread {
@ -1426,17 +1393,24 @@ static void DoNativeBacktrace(PSLockRef aLock,
// ProfileBuffer::StreamSamplesToJSON.
static inline void DoSharedSample(PSLockRef aLock, bool aIsSynchronous,
RegisteredThread& aRegisteredThread,
const Registers& aRegs, uint64_t aSamplePos,
const TimeStamp& aNow, const Registers& aRegs,
Maybe<uint64_t>* aLastSample,
ProfileBuffer& aBuffer) {
// WARNING: this function runs within the profiler's "critical section".
MOZ_ASSERT(!aBuffer.IsThreadSafe(),
"Mutexes cannot be used inside this critical section");
MOZ_RELEASE_ASSERT(ActivePS::Exists(aLock));
uint64_t samplePos =
aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId());
if (aLastSample) {
*aLastSample = Some(samplePos);
}
TimeDuration delta = aNow - CorePS::ProcessStartTime();
aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
ProfileBufferCollector collector(aBuffer, ActivePS::Features(aLock),
aSamplePos);
samplePos);
NativeStack nativeStack;
# if defined(HAVE_NATIVE_UNWIND)
if (ActivePS::FeatureStackWalk(aLock)) {
@ -1463,28 +1437,29 @@ static void DoSyncSample(PSLockRef aLock, RegisteredThread& aRegisteredThread,
ProfileBuffer& aBuffer) {
// WARNING: this function runs within the profiler's "critical section".
uint64_t samplePos =
aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId());
TimeDuration delta = aNow - CorePS::ProcessStartTime();
aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aRegs,
samplePos, aBuffer);
DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aNow,
aRegs, /* aLastSample = */ nullptr, aBuffer);
}
// Writes the components of a periodic sample to ActivePS's ProfileBuffer.
// The ThreadId entry is already written in the main ProfileBuffer, its location
// is `aSamplePos`, we can write the rest to `aBuffer` (which may be different).
static void DoPeriodicSample(PSLockRef aLock,
RegisteredThread& aRegisteredThread,
ProfiledThreadData& aProfiledThreadData,
const Registers& aRegs, uint64_t aSamplePos,
ProfileBuffer& aBuffer) {
const TimeStamp& aNow, const Registers& aRegs) {
// WARNING: this function runs within the profiler's "critical section".
DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aRegs,
aSamplePos, aBuffer);
ProfileBuffer& buffer = ActivePS::Buffer(aLock);
DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aNow,
aRegs, &aProfiledThreadData.LastSample(), buffer);
ProfilerMarkerLinkedList* pendingMarkersList =
aRegisteredThread.RacyRegisteredThread().GetPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
}
}
// END sampling/unwinding code
@ -1632,19 +1607,10 @@ static void locked_profiler_stream_json_for_this_process(
MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));
AUTO_PROFILER_STATS(base_locked_profiler_stream_json_for_this_process);
const double collectionStartMs = profiler_time();
double collectionStart = profiler_time();
ProfileBuffer& buffer = ActivePS::Buffer(aLock);
// If there is a set "Window length", discard older data.
Maybe<double> durationS = ActivePS::Duration(aLock);
if (durationS.isSome()) {
const double durationStartMs = collectionStartMs - *durationS * 1000;
buffer.DiscardSamplesBeforeTime(durationStartMs);
}
if (!aOnlyThreads) {
// Put shared library info
aWriter.StartArrayProperty("libs");
@ -1691,15 +1657,15 @@ static void locked_profiler_stream_json_for_this_process(
aWriter.EndArray();
}
const double collectionEndMs = profiler_time();
double collectionEnd = profiler_time();
// Record timestamps for the collection into the buffer, so that consumers
// know why we didn't collect any samples for its duration.
// We put these entries into the buffer after we've collected the profile,
// so they'll be visible for the *next* profile collection (if they haven't
// been overwritten due to buffer wraparound by then).
buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStartMs));
buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEndMs));
buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStart));
buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEnd));
}
bool profiler_stream_json_for_this_process(SpliceableJSONWriter& aWriter,
@ -1770,7 +1736,7 @@ static void PrintUsageThenExit(int aExitCode) {
" first started.\n"
" If unset, the platform default is used:\n"
" %u entries per process, or %u when MOZ_BASE_PROFILER_STARTUP is set.\n"
" (8 bytes per entry -> %u or %u total bytes per process)\n"
" (%zu bytes per entry -> %zu or %zu total bytes per process)\n"
"\n"
" MOZ_BASE_PROFILER_STARTUP_DURATION=<1..>\n"
" If MOZ_BASE_PROFILER_STARTUP is set, specifies the maximum life time\n"
@ -1801,8 +1767,10 @@ static void PrintUsageThenExit(int aExitCode) {
"default/unavailable)\n",
unsigned(BASE_PROFILER_DEFAULT_ENTRIES.Value()),
unsigned(BASE_PROFILER_DEFAULT_STARTUP_ENTRIES.Value()),
unsigned(BASE_PROFILER_DEFAULT_ENTRIES.Value() * 8),
unsigned(BASE_PROFILER_DEFAULT_STARTUP_ENTRIES.Value() * 8));
sizeof(ProfileBufferEntry),
sizeof(ProfileBufferEntry) * BASE_PROFILER_DEFAULT_ENTRIES.Value(),
sizeof(ProfileBufferEntry) *
BASE_PROFILER_DEFAULT_STARTUP_ENTRIES.Value());
# define PRINT_FEATURE(n_, str_, Name_, desc_) \
printf(" %c %5u: \"%s\" (%s)\n", \
@ -1968,18 +1936,6 @@ void SamplerThread::Run() {
// TODO: If possible, name this thread later on, after NSPR becomes available.
// PR_SetCurrentThreadName("SamplerThread");
// Use local BlocksRingBuffer&ProfileBuffer to capture the stack.
// (This is to avoid touching the CorePS::BlocksRingBuffer lock while
// a thread is suspended, because that thread could be working with
// the CorePS::BlocksRingBuffer as well.)
BlocksRingBuffer localBlocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
ProfileBuffer localProfileBuffer(localBlocksRingBuffer,
MakePowerOfTwo32<65536>());
// Will be kept between collections, to know what each collection does.
auto previousState = localBlocksRingBuffer.GetState();
// This will be positive if we are running behind schedule (sampling less
// frequently than desired) and negative if we are ahead of schedule.
TimeDuration lastSleepOvershoot = 0;
@ -2004,6 +1960,7 @@ void SamplerThread::Run() {
ActivePS::ClearExpiredExitProfiles(lock);
ActivePS::Buffer(lock).DeleteExpiredStoredMarkers();
TimeStamp expiredMarkersCleaned = TimeStamp::NowUnfuzzed();
if (!ActivePS::IsPaused(lock)) {
@ -2015,6 +1972,7 @@ void SamplerThread::Run() {
// handle per-process generic counters
const Vector<BaseProfilerCount*>& counters = CorePS::Counters(lock);
TimeStamp now = TimeStamp::NowUnfuzzed();
for (auto& counter : counters) {
// create Buffer entries for each counter
buffer.AddEntry(ProfileBufferEntry::CounterId(counter));
@ -2055,46 +2013,12 @@ void SamplerThread::Run() {
AUTO_PROFILER_STATS(base_SamplerThread_Run_DoPeriodicSample);
TimeStamp now = TimeStamp::NowUnfuzzed();
// Add the thread ID now, so we know its position in the main buffer,
// which is used by some JS data.
// (DoPeriodicSample only knows about the temporary local buffer.)
uint64_t samplePos =
buffer.AddThreadIdEntry(registeredThread->Info()->ThreadId());
profiledThreadData->LastSample() = Some(samplePos);
// Also add the time, so it's always there after the thread ID, as
// expected by the parser. (Other stack data is optional.)
TimeDuration delta = now - CorePS::ProcessStartTime();
buffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
now = TimeStamp::NowUnfuzzed();
mSampler.SuspendAndSampleAndResumeThread(
lock, *registeredThread, [&](const Registers& aRegs) {
DoPeriodicSample(lock, *registeredThread, *profiledThreadData,
aRegs, samplePos, localProfileBuffer);
now, aRegs);
});
// If data is complete, copy it into the global buffer.
auto state = localBlocksRingBuffer.GetState();
if (state.mClearedBlockCount != previousState.mClearedBlockCount) {
LOG("Stack sample too big for local storage, needed %u bytes",
unsigned(state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64()));
} else if (state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64() >=
CorePS::CoreBlocksRingBuffer().BufferLength()->Value()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64()));
} else {
CorePS::CoreBlocksRingBuffer().AppendContents(
localBlocksRingBuffer);
}
// Clean up for the next run.
localBlocksRingBuffer.Clear();
previousState = localBlocksRingBuffer.GetState();
}
# if defined(USE_LUL_STACKWALK)
@ -2112,6 +2036,14 @@ void SamplerThread::Run() {
countersSampled - expiredMarkersCleaned,
threadsSampled - countersSampled);
}
Maybe<double> duration = ActivePS::Duration(lock);
if (duration) {
ActivePS::Buffer(lock).DiscardSamplesBeforeTime(
(TimeStamp::NowUnfuzzed() - TimeDuration::FromSeconds(*duration) -
CorePS::ProcessStartTime())
.ToMilliseconds());
}
}
// gPSMutex is not held after this point.
@ -2737,11 +2669,10 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
# endif
// Fall back to the default values if the passed-in values are unreasonable.
// Less than 8192 entries (65536 bytes) may not be enough for the most complex
// stack, so we should be able to store at least one full stack.
// TODO: Review magic numbers.
// Less than 1024 would not be enough for the most complex stack, so we should
// be able to store at least one full stack. TODO: Review magic numbers.
PowerOfTwo32 capacity =
(aCapacity.Value() >= 8192u) ? aCapacity : BASE_PROFILER_DEFAULT_ENTRIES;
(aCapacity.Value() >= 1024u) ? aCapacity : BASE_PROFILER_DEFAULT_ENTRIES;
Maybe<double> duration = aDuration;
if (aDuration && *aDuration <= 0) {
@ -2945,7 +2876,6 @@ void profiler_pause() {
return;
}
RacyFeatures::SetPaused();
ActivePS::SetIsPaused(lock, true);
ActivePS::Buffer(lock).AddEntry(ProfileBufferEntry::Pause(profiler_time()));
}
@ -2966,7 +2896,6 @@ void profiler_resume() {
ActivePS::Buffer(lock).AddEntry(
ProfileBufferEntry::Resume(profiler_time()));
ActivePS::SetIsPaused(lock, false);
RacyFeatures::SetUnpaused();
}
}
@ -3187,35 +3116,30 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
regs.Clear();
# endif
// 65536 bytes should be plenty for a single backtrace.
auto bufferManager = MakeUnique<BlocksRingBuffer>(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
auto buffer =
MakeUnique<ProfileBuffer>(*bufferManager, MakePowerOfTwo32<65536>());
// 1024 should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024>());
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
return UniqueProfilerBacktrace(new ProfilerBacktrace(
"SyncProfile", tid, std::move(bufferManager), std::move(buffer)));
return UniqueProfilerBacktrace(
new ProfilerBacktrace("SyncProfile", tid, std::move(buffer)));
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
static void racy_profiler_add_marker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload* aPayload) {
static void racy_profiler_add_marker(
const char* aMarkerName, ProfilingCategoryPair aCategoryPair,
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// This function is hot enough that we use RacyFeatures, not ActivePS.
if (!profiler_can_accept_markers()) {
return;
}
// Note that it's possible that the above test would change again before we
// actually record the marker. Because of this imprecision it's possible to
// miss a marker or record one we shouldn't. Either way is not a big deal.
// We don't assert that RacyFeatures::IsActiveWithoutPrivacy() or
// RacyRegisteredThread::IsBeingProfiled() is true here, because it's
// possible that the result has changed since we tested it in the caller.
//
// Because of this imprecision it's possible to miss a marker or record one
// we shouldn't. Either way is not a big deal.
RacyRegisteredThread* racyRegisteredThread =
TLSRegisteredThread::RacyRegisteredThread();
@ -3227,28 +3151,33 @@ static void racy_profiler_add_marker(const char* aMarkerName,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, racyRegisteredThread->ThreadId(),
WrapBlocksRingBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
racyRegisteredThread->AddPendingMarker(
aMarkerName, aCategoryPair, std::move(aPayload), delta.ToMilliseconds());
}
void profiler_add_marker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload& aPayload) {
racy_profiler_add_marker(aMarkerName, aCategoryPair, &aPayload);
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// This function is hot enough that we use RacyFeatures, not ActivePS.
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(aPayload));
}
void profiler_add_marker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair) {
racy_profiler_add_marker(aMarkerName, aCategoryPair, nullptr);
profiler_add_marker(aMarkerName, aCategoryPair, nullptr);
}
// This is a simplified version of profiler_add_marker that can be easily passed
// into the JS engine.
void profiler_add_js_marker(const char* aMarkerName) {
AUTO_PROFILER_STATS(base_add_marker);
profiler_add_marker(aMarkerName, ProfilingCategoryPair::JS);
profiler_add_marker(aMarkerName, ProfilingCategoryPair::JS, nullptr);
}
// This logic needs to add a marker for a different thread, so we actually need
@ -3259,41 +3188,39 @@ void profiler_add_marker_for_thread(int aThreadId,
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
if (!profiler_can_accept_markers()) {
PSAutoLock lock;
if (!ActivePS::Exists(lock)) {
return;
}
# ifdef DEBUG
{
PSAutoLock lock;
if (!ActivePS::Exists(lock)) {
return;
}
// Assert that our thread ID makes sense
bool realThread = false;
const Vector<UniquePtr<RegisteredThread>>& registeredThreads =
CorePS::RegisteredThreads(lock);
for (auto& thread : registeredThreads) {
RefPtr<ThreadInfo> info = thread->Info();
if (info->ThreadId() == aThreadId) {
realThread = true;
break;
}
}
MOZ_ASSERT(realThread, "Invalid thread id");
}
# endif
// Insert the marker into the buffer
// Create the ProfilerMarker which we're going to store.
TimeStamp origin = (aPayload && !aPayload->GetStartTime().IsNull())
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, aThreadId,
WrapBlocksRingBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
ProfilerMarker* marker =
new ProfilerMarker(aMarkerName, aCategoryPair, aThreadId,
std::move(aPayload), delta.ToMilliseconds());
# ifdef DEBUG
// Assert that our thread ID makes sense
bool realThread = false;
const Vector<UniquePtr<RegisteredThread>>& registeredThreads =
CorePS::RegisteredThreads(lock);
for (auto& thread : registeredThreads) {
RefPtr<ThreadInfo> info = thread->Info();
if (info->ThreadId() == aThreadId) {
realThread = true;
break;
}
}
MOZ_ASSERT(realThread, "Invalid thread id");
# endif
// Insert the marker into the buffer
ProfileBuffer& buffer = ActivePS::Buffer(lock);
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
@ -3305,14 +3232,14 @@ void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
VTUNE_TRACING(aMarkerName, aKind);
// This function is hot enough that we use RacyFeatures, notActivePS.
if (!profiler_can_accept_markers()) {
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
AUTO_PROFILER_STATS(base_add_marker_with_TracingMarkerPayload);
profiler_add_marker(aMarkerName, aCategoryPair,
TracingMarkerPayload(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId));
auto payload = MakeUnique<TracingMarkerPayload>(
aCategoryString, aKind, aDocShellId, aDocShellHistoryId);
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(payload));
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
@ -3325,15 +3252,15 @@ void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
VTUNE_TRACING(aMarkerName, aKind);
// This function is hot enough that we use RacyFeatures, notActivePS.
if (!profiler_can_accept_markers()) {
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
AUTO_PROFILER_STATS(base_add_marker_with_TracingMarkerPayload);
profiler_add_marker(
aMarkerName, aCategoryPair,
TracingMarkerPayload(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
auto payload =
MakeUnique<TracingMarkerPayload>(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId, std::move(aCause));
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(payload));
}
void profiler_add_text_marker(const char* aMarkerName, const std::string& aText,
@ -3346,8 +3273,8 @@ void profiler_add_text_marker(const char* aMarkerName, const std::string& aText,
AUTO_PROFILER_STATS(base_add_marker_with_TextMarkerPayload);
profiler_add_marker(
aMarkerName, aCategoryPair,
TextMarkerPayload(aText, aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
MakeUnique<TextMarkerPayload>(aText, aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
}
// NOTE: aCollector's methods will be called while the target thread is paused.

View File

@ -200,25 +200,18 @@ class RacyFeatures {
MFBT_API static void SetInactive();
MFBT_API static void SetPaused();
MFBT_API static void SetUnpaused();
MFBT_API static bool IsActive();
MFBT_API static bool IsActiveWithFeature(uint32_t aFeature);
MFBT_API static bool IsActiveWithoutPrivacy();
MFBT_API static bool IsActiveAndUnpausedWithoutPrivacy();
private:
static constexpr uint32_t Active = 1u << 31;
static constexpr uint32_t Paused = 1u << 30;
static const uint32_t Active = 1u << 31;
// Ensure Active/Paused don't overlap with any of the feature bits.
// Ensure Active doesn't overlap with any of the feature bits.
# define NO_OVERLAP(n_, str_, Name_, desc_) \
static_assert(ProfilerFeature::Name_ != Paused, "bad feature value");
static_assert(ProfilerFeature::Name_ != Active, "bad Active value");
BASE_PROFILER_FOR_EACH_FEATURE(NO_OVERLAP);
@ -243,18 +236,18 @@ MFBT_API bool IsThreadBeingProfiled();
static constexpr PowerOfTwo32 BASE_PROFILER_DEFAULT_ENTRIES =
# if !defined(ARCH_ARMV6)
MakePowerOfTwo32<1u << 20>(); // 1'048'576 entries = 8MB
MakePowerOfTwo32<1u << 20>(); // 1'048'576
# else
MakePowerOfTwo32<1u << 17>(); // 131'072 entries = 1MB
MakePowerOfTwo32<1u << 17>(); // 131'072
# endif
// Startup profiling usually need to capture more data, especially on slow
// systems.
static constexpr PowerOfTwo32 BASE_PROFILER_DEFAULT_STARTUP_ENTRIES =
# if !defined(ARCH_ARMV6)
MakePowerOfTwo32<1u << 22>(); // 4'194'304 entries = 32MB
MakePowerOfTwo32<1u << 22>(); // 4'194'304
# else
MakePowerOfTwo32<1u << 17>(); // 131'072 = 1MB
MakePowerOfTwo32<1u << 17>(); // 131'072
# endif
# define BASE_PROFILER_DEFAULT_DURATION 20
@ -278,8 +271,8 @@ MFBT_API void profiler_shutdown();
// selected options. Stops and restarts the profiler if it is already active.
// After starting the profiler is "active". The samples will be recorded in a
// circular buffer.
// "aCapacity" is the maximum number of 8-byte entries in the profiler's
// circular buffer.
// "aCapacity" is the maximum number of entries in the profiler's circular
// buffer.
// "aInterval" the sampling interval, measured in millseconds.
// "aFeatures" is the feature set. Features unsupported by this
// platform/configuration are ignored.
@ -410,19 +403,6 @@ inline bool profiler_is_active() {
return baseprofiler::detail::RacyFeatures::IsActive();
}
// Same as profiler_is_active(), but with the same extra checks that determine
// if the profiler would currently store markers. So this should be used before
// doing some potentially-expensive work that's used in a marker. E.g.:
//
// if (profiler_can_accept_markers()) {
// ExpensiveMarkerPayload expensivePayload = CreateExpensivePayload();
// BASE_PROFILER_ADD_MARKER_WITH_PAYLOAD(name, OTHER, expensivePayload);
// }
inline bool profiler_can_accept_markers() {
return baseprofiler::detail::RacyFeatures::
IsActiveAndUnpausedWithoutPrivacy();
}
// Is the profiler active, and is the current thread being profiled?
// (Same caveats and recommented usage as profiler_is_active().)
inline bool profiler_thread_is_being_profiled() {
@ -530,7 +510,7 @@ struct ProfilerBufferInfo {
uint64_t mRangeStart;
// Index of the newest entry.
uint64_t mRangeEnd;
// Buffer capacity in number of 8-byte entries.
// Buffer capacity in number of entries.
uint32_t mEntryCount;
// Sampling stats: Interval (ns) between successive samplings.
ProfilerStats mIntervalsNs;
@ -732,13 +712,12 @@ MFBT_API void profiler_add_marker(const char* aMarkerName,
::mozilla::baseprofiler::profiler_add_marker( \
markerName, \
::mozilla::baseprofiler::ProfilingCategoryPair::categoryPair, \
PayloadType parenthesizedPayloadArgs); \
::mozilla::MakeUnique<PayloadType> parenthesizedPayloadArgs); \
} while (false)
MFBT_API void profiler_add_marker(const char* aMarkerName,
ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload& aPayload);
UniquePtr<ProfilerMarkerPayload> aPayload);
MFBT_API void profiler_add_js_marker(const char* aMarkerName);
// Insert a marker in the profile timeline for a specified thread.

View File

@ -9,7 +9,6 @@
#ifndef BaseProfilerDetail_h
#define BaseProfilerDetail_h
#include "mozilla/Maybe.h"
#include "mozilla/PlatformMutex.h"
#ifdef DEBUG
@ -101,70 +100,6 @@ class MOZ_RAII BaseProfilerAutoLock {
BaseProfilerMutex& mMutex;
};
// Thin shell around mozglue PlatformMutex, for Base Profiler internal use.
// Actual mutex may be disabled at construction time.
// Does not preserve behavior in JS record/replay.
class BaseProfilerMaybeMutex : private ::mozilla::detail::MutexImpl {
public:
explicit BaseProfilerMaybeMutex(bool aActivate) {
if (aActivate) {
mMaybeMutex.emplace();
}
}
BaseProfilerMaybeMutex(const BaseProfilerMaybeMutex&) = delete;
BaseProfilerMaybeMutex& operator=(const BaseProfilerMaybeMutex&) = delete;
BaseProfilerMaybeMutex(BaseProfilerMaybeMutex&&) = delete;
BaseProfilerMaybeMutex& operator=(BaseProfilerMaybeMutex&&) = delete;
~BaseProfilerMaybeMutex() = default;
bool IsActivated() const { return mMaybeMutex.isSome(); }
void Lock() {
if (IsActivated()) {
mMaybeMutex->Lock();
}
}
void Unlock() {
if (IsActivated()) {
mMaybeMutex->Unlock();
}
}
void AssertCurrentThreadOwns() const {
#ifdef MOZ_BASE_PROFILER_DEBUG
if (IsActivated()) {
mMaybeMutex->AssertCurrentThreadOwns();
}
#endif // MOZ_BASE_PROFILER_DEBUG
}
private:
Maybe<BaseProfilerMutex> mMaybeMutex;
};
// RAII class to lock a mutex.
class MOZ_RAII BaseProfilerMaybeAutoLock {
public:
explicit BaseProfilerMaybeAutoLock(BaseProfilerMaybeMutex& aMaybeMutex)
: mMaybeMutex(aMaybeMutex) {
mMaybeMutex.Lock();
}
BaseProfilerMaybeAutoLock(const BaseProfilerMaybeAutoLock&) = delete;
BaseProfilerMaybeAutoLock& operator=(const BaseProfilerMaybeAutoLock&) =
delete;
BaseProfilerMaybeAutoLock(BaseProfilerMaybeAutoLock&&) = delete;
BaseProfilerMaybeAutoLock& operator=(BaseProfilerMaybeAutoLock&&) = delete;
~BaseProfilerMaybeAutoLock() { mMaybeMutex.Unlock(); }
private:
BaseProfilerMaybeMutex& mMaybeMutex;
};
} // namespace detail
} // namespace baseprofiler
} // namespace mozilla

View File

@ -13,9 +13,7 @@
# error Do not #include this header when MOZ_BASE_PROFILER is not #defined.
#endif
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TimeStamp.h"
@ -39,184 +37,86 @@ class ProfilerMarkerPayload {
const Maybe<std::string>& aDocShellId = Nothing(),
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mCommonProps{TimeStamp{}, TimeStamp{}, std::move(aStack),
std::move(aDocShellId), std::move(aDocShellHistoryId)} {}
: mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
ProfilerMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId = Nothing(),
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mCommonProps{aStartTime, aEndTime, std::move(aStack),
std::move(aDocShellId), std::move(aDocShellHistoryId)} {}
: mStartTime(aStartTime),
mEndTime(aEndTime),
mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
virtual ~ProfilerMarkerPayload() {}
// Compute the number of bytes needed to serialize the `DeserializerTag` and
// payload, including in the no-payload (nullptr) case.
static BlocksRingBuffer::Length TagAndSerializationBytes(
const ProfilerMarkerPayload* aPayload) {
if (!aPayload) {
return sizeof(DeserializerTag);
}
return aPayload->TagAndSerializationBytes();
}
// Serialize the payload into an EntryWriter, including in the no-payload
// (nullptr) case. Must be of the exact size given by
// `TagAndSerializationBytes(aPayload)`.
static void TagAndSerialize(const ProfilerMarkerPayload* aPayload,
BlocksRingBuffer::EntryWriter& aEntryWriter) {
if (!aPayload) {
aEntryWriter.WriteObject(DeserializerTag(0));
return;
}
aPayload->SerializeTagAndPayload(aEntryWriter);
}
// Deserialize a payload from an EntryReader, including in the no-payload
// (nullptr) case.
static UniquePtr<ProfilerMarkerPayload> DeserializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryReader& aER) {
const auto tag = aER.ReadObject<DeserializerTag>();
Deserializer deserializer = DeserializerForTag(tag);
return deserializer(aER);
}
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const = 0;
UniqueStacks& aUniqueStacks) = 0;
TimeStamp GetStartTime() const { return mCommonProps.mStartTime; }
TimeStamp GetStartTime() const { return mStartTime; }
protected:
// A `Deserializer` is a free function that can read a serialized payload from
// an `EntryReader` and return a reconstructed `ProfilerMarkerPayload`
// sub-object (may be null if there was no payload).
typedef UniquePtr<ProfilerMarkerPayload> (*Deserializer)(
BlocksRingBuffer::EntryReader&);
// A `DeserializerTag` will be added before the payload, to help select the
// correct deserializer when reading back the payload.
using DeserializerTag = unsigned char;
// This needs to be big enough to handle all possible sub-types of
// ProfilerMarkerPayload.
static constexpr DeserializerTag DeserializerMax = 32;
// We need an atomic type that can hold a `DeserializerTag`. (Atomic doesn't
// work with too-small types.)
using DeserializerTagAtomic = int;
// Number of currently-registered deserializers.
static Atomic<DeserializerTagAtomic, ReleaseAcquire,
recordreplay::Behavior::DontPreserve>
sDeserializerCount;
// List of currently-registered deserializers.
// sDeserializers[0] is a no-payload deserializer.
static Deserializer sDeserializers[DeserializerMax];
// Get the `DeserializerTag` for a `Deserializer` (which gets registered on
// the first call.) Tag 0 means no payload; a null `aDeserializer` gives that
// 0 tag.
MFBT_API static DeserializerTag TagForDeserializer(
Deserializer aDeserializer);
// Get the `Deserializer` for a given `DeserializerTag`.
// Tag 0 is reserved as no-payload deserializer (which returns nullptr).
MFBT_API static Deserializer DeserializerForTag(DeserializerTag aTag);
struct CommonProps {
TimeStamp mStartTime;
TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
Maybe<std::string> mDocShellId;
Maybe<uint32_t> mDocShellHistoryId;
};
// Deserializers can use this base constructor.
explicit ProfilerMarkerPayload(CommonProps&& aCommonProps)
: mCommonProps(std::move(aCommonProps)) {}
// Serialization/deserialization of common props in ProfilerMarkerPayload.
MFBT_API BlocksRingBuffer::Length CommonPropsTagAndSerializationBytes() const;
MFBT_API void SerializeTagAndCommonProps(
DeserializerTag aDeserializerTag,
BlocksRingBuffer::EntryWriter& aEntryWriter) const;
MFBT_API static CommonProps DeserializeCommonProps(
BlocksRingBuffer::EntryReader& aEntryReader);
MFBT_API void StreamType(const char* aMarkerType,
SpliceableJSONWriter& aWriter) const;
SpliceableJSONWriter& aWriter);
MFBT_API void StreamCommonProps(const char* aMarkerType,
SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const;
UniqueStacks& aUniqueStacks);
private:
// Compute the number of bytes needed to serialize the `DeserializerTag` and
// payload in `SerializeTagAndPayload` below.
virtual BlocksRingBuffer::Length TagAndSerializationBytes() const = 0;
// Serialize the `DeserializerTag` and payload into an EntryWriter.
// Must be of the exact size given by `TagAndSerializationBytes()`.
virtual void SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const = 0;
CommonProps mCommonProps;
TimeStamp mStartTime;
TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
Maybe<std::string> mDocShellId;
Maybe<uint32_t> mDocShellHistoryId;
};
#define DECL_BASE_STREAM_PAYLOAD \
MFBT_API void StreamPayload( \
::mozilla::baseprofiler::SpliceableJSONWriter& aWriter, \
const ::mozilla::TimeStamp& aProcessStartTime, \
::mozilla::baseprofiler::UniqueStacks& aUniqueStacks) const override; \
static UniquePtr<ProfilerMarkerPayload> Deserialize( \
BlocksRingBuffer::EntryReader& aEntryReader); \
MFBT_API BlocksRingBuffer::Length TagAndSerializationBytes() const override; \
MFBT_API void SerializeTagAndPayload( \
BlocksRingBuffer::EntryWriter& aEntryWriter) const override;
#define DECL_BASE_STREAM_PAYLOAD \
virtual void StreamPayload( \
::mozilla::baseprofiler::SpliceableJSONWriter& aWriter, \
const ::mozilla::TimeStamp& aProcessStartTime, \
::mozilla::baseprofiler::UniqueStacks& aUniqueStacks) override;
// TODO: Increase the coverage of tracing markers that include DocShell
// information
class TracingMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API TracingMarkerPayload(
const char* aCategory, TracingKind aKind,
const Maybe<std::string>& aDocShellId = Nothing(),
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
UniqueProfilerBacktrace aCause = nullptr);
MFBT_API ~TracingMarkerPayload() override;
TracingMarkerPayload(const char* aCategory, TracingKind aKind,
const Maybe<std::string>& aDocShellId = Nothing(),
const Maybe<uint32_t>& aDocShellHistoryId = Nothing(),
UniqueProfilerBacktrace aCause = nullptr)
: ProfilerMarkerPayload(aDocShellId, aDocShellHistoryId,
std::move(aCause)),
mCategory(aCategory),
mKind(aKind) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API TracingMarkerPayload(CommonProps&& aCommonProps,
const char* aCategory, TracingKind aKind);
const char* mCategory;
TracingKind mKind;
};
class FileIOMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API FileIOMarkerPayload(const char* aOperation, const char* aSource,
const char* aFilename,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
UniqueProfilerBacktrace aStack);
MFBT_API ~FileIOMarkerPayload() override;
FileIOMarkerPayload(const char* aOperation, const char* aSource,
const char* aFilename, const TimeStamp& aStartTime,
const TimeStamp& aEndTime, UniqueProfilerBacktrace aStack)
: ProfilerMarkerPayload(aStartTime, aEndTime, Nothing(), Nothing(),
std::move(aStack)),
mSource(aSource),
mOperation(aOperation ? strdup(aOperation) : nullptr),
mFilename(aFilename ? strdup(aFilename) : nullptr) {
MOZ_ASSERT(aSource);
}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API FileIOMarkerPayload(CommonProps&& aCommonProps, const char* aSource,
UniqueFreePtr<char>&& aOperation,
UniqueFreePtr<char>&& aFilename);
const char* mSource;
UniqueFreePtr<char> mOperation;
UniqueFreePtr<char> mFilename;
@ -224,29 +124,31 @@ class FileIOMarkerPayload : public ProfilerMarkerPayload {
class UserTimingMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API UserTimingMarkerPayload(const std::string& aName,
const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId);
UserTimingMarkerPayload(const std::string& aName, const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mEntryType("mark"),
mName(aName) {}
MFBT_API UserTimingMarkerPayload(const std::string& aName,
const Maybe<std::string>& aStartMark,
const Maybe<std::string>& aEndMark,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId);
MFBT_API ~UserTimingMarkerPayload() override;
UserTimingMarkerPayload(const std::string& aName,
const Maybe<std::string>& aStartMark,
const Maybe<std::string>& aEndMark,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId),
mEntryType("measure"),
mName(aName),
mStartMark(aStartMark),
mEndMark(aEndMark) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API UserTimingMarkerPayload(CommonProps&& aCommonProps,
const char* aEntryType, std::string&& aName,
Maybe<std::string>&& aStartMark,
Maybe<std::string>&& aEndMark);
// Either "mark" or "measure".
const char* mEntryType;
std::string mName;
@ -256,129 +158,68 @@ class UserTimingMarkerPayload : public ProfilerMarkerPayload {
class HangMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API HangMarkerPayload(const TimeStamp& aStartTime,
const TimeStamp& aEndTime);
MFBT_API ~HangMarkerPayload() override;
HangMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API explicit HangMarkerPayload(CommonProps&& aCommonProps);
};
class LongTaskMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API LongTaskMarkerPayload(const TimeStamp& aStartTime,
const TimeStamp& aEndTime);
MFBT_API ~LongTaskMarkerPayload() override;
LongTaskMarkerPayload(const TimeStamp& aStartTime, const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API explicit LongTaskMarkerPayload(CommonProps&& aCommonProps);
};
class TextMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime);
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime), mText(aText) {}
MFBT_API TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime);
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
const TimeStamp& aEndTime)
: ProfilerMarkerPayload(aStartTime, aEndTime), mText(aText) {}
MFBT_API TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId);
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId)
: ProfilerMarkerPayload(aStartTime, aStartTime, aDocShellId,
aDocShellHistoryId),
mText(aText) {}
MFBT_API TextMarkerPayload(const std::string& aText,
const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace aCause = nullptr);
MFBT_API ~TextMarkerPayload() override;
TextMarkerPayload(const std::string& aText, const TimeStamp& aStartTime,
const TimeStamp& aEndTime,
const Maybe<std::string>& aDocShellId,
const Maybe<uint32_t>& aDocShellHistoryId,
UniqueProfilerBacktrace aCause = nullptr)
: ProfilerMarkerPayload(aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)),
mText(aText) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API TextMarkerPayload(CommonProps&& aCommonProps, std::string&& aText);
std::string mText;
};
class LogMarkerPayload : public ProfilerMarkerPayload {
public:
MFBT_API LogMarkerPayload(const char* aModule, const char* aText,
const TimeStamp& aStartTime);
MFBT_API ~LogMarkerPayload() override;
LogMarkerPayload(const char* aModule, const char* aText,
const TimeStamp& aStartTime)
: ProfilerMarkerPayload(aStartTime, aStartTime),
mModule(aModule),
mText(aText) {}
DECL_BASE_STREAM_PAYLOAD
private:
MFBT_API LogMarkerPayload(CommonProps&& aCommonProps, std::string&& aModule,
std::string&& aText);
std::string mModule; // longest known LazyLogModule name is ~24
std::string mText;
};
} // namespace baseprofiler
// Serialize a pointed-at ProfilerMarkerPayload, may be null when there are no
// payloads.
template <>
struct BlocksRingBuffer::Serializer<
const baseprofiler::ProfilerMarkerPayload*> {
static Length Bytes(const baseprofiler::ProfilerMarkerPayload* aPayload) {
return baseprofiler::ProfilerMarkerPayload::TagAndSerializationBytes(
aPayload);
}
static void Write(EntryWriter& aEW,
const baseprofiler::ProfilerMarkerPayload* aPayload) {
baseprofiler::ProfilerMarkerPayload::TagAndSerialize(aPayload, aEW);
}
};
// Serialize a pointed-at ProfilerMarkerPayload, may be null for no payloads.
template <>
struct BlocksRingBuffer::Serializer<
UniquePtr<baseprofiler::ProfilerMarkerPayload>> {
static Length Bytes(
const UniquePtr<baseprofiler::ProfilerMarkerPayload>& aPayload) {
return baseprofiler::ProfilerMarkerPayload::TagAndSerializationBytes(
aPayload.get());
}
static void Write(
EntryWriter& aEW,
const UniquePtr<baseprofiler::ProfilerMarkerPayload>& aPayload) {
baseprofiler::ProfilerMarkerPayload::TagAndSerialize(aPayload.get(), aEW);
}
};
// Deserialize a ProfilerMarkerPayload into a UniquePtr, may be null if there
// are no payloads.
template <>
struct BlocksRingBuffer::Deserializer<
UniquePtr<baseprofiler::ProfilerMarkerPayload>> {
static void ReadInto(
EntryReader& aER,
UniquePtr<baseprofiler::ProfilerMarkerPayload>& aPayload) {
aPayload = Read(aER);
}
static UniquePtr<baseprofiler::ProfilerMarkerPayload> Read(EntryReader& aER) {
return baseprofiler::ProfilerMarkerPayload::DeserializeTagAndPayload(aER);
}
};
} // namespace mozilla
#endif // BaseProfilerMarkerPayload_h

View File

@ -84,13 +84,12 @@ class BlocksRingBuffer {
// Near-infinite index type, not expecting overflow.
using Index = uint64_t;
public:
// Using ModuloBuffer as underlying circular byte buffer.
using Buffer = ModuloBuffer<uint32_t, Index>;
using Byte = Buffer::Byte;
using BufferWriter = Buffer::Writer;
using BufferReader = Buffer::Reader;
public:
// Length type for total buffer (as PowerOfTwo<Length>) and each entry.
using Length = uint32_t;
@ -160,13 +159,6 @@ class BlocksRingBuffer {
return mBlockIndex >= aRhs.mBlockIndex;
}
// Temporary escape hatches to let legacy code access block indices.
// TODO: Remove this when legacy code has been modernized.
uint64_t ConvertToU64() const { return uint64_t(mBlockIndex); }
static BlockIndex ConvertFromU64(uint64_t aIndex) {
return BlockIndex(Index(aIndex));
}
private:
// Only BlocksRingBuffer internal functions and serializers can convert
// between `BlockIndex` and `Index`.
@ -177,34 +169,25 @@ class BlocksRingBuffer {
Index mBlockIndex;
};
enum class ThreadSafety { WithoutMutex, WithMutex };
// Default constructor starts out-of-session (nothing to read or write).
explicit BlocksRingBuffer(ThreadSafety aThreadSafety)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex) {}
BlocksRingBuffer() = default;
// Constructors with no entry destructor, the oldest entries will be silently
// overwritten/destroyed.
// Create a buffer of the given length.
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
PowerOfTwo<Length> aLength)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(aLength))) {}
explicit BlocksRingBuffer(PowerOfTwo<Length> aLength)
: mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(aLength))) {}
// Take ownership of an existing buffer.
BlocksRingBuffer(ThreadSafety aThreadSafety,
UniquePtr<Buffer::Byte[]> aExistingBuffer,
BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(
: mMaybeUnderlyingBuffer(
Some(UnderlyingBuffer(std::move(aExistingBuffer), aLength))) {}
// Use an externally-owned buffer.
BlocksRingBuffer(ThreadSafety aThreadSafety, Buffer::Byte* aExternalBuffer,
PowerOfTwo<Length> aLength)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(
BlocksRingBuffer(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
: mMaybeUnderlyingBuffer(
Some(UnderlyingBuffer(aExternalBuffer, aLength))) {}
// Constructors with an entry destructor, which will be called with an
@ -215,32 +198,26 @@ class BlocksRingBuffer {
// Create a buffer of the given length.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
PowerOfTwo<Length> aLength,
explicit BlocksRingBuffer(PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
: mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
aLength, std::forward<EntryDestructor>(aEntryDestructor)))) {}
// Take ownership of an existing buffer.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
UniquePtr<Buffer::Byte[]> aExistingBuffer,
explicit BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
: mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
std::move(aExistingBuffer), aLength,
std::forward<EntryDestructor>(aEntryDestructor)))) {}
// Use an externally-owned buffer.
template <typename EntryDestructor>
explicit BlocksRingBuffer(ThreadSafety aThreadSafety,
Buffer::Byte* aExternalBuffer,
explicit BlocksRingBuffer(Buffer::Byte* aExternalBuffer,
PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor)
: mMutex(aThreadSafety != ThreadSafety::WithoutMutex),
mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
: mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
aExternalBuffer, aLength,
std::forward<EntryDestructor>(aEntryDestructor)))) {}
@ -249,20 +226,20 @@ class BlocksRingBuffer {
~BlocksRingBuffer() {
#ifdef DEBUG
// Needed because of lock DEBUG-check in `DestroyAllEntries()`.
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
#endif // DEBUG
DestroyAllEntries();
}
// Remove underlying buffer, if any.
void Reset() {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
}
// Create a buffer of the given length.
void Set(PowerOfTwo<Length> aLength) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(aLength);
}
@ -270,14 +247,14 @@ class BlocksRingBuffer {
// Take ownership of an existing buffer.
void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(std::move(aExistingBuffer), aLength);
}
// Use an externally-owned buffer.
void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(aExternalBuffer, aLength);
}
@ -285,7 +262,7 @@ class BlocksRingBuffer {
// Create a buffer of the given length, with entry destructor.
template <typename EntryDestructor>
void Set(PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
aLength, std::forward<EntryDestructor>(aEntryDestructor));
@ -295,7 +272,7 @@ class BlocksRingBuffer {
template <typename EntryDestructor>
void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
std::move(aExistingBuffer), aLength,
@ -306,27 +283,25 @@ class BlocksRingBuffer {
template <typename EntryDestructor>
void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength,
EntryDestructor&& aEntryDestructor) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ResetUnderlyingBuffer();
mMaybeUnderlyingBuffer.emplace(
aExternalBuffer, aLength,
std::forward<EntryDestructor>(aEntryDestructor));
}
bool IsThreadSafe() const { return mMutex.IsActivated(); }
// Lock the buffer mutex and run the provided callback.
// This can be useful when the caller needs to explicitly lock down this
// buffer, but not do anything else with it.
template <typename Callback>
auto LockAndRun(Callback&& aCallback) const {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
return std::forward<Callback>(aCallback)();
}
// Buffer length in bytes.
Maybe<PowerOfTwo<Length>> BufferLength() const {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
return mMaybeUnderlyingBuffer.map([](const UnderlyingBuffer& aBuffer) {
return aBuffer.mBuffer.BufferLength();
});
@ -369,7 +344,7 @@ class BlocksRingBuffer {
// Note that these may change right after this thread-safe call, so they
// should only be used for statistical purposes.
State GetState() const {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
return {
mFirstReadIndex, mNextWriteIndex,
mMaybeUnderlyingBuffer ? mMaybeUnderlyingBuffer->mPushedBlockCount : 0,
@ -667,7 +642,7 @@ class BlocksRingBuffer {
template <typename Callback>
auto Read(Callback&& aCallback) const {
{
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
Reader reader(*this);
return std::forward<Callback>(aCallback)(&reader);
@ -695,7 +670,7 @@ class BlocksRingBuffer {
// store `EntryReader`, because it may become invalid after this call.
template <typename Callback>
auto ReadAt(BlockIndex aBlockIndex, Callback&& aCallback) const {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
MOZ_ASSERT(aBlockIndex <= mNextWriteIndex);
Maybe<EntryReader> maybeEntryReader;
if (MOZ_LIKELY(mMaybeUnderlyingBuffer) && aBlockIndex >= mFirstReadIndex &&
@ -839,7 +814,7 @@ class BlocksRingBuffer {
template <typename CallbackBytes, typename Callback>
auto ReserveAndPut(CallbackBytes aCallbackBytes, Callback&& aCallback) {
{ // Locked block.
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
Length bytes = std::forward<CallbackBytes>(aCallbackBytes)();
// Don't allow even half of the buffer length. More than that would
@ -926,81 +901,10 @@ class BlocksRingBuffer {
return PutObjects(aOb);
}
// Append the contents of another BlocksRingBuffer to this one.
BlockIndex AppendContents(const BlocksRingBuffer& aSrc) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
if (MOZ_UNLIKELY(!mMaybeUnderlyingBuffer)) {
// We are out-of-session, could not append contents.
return BlockIndex{};
}
baseprofiler::detail::BaseProfilerMaybeAutoLock srcLock(aSrc.mMutex);
if (MOZ_UNLIKELY(!aSrc.mMaybeUnderlyingBuffer)) {
// The other BRB is out-of-session, nothing to copy, we're done.
return BlockIndex{};
}
const Index srcStartIndex = Index(aSrc.mFirstReadIndex);
const Index srcEndIndex = Index(aSrc.mNextWriteIndex);
const Length bytesToCopy = static_cast<Length>(srcEndIndex - srcStartIndex);
if (MOZ_UNLIKELY(bytesToCopy == 0)) {
// The other BRB is empty, nothing to copy, we're done.
return BlockIndex{};
}
// Don't allow an entry to wrap around and overwrite itself!
MOZ_RELEASE_ASSERT(bytesToCopy <=
mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value());
// We will put all copied blocks at the end of the current buffer.
const Index dstStartIndex = Index(mNextWriteIndex);
// Compute where the copy will end...
const Index dstEndIndex = dstStartIndex + bytesToCopy;
// ... which is where the following block will go.
mNextWriteIndex = BlockIndex(dstEndIndex);
while (dstEndIndex >
Index(mFirstReadIndex) +
mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value()) {
// About to trample on an old block.
EntryReader reader = ReaderInBlockAt(mFirstReadIndex);
// Call provided entry destructor for that entry.
if (mMaybeUnderlyingBuffer->mEntryDestructor) {
mMaybeUnderlyingBuffer->mEntryDestructor(reader);
}
mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
MOZ_ASSERT(reader.CurrentIndex() <= Index(reader.NextBlockIndex()));
// Move the buffer reading start past this cleared block.
mFirstReadIndex = reader.NextBlockIndex();
}
// Update our pushed count with the number of live blocks we are copying.
mMaybeUnderlyingBuffer->mPushedBlockCount +=
aSrc.mMaybeUnderlyingBuffer->mPushedBlockCount -
aSrc.mMaybeUnderlyingBuffer->mClearedBlockCount;
const auto readerEnd =
aSrc.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(srcEndIndex);
auto writer = mMaybeUnderlyingBuffer->mBuffer.WriterAt(dstStartIndex);
// Copy all the bytes. TODO: Optimize with memcpy's?
for (auto reader =
aSrc.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(srcStartIndex);
reader != readerEnd; ++reader, ++writer) {
*writer = *reader;
}
MOZ_ASSERT(writer == mMaybeUnderlyingBuffer->mBuffer.WriterAt(
Index(mNextWriteIndex)));
return BlockIndex(dstStartIndex);
}
// Clear all entries, calling entry destructor (if any), and move read index
// to the end so that these entries cannot be read anymore.
void Clear() {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
ClearAllEntries();
}
@ -1008,7 +912,7 @@ class BlocksRingBuffer {
// destructor (if any), and move read index to the end so that these entries
// cannot be read anymore.
void ClearBefore(BlockIndex aBlockIndex) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
if (!mMaybeUnderlyingBuffer) {
return;
}
@ -1053,7 +957,7 @@ class BlocksRingBuffer {
#ifdef DEBUG
void Dump() const {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
if (!mMaybeUnderlyingBuffer) {
printf("empty BlocksRingBuffer\n");
return;
@ -1183,7 +1087,7 @@ class BlocksRingBuffer {
friend struct Deserializer<UniquePtr<BlocksRingBuffer>>;
// Mutex guarding the following members.
mutable baseprofiler::detail::BaseProfilerMaybeMutex mMutex;
mutable baseprofiler::detail::BaseProfilerMutex mMutex;
struct UnderlyingBuffer {
// Create a buffer of the given length.
@ -1965,7 +1869,7 @@ struct BlocksRingBuffer::Deserializer<Variant<Ts...>> {
template <>
struct BlocksRingBuffer::Serializer<BlocksRingBuffer> {
static Length Bytes(const BlocksRingBuffer& aBuffer) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(aBuffer.mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(aBuffer.mMutex);
if (aBuffer.mMaybeUnderlyingBuffer.isNothing()) {
// Out-of-session, we only need 1 byte to store a length of 0.
return ULEB128Size<Length>(0);
@ -1983,7 +1887,7 @@ struct BlocksRingBuffer::Serializer<BlocksRingBuffer> {
}
static void Write(EntryWriter& aEW, const BlocksRingBuffer& aBuffer) {
baseprofiler::detail::BaseProfilerMaybeAutoLock lock(aBuffer.mMutex);
baseprofiler::detail::BaseProfilerAutoLock lock(aBuffer.mMutex);
if (aBuffer.mMaybeUnderlyingBuffer.isNothing()) {
// Out-of-session, only store a length of 0.
aEW.WriteULEB128<Length>(0);
@ -2003,8 +1907,13 @@ struct BlocksRingBuffer::Serializer<BlocksRingBuffer> {
aEW.WriteULEB128<Length>(len);
aEW.WriteObject(start);
aEW.WriteObject(end);
// Write all the bytes.
aBuffer.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(start).ReadInto(aEW, len);
// Write all the bytes. TODO: Optimize with memcpy's?
const auto readerEnd =
aBuffer.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(end);
for (auto reader = aBuffer.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(start);
reader != readerEnd; ++reader) {
aEW.WriteObject(*reader);
}
// And write stats.
aEW.WriteObject(aBuffer.mMaybeUnderlyingBuffer->mPushedBlockCount);
aEW.WriteObject(aBuffer.mMaybeUnderlyingBuffer->mClearedBlockCount);
@ -2041,9 +1950,12 @@ struct BlocksRingBuffer::Deserializer<BlocksRingBuffer> {
aBuffer.mNextWriteIndex = BlocksRingBuffer::BlockIndex(end);
MOZ_ASSERT(end - start == len);
// Copy bytes into the buffer.
auto writer = aBuffer.mMaybeUnderlyingBuffer->mBuffer.WriterAt(start);
aER.ReadInto(writer, len);
MOZ_ASSERT(writer.CurrentIndex() == end);
const auto writerEnd =
aBuffer.mMaybeUnderlyingBuffer->mBuffer.WriterAt(end);
for (auto writer = aBuffer.mMaybeUnderlyingBuffer->mBuffer.WriterAt(start);
writer != writerEnd; ++writer, ++aER) {
*writer = *aER;
}
// Finally copy stats.
aBuffer.mMaybeUnderlyingBuffer->mPushedBlockCount = aER.ReadObject<decltype(
aBuffer.mMaybeUnderlyingBuffer->mPushedBlockCount)>();
@ -2102,9 +2014,8 @@ struct BlocksRingBuffer::Deserializer<UniquePtr<BlocksRingBuffer>> {
return bufferUPtr;
}
// We have a non-empty buffer.
// allocate an empty BlocksRingBuffer without mutex.
bufferUPtr = MakeUnique<BlocksRingBuffer>(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
// allocate an empty BlocksRingBuffer.
bufferUPtr = MakeUnique<BlocksRingBuffer>();
// Rewind the reader before the length and deserialize the contents, using
// the non-UniquePtr Deserializer.
aER -= ULEB128Size(len);

View File

@ -417,27 +417,6 @@ class ModuloBuffer {
mIndex += aLength;
}
// Read data into a mutable iterator and move both iterators ahead.
void ReadInto(Iterator</* IsBufferConst */ false>& aDst, Length aLength) {
// Don't allow data larger than the buffer.
MOZ_ASSERT(aLength <= mModuloBuffer->BufferLength().Value());
MOZ_ASSERT(aLength <= aDst.mModuloBuffer->BufferLength().Value());
// Offset inside the buffer (corresponding to our Index).
Offset offset = OffsetInBuffer();
// Compute remaining bytes between this offset and the end of the buffer.
Length remaining = mModuloBuffer->BufferLength().Value() - offset;
if (MOZ_LIKELY(remaining >= aLength)) {
// Can read everything we need before the end of the buffer.
aDst.Write(&mModuloBuffer->mBuffer[offset], aLength);
} else {
// Read as much as possible before the end of the buffer.
aDst.Write(&mModuloBuffer->mBuffer[offset], remaining);
// And then continue from the beginning of the buffer.
aDst.Write(&mModuloBuffer->mBuffer[0], (aLength - remaining));
}
mIndex += aLength;
}
// Read data into an object and move iterator ahead.
// Note that this overwrites `aObject` with bytes from the buffer.
// Restricted to trivially-copyable types, which support this without

View File

@ -8,8 +8,6 @@
#ifdef MOZ_BASE_PROFILER
# include "BaseProfileJSONWriter.h"
# include "BaseProfilerMarkerPayload.h"
# include "mozilla/BlocksRingBuffer.h"
# include "mozilla/leb128iterator.h"
# include "mozilla/ModuloBuffer.h"
@ -486,75 +484,6 @@ void TestModuloBuffer() {
MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
}
// This test function does a `ReadInto` as directed, and checks that the
// result is the same as if the copy had been done manually byte-by-byte.
// `TestReadInto(3, 7, 2)` copies from index 3 to index 7, 2 bytes long.
// Return the output string (from `ReadInto`) for external checks.
auto TestReadInto = [](MB::Index aReadFrom, MB::Index aWriteTo,
MB::Length aBytes) {
constexpr uint32_t TRISize = 16;
// Prepare an input buffer, all different elements.
uint8_t input[TRISize + 1] = "ABCDEFGHIJKLMNOP";
const MB mbInput(input, MakePowerOfTwo32<TRISize>());
// Prepare an output buffer, different from input.
uint8_t output[TRISize + 1] = "abcdefghijklmnop";
MB mbOutput(output, MakePowerOfTwo32<TRISize>());
// Run ReadInto.
auto writer = mbOutput.WriterAt(aWriteTo);
mbInput.ReaderAt(aReadFrom).ReadInto(writer, aBytes);
// Do the same operation manually.
uint8_t outputCheck[TRISize + 1] = "abcdefghijklmnop";
MB mbOutputCheck(outputCheck, MakePowerOfTwo32<TRISize>());
auto readerCheck = mbInput.ReaderAt(aReadFrom);
auto writerCheck = mbOutputCheck.WriterAt(aWriteTo);
for (MB::Length i = 0; i < aBytes; ++i) {
*writerCheck++ = *readerCheck++;
}
// Compare the two outputs.
for (uint32_t i = 0; i < TRISize; ++i) {
# ifdef TEST_MODULOBUFFER_FAILURE_DEBUG
// Only used when debugging failures.
if (output[i] != outputCheck[i]) {
printf(
"*** from=%u to=%u bytes=%u i=%u\ninput: '%s'\noutput: "
"'%s'\ncheck: '%s'\n",
unsigned(aReadFrom), unsigned(aWriteTo), unsigned(aBytes),
unsigned(i), input, output, outputCheck);
}
# endif
MOZ_RELEASE_ASSERT(output[i] == outputCheck[i]);
}
# ifdef TEST_MODULOBUFFER_HELPER
// Only used when adding more tests.
printf("*** from=%u to=%u bytes=%u output: %s\n", unsigned(aReadFrom),
unsigned(aWriteTo), unsigned(aBytes), output);
# endif
return std::string(reinterpret_cast<const char*>(output));
};
// A few manual checks:
constexpr uint32_t TRISize = 16;
MOZ_RELEASE_ASSERT(TestReadInto(0, 0, 0) == "abcdefghijklmnop");
MOZ_RELEASE_ASSERT(TestReadInto(0, 0, TRISize) == "ABCDEFGHIJKLMNOP");
MOZ_RELEASE_ASSERT(TestReadInto(0, 5, TRISize) == "LMNOPABCDEFGHIJK");
MOZ_RELEASE_ASSERT(TestReadInto(5, 0, TRISize) == "FGHIJKLMNOPABCDE");
// Test everything! (16^3 = 4096, not too much.)
for (MB::Index r = 0; r < TRISize; ++r) {
for (MB::Index w = 0; w < TRISize; ++w) {
for (MB::Length len = 0; len < TRISize; ++len) {
TestReadInto(r, w, len);
}
}
}
printf("TestModuloBuffer done\n");
}
@ -583,8 +512,7 @@ void TestBlocksRingBufferAPI() {
// Start a temporary block to constrain buffer lifetime.
{
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
BlocksRingBuffer rb(&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
[&](BlocksRingBuffer::EntryReader& aReader) {
lastDestroyed = aReader.ReadObject<uint32_t>();
});
@ -898,53 +826,10 @@ void TestBlocksRingBufferAPI() {
// ? ? ? ? ? ? ? ? ? ? S[4 | int(6) ]E ?
VERIFY_START_END_DESTROYED(26, 31, 0);
{
// Create a 2nd buffer and fill it with `7` and `8`.
uint8_t buffer2[MBSize];
BlocksRingBuffer rb2(BlocksRingBuffer::ThreadSafety::WithoutMutex,
buffer2, MakePowerOfTwo32<MBSize>());
rb2.PutObject(uint32_t(7));
rb2.PutObject(uint32_t(8));
// Main buffer shouldn't have changed.
VERIFY_START_END_DESTROYED(26, 31, 0);
// Append contents of rb2 to rb, this should end up being the same as
// pushing the two numbers.
rb.AppendContents(rb2);
// 32 33 34 35 36 37 38 39 40 41 26 27 28 29 30 31
// int(7) ] [4 | int(8) ]E ? S[4 | int(6) ] [4 |
VERIFY_START_END_DESTROYED(26, 41, 0);
// Append contents of rb2 to rb again, to verify that rb2 was not modified
// above. This should destroy `6` and the first `7`.
rb.AppendContents(rb2);
// 48 49 50 51 36 37 38 39 40 41 42 43 44 45 46 47
// int(8) ]E ? S[4 | int(8) ] [4 | int(7) ] [4 |
VERIFY_START_END_DESTROYED(36, 51, 7);
// End of block where rb2 lives, to verify that it is not needed anymore
// for its copied values to survive in rb.
}
VERIFY_START_END_DESTROYED(36, 51, 7);
// bi6 should now have been cleared.
rb.ReadAt(bi6, [](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeReader) {
MOZ_RELEASE_ASSERT(aMaybeReader.isNothing());
});
// Check that we have `8`, `7`, `8`.
count = 0;
uint32_t expected[3] = {8, 7, 8};
rb.ReadEach([&](BlocksRingBuffer::EntryReader& aReader) {
MOZ_RELEASE_ASSERT(count < 3);
MOZ_RELEASE_ASSERT(aReader.ReadObject<uint32_t>() == expected[count++]);
});
MOZ_RELEASE_ASSERT(count == 3);
// End of block where rb lives, BlocksRingBuffer destructor should call
// entry destructor for remaining entries.
}
MOZ_RELEASE_ASSERT(lastDestroyed == 8);
MOZ_RELEASE_ASSERT(lastDestroyed == 6);
// Check that only the provided stack-based sub-buffer was modified.
uint32_t changed = 0;
@ -969,7 +854,7 @@ void TestBlocksRingBufferUnderlyingBufferChanges() {
printf("TestBlocksRingBufferUnderlyingBufferChanges...\n");
// Out-of-session BlocksRingBuffer to start with.
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex);
BlocksRingBuffer rb;
// Block index to read at. Initially "null", but may be changed below.
BlocksRingBuffer::BlockIndex bi;
@ -1175,8 +1060,7 @@ void TestBlocksRingBufferThreading() {
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer[i] = uint8_t('A' + i);
}
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
BlocksRingBuffer rb(&buffer[MBSize], MakePowerOfTwo32<MBSize>(),
[&](BlocksRingBuffer::EntryReader& aReader) {
lastDestroyed = aReader.ReadObject<int>();
});
@ -1264,8 +1148,7 @@ void TestBlocksRingBufferSerialization() {
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer[i] = uint8_t('A' + i);
}
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>());
BlocksRingBuffer rb(&buffer[MBSize], MakePowerOfTwo32<MBSize>());
// Will expect literal string to always have the same address.
# define THE_ANSWER "The answer is "
@ -1395,8 +1278,7 @@ void TestBlocksRingBufferSerialization() {
for (size_t i = 0; i < MBSize2 * 3; ++i) {
buffer2[i] = uint8_t('B' + i);
}
BlocksRingBuffer rb2(BlocksRingBuffer::ThreadSafety::WithoutMutex,
&buffer2[MBSize2], MakePowerOfTwo32<MBSize2>());
BlocksRingBuffer rb2(&buffer2[MBSize2], MakePowerOfTwo32<MBSize2>());
rb2.PutObject(rb);
// 3rd BlocksRingBuffer deserialized from the 2nd one.
@ -1404,8 +1286,7 @@ void TestBlocksRingBufferSerialization() {
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer3[i] = uint8_t('C' + i);
}
BlocksRingBuffer rb3(BlocksRingBuffer::ThreadSafety::WithoutMutex,
&buffer3[MBSize], MakePowerOfTwo32<MBSize>());
BlocksRingBuffer rb3(&buffer3[MBSize], MakePowerOfTwo32<MBSize>());
rb2.ReadEach(
[&](BlocksRingBuffer::EntryReader& aER) { aER.ReadIntoObject(rb3); });
@ -1478,101 +1359,6 @@ void TestBlocksRingBufferSerialization() {
printf("TestBlocksRingBufferSerialization done\n");
}
class BaseTestMarkerPayload : public baseprofiler::ProfilerMarkerPayload {
public:
explicit BaseTestMarkerPayload(int aData) : mData(aData) {}
int GetData() const { return mData; }
// Exploded DECL_BASE_STREAM_PAYLOAD, but without `MFBT_API`s.
static UniquePtr<ProfilerMarkerPayload> Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader);
BlocksRingBuffer::Length TagAndSerializationBytes() const override;
void SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const override;
void StreamPayload(
::mozilla::baseprofiler::SpliceableJSONWriter& aWriter,
const ::mozilla::TimeStamp& aProcessStartTime,
::mozilla::baseprofiler::UniqueStacks& aUniqueStacks) const override;
private:
BaseTestMarkerPayload(CommonProps&& aProps, int aData)
: baseprofiler::ProfilerMarkerPayload(std::move(aProps)), mData(aData) {}
int mData;
};
// static
UniquePtr<baseprofiler::ProfilerMarkerPayload>
BaseTestMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
CommonProps props = DeserializeCommonProps(aEntryReader);
int data = aEntryReader.ReadObject<int>();
return UniquePtr<baseprofiler::ProfilerMarkerPayload>(
new BaseTestMarkerPayload(std::move(props), data));
}
BlocksRingBuffer::Length BaseTestMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() + sizeof(int);
}
void BaseTestMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mData);
}
void BaseTestMarkerPayload::StreamPayload(
baseprofiler::SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
baseprofiler::UniqueStacks& aUniqueStacks) const {
aWriter.IntProperty("data", mData);
}
void TestProfilerMarkerSerialization() {
printf("TestProfilerMarkerSerialization...\n");
constexpr uint32_t MBSize = 256;
uint8_t buffer[MBSize * 3];
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer[i] = uint8_t('A' + i);
}
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>());
constexpr int data = 42;
{
BaseTestMarkerPayload payload(data);
rb.PutObject(
static_cast<const baseprofiler::ProfilerMarkerPayload*>(&payload));
}
int read = 0;
rb.ReadEach([&](BlocksRingBuffer::EntryReader& aER) {
UniquePtr<baseprofiler::ProfilerMarkerPayload> payload =
aER.ReadObject<UniquePtr<baseprofiler::ProfilerMarkerPayload>>();
MOZ_RELEASE_ASSERT(!!payload);
++read;
BaseTestMarkerPayload* testPayload =
static_cast<BaseTestMarkerPayload*>(payload.get());
MOZ_RELEASE_ASSERT(testPayload);
MOZ_RELEASE_ASSERT(testPayload->GetData() == data);
});
MOZ_RELEASE_ASSERT(read == 1);
// Everything around the sub-buffer should be unchanged.
for (size_t i = 0; i < MBSize; ++i) {
MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
}
for (size_t i = MBSize * 2; i < MBSize * 3; ++i) {
MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
}
printf("TestProfilerMarkerSerialization done\n");
}
// Increase the depth, to a maximum (to avoid too-deep recursion).
static constexpr size_t NextDepth(size_t aDepth) {
constexpr size_t MAX_DEPTH = 128;
@ -1628,7 +1414,6 @@ void TestProfiler() {
TestBlocksRingBufferUnderlyingBufferChanges();
TestBlocksRingBufferThreading();
TestBlocksRingBufferSerialization();
TestProfilerMarkerSerialization();
{
printf("profiler_init()...\n");
@ -1658,15 +1443,8 @@ void TestProfiler() {
std::thread threadFib([]() {
AUTO_BASE_PROFILER_REGISTER_THREAD("fibonacci");
SleepMilli(5);
auto cause =
# if defined(__linux__) || defined(__ANDROID__)
// Currently disabled on these platforms, so just return a null.
decltype(baseprofiler::profiler_get_backtrace()){};
# else
baseprofiler::profiler_get_backtrace();
# endif
AUTO_BASE_PROFILER_TEXT_MARKER_CAUSE("fibonacci", "First leaf call",
OTHER, std::move(cause));
OTHER, nullptr);
static const unsigned long long fibStart = 37;
printf("Fibonacci(%llu)...\n", fibStart);
AUTO_BASE_PROFILER_LABEL("Label around Fibonacci", OTHER);
@ -1709,53 +1487,6 @@ void TestProfiler() {
threadCancelFib.join();
}
// Just making sure all payloads know how to (de)serialize and stream.
baseprofiler::profiler_add_marker(
"TracingMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::TracingMarkerPayload("category",
baseprofiler::TRACING_EVENT));
auto cause =
# if defined(__linux__) || defined(__ANDROID__)
// Currently disabled on these platforms, so just return a null.
decltype(baseprofiler::profiler_get_backtrace()){};
# else
baseprofiler::profiler_get_backtrace();
# endif
baseprofiler::profiler_add_marker(
"FileIOMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::FileIOMarkerPayload(
"operation", "source", "filename", TimeStamp::NowUnfuzzed(),
TimeStamp::NowUnfuzzed(), std::move(cause)));
baseprofiler::profiler_add_marker(
"UserTimingMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::UserTimingMarkerPayload("name", TimeStamp::NowUnfuzzed(),
Nothing{}, Nothing{}));
baseprofiler::profiler_add_marker(
"HangMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::HangMarkerPayload(TimeStamp::NowUnfuzzed(),
TimeStamp::NowUnfuzzed()));
baseprofiler::profiler_add_marker(
"LongTaskMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::LongTaskMarkerPayload(TimeStamp::NowUnfuzzed(),
TimeStamp::NowUnfuzzed()));
{
std::string s = "text payload";
baseprofiler::profiler_add_marker(
"TextMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::TextMarkerPayload(s, TimeStamp::NowUnfuzzed(),
TimeStamp::NowUnfuzzed()));
}
baseprofiler::profiler_add_marker(
"LogMarkerPayload", baseprofiler::ProfilingCategoryPair::OTHER,
baseprofiler::LogMarkerPayload("module", "text",
TimeStamp::NowUnfuzzed()));
printf("Sleep 1s...\n");
{
AUTO_BASE_PROFILER_THREAD_SLEEP;

View File

@ -1114,7 +1114,7 @@ void HttpChannelChild::OnStopRequest(
mCacheReadEnd = timing.cacheReadEnd;
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
int32_t priority = PRIORITY_NORMAL;
GetPriority(&priority);
profiler_add_network_marker(

View File

@ -5916,7 +5916,7 @@ nsresult nsHttpChannel::ContinueProcessRedirectionAfterFallback(nsresult rv) {
}
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
int32_t priority = PRIORITY_NORMAL;
GetPriority(&priority);
@ -6354,7 +6354,7 @@ nsHttpChannel::AsyncOpen(nsIStreamListener* aListener) {
#ifdef MOZ_GECKO_PROFILER
mLastStatusReported =
TimeStamp::Now(); // in case we enable the profiler after AsyncOpen()
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
profiler_add_network_marker(mURI, mPriority, mChannelId,
NetworkLoadType::LOAD_START,
mChannelCreationTimestamp, mLastStatusReported,
@ -8230,7 +8230,7 @@ nsresult nsHttpChannel::ContinueOnStopRequest(nsresult aStatus, bool aIsFromNet,
MaybeReportTimingData();
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers() && !mRedirectURI) {
if (profiler_is_active() && !mRedirectURI) {
// Don't include this if we already redirected
// These do allocations/frees/etc; avoid if not active
nsCOMPtr<nsIURI> uri;

View File

@ -494,7 +494,7 @@ void BackgroundHangThread::ReportHang(TimeDuration aHangTime) {
// If the profiler is enabled, add a marker.
#ifdef MOZ_GECKO_PROFILER
if (profiler_can_accept_markers()) {
if (profiler_is_active()) {
TimeStamp endTime = TimeStamp::Now();
TimeStamp startTime = endTime - aHangTime;
AUTO_PROFILER_STATS(add_marker_with_HangMarkerPayload);

View File

@ -6,6 +6,8 @@
#include "ProfileBuffer.h"
#include "ProfilerMarker.h"
#include "BaseProfiler.h"
#include "jsfriendapi.h"
#include "mozilla/MathAlgorithms.h"
@ -14,64 +16,38 @@
using namespace mozilla;
// 65536 bytes should be plenty for a single backtrace.
static constexpr auto WorkerBufferBytes = MakePowerOfTwo32<65536>();
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer, PowerOfTwo32 aCapacity)
: mEntries(aBuffer),
mWorkerBuffer(
MakeUnique<BlocksRingBuffer::Byte[]>(WorkerBufferBytes.Value())) {
// Only ProfileBuffer should control this buffer, and it should be empty when
// there is no ProfileBuffer using it.
MOZ_ASSERT(mEntries.BufferLength().isNothing());
// Allocate the requested capacity.
mEntries.Set(aCapacity);
}
ProfileBuffer::ProfileBuffer(BlocksRingBuffer& aBuffer) : mEntries(aBuffer) {
// Assume the given buffer is not empty.
MOZ_ASSERT(mEntries.BufferLength().isSome());
}
ProfileBuffer::ProfileBuffer(PowerOfTwo32 aCapacity)
: mEntries(MakeUnique<ProfileBufferEntry[]>(aCapacity.Value())),
mEntryIndexMask(aCapacity.Mask()),
mRangeStart(0),
mRangeEnd(0) {}
ProfileBuffer::~ProfileBuffer() {
// Only ProfileBuffer controls this buffer, and it should be empty when there
// is no ProfileBuffer using it.
mEntries.Reset();
MOZ_ASSERT(mEntries.BufferLength().isNothing());
}
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddEntry(
BlocksRingBuffer& aBlocksRingBuffer, const ProfileBufferEntry& aEntry) {
switch (aEntry.GetKind()) {
#define SWITCH_KIND(KIND, TYPE, SIZE) \
case ProfileBufferEntry::Kind::KIND: { \
return aBlocksRingBuffer.PutFrom(&aEntry, 1 + (SIZE)); \
break; \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(SWITCH_KIND)
#undef SWITCH_KIND
default:
MOZ_ASSERT(false, "Unhandled ProfilerBuffer entry KIND");
return BlockIndex{};
while (mStoredMarkers.peek()) {
delete mStoredMarkers.popHead();
}
}
// Called from signal, call only reentrant functions
uint64_t ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
return AddEntry(mEntries, aEntry).ConvertToU64();
}
void ProfileBuffer::AddEntry(const ProfileBufferEntry& aEntry) {
GetEntry(mRangeEnd++) = aEntry;
/* static */
BlocksRingBuffer::BlockIndex ProfileBuffer::AddThreadIdEntry(
BlocksRingBuffer& aBlocksRingBuffer, int aThreadId) {
return AddEntry(aBlocksRingBuffer, ProfileBufferEntry::ThreadId(aThreadId));
// The distance between mRangeStart and mRangeEnd must never exceed
// capacity, so advance mRangeStart if necessary.
if (mRangeEnd - mRangeStart > mEntryIndexMask.MaskValue() + 1) {
mRangeStart++;
}
}
uint64_t ProfileBuffer::AddThreadIdEntry(int aThreadId) {
return AddThreadIdEntry(mEntries, aThreadId).ConvertToU64();
uint64_t pos = mRangeEnd;
AddEntry(ProfileBufferEntry::ThreadId(aThreadId));
return pos;
}
void ProfileBuffer::AddStoredMarker(ProfilerMarker* aStoredMarker) {
aStoredMarker->SetPositionInBuffer(mRangeEnd);
mStoredMarkers.insert(aStoredMarker);
}
void ProfileBuffer::CollectCodeLocation(
@ -111,15 +87,28 @@ void ProfileBuffer::CollectCodeLocation(
}
}
size_t ProfileBuffer::SizeOfExcludingThis(MallocSizeOf aMallocSizeOf) const {
void ProfileBuffer::DeleteExpiredStoredMarkers() {
AUTO_PROFILER_STATS(gecko_ProfileBuffer_DeleteExpiredStoredMarkers);
// Delete markers of samples that have been overwritten due to circular
// buffer wraparound.
while (mStoredMarkers.peek() &&
mStoredMarkers.peek()->HasExpired(mRangeStart)) {
delete mStoredMarkers.popHead();
}
}
size_t ProfileBuffer::SizeOfIncludingThis(
mozilla::MallocSizeOf aMallocSizeOf) const {
size_t n = aMallocSizeOf(this);
n += aMallocSizeOf(mEntries.get());
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
// - memory pointed to by the elements within mEntries
return mEntries.SizeOfExcludingThis(aMallocSizeOf);
}
// - mStoredMarkers
size_t ProfileBuffer::SizeOfIncludingThis(MallocSizeOf aMallocSizeOf) const {
return aMallocSizeOf(this) + SizeOfExcludingThis(aMallocSizeOf);
return n;
}
void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
@ -157,15 +146,9 @@ void ProfileBuffer::CollectOverheadStats(TimeDuration aSamplingTime,
}
ProfilerBufferInfo ProfileBuffer::GetProfilerBufferInfo() const {
return {BufferRangeStart(),
BufferRangeEnd(),
mEntries.BufferLength()->Value() / 8, // 8 bytes per entry.
mIntervalsNs,
mOverheadsNs,
mLockingsNs,
mCleaningsNs,
mCountersNs,
mThreadsNs};
return {mRangeStart, mRangeEnd, mEntryIndexMask.MaskValue() + 1,
mIntervalsNs, mOverheadsNs, mLockingsNs,
mCleaningsNs, mCountersNs, mThreadsNs};
}
/* ProfileBufferCollector */

View File

@ -6,42 +6,35 @@
#ifndef MOZ_PROFILE_BUFFER_H
#define MOZ_PROFILE_BUFFER_H
#include "GeckoProfiler.h"
#include "ProfileBufferEntry.h"
#include "ProfilerMarker.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/PowerOfTwo.h"
// Class storing most profiling data in a BlocksRingBuffer.
//
// A fixed-capacity circular buffer.
// This class is used as a queue of entries which, after construction, never
// allocates. This makes it safe to use in the profiler's "critical section".
// Entries are appended at the end. Once the queue capacity has been reached,
// adding a new entry will evict an old entry from the start of the queue.
// Positions in the queue are represented as 64-bit unsigned integers which
// only increase and never wrap around.
// mRangeStart and mRangeEnd describe the range in that uint64_t space which is
// covered by the queue contents.
// Internally, the buffer uses a fixed-size storage and applies a modulo
// operation when accessing entries in that storage buffer. "Evicting" an entry
// really just means that an existing entry in the storage buffer gets
// overwritten and that mRangeStart gets incremented.
class ProfileBuffer final {
public:
// Opaque type containing a block index, which should not be modified outside
// of BlocksRingBuffer.
// TODO: Eventually, all uint64_t values should be replaced with BlockIndex,
// because external users should only store and compare them, but not do other
// arithmetic operations (that uint64_t supports).
using BlockIndex = mozilla::BlocksRingBuffer::BlockIndex;
// ProfileBuffer constructor
// @param aBuffer The empty BlocksRingBuffer to use as buffer manager.
// @param aCapacity The capacity of the buffer.
ProfileBuffer(mozilla::BlocksRingBuffer& aBuffer,
mozilla::PowerOfTwo32 aCapacity);
// ProfileBuffer constructor
// @param aBuffer The pre-filled BlocksRingBuffer to use as buffer manager.
explicit ProfileBuffer(mozilla::BlocksRingBuffer& aBuffer);
explicit ProfileBuffer(mozilla::PowerOfTwo32 aCapacity);
~ProfileBuffer();
bool IsThreadSafe() const { return mEntries.IsThreadSafe(); }
// Add |aEntry| to the buffer, ignoring what kind of entry it is.
uint64_t AddEntry(const ProfileBufferEntry& aEntry);
void AddEntry(const ProfileBufferEntry& aEntry);
// Add to the buffer a sample start (ThreadId) entry for aThreadId.
// Returns the position of the entry.
@ -98,28 +91,16 @@ class ProfileBuffer final {
void DiscardSamplesBeforeTime(double aTime);
// Read an entry in the buffer. Slow!
ProfileBufferEntry GetEntry(uint64_t aPosition) const {
ProfileBufferEntry entry;
mEntries.Read([&](mozilla::BlocksRingBuffer::Reader* aReader) {
// BlocksRingBuffer cannot be out-of-session when sampler is running.
MOZ_ASSERT(aReader);
for (mozilla::BlocksRingBuffer::EntryReader er : *aReader) {
if (er.CurrentBlockIndex().ConvertToU64() > aPosition) {
// Passed the block. (We need a precise position.)
return;
}
if (er.CurrentBlockIndex().ConvertToU64() == aPosition) {
MOZ_RELEASE_ASSERT(er.RemainingBytes() <= sizeof(entry));
er.Read(&entry, er.RemainingBytes());
return;
}
}
});
return entry;
void AddStoredMarker(ProfilerMarker* aStoredMarker);
// The following method is not signal safe!
void DeleteExpiredStoredMarkers();
// Access an entry in the buffer.
ProfileBufferEntry& GetEntry(uint64_t aPosition) const {
return mEntries[aPosition & mEntryIndexMask];
}
size_t SizeOfExcludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
size_t SizeOfIncludingThis(mozilla::MallocSizeOf aMallocSizeOf) const;
void CollectOverheadStats(mozilla::TimeDuration aSamplingTime,
@ -131,50 +112,38 @@ class ProfileBuffer final {
ProfilerBufferInfo GetProfilerBufferInfo() const;
private:
// Add |aEntry| to the provided BlocksRingBuffer.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddEntry(mozilla::BlocksRingBuffer& aBlocksRingBuffer,
const ProfileBufferEntry& aEntry);
// The storage that backs our buffer. Holds capacity entries.
// All accesses to entries in mEntries need to go through GetEntry(), which
// translates the given buffer position from the near-infinite uint64_t space
// into the entry storage space.
mozilla::UniquePtr<ProfileBufferEntry[]> mEntries;
// Add a sample start (ThreadId) entry for aThreadId to the provided
// BlocksRingBuffer. Returns the position of the entry.
// `static` because it may be used to add an entry to a `BlocksRingBuffer`
// that is not attached to a `ProfileBuffer`.
static BlockIndex AddThreadIdEntry(
mozilla::BlocksRingBuffer& aBlocksRingBuffer, int aThreadId);
// The circular-ring storage in which this ProfileBuffer stores its data.
mozilla::BlocksRingBuffer& mEntries;
// A mask such that pos & mEntryIndexMask == pos % capacity.
mozilla::PowerOfTwoMask32 mEntryIndexMask;
public:
// `BufferRangeStart()` and `BufferRangeEnd()` return `uint64_t` values
// corresponding to the first entry and past the last entry stored in
// `mEntries`.
// mRangeStart and mRangeEnd are uint64_t values that strictly advance and
// never wrap around. mRangeEnd is always greater than or equal to
// mRangeStart, but never gets more than capacity steps ahead of
// mRangeStart, because we can only store a fixed number of entries in the
// buffer. Once the entire buffer is in use, adding a new entry will evict an
// entry from the front of the buffer (and increase mRangeStart).
// In other words, the following conditions hold true at all times:
// (1) mRangeStart <= mRangeEnd
// (2) mRangeEnd - mRangeStart <= capacity
//
// The returned values are not guaranteed to be stable, because other threads
// may also be accessing the buffer concurrently. But they will always
// increase, and can therefore give an indication of how far these values have
// *at least* reached. In particular:
// - Entries whose index is strictly less that `BufferRangeStart()` have been
// discarded by now, so any related data may also be safely discarded.
// - It is safe to try and read entries at any index strictly less than
// `BufferRangeEnd()` -- but note that these reads may fail by the time you
// request them, as old entries get overwritten by new ones.
uint64_t BufferRangeStart() const {
return mEntries.GetState().mRangeStart.ConvertToU64();
}
uint64_t BufferRangeEnd() const {
return mEntries.GetState().mRangeEnd.ConvertToU64();
}
// If there are no live entries, then mRangeStart == mRangeEnd.
// Otherwise, mRangeStart is the first live entry and mRangeEnd is one past
// the last live entry, and also the position at which the next entry will be
// added.
// (mRangeEnd - mRangeStart) always gives the number of live entries.
uint64_t mRangeStart;
uint64_t mRangeEnd;
// Markers that marker entries in the buffer might refer to.
ProfilerMarkerLinkedList mStoredMarkers;
private:
// Pre-allocated (to avoid spurious mallocs) temporary buffer used when:
// - Duplicating sleeping stacks.
// - Adding JIT info.
// - Streaming stacks to JSON.
mozilla::UniquePtr<mozilla::BlocksRingBuffer::Byte[]> mWorkerBuffer;
double mFirstSamplingTimeNs = 0.0;
double mLastSamplingTimeNs = 0.0;
ProfilerStats mIntervalsNs;
@ -202,7 +171,7 @@ class ProfileBufferCollector final : public ProfilerStackCollector {
}
mozilla::Maybe<uint64_t> BufferRangeStart() override {
return mozilla::Some(mBuf.BufferRangeStart());
return mozilla::Some(mBuf.mRangeStart);
}
virtual void CollectNativeLeafAddr(void* aAddr) override;

File diff suppressed because it is too large Load Diff

View File

@ -22,70 +22,42 @@
#include "nsString.h"
class ProfilerCodeAddressService;
class ProfilerMarker;
// NOTE! If you add entries, you need to verify if they need to be added to the
// switch statement in DuplicateLastSample!
// This will evaluate the MACRO with (KIND, TYPE, SIZE)
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int, sizeof(int)) \
MACRO(CollectionStart, double, sizeof(double)) \
MACRO(CollectionEnd, double, sizeof(double)) \
MACRO(Label, const char*, sizeof(const char*)) \
MACRO(FrameFlags, uint64_t, sizeof(uint64_t)) \
MACRO(DynamicStringFragment, char*, ProfileBufferEntry::kNumChars) \
MACRO(JitReturnAddr, void*, sizeof(void*)) \
MACRO(LineNumber, int, sizeof(int)) \
MACRO(ColumnNumber, int, sizeof(int)) \
MACRO(NativeLeafAddr, void*, sizeof(void*)) \
MACRO(Pause, double, sizeof(double)) \
MACRO(Responsiveness, double, sizeof(double)) \
MACRO(Resume, double, sizeof(double)) \
MACRO(ThreadId, int, sizeof(int)) \
MACRO(Time, double, sizeof(double)) \
MACRO(TimeBeforeCompactStack, double, sizeof(double)) \
MACRO(CounterId, void*, sizeof(void*)) \
MACRO(CounterKey, uint64_t, sizeof(uint64_t)) \
MACRO(Number, uint64_t, sizeof(uint64_t)) \
MACRO(Count, int64_t, sizeof(int64_t)) \
MACRO(ProfilerOverheadTime, double, sizeof(double)) \
MACRO(ProfilerOverheadDuration, double, sizeof(double))
#define FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(MACRO) \
MACRO(CategoryPair, int) \
MACRO(CollectionStart, double) \
MACRO(CollectionEnd, double) \
MACRO(Label, const char*) \
MACRO(FrameFlags, uint64_t) \
MACRO(DynamicStringFragment, char*) /* char[kNumChars], really */ \
MACRO(JitReturnAddr, void*) \
MACRO(LineNumber, int) \
MACRO(ColumnNumber, int) \
MACRO(NativeLeafAddr, void*) \
MACRO(Marker, ProfilerMarker*) \
MACRO(Pause, double) \
MACRO(Responsiveness, double) \
MACRO(Resume, double) \
MACRO(ThreadId, int) \
MACRO(Time, double) \
MACRO(CounterId, void*) \
MACRO(CounterKey, uint64_t) \
MACRO(Number, uint64_t) \
MACRO(Count, int64_t) \
MACRO(ProfilerOverheadTime, double) \
MACRO(ProfilerOverheadDuration, double)
class ProfileBufferEntry {
public:
// The `Kind` is a single byte identifying the type of data that is actually
// stored in a `ProfileBufferEntry`, as per the list in
// `FOR_EACH_PROFILE_BUFFER_ENTRY_KIND`.
//
// This byte is also used to identify entries in BlocksRingBuffer blocks, for
// both "legacy" entries that do contain a `ProfileBufferEntry`, and for new
// types of entries that may carry more data of different types.
// TODO: Eventually each type of "legacy" entry should be replaced with newer,
// more efficient kinds of entries (e.g., stack frames could be stored in one
// bigger entry, instead of multiple `ProfileBufferEntry`s); then we could
// discard `ProfileBufferEntry` and move this enum to a more appropriate spot.
using KindUnderlyingType = uint8_t;
enum class Kind : KindUnderlyingType {
enum class Kind : uint8_t {
INVALID = 0,
#define KIND(KIND, TYPE, SIZE) KIND,
#define KIND(k, t) k,
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(KIND)
#undef KIND
// Any value under `LEGACY_LIMIT` represents a `ProfileBufferEntry`.
LEGACY_LIMIT,
// Any value starting here does *not* represent a `ProfileBufferEntry` and
// requires separate decoding and handling.
// Marker data, including payload.
MarkerData = LEGACY_LIMIT,
// Collection of legacy stack entries, must follow a ThreadId and
// TimeBeforeCompactStack (which are not included in the CompactStack;
// TimeBeforeCompactStack is equivalent to Time, but indicates that a
// CompactStack follows shortly afterwards).
CompactStack,
MODERN_LIMIT
LIMIT
};
ProfileBufferEntry();
@ -99,23 +71,24 @@ class ProfileBufferEntry {
ProfileBufferEntry(Kind aKind, const char* aString);
ProfileBufferEntry(Kind aKind, char aChars[kNumChars]);
ProfileBufferEntry(Kind aKind, void* aPtr);
ProfileBufferEntry(Kind aKind, ProfilerMarker* aMarker);
ProfileBufferEntry(Kind aKind, double aDouble);
ProfileBufferEntry(Kind aKind, int64_t aInt64);
ProfileBufferEntry(Kind aKind, uint64_t aUint64);
ProfileBufferEntry(Kind aKind, int aInt);
public:
#define CTOR(KIND, TYPE, SIZE) \
static ProfileBufferEntry KIND(TYPE aVal) { \
return ProfileBufferEntry(Kind::KIND, aVal); \
#define CTOR(k, t) \
static ProfileBufferEntry k(t aVal) { \
return ProfileBufferEntry(Kind::k, aVal); \
}
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(CTOR)
#undef CTOR
Kind GetKind() const { return mKind; }
#define IS_KIND(KIND, TYPE, SIZE) \
bool Is##KIND() const { return mKind == Kind::KIND; }
#define IS_KIND(k, t) \
bool Is##k() const { return mKind == Kind::k; }
FOR_EACH_PROFILE_BUFFER_ENTRY_KIND(IS_KIND)
#undef IS_KIND
@ -132,6 +105,7 @@ class ProfileBufferEntry {
const char* GetString() const;
void* GetPtr() const;
ProfilerMarker* GetMarker() const;
double GetDouble() const;
int GetInt() const;
int64_t GetInt64() const;

View File

@ -38,8 +38,7 @@ void ProfiledThreadData::StreamJSON(const ProfileBuffer& aBuffer,
double aSinceTime, bool JSTracerEnabled,
ProfilerCodeAddressService* aService) {
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(
aBuffer.BufferRangeStart())) {
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}
@ -288,8 +287,7 @@ void ProfiledThreadData::NotifyAboutToLoseJSContext(
MOZ_RELEASE_ASSERT(aContext);
if (mJITFrameInfoForPreviousJSContexts &&
mJITFrameInfoForPreviousJSContexts->HasExpired(
aBuffer.BufferRangeStart())) {
mJITFrameInfoForPreviousJSContexts->HasExpired(aBuffer.mRangeStart)) {
mJITFrameInfoForPreviousJSContexts = nullptr;
}

View File

@ -11,23 +11,10 @@
#include "ProfileJSONWriter.h"
#include "ThreadInfo.h"
ProfilerBacktrace::ProfilerBacktrace(
const char* aName, int aThreadId,
UniquePtr<mozilla::BlocksRingBuffer> aBlocksRingBuffer,
mozilla::UniquePtr<ProfileBuffer> aProfileBuffer)
: mName(strdup(aName)),
mThreadId(aThreadId),
mBlocksRingBuffer(std::move(aBlocksRingBuffer)),
mProfileBuffer(std::move(aProfileBuffer)) {
ProfilerBacktrace::ProfilerBacktrace(const char* aName, int aThreadId,
mozilla::UniquePtr<ProfileBuffer> aBuffer)
: mName(strdup(aName)), mThreadId(aThreadId), mBuffer(std::move(aBuffer)) {
MOZ_COUNT_CTOR(ProfilerBacktrace);
MOZ_ASSERT(
!!mBlocksRingBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<BlocksRingBuffer>");
MOZ_ASSERT(
!!mProfileBuffer,
"ProfilerBacktrace only takes a non-null UniquePtr<ProfileBuffer>");
MOZ_ASSERT(!mBlocksRingBuffer->IsThreadSafe(),
"ProfilerBacktrace only takes a non-thread-safe BlocksRingBuffer");
}
ProfilerBacktrace::~ProfilerBacktrace() { MOZ_COUNT_DTOR(ProfilerBacktrace); }
@ -36,10 +23,10 @@ void ProfilerBacktrace::StreamJSON(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) {
// Unlike ProfiledThreadData::StreamJSON, we don't need to call
// ProfileBuffer::AddJITInfoForRange because mProfileBuffer does not contain
// any JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// ProfileBuffer::AddJITInfoForRange because mBuffer does not contain any
// JitReturnAddr entries. For synchronous samples, JIT frames get expanded
// at sample time.
StreamSamplesAndMarkers(mName.get(), mThreadId, *mProfileBuffer, aWriter,
StreamSamplesAndMarkers(mName.get(), mThreadId, *mBuffer.get(), aWriter,
NS_LITERAL_CSTRING(""), aProcessStartTime,
/* aRegisterTime */ mozilla::TimeStamp(),
/* aUnregisterTime */ mozilla::TimeStamp(),

View File

@ -7,8 +7,6 @@
#ifndef __PROFILER_BACKTRACE_H
#define __PROFILER_BACKTRACE_H
#include "ProfileBuffer.h"
#include "mozilla/UniquePtrExtensions.h"
class ProfileBuffer;
@ -18,16 +16,14 @@ class ThreadInfo;
class UniqueStacks;
namespace mozilla {
class BlocksRingBuffer;
class TimeStamp;
} // namespace mozilla
}
// ProfilerBacktrace encapsulates a synchronous sample.
class ProfilerBacktrace {
public:
ProfilerBacktrace(const char* aName, int aThreadId,
UniquePtr<mozilla::BlocksRingBuffer> aBlocksRingBuffer,
mozilla::UniquePtr<ProfileBuffer> aProfileBuffer);
mozilla::UniquePtr<ProfileBuffer> aBuffer);
~ProfilerBacktrace();
// ProfilerBacktraces' stacks are deduplicated in the context of the
@ -41,91 +37,12 @@ class ProfilerBacktrace {
UniqueStacks& aUniqueStacks);
private:
// Used to serialize a ProfilerBacktrace.
friend struct BlocksRingBuffer::Serializer<ProfilerBacktrace>;
friend struct BlocksRingBuffer::Deserializer<ProfilerBacktrace>;
ProfilerBacktrace(const ProfilerBacktrace&);
ProfilerBacktrace& operator=(const ProfilerBacktrace&);
mozilla::UniqueFreePtr<char> mName;
int mThreadId;
// `BlocksRingBuffer` in which `mProfileBuffer` stores its data; must be
// located before `mProfileBuffer` so that it's destroyed after.
UniquePtr<mozilla::BlocksRingBuffer> mBlocksRingBuffer;
mozilla::UniquePtr<ProfileBuffer> mProfileBuffer;
mozilla::UniquePtr<ProfileBuffer> mBuffer;
};
namespace mozilla {
// Format: [ UniquePtr<BlockRingsBuffer> | threadId | name ]
// Initial len==0 marks a nullptr or empty backtrace.
template <>
struct BlocksRingBuffer::Serializer<ProfilerBacktrace> {
static Length Bytes(const ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer) {
return ULEB128Size<Length>(0);
}
auto bufferBytes = SumBytes(*aBacktrace.mBlocksRingBuffer);
if (bufferBytes == 0) {
return ULEB128Size<Length>(0);
}
return bufferBytes +
SumBytes(aBacktrace.mThreadId,
WrapBlocksRingBufferUnownedCString(aBacktrace.mName.get()));
}
static void Write(EntryWriter& aEW, const ProfilerBacktrace& aBacktrace) {
if (!aBacktrace.mProfileBuffer ||
SumBytes(*aBacktrace.mBlocksRingBuffer) == 0) {
aEW.WriteULEB128(0u);
return;
}
aEW.WriteObject(*aBacktrace.mBlocksRingBuffer);
aEW.WriteObject(aBacktrace.mThreadId);
aEW.WriteObject(WrapBlocksRingBufferUnownedCString(aBacktrace.mName.get()));
}
};
template <typename Destructor>
struct BlocksRingBuffer::Serializer<UniquePtr<ProfilerBacktrace, Destructor>> {
static Length Bytes(
const UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) {
if (!aBacktrace) {
return ULEB128Size<Length>(0);
}
return SumBytes(*aBacktrace);
}
static void Write(
EntryWriter& aEW,
const UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) {
if (!aBacktrace) {
aEW.WriteULEB128(0u);
return;
}
aEW.WriteObject(*aBacktrace);
}
};
// Deserializer for an owning pointer to a ProfilerBacktrace; inverse of the
// serializers above. Reads back the inner ring buffer, thread id and name,
// and rebuilds a ProfileBuffer on top of the restored ring buffer.
template <typename Destructor>
struct BlocksRingBuffer::Deserializer<
    UniquePtr<ProfilerBacktrace, Destructor>> {
  static void ReadInto(EntryReader& aER,
                       UniquePtr<ProfilerBacktrace, Destructor>& aBacktrace) {
    aBacktrace = Read(aER);
  }
  static UniquePtr<ProfilerBacktrace, Destructor> Read(EntryReader& aER) {
    auto ringBuffer = aER.ReadObject<UniquePtr<BlocksRingBuffer>>();
    if (!ringBuffer) {
      // The entry held the 0 marker: null or empty backtrace.
      return nullptr;
    }
    MOZ_ASSERT(
        !ringBuffer->IsThreadSafe(),
        "ProfilerBacktrace only stores non-thread-safe BlocksRingBuffers");
    const int threadId = aER.ReadObject<int>();
    const std::string name = aER.ReadObject<std::string>();
    auto buffer = MakeUnique<ProfileBuffer>(*ringBuffer);
    return UniquePtr<ProfilerBacktrace, Destructor>{
        new ProfilerBacktrace(name.c_str(), threadId, std::move(ringBuffer),
                              std::move(buffer))};
  }
};
} // namespace mozilla
#endif // __PROFILER_BACKTRACE_H

View File

@ -0,0 +1,180 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#ifndef ProfilerMarker_h
#define ProfilerMarker_h
#include "ProfileBufferEntry.h"
#include "ProfileJSONWriter.h"
#include "ProfilerMarkerPayload.h"
#include "mozilla/UniquePtrExtensions.h"
template <typename T>
class ProfilerLinkedList;
// A single profiler marker: a named point (or span) in time recorded on a
// specific thread, optionally carrying a typed payload. Markers are chained
// into a ProfilerLinkedList through the intrusive mNext pointer, which is
// why that list is declared a friend.
class ProfilerMarker {
  friend class ProfilerLinkedList<ProfilerMarker>;

 public:
  // aMarkerName is copied (strdup), so the caller keeps ownership of its
  // buffer; ownership of aPayload transfers to the marker.
  explicit ProfilerMarker(
      const char* aMarkerName, JS::ProfilingCategoryPair aCategoryPair,
      int aThreadId,
      mozilla::UniquePtr<ProfilerMarkerPayload> aPayload = nullptr,
      double aTime = 0)
      : mMarkerName(strdup(aMarkerName)),
        mPayload(std::move(aPayload)),
        mNext{nullptr},
        mTime(aTime),
        mPositionInBuffer{0},
        mThreadId{aThreadId},
        mCategoryPair{aCategoryPair} {}

  // Record where this marker was stored in the profile buffer, so that
  // HasExpired() can later tell whether the buffer has moved past it.
  void SetPositionInBuffer(uint64_t aPosition) {
    mPositionInBuffer = aPosition;
  }

  // True when the buffer's valid range start has advanced past this marker's
  // recorded position, i.e. its buffer entry has been overwritten.
  bool HasExpired(uint64_t aBufferRangeStart) const {
    return mPositionInBuffer < aBufferRangeStart;
  }

  double GetTime() const { return mTime; }

  int GetThreadId() const { return mThreadId; }

  // Stream this marker as one JSON array element; the payload object is only
  // emitted when present.
  void StreamJSON(SpliceableJSONWriter& aWriter,
                  const mozilla::TimeStamp& aProcessStartTime,
                  UniqueStacks& aUniqueStacks) const {
    // Schema:
    //   [name, time, category, data]
    aWriter.StartArrayElement();
    {
      aUniqueStacks.mUniqueStrings->WriteElement(aWriter, mMarkerName.get());
      aWriter.DoubleElement(mTime);
      const JS::ProfilingCategoryPairInfo& info =
          JS::GetProfilingCategoryPairInfo(mCategoryPair);
      aWriter.IntElement(unsigned(info.mCategory));
      // TODO: Store the callsite for this marker if available:
      // if have location data
      //   b.NameValue(marker, "location", ...);
      if (mPayload) {
        aWriter.StartObjectElement(SpliceableJSONWriter::SingleLineStyle);
        { mPayload->StreamPayload(aWriter, aProcessStartTime, aUniqueStacks); }
        aWriter.EndObject();
      }
    }
    aWriter.EndArray();
  }

 private:
  mozilla::UniqueFreePtr<char> mMarkerName;  // owned copy of the name
  mozilla::UniquePtr<ProfilerMarkerPayload> mPayload;
  // Intrusive link written by ProfilerLinkedList (friend).
  ProfilerMarker* mNext;
  double mTime;
  uint64_t mPositionInBuffer;  // see SetPositionInBuffer()/HasExpired()
  int mThreadId;
  JS::ProfilingCategoryPair mCategoryPair;
};
// Minimal intrusive singly-linked FIFO list: elements of type T carry their
// own `mNext` link. The list does not own its elements.
template <typename T>
class ProfilerLinkedList {
 public:
  ProfilerLinkedList() : mHead(nullptr), mTail(nullptr) {}

  // Append aElem at the tail; aElem's mNext link is overwritten.
  void insert(T* aElem) {
    if (mTail) {
      mTail->mNext = aElem;
    } else {
      mHead = aElem;
    }
    mTail = aElem;
    aElem->mNext = nullptr;
  }

  // Detach and return the head element; asserts (and returns nullptr) when
  // the list is empty.
  T* popHead() {
    T* oldHead = mHead;
    if (!oldHead) {
      MOZ_ASSERT(false);
      return nullptr;
    }
    mHead = oldHead->mNext;
    if (!mHead) {
      mTail = nullptr;
    }
    return oldHead;
  }

  // Non-detaching look at the head element (nullptr when empty).
  const T* peek() { return mHead; }

 private:
  T* mHead;
  T* mTail;
};
typedef ProfilerLinkedList<ProfilerMarker> ProfilerMarkerLinkedList;
// A ProfilerLinkedList wrapper that may be read from a signal handler while
// the owning thread could be mid-insert. An atomic flag (mSignalLock) marks
// the window during which the list is being mutated; a signal-time reader
// simply gets nullptr during that window instead of blocking.
template <typename T>
class ProfilerSignalSafeLinkedList {
 public:
  ProfilerSignalSafeLinkedList() : mSignalLock(false) {}

  ~ProfilerSignalSafeLinkedList() {
    if (mSignalLock) {
      // Some thread is modifying the list. We should only be released on that
      // thread.
      abort();
    }
    reset();
  }

  // Reset the list of pending signals in this list.
  // We assume that this is called at a time when it is
  // guaranteed that no more than a single user (the caller)
  // is accessing the list. In particular, it is only
  // called from within the RacyRegisteredThread::ReinitializeOnResume
  // method.
  void reset() {
    // Unlike ProfilerLinkedList itself, this wrapper deletes the elements.
    while (mList.peek()) {
      delete mList.popHead();
    }
  }

  // Insert an item into the list. Must only be called from the owning thread.
  // Must not be called while the list from accessList() is being accessed.
  // In the profiler, we ensure that by interrupting the profiled thread
  // (which is the one that owns this list and calls insert() on it) until
  // we're done reading the list from the signal handler.
  void insert(T* aElement) {
    MOZ_ASSERT(aElement);
    // The flag must bracket the mutation: set before touching mList and
    // cleared after, so accessList() never exposes a half-linked list.
    mSignalLock = true;
    mList.insert(aElement);
    mSignalLock = false;
  }

  // Called within signal, from any thread, possibly while insert() is in the
  // middle of modifying the list (on the owning thread). Will return null if
  // that is the case.
  // Function must be reentrant.
  ProfilerLinkedList<T>* accessList() { return mSignalLock ? nullptr : &mList; }

 private:
  ProfilerLinkedList<T> mList;
  // If this is set, then it's not safe to read the list because its contents
  // are being changed.
  mozilla::Atomic<bool> mSignalLock;
};
#endif // ProfilerMarker_h

View File

@ -12,7 +12,6 @@
#include "gfxASurface.h"
#include "Layers.h"
#include "mozilla/BlocksRingBufferGeckoExtensions.h"
#include "mozilla/Maybe.h"
#include "mozilla/net/HttpBaseChannel.h"
#include "mozilla/Preferences.h"
@ -22,76 +21,6 @@
using namespace mozilla;
// Deserializer registered at tag 0: represents "no payload" and always
// yields a null ProfilerMarkerPayload.
static UniquePtr<ProfilerMarkerPayload> DeserializeNothing(
    BlocksRingBuffer::EntryReader&) {
  return nullptr;
}
// Count of registered deserializers; 0 is a transient sentinel meaning a
// registration write is in progress (see TagForDeserializer).
// Starting at 1 for the initial `DeserializeNothing`.
// static
Atomic<ProfilerMarkerPayload::DeserializerTagAtomic, ReleaseAcquire,
       recordreplay::Behavior::DontPreserve>
    ProfilerMarkerPayload::sDeserializerCount{1};

// Initialize `sDeserializers` with `DeserializeNothing` at index 0, all others
// are nullptrs.
// static
ProfilerMarkerPayload::Deserializer
    ProfilerMarkerPayload::sDeserializers[DeserializerMax] = {
        DeserializeNothing};
// static
// Return a stable process-wide tag for aDeserializer, registering it in
// sDeserializers on first use. Lock-free: sDeserializerCount doubles as the
// publication point, with 0 used as a transient "write in progress" value.
ProfilerMarkerPayload::DeserializerTag
ProfilerMarkerPayload::TagForDeserializer(
    ProfilerMarkerPayload::Deserializer aDeserializer) {
  if (!aDeserializer) {
    // Tag 0 is reserved for "no deserializer" (DeserializeNothing).
    return 0;
  }
  // Start first search at index 0.
  DeserializerTagAtomic start = 0;
  for (;;) {
    // Read the current count of deserializers.
    const DeserializerTagAtomic tagCount = sDeserializerCount;
    if (tagCount == 0) {
      // Someone else is currently writing into the array, loop around until we
      // get a valid count.
      continue;
    }
    for (DeserializerTagAtomic i = start; i < tagCount; ++i) {
      if (sDeserializers[i] == aDeserializer) {
        // Deserializer already registered, return its tag.
        return static_cast<ProfilerMarkerPayload::DeserializerTag>(i);
      }
    }
    // Not found yet, let's register this new deserializer.
    // Make sure we haven't reached the limit yet.
    MOZ_RELEASE_ASSERT(tagCount < DeserializerMax);
    // Reserve `tagCount` as an index, if not already claimed:
    // If `sDeserializerCount` is still at our previously-read `tagCount`,
    // replace it with a special 0 value to indicate a write.
    if (sDeserializerCount.compareExchange(tagCount, 0)) {
      // Here we own the `tagCount` index, write the deserializer there.
      sDeserializers[tagCount] = aDeserializer;
      // And publish by writing the real new count (1 past our index).
      sDeserializerCount = tagCount + 1;
      return static_cast<ProfilerMarkerPayload::DeserializerTag>(tagCount);
    }
    // Someone else beat us to grab an index, and it could be for the same
    // deserializer! So let's just try searching starting from our recorded
    // `tagCount` (and maybe attempting again to register). It should be rare
    // enough and quick enough that it won't impact performances.
    start = tagCount;
  }
}
// static
// Map a tag back to its registered deserializer; the release asserts confirm
// the tag was actually issued by TagForDeserializer().
ProfilerMarkerPayload::Deserializer ProfilerMarkerPayload::DeserializerForTag(
    ProfilerMarkerPayload::DeserializerTag aTag) {
  MOZ_RELEASE_ASSERT(aTag < DeserializerMax);
  MOZ_RELEASE_ASSERT(aTag < sDeserializerCount);
  return sDeserializers[aTag];
}
static void MOZ_ALWAYS_INLINE WriteTime(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
const TimeStamp& aTime,
@ -102,103 +31,33 @@ static void MOZ_ALWAYS_INLINE WriteTime(SpliceableJSONWriter& aWriter,
}
void ProfilerMarkerPayload::StreamType(const char* aMarkerType,
SpliceableJSONWriter& aWriter) const {
SpliceableJSONWriter& aWriter) {
MOZ_ASSERT(aMarkerType);
aWriter.StringProperty("type", aMarkerType);
}
BlocksRingBuffer::Length
ProfilerMarkerPayload::CommonPropsTagAndSerializationBytes() const {
return sizeof(DeserializerTag) +
BlocksRingBuffer::SumBytes(mCommonProps.mStartTime,
mCommonProps.mEndTime, mCommonProps.mStack,
mCommonProps.mDocShellId,
mCommonProps.mDocShellHistoryId);
}
void ProfilerMarkerPayload::SerializeTagAndCommonProps(
DeserializerTag aDeserializerTag,
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
aEntryWriter.WriteObject(aDeserializerTag);
aEntryWriter.WriteObject(mCommonProps.mStartTime);
aEntryWriter.WriteObject(mCommonProps.mEndTime);
aEntryWriter.WriteObject(mCommonProps.mStack);
aEntryWriter.WriteObject(mCommonProps.mDocShellId);
aEntryWriter.WriteObject(mCommonProps.mDocShellHistoryId);
}
// static
ProfilerMarkerPayload::CommonProps
ProfilerMarkerPayload::DeserializeCommonProps(
BlocksRingBuffer::EntryReader& aEntryReader) {
CommonProps props;
aEntryReader.ReadIntoObject(props.mStartTime);
aEntryReader.ReadIntoObject(props.mEndTime);
aEntryReader.ReadIntoObject(props.mStack);
aEntryReader.ReadIntoObject(props.mDocShellId);
aEntryReader.ReadIntoObject(props.mDocShellHistoryId);
return props;
}
void ProfilerMarkerPayload::StreamCommonProps(
const char* aMarkerType, SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) const {
const TimeStamp& aProcessStartTime, UniqueStacks& aUniqueStacks) {
StreamType(aMarkerType, aWriter);
WriteTime(aWriter, aProcessStartTime, mCommonProps.mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mCommonProps.mEndTime, "endTime");
if (mCommonProps.mDocShellId) {
aWriter.StringProperty("docShellId",
nsIDToCString(*mCommonProps.mDocShellId).get());
WriteTime(aWriter, aProcessStartTime, mStartTime, "startTime");
WriteTime(aWriter, aProcessStartTime, mEndTime, "endTime");
if (mDocShellId) {
aWriter.StringProperty("docShellId", nsIDToCString(*mDocShellId).get());
}
if (mCommonProps.mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId",
mCommonProps.mDocShellHistoryId.ref());
if (mDocShellHistoryId) {
aWriter.DoubleProperty("docshellHistoryId", mDocShellHistoryId.ref());
}
if (mCommonProps.mStack) {
if (mStack) {
aWriter.StartObjectProperty("stack");
{
mCommonProps.mStack->StreamJSON(aWriter, aProcessStartTime,
aUniqueStacks);
}
{ mStack->StreamJSON(aWriter, aProcessStartTime, aUniqueStacks); }
aWriter.EndObject();
}
}
BlocksRingBuffer::Length TracingMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mCategory),
mKind);
}
void TracingMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndPayload(tag, aEntryWriter);
}
void TracingMarkerPayload::SerializeTagAndPayload(
DeserializerTag aDeserializerTag,
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
SerializeTagAndCommonProps(aDeserializerTag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mCategory));
aEntryWriter.WriteObject(mKind);
}
// static
UniquePtr<ProfilerMarkerPayload> TracingMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
const char* category = aEntryReader.ReadObject<const char*>();
TracingKind kind = aEntryReader.ReadObject<TracingKind>();
return UniquePtr<ProfilerMarkerPayload>(
new TracingMarkerPayload(std::move(props), category, kind));
}
void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("tracing", aWriter, aProcessStartTime, aUniqueStacks);
if (mCategory) {
@ -212,36 +71,9 @@ void TracingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length FileIOMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mSource),
mOperation, mFilename);
}
void FileIOMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mSource));
aEntryWriter.WriteObject(mOperation);
aEntryWriter.WriteObject(mFilename);
}
// static
UniquePtr<ProfilerMarkerPayload> FileIOMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto source = aEntryReader.ReadObject<const char*>();
auto operation = aEntryReader.ReadObject<UniqueFreePtr<char>>();
auto filename = aEntryReader.ReadObject<UniqueFreePtr<char>>();
return UniquePtr<ProfilerMarkerPayload>(new FileIOMarkerPayload(
std::move(props), source, std::move(operation), std::move(filename)));
}
void FileIOMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("FileIO", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("operation", mOperation.get());
aWriter.StringProperty("source", mSource);
@ -250,40 +82,9 @@ void FileIOMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length UserTimingMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mEntryType),
mName, mStartMark, mEndMark);
}
void UserTimingMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mEntryType));
aEntryWriter.WriteObject(mName);
aEntryWriter.WriteObject(mStartMark);
aEntryWriter.WriteObject(mEndMark);
}
// static
UniquePtr<ProfilerMarkerPayload> UserTimingMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto entryType = aEntryReader.ReadObject<const char*>();
auto name = aEntryReader.ReadObject<nsString>();
auto startMark = aEntryReader.ReadObject<Maybe<nsString>>();
auto endMark = aEntryReader.ReadObject<Maybe<nsString>>();
return UniquePtr<ProfilerMarkerPayload>(
new UserTimingMarkerPayload(std::move(props), entryType, std::move(name),
std::move(startMark), std::move(endMark)));
}
void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("UserTiming", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", NS_ConvertUTF16toUTF8(mName).get());
aWriter.StringProperty("entryType", mEntryType);
@ -302,99 +103,24 @@ void UserTimingMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length TextMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mText);
}
void TextMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mText);
}
// static
UniquePtr<ProfilerMarkerPayload> TextMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto text = aEntryReader.ReadObject<nsCString>();
return UniquePtr<ProfilerMarkerPayload>(
new TextMarkerPayload(std::move(props), std::move(text)));
}
void TextMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Text", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
}
BlocksRingBuffer::Length LogMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mModule, mText);
}
void LogMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mModule);
aEntryWriter.WriteObject(mText);
}
// static
UniquePtr<ProfilerMarkerPayload> LogMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto module = aEntryReader.ReadObject<nsAutoCStringN<32>>();
auto text = aEntryReader.ReadObject<nsCString>();
return UniquePtr<ProfilerMarkerPayload>(new LogMarkerPayload(
std::move(props), std::move(module), std::move(text)));
}
void LogMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Log", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("name", mText.get());
aWriter.StringProperty("module", mModule.get());
}
BlocksRingBuffer::Length DOMEventMarkerPayload::TagAndSerializationBytes()
const {
return TracingMarkerPayload::TagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mTimeStamp, mEventType);
}
void DOMEventMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
// Let our parent class serialize our tag with its payload.
TracingMarkerPayload::SerializeTagAndPayload(tag, aEntryWriter);
// Then write our extra data.
aEntryWriter.WriteObject(mTimeStamp);
aEntryWriter.WriteObject(mEventType);
}
// static
UniquePtr<ProfilerMarkerPayload> DOMEventMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
const char* category = aEntryReader.ReadObject<const char*>();
TracingKind kind = aEntryReader.ReadObject<TracingKind>();
auto timeStamp = aEntryReader.ReadObject<TimeStamp>();
auto eventType = aEntryReader.ReadObject<nsString>();
return UniquePtr<ProfilerMarkerPayload>(new DOMEventMarkerPayload(
std::move(props), category, kind, timeStamp, std::move(eventType)));
}
void DOMEventMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
TracingMarkerPayload::StreamPayload(aWriter, aProcessStartTime,
aUniqueStacks);
@ -402,46 +128,15 @@ void DOMEventMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
aWriter.StringProperty("eventType", NS_ConvertUTF16toUTF8(mEventType).get());
}
BlocksRingBuffer::Length PrefMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mPrefAccessTime, mPrefName, mPrefKind,
mPrefType, mPrefValue);
}
void PrefMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mPrefAccessTime);
aEntryWriter.WriteObject(mPrefName);
aEntryWriter.WriteObject(mPrefKind);
aEntryWriter.WriteObject(mPrefType);
aEntryWriter.WriteObject(mPrefValue);
}
// static
UniquePtr<ProfilerMarkerPayload> PrefMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto prefAccessTime = aEntryReader.ReadObject<TimeStamp>();
auto prefName = aEntryReader.ReadObject<nsCString>();
auto prefKind = aEntryReader.ReadObject<Maybe<PrefValueKind>>();
auto prefType = aEntryReader.ReadObject<Maybe<PrefType>>();
auto prefValue = aEntryReader.ReadObject<nsCString>();
return UniquePtr<ProfilerMarkerPayload>(new PrefMarkerPayload(
std::move(props), prefAccessTime, std::move(prefName),
std::move(prefKind), std::move(prefType), std::move(prefValue)));
}
static const char* PrefValueKindToString(const Maybe<PrefValueKind>& aKind) {
static const char* PrefValueKindToString(
const mozilla::Maybe<PrefValueKind>& aKind) {
if (aKind) {
return *aKind == PrefValueKind::Default ? "Default" : "User";
}
return "Shared";
}
static const char* PrefTypeToString(const Maybe<PrefType>& type) {
static const char* PrefTypeToString(const mozilla::Maybe<PrefType>& type) {
if (type) {
switch (*type) {
case PrefType::None:
@ -461,7 +156,7 @@ static const char* PrefTypeToString(const Maybe<PrefType>& type) {
void PrefMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("PreferenceRead", aWriter, aProcessStartTime,
aUniqueStacks);
WriteTime(aWriter, aProcessStartTime, mPrefAccessTime, "prefAccessTime");
@ -471,35 +166,9 @@ void PrefMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
aWriter.StringProperty("prefValue", mPrefValue.get());
}
BlocksRingBuffer::Length
LayerTranslationMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(WrapBlocksRingBufferRawPointer(mLayer),
mPoint);
}
void LayerTranslationMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mLayer));
aEntryWriter.WriteObject(mPoint);
}
// static
UniquePtr<ProfilerMarkerPayload> LayerTranslationMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto layer = aEntryReader.ReadObject<layers::Layer*>();
auto point = aEntryReader.ReadObject<gfx::Point>();
return UniquePtr<ProfilerMarkerPayload>(
new LayerTranslationMarkerPayload(std::move(props), layer, point));
}
void LayerTranslationMarkerPayload::StreamPayload(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamType("LayerTranslation", aWriter);
const size_t bufferSize = 32;
char buffer[bufferSize];
@ -510,70 +179,12 @@ void LayerTranslationMarkerPayload::StreamPayload(
aWriter.IntProperty("y", mPoint.y);
}
BlocksRingBuffer::Length VsyncMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes();
}
void VsyncMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
// static
UniquePtr<ProfilerMarkerPayload> VsyncMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new VsyncMarkerPayload(std::move(props)));
}
void VsyncMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamType("VsyncTimestamp", aWriter);
}
BlocksRingBuffer::Length NetworkMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mID, mURI, mRedirectURI, mType, mPri,
mCount, mTimings, mCacheDisposition);
}
void NetworkMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mID);
aEntryWriter.WriteObject(mURI);
aEntryWriter.WriteObject(mRedirectURI);
aEntryWriter.WriteObject(mType);
aEntryWriter.WriteObject(mPri);
aEntryWriter.WriteObject(mCount);
aEntryWriter.WriteObject(mTimings);
aEntryWriter.WriteObject(mCacheDisposition);
}
// static
UniquePtr<ProfilerMarkerPayload> NetworkMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto id = aEntryReader.ReadObject<int64_t>();
auto uri = aEntryReader.ReadObject<UniqueFreePtr<char>>();
auto redirectURI = aEntryReader.ReadObject<UniqueFreePtr<char>>();
auto type = aEntryReader.ReadObject<NetworkLoadType>();
auto pri = aEntryReader.ReadObject<int32_t>();
auto count = aEntryReader.ReadObject<int64_t>();
auto timings = aEntryReader.ReadObject<net::TimingStruct>();
auto cacheDisposition = aEntryReader.ReadObject<net::CacheDisposition>();
return UniquePtr<ProfilerMarkerPayload>(new NetworkMarkerPayload(
std::move(props), id, std::move(uri), std::move(redirectURI), type, pri,
count, timings, cacheDisposition));
}
static const char* GetNetworkState(NetworkLoadType aType) {
switch (aType) {
case NetworkLoadType::LOAD_START:
@ -586,19 +197,20 @@ static const char* GetNetworkState(NetworkLoadType aType) {
return "";
}
static const char* GetCacheState(net::CacheDisposition aCacheDisposition) {
static const char* GetCacheState(
mozilla::net::CacheDisposition aCacheDisposition) {
switch (aCacheDisposition) {
case net::kCacheUnresolved:
case mozilla::net::kCacheUnresolved:
return "Unresolved";
case net::kCacheHit:
case mozilla::net::kCacheHit:
return "Hit";
case net::kCacheHitViaReval:
case mozilla::net::kCacheHitViaReval:
return "HitViaReval";
case net::kCacheMissedViaReval:
case mozilla::net::kCacheMissedViaReval:
return "MissedViaReval";
case net::kCacheMissed:
case mozilla::net::kCacheMissed:
return "Missed";
case net::kCacheUnknown:
case mozilla::net::kCacheUnknown:
default:
return nullptr;
}
@ -606,7 +218,7 @@ static const char* GetCacheState(net::CacheDisposition aCacheDisposition) {
void NetworkMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Network", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.IntProperty("id", mID);
const char* typeString = GetNetworkState(mType);
@ -647,37 +259,9 @@ void NetworkMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length ScreenshotPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mScreenshotDataURL, mWindowSize,
mWindowIdentifier);
}
void ScreenshotPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mScreenshotDataURL);
aEntryWriter.WriteObject(mWindowSize);
aEntryWriter.WriteObject(mWindowIdentifier);
}
// static
UniquePtr<ProfilerMarkerPayload> ScreenshotPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto screenshotDataURL = aEntryReader.ReadObject<nsCString>();
auto windowSize = aEntryReader.ReadObject<gfx::IntSize>();
auto windowIdentifier = aEntryReader.ReadObject<uintptr_t>();
return UniquePtr<ProfilerMarkerPayload>(
new ScreenshotPayload(std::move(props), std::move(screenshotDataURL),
windowSize, windowIdentifier));
}
void ScreenshotPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamType("CompositorScreenshot", aWriter);
aUniqueStacks.mUniqueStrings->WriteProperty(aWriter, "url",
mScreenshotDataURL.get());
@ -689,32 +273,9 @@ void ScreenshotPayload::StreamPayload(SpliceableJSONWriter& aWriter,
aWriter.DoubleProperty("windowHeight", mWindowSize.height);
}
BlocksRingBuffer::Length GCSliceMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mTimingJSON);
}
void GCSliceMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mTimingJSON);
}
// static
UniquePtr<ProfilerMarkerPayload> GCSliceMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto timingJSON = aEntryReader.ReadObject<JS::UniqueChars>();
return UniquePtr<ProfilerMarkerPayload>(
new GCSliceMarkerPayload(std::move(props), std::move(timingJSON)));
}
void GCSliceMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCSlice", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
@ -724,32 +285,9 @@ void GCSliceMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length GCMajorMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mTimingJSON);
}
void GCMajorMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mTimingJSON);
}
// static
UniquePtr<ProfilerMarkerPayload> GCMajorMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto timingJSON = aEntryReader.ReadObject<JS::UniqueChars>();
return UniquePtr<ProfilerMarkerPayload>(
new GCMajorMarkerPayload(std::move(props), std::move(timingJSON)));
}
void GCMajorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingJSON);
StreamCommonProps("GCMajor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingJSON) {
@ -759,32 +297,9 @@ void GCMajorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length GCMinorMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mTimingData);
}
void GCMinorMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mTimingData);
}
// static
UniquePtr<ProfilerMarkerPayload> GCMinorMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto timingData = aEntryReader.ReadObject<JS::UniqueChars>();
return UniquePtr<ProfilerMarkerPayload>(
new GCMinorMarkerPayload(std::move(props), std::move(timingData)));
}
void GCMinorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
MOZ_ASSERT(mTimingData);
StreamCommonProps("GCMinor", aWriter, aProcessStartTime, aUniqueStacks);
if (mTimingData) {
@ -794,57 +309,16 @@ void GCMinorMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
}
}
BlocksRingBuffer::Length HangMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes();
}
// Writes only the deserializer tag and the common props; there are no
// payload-specific fields for hang markers.
void HangMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
// Tag resolved once; it maps this payload type to its Deserialize function.
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
// static
// Reconstructs a HangMarkerPayload from aEntryReader. Only the common props
// were serialized, so that is all that is read back.
UniquePtr<ProfilerMarkerPayload> HangMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new HangMarkerPayload(std::move(props)));
}
void HangMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("BHR-detected hang", aWriter, aProcessStartTime,
aUniqueStacks);
}
// Serialized size is the common-props portion plus the mStats structure.
// Must stay in sync with SerializeTagAndPayload below.
BlocksRingBuffer::Length StyleMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mStats);
}
// Writes the deserializer tag, the common props, and then the traversal
// statistics. Field order must match StyleMarkerPayload::Deserialize.
void StyleMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
// Tag resolved once; it maps this payload type to its Deserialize function.
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mStats);
}
// static
// Reconstructs a StyleMarkerPayload from aEntryReader, reading fields in the
// order SerializeTagAndPayload wrote them: common props, then the
// ServoTraversalStatistics (passed to the constructor by value, not moved).
UniquePtr<ProfilerMarkerPayload> StyleMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto stats = aEntryReader.ReadObject<ServoTraversalStatistics>();
return UniquePtr<ProfilerMarkerPayload>(
new StyleMarkerPayload(std::move(props), stats));
}
void StyleMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("Styles", aWriter, aProcessStartTime, aUniqueStacks);
aWriter.StringProperty("category", "Paint");
aWriter.IntProperty("elementsTraversed", mStats.mElementsTraversed);
@ -854,80 +328,17 @@ void StyleMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
aWriter.IntProperty("stylesReused", mStats.mStylesReused);
}
// Long-task markers carry no payload-specific fields, so only the common
// props contribute to the serialized size.
BlocksRingBuffer::Length LongTaskMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes();
}
// Writes only the deserializer tag and the common props; there are no
// payload-specific fields for long-task markers.
void LongTaskMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
// Tag resolved once; it maps this payload type to its Deserialize function.
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
}
// static
// Reconstructs a LongTaskMarkerPayload from aEntryReader. Only the common
// props were serialized, so that is all that is read back.
UniquePtr<ProfilerMarkerPayload> LongTaskMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
return UniquePtr<ProfilerMarkerPayload>(
new LongTaskMarkerPayload(std::move(props)));
}
void LongTaskMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("MainThreadLongTask", aWriter, aProcessStartTime,
aUniqueStacks);
aWriter.StringProperty("category", "LongTask");
}
UniqueFreePtr<const char16_t> mTypeName;
UniqueFreePtr<const char> mClassName;
UniqueFreePtr<const char16_t> mDescriptiveTypeName;
const char* mCoarseType;
uint64_t mSize;
bool mInNursery;
// Serialized size is the common-props portion plus all six allocation
// fields. Must stay in sync with SerializeTagAndPayload below.
BlocksRingBuffer::Length JsAllocationMarkerPayload::TagAndSerializationBytes()
const {
return CommonPropsTagAndSerializationBytes() +
BlocksRingBuffer::SumBytes(mTypeName, mClassName, mDescriptiveTypeName,
mCoarseType, mSize, mInNursery);
}
// Writes the deserializer tag, the common props, then the six allocation
// fields. Field order must match JsAllocationMarkerPayload::Deserialize.
void JsAllocationMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
// Tag resolved once; it maps this payload type to its Deserialize function.
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mTypeName);
aEntryWriter.WriteObject(mClassName);
aEntryWriter.WriteObject(mDescriptiveTypeName);
// mCoarseType is written as a wrapped raw pointer rather than an owned
// string — presumably it points at static storage; TODO confirm lifetime.
aEntryWriter.WriteObject(WrapBlocksRingBufferRawPointer(mCoarseType));
aEntryWriter.WriteObject(mSize);
aEntryWriter.WriteObject(mInNursery);
}
// static
// Reconstructs a JsAllocationMarkerPayload from aEntryReader, reading fields
// in the exact order SerializeTagAndPayload wrote them: common props, the
// three name strings, the coarse-type raw pointer, size, and nursery flag.
UniquePtr<ProfilerMarkerPayload> JsAllocationMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto typeName = aEntryReader.ReadObject<UniqueFreePtr<const char16_t>>();
auto className = aEntryReader.ReadObject<UniqueFreePtr<const char>>();
auto descriptiveTypeName =
aEntryReader.ReadObject<UniqueFreePtr<const char16_t>>();
// Read back as a plain pointer, mirroring the raw-pointer write above.
auto coarseType = aEntryReader.ReadObject<const char*>();
auto size = aEntryReader.ReadObject<uint64_t>();
auto inNursery = aEntryReader.ReadObject<bool>();
// Owned strings are moved; trivially-copyable fields are passed by value.
return UniquePtr<ProfilerMarkerPayload>(new JsAllocationMarkerPayload(
std::move(props), std::move(typeName), std::move(className),
std::move(descriptiveTypeName), coarseType, size, inNursery));
}
void JsAllocationMarkerPayload::StreamPayload(
SpliceableJSONWriter& aWriter, const TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const {
UniqueStacks& aUniqueStacks) {
StreamCommonProps("JS allocation", aWriter, aProcessStartTime, aUniqueStacks);
if (mClassName) {

View File

@ -8,6 +8,8 @@
#define RegisteredThread_h
#include "platform.h"
#include "ProfilerMarker.h"
#include "ProfilerMarkerPayload.h"
#include "ThreadInfo.h"
#include "js/TraceLoggerAPI.h"
@ -35,9 +37,30 @@ class RacyRegisteredThread final {
bool IsBeingProfiled() const { return mIsBeingProfiled; }
void AddPendingMarker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload,
double aTime) {
// Note: We don't assert on mIsBeingProfiled, because it could have changed
// between the check in the caller and now.
ProfilerMarker* marker = new ProfilerMarker(
aMarkerName, aCategoryPair, mThreadId, std::move(aPayload), aTime);
mPendingMarkers.insert(marker);
}
// Called within signal. Function must be reentrant.
ProfilerMarkerLinkedList* GetPendingMarkers() {
// The profiled thread is interrupted, so we can access the list safely.
// Unless the profiled thread was in the middle of changing the list when
// we interrupted it - in that case, accessList() will return null.
return mPendingMarkers.accessList();
}
// This is called on every profiler restart. Put things that should happen at
// that time here.
void ReinitializeOnResume() {
mPendingMarkers.reset();
// This is needed to cause an initial sample to be taken from sleeping
// threads that had been observed prior to the profiler stopping and
// restarting. Otherwise sleeping threads would not have any samples to
@ -84,6 +107,9 @@ class RacyRegisteredThread final {
private:
class ProfilingStack mProfilingStack;
// A list of pending markers that must be moved to the circular buffer.
ProfilerSignalSafeLinkedList<ProfilerMarker> mPendingMarkers;
// mThreadId contains the thread ID of the current thread. It is safe to read
// this from multiple threads concurrently, as it will never be mutated.
const int mThreadId;

View File

@ -279,12 +279,7 @@ typedef const PSAutoLock& PSLockRef;
class CorePS {
private:
CorePS()
: mProcessStartTime(TimeStamp::ProcessCreation()),
// This needs its own mutex, because it is used concurrently from
// functions guarded by gPSMutex as well as others without safety (e.g.,
// profiler_add_marker). It is *not* used inside the critical section of
// the sampler, because mutexes cannot be used there.
mCoreBlocksRingBuffer(BlocksRingBuffer::ThreadSafety::WithMutex)
: mProcessStartTime(TimeStamp::ProcessCreation())
#ifdef USE_LUL_STACKWALK
,
mLul(nullptr)
@ -337,9 +332,6 @@ class CorePS {
// No PSLockRef is needed for this field because it's immutable.
PS_GET_LOCKLESS(TimeStamp, ProcessStartTime)
// No PSLockRef is needed for this field because it's thread-safe.
PS_GET_LOCKLESS(BlocksRingBuffer&, CoreBlocksRingBuffer)
PS_GET(const Vector<UniquePtr<RegisteredThread>>&, RegisteredThreads)
static void AppendRegisteredThread(
@ -424,17 +416,6 @@ class CorePS {
// The time that the process started.
const TimeStamp mProcessStartTime;
// The thread-safe blocks-oriented ring buffer into which all profiling data
// is recorded.
// ActivePS controls the lifetime of the underlying contents buffer: When
// ActivePS does not exist, mCoreBlocksRingBuffer is empty and rejects all
// reads&writes; see ActivePS for further details.
// Note: This needs to live here outside of ActivePS, because some producers
// are indirectly controlled (e.g., by atomic flags) and therefore may still
// attempt to write some data shortly after ActivePS has shutdown and deleted
// the underlying buffer in memory.
BlocksRingBuffer mCoreBlocksRingBuffer;
// Info on all the registered threads.
// ThreadIds in mRegisteredThreads are unique.
Vector<UniquePtr<RegisteredThread>> mRegisteredThreads;
@ -497,13 +478,11 @@ class ActivePS {
mDuration(aDuration),
mInterval(aInterval),
mFeatures(AdjustFeatures(aFeatures, aFilterCount)),
// 8 bytes per entry.
mProfileBuffer(
MakeUnique<ProfileBuffer>(CorePS::CoreBlocksRingBuffer(),
PowerOfTwo32(aCapacity.Value() * 8))),
mBuffer(MakeUnique<ProfileBuffer>(aCapacity))
// The new sampler thread doesn't start sampling immediately because the
// main loop within Run() is blocked until this function's caller
// unlocks gPSMutex.
,
mSamplerThread(NewSamplerThread(aLock, mGeneration, aInterval)),
mInterposeObserver(ProfilerFeature::HasMainThreadIO(aFeatures)
? new ProfilerIOInterposeObserver()
@ -634,7 +613,7 @@ class ActivePS {
static size_t SizeOf(PSLockRef, MallocSizeOf aMallocSizeOf) {
size_t n = aMallocSizeOf(sInstance);
n += sInstance->mProfileBuffer->SizeOfIncludingThis(aMallocSizeOf);
n += sInstance->mBuffer->SizeOfIncludingThis(aMallocSizeOf);
// Measurement of the following members may be added later if DMD finds it
// is worthwhile:
@ -689,7 +668,7 @@ class ActivePS {
PS_GET(const Vector<std::string>&, Filters)
static ProfileBuffer& Buffer(PSLockRef) { return *sInstance->mProfileBuffer; }
static ProfileBuffer& Buffer(PSLockRef) { return *sInstance->mBuffer.get(); }
static const Vector<LiveProfiledThreadData>& LiveProfiledThreads(PSLockRef) {
return sInstance->mLiveProfiledThreads;
@ -777,7 +756,7 @@ class ActivePS {
LiveProfiledThreadData& thread = sInstance->mLiveProfiledThreads[i];
if (thread.mRegisteredThread == aRegisteredThread) {
thread.mProfiledThreadData->NotifyUnregistered(
sInstance->mProfileBuffer->BufferRangeEnd());
sInstance->mBuffer->mRangeEnd);
MOZ_RELEASE_ASSERT(sInstance->mDeadProfiledThreads.append(
std::move(thread.mProfiledThreadData)));
sInstance->mLiveProfiledThreads.erase(
@ -794,7 +773,7 @@ class ActivePS {
#endif
static void DiscardExpiredDeadProfiledThreads(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard any dead threads that were unregistered before bufferRangeStart.
sInstance->mDeadProfiledThreads.eraseIf(
[bufferRangeStart](
@ -813,7 +792,7 @@ class ActivePS {
for (size_t i = 0; i < registeredPages.length(); i++) {
RefPtr<PageInformation>& page = registeredPages[i];
if (page->DocShellId().Equals(aRegisteredDocShellId)) {
page->NotifyUnregistered(sInstance->mProfileBuffer->BufferRangeEnd());
page->NotifyUnregistered(sInstance->mBuffer->mRangeEnd);
MOZ_RELEASE_ASSERT(
sInstance->mDeadProfiledPages.append(std::move(page)));
registeredPages.erase(&registeredPages[i--]);
@ -822,7 +801,7 @@ class ActivePS {
}
static void DiscardExpiredPages(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard any dead pages that were unregistered before
// bufferRangeStart.
sInstance->mDeadProfiledPages.eraseIf(
@ -871,7 +850,7 @@ class ActivePS {
#endif
static void ClearExpiredExitProfiles(PSLockRef) {
uint64_t bufferRangeStart = sInstance->mProfileBuffer->BufferRangeStart();
uint64_t bufferRangeStart = sInstance->mBuffer->mRangeStart;
// Discard exit profiles that were gathered before our buffer RangeStart.
#ifdef MOZ_BASE_PROFILER
if (bufferRangeStart != 0 && sInstance->mBaseProfileThreads) {
@ -900,8 +879,8 @@ class ActivePS {
static void AddExitProfile(PSLockRef aLock, const nsCString& aExitProfile) {
ClearExpiredExitProfiles(aLock);
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(ExitProfile{
aExitProfile, sInstance->mProfileBuffer->BufferRangeEnd()}));
MOZ_RELEASE_ASSERT(sInstance->mExitProfiles.append(
ExitProfile{aExitProfile, sInstance->mBuffer->mRangeEnd}));
}
static Vector<nsCString> MoveExitProfiles(PSLockRef aLock) {
@ -941,10 +920,10 @@ class ActivePS {
const uint32_t mGeneration;
static uint32_t sNextGeneration;
// The maximum number of entries in mProfileBuffer.
// The maximum number of entries in mBuffer.
const PowerOfTwo32 mCapacity;
// The maximum duration of entries in mProfileBuffer, in seconds.
// The maximum duration of entries in mBuffer, in seconds.
const Maybe<double> mDuration;
// The interval between samples, measured in milliseconds.
@ -958,7 +937,7 @@ class ActivePS {
// The buffer into which all samples are recorded. Always non-null. Always
// used in conjunction with CorePS::m{Live,Dead}Threads.
const UniquePtr<ProfileBuffer> mProfileBuffer;
const UniquePtr<ProfileBuffer> mBuffer;
// ProfiledThreadData objects for any threads that were profiled at any point
// during this run of the profiler:
@ -1696,17 +1675,24 @@ static void DoNativeBacktrace(PSLockRef aLock,
// ProfileBuffer::StreamSamplesToJSON.
static inline void DoSharedSample(PSLockRef aLock, bool aIsSynchronous,
RegisteredThread& aRegisteredThread,
const Registers& aRegs, uint64_t aSamplePos,
const TimeStamp& aNow, const Registers& aRegs,
Maybe<uint64_t>* aLastSample,
ProfileBuffer& aBuffer) {
// WARNING: this function runs within the profiler's "critical section".
MOZ_ASSERT(!aBuffer.IsThreadSafe(),
"Mutexes cannot be used inside this critical section");
MOZ_RELEASE_ASSERT(ActivePS::Exists(aLock));
uint64_t samplePos =
aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId());
if (aLastSample) {
*aLastSample = Some(samplePos);
}
TimeDuration delta = aNow - CorePS::ProcessStartTime();
aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
ProfileBufferCollector collector(aBuffer, ActivePS::Features(aLock),
aSamplePos);
samplePos);
NativeStack nativeStack;
#if defined(HAVE_NATIVE_UNWIND)
if (ActivePS::FeatureStackWalk(aLock)) {
@ -1733,34 +1719,35 @@ static void DoSyncSample(PSLockRef aLock, RegisteredThread& aRegisteredThread,
ProfileBuffer& aBuffer) {
// WARNING: this function runs within the profiler's "critical section".
uint64_t samplePos =
aBuffer.AddThreadIdEntry(aRegisteredThread.Info()->ThreadId());
TimeDuration delta = aNow - CorePS::ProcessStartTime();
aBuffer.AddEntry(ProfileBufferEntry::Time(delta.ToMilliseconds()));
DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aRegs,
samplePos, aBuffer);
DoSharedSample(aLock, /* aIsSynchronous = */ true, aRegisteredThread, aNow,
aRegs, /* aLastSample = */ nullptr, aBuffer);
}
// Writes the components of a periodic sample to ActivePS's ProfileBuffer.
// The ThreadId entry is already written in the main ProfileBuffer, its location
// is `aSamplePos`, we can write the rest to `aBuffer` (which may be different).
static void DoPeriodicSample(PSLockRef aLock,
RegisteredThread& aRegisteredThread,
ProfiledThreadData& aProfiledThreadData,
const TimeStamp& aNow, const Registers& aRegs,
uint64_t aSamplePos, ProfileBuffer& aBuffer) {
const TimeStamp& aNow, const Registers& aRegs) {
// WARNING: this function runs within the profiler's "critical section".
DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aRegs,
aSamplePos, aBuffer);
ProfileBuffer& buffer = ActivePS::Buffer(aLock);
DoSharedSample(aLock, /* aIsSynchronous = */ false, aRegisteredThread, aNow,
aRegs, &aProfiledThreadData.LastSample(), buffer);
ProfilerMarkerLinkedList* pendingMarkersList =
aRegisteredThread.RacyRegisteredThread().GetPendingMarkers();
while (pendingMarkersList && pendingMarkersList->peek()) {
ProfilerMarker* marker = pendingMarkersList->popHead();
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
}
ThreadResponsiveness* resp = aProfiledThreadData.GetThreadResponsiveness();
if (resp && resp->HasData()) {
double delta = resp->GetUnresponsiveDuration(
(aNow - CorePS::ProcessStartTime()).ToMilliseconds());
aBuffer.AddEntry(ProfileBufferEntry::Responsiveness(delta));
buffer.AddEntry(ProfileBufferEntry::Responsiveness(delta));
}
}
@ -2058,13 +2045,11 @@ static void StreamPages(PSLockRef aLock, SpliceableJSONWriter& aWriter) {
}
#if defined(GP_OS_android)
static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData(
BlocksRingBuffer& bufferManager) {
static UniquePtr<ProfileBuffer> CollectJavaThreadProfileData() {
// locked_profiler_start uses sample count is 1000 for Java thread.
// This entry size is enough now, but we might have to estimate it
// if we can customize it
auto buffer = MakeUnique<ProfileBuffer>(bufferManager,
MakePowerOfTwo32<8 * 1024 * 1024>());
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024 * 1024>());
int sampleId = 0;
while (true) {
@ -2124,19 +2109,10 @@ static void locked_profiler_stream_json_for_this_process(
MOZ_RELEASE_ASSERT(CorePS::Exists() && ActivePS::Exists(aLock));
AUTO_PROFILER_STATS(locked_profiler_stream_json_for_this_process);
const double collectionStartMs = profiler_time();
double collectionStart = profiler_time();
ProfileBuffer& buffer = ActivePS::Buffer(aLock);
// If there is a set "Window length", discard older data.
Maybe<double> durationS = ActivePS::Duration(aLock);
if (durationS.isSome()) {
const double durationStartMs = collectionStartMs - *durationS * 1000;
buffer.DiscardSamplesBeforeTime(durationStartMs);
}
// Put shared library info
aWriter.StartArrayProperty("libs");
AppendSharedLibraries(aWriter);
@ -2184,10 +2160,7 @@ static void locked_profiler_stream_json_for_this_process(
if (ActivePS::FeatureJava(aLock)) {
java::GeckoJavaSampler::Pause();
BlocksRingBuffer bufferManager(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
UniquePtr<ProfileBuffer> javaBuffer =
CollectJavaThreadProfileData(bufferManager);
UniquePtr<ProfileBuffer> javaBuffer = CollectJavaThreadProfileData();
// Thread id of java Main thread is 0, if we support profiling of other
// java thread, we have to get thread id and name via JNI.
@ -2231,15 +2204,15 @@ static void locked_profiler_stream_json_for_this_process(
{ buffer.StreamPausedRangesToJSON(aWriter, aSinceTime); }
aWriter.EndArray();
const double collectionEndMs = profiler_time();
double collectionEnd = profiler_time();
// Record timestamps for the collection into the buffer, so that consumers
// know why we didn't collect any samples for its duration.
// We put these entries into the buffer after we've collected the profile,
// so they'll be visible for the *next* profile collection (if they haven't
// been overwritten due to buffer wraparound by then).
buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStartMs));
buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEndMs));
buffer.AddEntry(ProfileBufferEntry::CollectionStart(collectionStart));
buffer.AddEntry(ProfileBufferEntry::CollectionEnd(collectionEnd));
}
bool profiler_stream_json_for_this_process(
@ -2320,7 +2293,7 @@ static void PrintUsageThenExit(int aExitCode) {
" started.\n"
" If unset, the platform default is used:\n"
" %u entries per process, or %u when MOZ_PROFILER_STARTUP is set.\n"
" (8 bytes per entry -> %u or %u total bytes per process)\n"
" (%zu bytes per entry -> %zu or %zu total bytes per process)\n"
"\n"
" MOZ_PROFILER_STARTUP_DURATION=<1..>\n"
" If MOZ_PROFILER_STARTUP is set, specifies the maximum life time of\n"
@ -2350,8 +2323,9 @@ static void PrintUsageThenExit(int aExitCode) {
" S/s=MOZ_PROFILER_STARTUP extra default/unavailable)\n",
unsigned(PROFILER_DEFAULT_ENTRIES.Value()),
unsigned(PROFILER_DEFAULT_STARTUP_ENTRIES.Value()),
unsigned(PROFILER_DEFAULT_ENTRIES.Value() * 8),
unsigned(PROFILER_DEFAULT_STARTUP_ENTRIES.Value() * 8));
sizeof(ProfileBufferEntry),
sizeof(ProfileBufferEntry) * PROFILER_DEFAULT_ENTRIES.Value(),
sizeof(ProfileBufferEntry) * PROFILER_DEFAULT_STARTUP_ENTRIES.Value());
#define PRINT_FEATURE(n_, str_, Name_, desc_) \
printf(" %c %5u: \"%s\" (%s)\n", FeatureCategory(ProfilerFeature::Name_), \
@ -2514,18 +2488,6 @@ static SamplerThread* NewSamplerThread(PSLockRef aLock, uint32_t aGeneration,
void SamplerThread::Run() {
PR_SetCurrentThreadName("SamplerThread");
// Use local BlocksRingBuffer&ProfileBuffer to capture the stack.
// (This is to avoid touching the CorePS::BlocksRingBuffer lock while
// a thread is suspended, because that thread could be working with
// the CorePS::BlocksRingBuffer as well.)
BlocksRingBuffer localBlocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
ProfileBuffer localProfileBuffer(localBlocksRingBuffer,
MakePowerOfTwo32<65536>());
// Will be kept between collections, to know what each collection does.
auto previousState = localBlocksRingBuffer.GetState();
// This will be positive if we are running behind schedule (sampling less
// frequently than desired) and negative if we are ahead of schedule.
TimeDuration lastSleepOvershoot = 0;
@ -2550,6 +2512,7 @@ void SamplerThread::Run() {
ActivePS::ClearExpiredExitProfiles(lock);
ActivePS::Buffer(lock).DeleteExpiredStoredMarkers();
TimeStamp expiredMarkersCleaned = TimeStamp::NowUnfuzzed();
if (!ActivePS::IsPaused(lock)) {
@ -2561,6 +2524,7 @@ void SamplerThread::Run() {
// handle per-process generic counters
const Vector<BaseProfilerCount*>& counters = CorePS::Counters(lock);
TimeStamp now = TimeStamp::NowUnfuzzed();
for (auto& counter : counters) {
// create Buffer entries for each counter
buffer.AddEntry(ProfileBufferEntry::CounterId(counter));
@ -2607,64 +2571,12 @@ void SamplerThread::Run() {
AUTO_PROFILER_STATS(gecko_SamplerThread_Run_DoPeriodicSample);
TimeStamp now = TimeStamp::NowUnfuzzed();
// Add the thread ID now, so we know its position in the main buffer,
// which is used by some JS data.
// (DoPeriodicSample only knows about the temporary local buffer.)
uint64_t samplePos =
buffer.AddThreadIdEntry(registeredThread->Info()->ThreadId());
profiledThreadData->LastSample() = Some(samplePos);
// Also add the time, so it's always there after the thread ID, as
// expected by the parser. (Other stack data is optional.)
TimeDuration delta = now - CorePS::ProcessStartTime();
buffer.AddEntry(ProfileBufferEntry::TimeBeforeCompactStack(
delta.ToMilliseconds()));
// Suspend the thread and collect its stack data in the local buffer.
now = TimeStamp::NowUnfuzzed();
mSampler.SuspendAndSampleAndResumeThread(
lock, *registeredThread, [&](const Registers& aRegs) {
DoPeriodicSample(lock, *registeredThread, *profiledThreadData,
now, aRegs, samplePos, localProfileBuffer);
now, aRegs);
});
// There *must* be a CompactStack after a TimeBeforeCompactStack; but
// note that other entries may have been concurrently inserted between
// the TimeBeforeCompactStack above and now. If the captured sample
// from `DoPeriodicSample` is complete, copy it into the global
// buffer, otherwise add an empty one to satisfy the parser that
// expects one.
auto state = localBlocksRingBuffer.GetState();
if (NS_WARN_IF(state.mClearedBlockCount !=
previousState.mClearedBlockCount)) {
LOG("Stack sample too big for local storage, needed %u bytes",
unsigned(state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64()));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<BlocksRingBuffer>(nullptr));
} else if (state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64() >=
CorePS::CoreBlocksRingBuffer().BufferLength()->Value()) {
LOG("Stack sample too big for profiler storage, needed %u bytes",
unsigned(state.mRangeEnd.ConvertToU64() -
previousState.mRangeEnd.ConvertToU64()));
// There *must* be a CompactStack after a TimeBeforeCompactStack,
// even an empty one.
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack,
UniquePtr<BlocksRingBuffer>(nullptr));
} else {
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::CompactStack, localBlocksRingBuffer);
}
// Clean up for the next run.
localBlocksRingBuffer.Clear();
previousState = localBlocksRingBuffer.GetState();
}
#if defined(USE_LUL_STACKWALK)
@ -2682,6 +2594,14 @@ void SamplerThread::Run() {
countersSampled - expiredMarkersCleaned,
threadsSampled - countersSampled);
}
Maybe<double> duration = ActivePS::Duration(lock);
if (duration) {
ActivePS::Buffer(lock).DiscardSamplesBeforeTime(
(TimeStamp::NowUnfuzzed() - TimeDuration::FromSeconds(*duration) -
CorePS::ProcessStartTime())
.ToMilliseconds());
}
}
// gPSMutex is not held after this point.
@ -2847,7 +2767,7 @@ static ProfilingStack* locked_register_thread(PSLockRef aLock,
registeredThread->PollJSSampling();
if (registeredThread->GetJSContext()) {
profiledThreadData->NotifyReceivedJSContext(
ActivePS::Buffer(aLock).BufferRangeEnd());
ActivePS::Buffer(aLock).mRangeEnd);
}
}
}
@ -3469,11 +3389,10 @@ static void locked_profiler_start(PSLockRef aLock, PowerOfTwo32 aCapacity,
#endif
// Fall back to the default values if the passed-in values are unreasonable.
// Less than 8192 entries (65536 bytes) may not be enough for the most complex
// stack, so we should be able to store at least one full stack.
// TODO: Review magic numbers.
// Less than 1024 would not be enough for the most complex stack, so we should
// be able to store at least one full stack. TODO: Review magic numbers.
PowerOfTwo32 capacity =
(aCapacity.Value() >= 8192u) ? aCapacity : PROFILER_DEFAULT_ENTRIES;
(aCapacity.Value() >= 1024) ? aCapacity : PROFILER_DEFAULT_ENTRIES;
Maybe<double> duration = aDuration;
if (aDuration && *aDuration <= 0) {
@ -3786,7 +3705,6 @@ void profiler_pause() {
return;
}
RacyFeatures::SetPaused();
ActivePS::SetIsPaused(lock, true);
ActivePS::Buffer(lock).AddEntry(ProfileBufferEntry::Pause(profiler_time()));
}
@ -3811,7 +3729,6 @@ void profiler_resume() {
ActivePS::Buffer(lock).AddEntry(
ProfileBufferEntry::Resume(profiler_time()));
ActivePS::SetIsPaused(lock, false);
RacyFeatures::SetUnpaused();
}
// gPSMutex must be unlocked when we notify, to avoid potential deadlocks.
@ -4060,35 +3977,30 @@ UniqueProfilerBacktrace profiler_get_backtrace() {
regs.Clear();
#endif
// 65536 bytes should be plenty for a single backtrace.
auto bufferManager = MakeUnique<BlocksRingBuffer>(
BlocksRingBuffer::ThreadSafety::WithoutMutex);
auto buffer =
MakeUnique<ProfileBuffer>(*bufferManager, MakePowerOfTwo32<65536>());
// 1024 should be plenty for a single backtrace.
auto buffer = MakeUnique<ProfileBuffer>(MakePowerOfTwo32<1024>());
DoSyncSample(lock, *registeredThread, now, regs, *buffer.get());
return UniqueProfilerBacktrace(new ProfilerBacktrace(
"SyncProfile", tid, std::move(bufferManager), std::move(buffer)));
return UniqueProfilerBacktrace(
new ProfilerBacktrace("SyncProfile", tid, std::move(buffer)));
}
void ProfilerBacktraceDestructor::operator()(ProfilerBacktrace* aBacktrace) {
delete aBacktrace;
}
static void racy_profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload* aPayload) {
static void racy_profiler_add_marker(
const char* aMarkerName, JS::ProfilingCategoryPair aCategoryPair,
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// This function is hot enough that we use RacyFeatures, not ActivePS.
if (!profiler_can_accept_markers()) {
return;
}
// Note that it's possible that the above test would change again before we
// actually record the marker. Because of this imprecision it's possible to
// miss a marker or record one we shouldn't. Either way is not a big deal.
// We don't assert that RacyFeatures::IsActiveWithoutPrivacy() or
// RacyRegisteredThread::IsBeingProfiled() is true here, because it's
// possible that the result has changed since we tested it in the caller.
//
// Because of this imprecision it's possible to miss a marker or record one
// we shouldn't. Either way is not a big deal.
RacyRegisteredThread* racyRegisteredThread =
TLSRegisteredThread::RacyRegisteredThread();
@ -4100,39 +4012,41 @@ static void racy_profiler_add_marker(const char* aMarkerName,
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, racyRegisteredThread->ThreadId(),
WrapBlocksRingBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
racyRegisteredThread->AddPendingMarker(
aMarkerName, aCategoryPair, std::move(aPayload), delta.ToMilliseconds());
}
void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload& aPayload) {
racy_profiler_add_marker(aMarkerName, aCategoryPair, &aPayload);
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
// This function is hot enough that we use RacyFeatures, not ActivePS.
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(aPayload));
}
void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair) {
racy_profiler_add_marker(aMarkerName, aCategoryPair, nullptr);
profiler_add_marker(aMarkerName, aCategoryPair, nullptr);
}
// This is a simplified version of profiler_add_marker that can be easily passed
// into the JS engine.
void profiler_add_js_marker(const char* aMarkerName) {
AUTO_PROFILER_STATS(add_marker);
profiler_add_marker(aMarkerName, JS::ProfilingCategoryPair::JS);
profiler_add_marker(aMarkerName, JS::ProfilingCategoryPair::JS, nullptr);
}
void profiler_add_js_allocation_marker(JS::RecordAllocationInfo&& info) {
if (!profiler_can_accept_markers()) {
return;
}
AUTO_PROFILER_STATS(add_marker_with_JsAllocationMarkerPayload);
profiler_add_marker(
"JS allocation", JS::ProfilingCategoryPair::JS,
JsAllocationMarkerPayload(TimeStamp::Now(), std::move(info),
profiler_get_backtrace()));
MakeUnique<JsAllocationMarkerPayload>(TimeStamp::Now(), std::move(info),
profiler_get_backtrace()));
}
void profiler_add_network_marker(
@ -4141,7 +4055,7 @@ void profiler_add_network_marker(
mozilla::net::CacheDisposition aCacheDisposition,
const mozilla::net::TimingStruct* aTimings, nsIURI* aRedirectURI,
UniqueProfilerBacktrace aSource) {
if (!profiler_can_accept_markers()) {
if (!profiler_is_active()) {
return;
}
// These do allocations/frees/etc; avoid if not active
@ -4160,52 +4074,53 @@ void profiler_add_network_marker(
AUTO_PROFILER_STATS(add_marker_with_NetworkMarkerPayload);
profiler_add_marker(
name, JS::ProfilingCategoryPair::NETWORK,
NetworkMarkerPayload(
MakeUnique<NetworkMarkerPayload>(
static_cast<int64_t>(aChannelId), PromiseFlatCString(spec).get(),
aType, aStart, aEnd, aPriority, aCount, aCacheDisposition, aTimings,
PromiseFlatCString(redirect_spec).get(), std::move(aSource)));
}
// This logic needs to add a marker for a different thread, so we actually need
// to lock here.
void profiler_add_marker_for_thread(int aThreadId,
JS::ProfilingCategoryPair aCategoryPair,
const char* aMarkerName,
UniquePtr<ProfilerMarkerPayload> aPayload) {
MOZ_RELEASE_ASSERT(CorePS::Exists());
if (!profiler_can_accept_markers()) {
PSAutoLock lock(gPSMutex);
if (!ActivePS::Exists(lock)) {
return;
}
#ifdef DEBUG
{
PSAutoLock lock(gPSMutex);
if (!ActivePS::Exists(lock)) {
return;
}
// Assert that our thread ID makes sense
bool realThread = false;
const Vector<UniquePtr<RegisteredThread>>& registeredThreads =
CorePS::RegisteredThreads(lock);
for (auto& thread : registeredThreads) {
RefPtr<ThreadInfo> info = thread->Info();
if (info->ThreadId() == aThreadId) {
realThread = true;
break;
}
}
MOZ_ASSERT(realThread, "Invalid thread id");
}
#endif
// Create the ProfilerMarker which we're going to store.
TimeStamp origin = (aPayload && !aPayload->GetStartTime().IsNull())
? aPayload->GetStartTime()
: TimeStamp::NowUnfuzzed();
TimeDuration delta = origin - CorePS::ProcessStartTime();
CorePS::CoreBlocksRingBuffer().PutObjects(
ProfileBufferEntry::Kind::MarkerData, aThreadId,
WrapBlocksRingBufferUnownedCString(aMarkerName),
static_cast<uint32_t>(aCategoryPair), aPayload, delta.ToMilliseconds());
ProfilerMarker* marker =
new ProfilerMarker(aMarkerName, aCategoryPair, aThreadId,
std::move(aPayload), delta.ToMilliseconds());
#ifdef DEBUG
// Assert that our thread ID makes sense
bool realThread = false;
const Vector<UniquePtr<RegisteredThread>>& registeredThreads =
CorePS::RegisteredThreads(lock);
for (auto& thread : registeredThreads) {
RefPtr<ThreadInfo> info = thread->Info();
if (info->ThreadId() == aThreadId) {
realThread = true;
break;
}
}
MOZ_ASSERT(realThread, "Invalid thread id");
#endif
// Insert the marker into the buffer
ProfileBuffer& buffer = ActivePS::Buffer(lock);
buffer.AddStoredMarker(marker);
buffer.AddEntry(ProfileBufferEntry::Marker(marker));
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
@ -4217,14 +4132,14 @@ void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
VTUNE_TRACING(aMarkerName, aKind);
// This function is hot enough that we use RacyFeatures, notActivePS.
if (!profiler_can_accept_markers()) {
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
AUTO_PROFILER_STATS(add_marker_with_TracingMarkerPayload);
profiler_add_marker(aMarkerName, aCategoryPair,
TracingMarkerPayload(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId));
auto payload = MakeUnique<TracingMarkerPayload>(
aCategoryString, aKind, aDocShellId, aDocShellHistoryId);
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(payload));
}
void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
@ -4237,15 +4152,15 @@ void profiler_tracing(const char* aCategoryString, const char* aMarkerName,
VTUNE_TRACING(aMarkerName, aKind);
// This function is hot enough that we use RacyFeatures, notActivePS.
if (!profiler_can_accept_markers()) {
if (!RacyFeatures::IsActiveWithoutPrivacy()) {
return;
}
AUTO_PROFILER_STATS(add_marker_with_TracingMarkerPayload);
profiler_add_marker(
aMarkerName, aCategoryPair,
TracingMarkerPayload(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
auto payload =
MakeUnique<TracingMarkerPayload>(aCategoryString, aKind, aDocShellId,
aDocShellHistoryId, std::move(aCause));
racy_profiler_add_marker(aMarkerName, aCategoryPair, std::move(payload));
}
void profiler_add_text_marker(
@ -4258,8 +4173,8 @@ void profiler_add_text_marker(
AUTO_PROFILER_STATS(add_marker_with_TextMarkerPayload);
profiler_add_marker(
aMarkerName, aCategoryPair,
TextMarkerPayload(aText, aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
MakeUnique<TextMarkerPayload>(aText, aStartTime, aEndTime, aDocShellId,
aDocShellHistoryId, std::move(aCause)));
}
void profiler_set_js_context(JSContext* aCx) {
@ -4284,7 +4199,7 @@ void profiler_set_js_context(JSContext* aCx) {
ActivePS::GetProfiledThreadData(lock, registeredThread);
if (profiledThreadData) {
profiledThreadData->NotifyReceivedJSContext(
ActivePS::Buffer(lock).BufferRangeEnd());
ActivePS::Buffer(lock).mRangeEnd);
}
}
}

View File

@ -207,10 +207,6 @@ class RacyFeatures {
static void SetInactive() { sActiveAndFeatures = 0; }
static void SetPaused() { sActiveAndFeatures |= Paused; }
static void SetUnpaused() { sActiveAndFeatures &= ~Paused; }
static bool IsActive() { return uint32_t(sActiveAndFeatures) & Active; }
static bool IsActiveWithFeature(uint32_t aFeature) {
@ -223,18 +219,12 @@ class RacyFeatures {
return (af & Active) && !(af & ProfilerFeature::Privacy);
}
static bool IsActiveAndUnpausedWithoutPrivacy() {
uint32_t af = sActiveAndFeatures; // copy it first
return (af & Active) && !(af & (Paused | ProfilerFeature::Privacy));
}
private:
static constexpr uint32_t Active = 1u << 31;
static constexpr uint32_t Paused = 1u << 30;
static const uint32_t Active = 1u << 31;
// Ensure Active/Paused don't overlap with any of the feature bits.
// Ensure Active doesn't overlap with any of the feature bits.
# define NO_OVERLAP(n_, str_, Name_, desc_) \
static_assert(ProfilerFeature::Name_ != Paused, "bad feature value");
static_assert(ProfilerFeature::Name_ != Active, "bad Active value");
PROFILER_FOR_EACH_FEATURE(NO_OVERLAP);
@ -260,18 +250,18 @@ bool IsThreadBeingProfiled();
static constexpr mozilla::PowerOfTwo32 PROFILER_DEFAULT_ENTRIES =
# if !defined(ARCH_ARMV6)
mozilla::MakePowerOfTwo32<1u << 20>(); // 1'048'576 entries = 8MB
mozilla::MakePowerOfTwo32<1u << 20>(); // 1'048'576
# else
mozilla::MakePowerOfTwo32<1u << 17>(); // 131'072 entries = 1MB
mozilla::MakePowerOfTwo32<1u << 17>(); // 131'072
# endif
// Startup profiling usually need to capture more data, especially on slow
// systems.
static constexpr mozilla::PowerOfTwo32 PROFILER_DEFAULT_STARTUP_ENTRIES =
# if !defined(ARCH_ARMV6)
mozilla::MakePowerOfTwo32<1u << 22>(); // 4'194'304 entries = 32MB
mozilla::MakePowerOfTwo32<1u << 22>(); // 4'194'304
# else
mozilla::MakePowerOfTwo32<1u << 17>(); // 131'072 entries = 1MB
mozilla::MakePowerOfTwo32<1u << 17>(); // 131'072
# endif
# define PROFILER_DEFAULT_DURATION 20
@ -294,8 +284,8 @@ void profiler_shutdown();
// selected options. Stops and restarts the profiler if it is already active.
// After starting the profiler is "active". The samples will be recorded in a
// circular buffer.
// "aCapacity" is the maximum number of 8-bytes entries in the profiler's
// circular buffer.
// "aCapacity" is the maximum number of entries in the profiler's circular
// buffer.
// "aInterval" the sampling interval, measured in millseconds.
// "aFeatures" is the feature set. Features unsupported by this
// platform/configuration are ignored.
@ -434,19 +424,6 @@ inline bool profiler_is_active() {
return mozilla::profiler::detail::RacyFeatures::IsActive();
}
// Same as profiler_is_active(), but with the same extra checks that determine
// if the profiler would currently store markers. So this should be used before
// doing some potentially-expensive work that's used in a marker. E.g.:
//
// if (profiler_can_accept_markers()) {
// ExpensiveMarkerPayload expensivePayload = CreateExpensivePayload();
// BASE_PROFILER_ADD_MARKER_WITH_PAYLOAD(name, OTHER, expensivePayload);
// }
inline bool profiler_can_accept_markers() {
return mozilla::profiler::detail::RacyFeatures::
IsActiveAndUnpausedWithoutPrivacy();
}
// Is the profiler active, and is the current thread being profiled?
// (Same caveats and recommented usage as profiler_is_active().)
inline bool profiler_thread_is_being_profiled() {
@ -564,7 +541,7 @@ struct ProfilerBufferInfo {
uint64_t mRangeStart;
// Index of the newest entry.
uint64_t mRangeEnd;
// Buffer capacity in number of 8-byte entries.
// Buffer capacity in number of entries.
uint32_t mEntryCount;
// Sampling stats: Interval between successive samplings.
ProfilerStats mIntervalsNs;
@ -714,19 +691,18 @@ void profiler_add_marker(const char* aMarkerName,
// the argument list used to construct that `PayloadType`. E.g.:
// `PROFILER_ADD_MARKER_WITH_PAYLOAD("Load", DOM, TextMarkerPayload,
// ("text", start, end, ds, dsh))`
# define PROFILER_ADD_MARKER_WITH_PAYLOAD( \
markerName, categoryPair, PayloadType, parenthesizedPayloadArgs) \
do { \
AUTO_PROFILER_STATS(add_marker_with_##PayloadType); \
::profiler_add_marker(markerName, \
::JS::ProfilingCategoryPair::categoryPair, \
PayloadType parenthesizedPayloadArgs); \
# define PROFILER_ADD_MARKER_WITH_PAYLOAD( \
markerName, categoryPair, PayloadType, parenthesizedPayloadArgs) \
do { \
AUTO_PROFILER_STATS(add_marker_with_##PayloadType); \
::profiler_add_marker( \
markerName, ::JS::ProfilingCategoryPair::categoryPair, \
::mozilla::MakeUnique<PayloadType> parenthesizedPayloadArgs); \
} while (false)
void profiler_add_marker(const char* aMarkerName,
JS::ProfilingCategoryPair aCategoryPair,
const ProfilerMarkerPayload& aPayload);
mozilla::UniquePtr<ProfilerMarkerPayload> aPayload);
void profiler_add_js_marker(const char* aMarkerName);
void profiler_add_js_allocation_marker(JS::RecordAllocationInfo&& info);

View File

@ -7,9 +7,7 @@
#ifndef ProfilerMarkerPayload_h
#define ProfilerMarkerPayload_h
#include "mozilla/Atomics.h"
#include "mozilla/Attributes.h"
#include "mozilla/BlocksRingBuffer.h"
#include "mozilla/Maybe.h"
#include "mozilla/RefPtr.h"
#include "mozilla/TimeStamp.h"
@ -48,144 +46,47 @@ class ProfilerMarkerPayload {
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mCommonProps{mozilla::TimeStamp{}, mozilla::TimeStamp{},
std::move(aStack), std::move(aDocShellId),
std::move(aDocShellHistoryId)} {}
: mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
ProfilerMarkerPayload(
const mozilla::TimeStamp& aStartTime, const mozilla::TimeStamp& aEndTime,
const mozilla::Maybe<nsID>& aDocShellId = mozilla::Nothing(),
const mozilla::Maybe<uint32_t>& aDocShellHistoryId = mozilla::Nothing(),
UniqueProfilerBacktrace aStack = nullptr)
: mCommonProps{aStartTime, aEndTime, std::move(aStack),
std::move(aDocShellId), std::move(aDocShellHistoryId)} {}
: mStartTime(aStartTime),
mEndTime(aEndTime),
mStack(std::move(aStack)),
mDocShellId(aDocShellId),
mDocShellHistoryId(aDocShellHistoryId) {}
virtual ~ProfilerMarkerPayload() {}
// Compute the number of bytes needed to serialize the `DeserializerTag` and
// payload, including in the no-payload (nullptr) case.
static mozilla::BlocksRingBuffer::Length TagAndSerializationBytes(
const ProfilerMarkerPayload* aPayload) {
if (!aPayload) {
return sizeof(DeserializerTag);
}
return aPayload->TagAndSerializationBytes();
}
// Serialize the payload into an EntryWriter, including in the no-payload
// (nullptr) case. Must be of the exact size given by
// `TagAndSerializationBytes(aPayload)`.
static void TagAndSerialize(
const ProfilerMarkerPayload* aPayload,
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) {
if (!aPayload) {
aEntryWriter.WriteObject(DeserializerTag(0));
return;
}
aPayload->SerializeTagAndPayload(aEntryWriter);
}
// Deserialize a payload from an EntryReader, including in the no-payload
// (nullptr) case.
static mozilla::UniquePtr<ProfilerMarkerPayload> DeserializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryReader& aER) {
const auto tag = aER.ReadObject<DeserializerTag>();
Deserializer deserializer = DeserializerForTag(tag);
return deserializer(aER);
}
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const = 0;
UniqueStacks& aUniqueStacks) = 0;
mozilla::TimeStamp GetStartTime() const { return mCommonProps.mStartTime; }
mozilla::TimeStamp GetStartTime() const { return mStartTime; }
protected:
// A `Deserializer` is a free function that can read a serialized payload from
// an `EntryReader` and return a reconstructed `ProfilerMarkerPayload`
// sub-object (may be null if there was no payload).
typedef mozilla::UniquePtr<ProfilerMarkerPayload> (*Deserializer)(
mozilla::BlocksRingBuffer::EntryReader&);
// A `DeserializerTag` will be added before the payload, to help select the
// correct deserializer when reading back the payload.
using DeserializerTag = unsigned char;
// This needs to be big enough to handle all possible sub-types of
// ProfilerMarkerPayload.
static constexpr DeserializerTag DeserializerMax = 32;
// We need an atomic type that can hold a `DeserializerTag`. (Atomic doesn't
// work with too-small types.)
using DeserializerTagAtomic = int;
// Number of currently-registered deserializers.
static mozilla::Atomic<DeserializerTagAtomic, mozilla::ReleaseAcquire,
mozilla::recordreplay::Behavior::DontPreserve>
sDeserializerCount;
// List of currently-registered deserializers.
// sDeserializers[0] is a no-payload deserializer.
static Deserializer sDeserializers[DeserializerMax];
// Get the `DeserializerTag` for a `Deserializer` (which gets registered on
// the first call.) Tag 0 means no payload; a null `aDeserializer` gives that
// 0 tag.
static DeserializerTag TagForDeserializer(Deserializer aDeserializer);
// Get the `Deserializer` for a given `DeserializerTag`.
// Tag 0 is reserved as no-payload deserializer (which returns nullptr).
static Deserializer DeserializerForTag(DeserializerTag aTag);
struct CommonProps {
mozilla::TimeStamp mStartTime;
mozilla::TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
mozilla::Maybe<nsID> mDocShellId;
mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
// Deserializers can use this base constructor.
explicit ProfilerMarkerPayload(CommonProps&& aCommonProps)
: mCommonProps(std::move(aCommonProps)) {}
// Serialization/deserialization of common props in ProfilerMarkerPayload.
mozilla::BlocksRingBuffer::Length CommonPropsTagAndSerializationBytes() const;
void SerializeTagAndCommonProps(
DeserializerTag aDeserializerTag,
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const;
static CommonProps DeserializeCommonProps(
mozilla::BlocksRingBuffer::EntryReader& aEntryReader);
void StreamType(const char* aMarkerType, SpliceableJSONWriter& aWriter) const;
void StreamType(const char* aMarkerType, SpliceableJSONWriter& aWriter);
void StreamCommonProps(const char* aMarkerType, SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aProcessStartTime,
UniqueStacks& aUniqueStacks) const;
UniqueStacks& aUniqueStacks);
private:
// Compute the number of bytes needed to serialize payload in
// `SerializeTagAndPayload` below.
virtual mozilla::BlocksRingBuffer::Length TagAndSerializationBytes()
const = 0;
// Serialize the payload into an EntryWriter.
// Must be of the exact size given by `TagAndSerializationBytes()`.
virtual void SerializeTagAndPayload(
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const = 0;
CommonProps mCommonProps;
mozilla::TimeStamp mStartTime;
mozilla::TimeStamp mEndTime;
UniqueProfilerBacktrace mStack;
mozilla::Maybe<nsID> mDocShellId;
mozilla::Maybe<uint32_t> mDocShellHistoryId;
};
#define DECL_STREAM_PAYLOAD \
void StreamPayload(SpliceableJSONWriter& aWriter, \
const mozilla::TimeStamp& aProcessStartTime, \
UniqueStacks& aUniqueStacks) const override; \
static mozilla::UniquePtr<ProfilerMarkerPayload> Deserialize( \
mozilla::BlocksRingBuffer::EntryReader& aEntryReader); \
mozilla::BlocksRingBuffer::Length TagAndSerializationBytes() const override; \
void SerializeTagAndPayload( \
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const override;
#define DECL_STREAM_PAYLOAD \
virtual void StreamPayload(SpliceableJSONWriter& aWriter, \
const mozilla::TimeStamp& aProcessStartTime, \
UniqueStacks& aUniqueStacks) override;
// TODO: Increase the coverage of tracing markers that include DocShell
// information
@ -203,18 +104,6 @@ class TracingMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
protected:
TracingMarkerPayload(CommonProps&& aCommonProps, const char* aCategory,
TracingKind aKind)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mCategory(aCategory),
mKind(aKind) {}
// May be used by derived classes.
void SerializeTagAndPayload(
DeserializerTag aDeserializerTag,
mozilla::BlocksRingBuffer::EntryWriter& aEntryWriter) const;
private:
const char* mCategory;
TracingKind mKind;
@ -238,14 +127,6 @@ class FileIOMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
FileIOMarkerPayload(CommonProps&& aCommonProps, const char* aSource,
mozilla::UniqueFreePtr<char>&& aOperation,
mozilla::UniqueFreePtr<char>&& aFilename)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mSource(aSource),
mOperation(std::move(aOperation)),
mFilename(std::move(aFilename)) {}
const char* mSource;
mozilla::UniqueFreePtr<char> mOperation;
mozilla::UniqueFreePtr<char> mFilename;
@ -265,13 +146,6 @@ class DOMEventMarkerPayload : public TracingMarkerPayload {
DECL_STREAM_PAYLOAD
private:
DOMEventMarkerPayload(CommonProps&& aCommonProps, const char* aCategory,
TracingKind aKind, mozilla::TimeStamp aTimeStamp,
nsString aEventType)
: TracingMarkerPayload(std::move(aCommonProps), aCategory, aKind),
mTimeStamp(aTimeStamp),
mEventType(aEventType) {}
mozilla::TimeStamp mTimeStamp;
nsString mEventType;
};
@ -293,18 +167,6 @@ class PrefMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
PrefMarkerPayload(CommonProps&& aCommonProps,
mozilla::TimeStamp aPrefAccessTime, nsCString&& aPrefName,
mozilla::Maybe<mozilla::PrefValueKind>&& aPrefKind,
mozilla::Maybe<mozilla::PrefType>&& aPrefType,
nsCString&& aPrefValue)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mPrefAccessTime(aPrefAccessTime),
mPrefName(aPrefName),
mPrefKind(aPrefKind),
mPrefType(aPrefType),
mPrefValue(aPrefValue) {}
mozilla::TimeStamp mPrefAccessTime;
nsCString mPrefName;
// Nothing means this is a shared preference. Something, on the other hand,
@ -345,16 +207,6 @@ class UserTimingMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
UserTimingMarkerPayload(CommonProps&& aCommonProps, const char* aEntryType,
nsString&& aName,
mozilla::Maybe<nsString>&& aStartMark,
mozilla::Maybe<nsString>&& aEndMark)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mEntryType(aEntryType),
mName(std::move(aName)),
mStartMark(std::move(aStartMark)),
mEndMark(std::move(aEndMark)) {}
// Either "mark" or "measure".
const char* mEntryType;
nsString mName;
@ -376,13 +228,6 @@ class LayerTranslationMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
LayerTranslationMarkerPayload(CommonProps&& aCommonProps,
mozilla::layers::Layer* aLayer,
mozilla::gfx::Point aPoint)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mLayer(aLayer),
mPoint(aPoint) {}
mozilla::layers::Layer* mLayer;
mozilla::gfx::Point mPoint;
};
@ -396,10 +241,6 @@ class VsyncMarkerPayload : public ProfilerMarkerPayload {
: ProfilerMarkerPayload(aVsyncTimestamp, aVsyncTimestamp) {}
DECL_STREAM_PAYLOAD
private:
explicit VsyncMarkerPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
};
class NetworkMarkerPayload : public ProfilerMarkerPayload {
@ -431,22 +272,6 @@ class NetworkMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
NetworkMarkerPayload(CommonProps&& aCommonProps, int64_t aID,
mozilla::UniqueFreePtr<char>&& aURI,
mozilla::UniqueFreePtr<char>&& aRedirectURI,
NetworkLoadType aType, int32_t aPri, int64_t aCount,
mozilla::net::TimingStruct aTimings,
mozilla::net::CacheDisposition aCacheDisposition)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mID(aID),
mURI(std::move(aURI)),
mRedirectURI(std::move(aRedirectURI)),
mType(aType),
mPri(aPri),
mCount(aCount),
mTimings(aTimings),
mCacheDisposition(aCacheDisposition) {}
int64_t mID;
mozilla::UniqueFreePtr<char> mURI;
mozilla::UniqueFreePtr<char> mRedirectURI;
@ -471,14 +296,6 @@ class ScreenshotPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
ScreenshotPayload(CommonProps&& aCommonProps, nsCString&& aScreenshotDataURL,
mozilla::gfx::IntSize aWindowSize,
uintptr_t aWindowIdentifier)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mScreenshotDataURL(std::move(aScreenshotDataURL)),
mWindowSize(aWindowSize),
mWindowIdentifier(aWindowIdentifier) {}
nsCString mScreenshotDataURL;
mozilla::gfx::IntSize mWindowSize;
uintptr_t mWindowIdentifier;
@ -495,11 +312,6 @@ class GCSliceMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
GCSliceMarkerPayload(CommonProps&& aCommonProps,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mTimingJSON(std::move(aTimingJSON)) {}
JS::UniqueChars mTimingJSON;
};
@ -514,11 +326,6 @@ class GCMajorMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
GCMajorMarkerPayload(CommonProps&& aCommonProps,
JS::UniqueChars&& aTimingJSON)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mTimingJSON(std::move(aTimingJSON)) {}
JS::UniqueChars mTimingJSON;
};
@ -533,11 +340,6 @@ class GCMinorMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
GCMinorMarkerPayload(CommonProps&& aCommonProps,
JS::UniqueChars&& aTimingData)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mTimingData(std::move(aTimingData)) {}
JS::UniqueChars mTimingData;
};
@ -549,8 +351,6 @@ class HangMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
explicit HangMarkerPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
};
class StyleMarkerPayload : public ProfilerMarkerPayload {
@ -568,10 +368,6 @@ class StyleMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
StyleMarkerPayload(CommonProps&& aCommonProps,
mozilla::ServoTraversalStatistics aStats)
: ProfilerMarkerPayload(std::move(aCommonProps)), mStats(aStats) {}
mozilla::ServoTraversalStatistics mStats;
};
@ -582,10 +378,6 @@ class LongTaskMarkerPayload : public ProfilerMarkerPayload {
: ProfilerMarkerPayload(aStartTime, aEndTime) {}
DECL_STREAM_PAYLOAD
private:
explicit LongTaskMarkerPayload(CommonProps&& aCommonProps)
: ProfilerMarkerPayload(std::move(aCommonProps)) {}
};
class TextMarkerPayload : public ProfilerMarkerPayload {
@ -620,10 +412,6 @@ class TextMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
TextMarkerPayload(CommonProps&& aCommonProps, nsCString&& aText)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mText(std::move(aText)) {}
nsCString mText;
};
@ -638,12 +426,6 @@ class LogMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
LogMarkerPayload(CommonProps&& aCommonProps, nsAutoCStringN<32>&& aModule,
nsCString&& aText)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mModule(std::move(aModule)),
mText(std::move(aText)) {}
nsAutoCStringN<32> mModule; // longest known LazyLogModule name is ~24
nsCString mText;
};
@ -651,7 +433,7 @@ class LogMarkerPayload : public ProfilerMarkerPayload {
class JsAllocationMarkerPayload : public ProfilerMarkerPayload {
public:
JsAllocationMarkerPayload(const mozilla::TimeStamp& aStartTime,
JS::RecordAllocationInfo&& aInfo,
const JS::RecordAllocationInfo& aInfo,
UniqueProfilerBacktrace aStack)
: ProfilerMarkerPayload(aStartTime, aStartTime, mozilla::Nothing(),
mozilla::Nothing(), std::move(aStack)),
@ -670,20 +452,6 @@ class JsAllocationMarkerPayload : public ProfilerMarkerPayload {
DECL_STREAM_PAYLOAD
private:
JsAllocationMarkerPayload(
CommonProps&& aCommonProps,
mozilla::UniqueFreePtr<const char16_t>&& aTypeName,
mozilla::UniqueFreePtr<const char>&& aClassName,
mozilla::UniqueFreePtr<const char16_t>&& aDescriptiveTypeName,
const char* aCoarseType, uint64_t aSize, bool aInNursery)
: ProfilerMarkerPayload(std::move(aCommonProps)),
mTypeName(std::move(aTypeName)),
mClassName(std::move(aClassName)),
mDescriptiveTypeName(std::move(aDescriptiveTypeName)),
mCoarseType(aCoarseType),
mSize(aSize),
mInNursery(aInNursery) {}
mozilla::UniqueFreePtr<const char16_t> mTypeName;
mozilla::UniqueFreePtr<const char> mClassName;
mozilla::UniqueFreePtr<const char16_t> mDescriptiveTypeName;
@ -697,49 +465,4 @@ class JsAllocationMarkerPayload : public ProfilerMarkerPayload {
bool mInNursery;
};
namespace mozilla {
// Serialize a pointed-at ProfilerMarkerPayload, may be null when there are no
// payloads.
template <>
struct BlocksRingBuffer::Serializer<const ProfilerMarkerPayload*> {
static Length Bytes(const ProfilerMarkerPayload* aPayload) {
return ProfilerMarkerPayload::TagAndSerializationBytes(aPayload);
}
static void Write(EntryWriter& aEW, const ProfilerMarkerPayload* aPayload) {
ProfilerMarkerPayload::TagAndSerialize(aPayload, aEW);
}
};
// Serialize a pointed-at ProfilerMarkerPayload, may be null when there are no
// payloads.
template <>
struct BlocksRingBuffer::Serializer<UniquePtr<ProfilerMarkerPayload>> {
static Length Bytes(const UniquePtr<ProfilerMarkerPayload>& aPayload) {
return ProfilerMarkerPayload::TagAndSerializationBytes(aPayload.get());
}
static void Write(EntryWriter& aEW,
const UniquePtr<ProfilerMarkerPayload>& aPayload) {
ProfilerMarkerPayload::TagAndSerialize(aPayload.get(), aEW);
}
};
// Deserialize a ProfilerMarkerPayload into a UniquePtr, may be null if there
// are no payloads.
template <>
struct BlocksRingBuffer::Deserializer<UniquePtr<ProfilerMarkerPayload>> {
static void ReadInto(EntryReader& aER,
UniquePtr<ProfilerMarkerPayload>& aPayload) {
aPayload = Read(aER);
}
static UniquePtr<ProfilerMarkerPayload> Read(EntryReader& aER) {
return ProfilerMarkerPayload::DeserializeTagAndPayload(aER);
}
};
} // namespace mozilla
#endif // ProfilerMarkerPayload_h

View File

@ -40,8 +40,7 @@ TEST(BaseProfiler, BlocksRingBuffer)
for (size_t i = 0; i < MBSize * 3; ++i) {
buffer[i] = uint8_t('A' + i);
}
BlocksRingBuffer rb(BlocksRingBuffer::ThreadSafety::WithMutex,
&buffer[MBSize], MakePowerOfTwo32<MBSize>());
BlocksRingBuffer rb(&buffer[MBSize], MakePowerOfTwo32<MBSize>());
{
nsCString cs(NS_LITERAL_CSTRING("nsCString"));
@ -270,14 +269,11 @@ TEST(GeckoProfiler, EnsureStarted)
{
// Active -> Active with same settings
Maybe<ProfilerBufferInfo> info0 = profiler_get_buffer_info();
ASSERT_TRUE(info0->mRangeEnd > 0);
// First, write some samples into the buffer.
PR_Sleep(PR_MillisecondsToInterval(500));
Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info();
ASSERT_TRUE(info1->mRangeEnd > info0->mRangeEnd);
ASSERT_TRUE(info1->mRangeEnd > 0);
// Call profiler_ensure_started with the same settings as before.
// This operation must not clear our buffer!
@ -289,14 +285,10 @@ TEST(GeckoProfiler, EnsureStarted)
PROFILER_DEFAULT_ENTRIES.Value(), PROFILER_DEFAULT_INTERVAL, features,
filters, MOZ_ARRAY_LENGTH(filters), Some(PROFILER_DEFAULT_DURATION));
// Check that our position in the buffer stayed the same or advanced, but
// not by much, and the range-start after profiler_ensure_started shouldn't
// have passed the range-end before.
// Check that our position in the buffer stayed the same or advanced.
// In particular, it shouldn't have reverted to the start.
Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info();
ASSERT_TRUE(info2->mRangeEnd >= info1->mRangeEnd);
ASSERT_TRUE(info2->mRangeEnd - info1->mRangeEnd <
info1->mRangeEnd - info0->mRangeEnd);
ASSERT_TRUE(info2->mRangeStart < info1->mRangeEnd);
}
{
@ -316,10 +308,8 @@ TEST(GeckoProfiler, EnsureStarted)
PROFILER_DEFAULT_INTERVAL, differentFeatures, filters,
MOZ_ARRAY_LENGTH(filters));
// Check the the buffer was cleared, so its range-start should be at/after
// its range-end before.
Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info();
ASSERT_TRUE(info2->mRangeStart >= info1->mRangeEnd);
ASSERT_TRUE(info2->mRangeEnd < info1->mRangeEnd);
}
{
@ -458,13 +448,11 @@ TEST(GeckoProfiler, Pause)
const char* filters[] = {"GeckoMain"};
ASSERT_TRUE(!profiler_is_paused());
ASSERT_TRUE(!profiler_can_accept_markers());
profiler_start(PROFILER_DEFAULT_ENTRIES, PROFILER_DEFAULT_INTERVAL, features,
filters, MOZ_ARRAY_LENGTH(filters));
ASSERT_TRUE(!profiler_is_paused());
ASSERT_TRUE(profiler_can_accept_markers());
// Check that we are writing samples while not paused.
Maybe<ProfilerBufferInfo> info1 = profiler_get_buffer_info();
@ -472,16 +460,9 @@ TEST(GeckoProfiler, Pause)
Maybe<ProfilerBufferInfo> info2 = profiler_get_buffer_info();
ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd);
// Check that we are writing markers while not paused.
info1 = profiler_get_buffer_info();
PROFILER_ADD_MARKER("Not paused", OTHER);
info2 = profiler_get_buffer_info();
ASSERT_TRUE(info1->mRangeEnd != info2->mRangeEnd);
profiler_pause();
ASSERT_TRUE(profiler_is_paused());
ASSERT_TRUE(!profiler_can_accept_markers());
// Check that we are not writing samples while paused.
info1 = profiler_get_buffer_info();
@ -489,89 +470,48 @@ TEST(GeckoProfiler, Pause)
info2 = profiler_get_buffer_info();
ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd);
// Check that we are now writing markers while paused.
info1 = profiler_get_buffer_info();
PROFILER_ADD_MARKER("Paused", OTHER);
info2 = profiler_get_buffer_info();
ASSERT_TRUE(info1->mRangeEnd == info2->mRangeEnd);
profiler_resume();
ASSERT_TRUE(!profiler_is_paused());
ASSERT_TRUE(profiler_can_accept_markers());
profiler_stop();
ASSERT_TRUE(!profiler_is_paused());
ASSERT_TRUE(!profiler_can_accept_markers());
}
// A class that keeps track of how many instances have been created, streamed,
// and destroyed.
class GTestMarkerPayload : public ProfilerMarkerPayload {
public:
explicit GTestMarkerPayload(int aN) : mN(aN) { ++sNumCreated; }
explicit GTestMarkerPayload(int aN) : mN(aN) { sNumCreated++; }
virtual ~GTestMarkerPayload() { ++sNumDestroyed; }
virtual ~GTestMarkerPayload() { sNumDestroyed++; }
DECL_STREAM_PAYLOAD
private:
GTestMarkerPayload(CommonProps&& aCommonProps, int aN)
: ProfilerMarkerPayload(std::move(aCommonProps)), mN(aN) {
++sNumDeserialized;
virtual void StreamPayload(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aStartTime,
UniqueStacks& aUniqueStacks) override {
StreamCommonProps("gtest", aWriter, aStartTime, aUniqueStacks);
char buf[64];
SprintfLiteral(buf, "gtest-%d", mN);
aWriter.IntProperty(buf, mN);
sNumStreamed++;
}
private:
int mN;
public:
// The number of GTestMarkerPayload instances that have been created,
// streamed, and destroyed.
static int sNumCreated;
static int sNumSerialized;
static int sNumDeserialized;
static int sNumStreamed;
static int sNumDestroyed;
};
int GTestMarkerPayload::sNumCreated = 0;
int GTestMarkerPayload::sNumSerialized = 0;
int GTestMarkerPayload::sNumDeserialized = 0;
int GTestMarkerPayload::sNumStreamed = 0;
int GTestMarkerPayload::sNumDestroyed = 0;
BlocksRingBuffer::Length GTestMarkerPayload::TagAndSerializationBytes() const {
return CommonPropsTagAndSerializationBytes() + BlocksRingBuffer::SumBytes(mN);
}
void GTestMarkerPayload::SerializeTagAndPayload(
BlocksRingBuffer::EntryWriter& aEntryWriter) const {
static const DeserializerTag tag = TagForDeserializer(Deserialize);
SerializeTagAndCommonProps(tag, aEntryWriter);
aEntryWriter.WriteObject(mN);
++sNumSerialized;
}
// static
UniquePtr<ProfilerMarkerPayload> GTestMarkerPayload::Deserialize(
BlocksRingBuffer::EntryReader& aEntryReader) {
ProfilerMarkerPayload::CommonProps props =
DeserializeCommonProps(aEntryReader);
auto n = aEntryReader.ReadObject<int>();
return UniquePtr<ProfilerMarkerPayload>(
new GTestMarkerPayload(std::move(props), n));
}
void GTestMarkerPayload::StreamPayload(SpliceableJSONWriter& aWriter,
const mozilla::TimeStamp& aStartTime,
UniqueStacks& aUniqueStacks) const {
StreamCommonProps("gtest", aWriter, aStartTime, aUniqueStacks);
char buf[64];
SprintfLiteral(buf, "gtest-%d", mN);
aWriter.IntProperty(buf, mN);
++sNumStreamed;
}
TEST(GeckoProfiler, Markers)
{
uint32_t features = ProfilerFeature::StackWalk;
@ -602,13 +542,6 @@ TEST(GeckoProfiler, Markers)
for (int i = 0; i < 10; i++) {
PROFILER_ADD_MARKER_WITH_PAYLOAD("M5", OTHER, GTestMarkerPayload, (i));
}
// The GTestMarkerPayloads should have been created, serialized, and
// destroyed.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 10);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 10);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 0);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 0);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 10);
// Create two strings: one that is the maximum allowed length, and one that
// is one char longer.
@ -637,13 +570,11 @@ TEST(GeckoProfiler, Markers)
UniquePtr<char[]> profile = w.WriteFunc()->CopyData();
// The GTestMarkerPayloads should have been deserialized, streamed, and
// The GTestMarkerPayloads should have been created and streamed, but not yet
// destroyed.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 10 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 10 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 0 + 10);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 0 + 10);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 10 + 10);
ASSERT_TRUE(GTestMarkerPayload::sNumCreated == 10);
ASSERT_TRUE(GTestMarkerPayload::sNumStreamed == 10);
ASSERT_TRUE(GTestMarkerPayload::sNumDestroyed == 0);
for (int i = 0; i < 10; i++) {
char buf[64];
SprintfLiteral(buf, "\"gtest-%d\"", i);
@ -692,12 +623,8 @@ TEST(GeckoProfiler, Markers)
profiler_stop();
// Nothing more should have happened to the GTestMarkerPayloads.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 10 + 0 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 10 + 0 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 0 + 10 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 0 + 10 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 10 + 10 + 0);
// The GTestMarkerPayloads should have been destroyed.
ASSERT_TRUE(GTestMarkerPayload::sNumDestroyed == 10);
for (int i = 0; i < 10; i++) {
PROFILER_ADD_MARKER_WITH_PAYLOAD("M5", OTHER, GTestMarkerPayload, (i));
@ -711,13 +638,10 @@ TEST(GeckoProfiler, Markers)
profiler_stop();
// The second set of GTestMarkerPayloads should not have been serialized or
// streamed.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 10 + 0 + 0 + 10);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 10 + 0 + 0 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 0 + 10 + 0 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 0 + 10 + 0 + 0);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 10 + 10 + 0 + 10);
// The second set of GTestMarkerPayloads should not have been streamed.
ASSERT_TRUE(GTestMarkerPayload::sNumCreated == 20);
ASSERT_TRUE(GTestMarkerPayload::sNumStreamed == 10);
ASSERT_TRUE(GTestMarkerPayload::sNumDestroyed == 20);
}
TEST(GeckoProfiler, DurationLimit)
@ -730,8 +654,6 @@ TEST(GeckoProfiler, DurationLimit)
// Clear up the counters after the last test.
GTestMarkerPayload::sNumCreated = 0;
GTestMarkerPayload::sNumSerialized = 0;
GTestMarkerPayload::sNumDeserialized = 0;
GTestMarkerPayload::sNumStreamed = 0;
GTestMarkerPayload::sNumDestroyed = 0;
@ -743,13 +665,10 @@ TEST(GeckoProfiler, DurationLimit)
SpliceableChunkedJSONWriter w;
ASSERT_TRUE(profiler_stream_json_for_this_process(w));
// Both markers created, serialized, destroyed; Only the first marker should
// have been deserialized, streamed, and destroyed again.
ASSERT_EQ(GTestMarkerPayload::sNumCreated, 2);
ASSERT_EQ(GTestMarkerPayload::sNumSerialized, 2);
ASSERT_EQ(GTestMarkerPayload::sNumDeserialized, 1);
ASSERT_EQ(GTestMarkerPayload::sNumStreamed, 1);
ASSERT_EQ(GTestMarkerPayload::sNumDestroyed, 3);
// The first marker should be destroyed.
ASSERT_TRUE(GTestMarkerPayload::sNumCreated == 2);
ASSERT_TRUE(GTestMarkerPayload::sNumStreamed == 1);
ASSERT_TRUE(GTestMarkerPayload::sNumDestroyed == 1);
}
#define COUNTER_NAME "TestCounter"

View File

@ -14,40 +14,46 @@
// Make sure we can record one entry and read it
TEST(ThreadProfile, InsertOneEntry)
{
mozilla::BlocksRingBuffer blocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithMutex);
auto pb = MakeUnique<ProfileBuffer>(
blocksRingBuffer,
mozilla::PowerOfTwo32(2 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(10));
pb->AddEntry(ProfileBufferEntry::Time(123.1));
ProfileBufferEntry entry = pb->GetEntry(pb->BufferRangeStart());
ASSERT_TRUE(entry.IsTime());
ASSERT_EQ(123.1, entry.GetDouble());
ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).IsTime());
ASSERT_TRUE(pb->GetEntry(pb->mRangeStart).GetDouble() == 123.1);
}
// See if we can insert some entries
TEST(ThreadProfile, InsertEntriesNoWrap)
{
mozilla::BlocksRingBuffer blocksRingBuffer(
BlocksRingBuffer::ThreadSafety::WithMutex);
auto pb = MakeUnique<ProfileBuffer>(
blocksRingBuffer,
mozilla::PowerOfTwo32(100 * (1 + uint32_t(sizeof(ProfileBufferEntry)))));
const int test_size = 50;
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(100));
int test_size = 50;
for (int i = 0; i < test_size; i++) {
pb->AddEntry(ProfileBufferEntry::Time(i));
}
int times = 0;
uint64_t readPos = pb->BufferRangeStart();
while (readPos != pb->BufferRangeEnd()) {
ProfileBufferEntry entry = pb->GetEntry(readPos);
uint64_t readPos = pb->mRangeStart;
while (readPos != pb->mRangeEnd) {
ASSERT_TRUE(pb->GetEntry(readPos).IsTime());
ASSERT_TRUE(pb->GetEntry(readPos).GetDouble() == readPos);
readPos++;
}
}
// See if evicting works as it should in the basic case
TEST(ThreadProfile, InsertEntriesWrap)
{
int entries = 32;
auto pb = MakeUnique<ProfileBuffer>(mozilla::PowerOfTwo32(entries));
ASSERT_TRUE(pb->mRangeStart == 0);
ASSERT_TRUE(pb->mRangeEnd == 0);
int test_size = 43;
for (int i = 0; i < test_size; i++) {
pb->AddEntry(ProfileBufferEntry::Time(i));
}
// We inserted 11 more entries than fit in the buffer, so the first 11 entries
// should have been evicted, and the range start should have increased to 11.
ASSERT_TRUE(pb->mRangeStart == 11);
uint64_t readPos = pb->mRangeStart;
while (readPos != pb->mRangeEnd) {
ASSERT_TRUE(pb->GetEntry(readPos).IsTime());
ASSERT_TRUE(pb->GetEntry(readPos).GetDouble() == readPos);
readPos++;
if (entry.GetKind() == ProfileBufferEntry::Kind::INVALID) {
continue;
}
ASSERT_TRUE(entry.IsTime());
ASSERT_EQ(times, entry.GetDouble());
times++;
}
ASSERT_EQ(test_size, times);
}

View File

@ -406,7 +406,7 @@ class LogModuleManager {
}
#ifdef MOZ_GECKO_PROFILER
if (mAddProfilerMarker && profiler_can_accept_markers()) {
if (mAddProfilerMarker && profiler_is_active()) {
PROFILER_ADD_MARKER_WITH_PAYLOAD("LogMessages", OTHER, LogMarkerPayload,
(aName, buffToWrite, TimeStamp::Now()));
}