Backed out changeset 58dd47c5aa51 (bug 1573111) for build bustages at TestBaseProfiler.cpp:875:51. CLOSED TREE

Brindusan Cristian 2019-08-13 06:21:16 +03:00
parent b2274966d4
commit a8a9f04996
2 changed files with 86 additions and 510 deletions

File 1 of 2: the BlocksRingBuffer header

@@ -18,13 +18,9 @@
 namespace mozilla {
-// Thread-safe Ring buffer that can store blocks of different sizes during
-// defined sessions.
+// Thread-safe Ring buffer that can store blocks of different sizes.
 // Each *block* contains an *entry* and the entry size:
 // [ entry_size | entry ] [ entry_size | entry ] ...
-// *In-session* is a period of time during which `BlocksRingBuffer` allows
-// reading and writing. *Out-of-session*, the `BlocksRingBuffer` object is
-// still valid, but contains no data, and gracefully denies accesses.
 //
 // To write an entry, the buffer reserves a block of sufficient size (to contain
 // user data of predetermined size), writes the entry size, and lets the caller
@@ -130,26 +126,20 @@ class BlocksRingBuffer {
     Index mBlockIndex;
   };
-  // Default constructor starts out-of-session (nothing to read or write).
-  BlocksRingBuffer() = default;
   // Constructors with no entry destructor, the oldest entries will be silently
   // overwritten/destroyed.
   // Create a buffer of the given length.
-  explicit BlocksRingBuffer(PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(aLength))) {}
+  explicit BlocksRingBuffer(PowerOfTwo<Length> aLength) : mBuffer(aLength) {}
   // Take ownership of an existing buffer.
   BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
                    PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(
-            Some(UnderlyingBuffer(std::move(aExistingBuffer), aLength))) {}
+      : mBuffer(std::move(aExistingBuffer), aLength) {}
   // Use an externally-owned buffer.
   BlocksRingBuffer(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
-      : mMaybeUnderlyingBuffer(
-            Some(UnderlyingBuffer(aExternalBuffer, aLength))) {}
+      : mBuffer(aExternalBuffer, aLength) {}
   // Constructors with an entry destructor, which will be called with an
   // `EntryReader` before the oldest entries get overwritten/destroyed.
@@ -161,26 +151,24 @@ class BlocksRingBuffer {
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            aLength, std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
   // Take ownership of an existing buffer.
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
                             PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            std::move(aExistingBuffer), aLength,
-            std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(std::move(aExistingBuffer), aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
   // Use an externally-owned buffer.
   template <typename EntryDestructor>
   explicit BlocksRingBuffer(Buffer::Byte* aExternalBuffer,
                             PowerOfTwo<Length> aLength,
                             EntryDestructor&& aEntryDestructor)
-      : mMaybeUnderlyingBuffer(Some(UnderlyingBuffer(
-            aExternalBuffer, aLength,
-            std::forward<EntryDestructor>(aEntryDestructor)))) {}
+      : mBuffer(aExternalBuffer, aLength),
+        mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
   // Destructor explictly destroys all remaining entries, this may invoke the
   // caller-provided entry destructor.
@@ -192,73 +180,8 @@ class BlocksRingBuffer {
     DestroyAllEntries();
   }
-  // Remove underlying buffer, if any.
-  void Reset() {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-  }
-  // Create a buffer of the given length.
-  void Set(PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(aLength);
-  }
-  // Take ownership of an existing buffer.
-  void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-           PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(std::move(aExistingBuffer), aLength);
-  }
-  // Use an externally-owned buffer.
-  void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(aExternalBuffer, aLength);
-  }
-  // Create a buffer of the given length, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        aLength, std::forward<EntryDestructor>(aEntryDestructor));
-  }
-  // Take ownership of an existing buffer, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-           PowerOfTwo<Length> aLength, EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        std::move(aExistingBuffer), aLength,
-        std::forward<EntryDestructor>(aEntryDestructor));
-  }
-  // Use an externally-owned buffer, with entry destructor.
-  template <typename EntryDestructor>
-  void Set(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength,
-           EntryDestructor&& aEntryDestructor) {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    ResetUnderlyingBuffer();
-    mMaybeUnderlyingBuffer.emplace(
-        aExternalBuffer, aLength,
-        std::forward<EntryDestructor>(aEntryDestructor));
-  }
-  // Buffer length in bytes.
-  Maybe<PowerOfTwo<Length>> BufferLength() const {
-    baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    return mMaybeUnderlyingBuffer.map([](const UnderlyingBuffer& aBuffer) {
-      return aBuffer.mBuffer.BufferLength();
-    });
-    ;
-  }
+  // Buffer length, constant. No need for locking.
+  PowerOfTwo<Length> BufferLength() const { return mBuffer.BufferLength(); }
   // Snapshot of the buffer state.
   struct State {
@@ -277,17 +200,12 @@ class BlocksRingBuffer {
   };
   // Get a snapshot of the current state.
-  // When out-of-session, mFirstReadIndex==mNextWriteIndex, and
-  // mPushedBlockCount==mClearedBlockCount==0.
   // Note that these may change right after this thread-safe call, so they
   // should only be used for statistical purposes.
   State GetState() const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    return {
-        mFirstReadIndex, mNextWriteIndex,
-        mMaybeUnderlyingBuffer ? mMaybeUnderlyingBuffer->mPushedBlockCount : 0,
-        mMaybeUnderlyingBuffer ? mMaybeUnderlyingBuffer->mClearedBlockCount
-                               : 0};
+    return {mFirstReadIndex, mNextWriteIndex, mPushedBlockCount,
+            mClearedBlockCount};
   }
   // Iterator-like class used to read from an entry.
@@ -375,8 +293,7 @@ class BlocksRingBuffer {
     friend class BlocksRingBuffer;
     explicit EntryReader(const BlocksRingBuffer& aRing, BlockIndex aBlockIndex)
-        : BufferReader(aRing.mMaybeUnderlyingBuffer->mBuffer.ReaderAt(
-              Index(aBlockIndex))),
+        : BufferReader(aRing.mBuffer.ReaderAt(Index(aBlockIndex))),
           mRing(aRing),
           mEntryBytes(BufferReader::ReadULEB128<Length>()),
           mEntryStart(CurrentIndex()) {
@@ -438,8 +355,7 @@ class BlocksRingBuffer {
     // Index past the end of this block, which is the start of the next block.
     BlockIndex NextBlockIndex() const {
       MOZ_ASSERT(!IsAtEnd());
-      BufferReader reader =
-          mRing->mMaybeUnderlyingBuffer->mBuffer.ReaderAt(Index(mBlockIndex));
+      BufferReader reader = mRing->mBuffer.ReaderAt(Index(mBlockIndex));
       Length entrySize = reader.ReadULEB128<Length>();
       return BlockIndex(reader.CurrentIndex() + entrySize);
     }
@@ -518,48 +434,40 @@ class BlocksRingBuffer {
     NotNull<const BlocksRingBuffer*> mRing;
   };
-  // Call `aCallback(Maybe<BlocksRingBuffer::Reader>&&)`, and return whatever
-  // `aCallback` returns. `Maybe` may be `Nothing` when out-of-session.
-  // Callback should not store `Reader`, because it may become invalid after
-  // this call.
+  // Call `aCallback(BlocksRingBuffer::Reader)` with temporary Reader, and
+  // return whatever `aCallback` returns.
+  // Callback should not store `Reader`, as it may become invalid after this
+  // call.
   template <typename Callback>
   auto Read(Callback&& aCallback) const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    Maybe<Reader> maybeReader;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
-      maybeReader.emplace(Reader(*this));
-    }
-    return std::forward<Callback>(aCallback)(std::move(maybeReader));
+    return std::forward<Callback>(aCallback)(Reader(*this));
   }
   // Call `aCallback(BlocksRingBuffer::EntryReader&)` on each item.
-  // Callback should not store `EntryReader`, because it may become invalid
-  // after this call.
+  // Callback should not store `EntryReader`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   void ReadEach(Callback&& aCallback) const {
-    Read([&](Maybe<Reader>&& aMaybeReader) {
-      if (MOZ_LIKELY(aMaybeReader)) {
-        std::move(aMaybeReader)->ForEach(aCallback);
-      }
-    });
+    Read([&](const Reader& aReader) { aReader.ForEach(aCallback); });
   }
   // Call `aCallback(Maybe<BlocksRingBuffer::EntryReader>&&)` on the entry at
-  // the given BlockIndex; The `Maybe` will be `Nothing` if out-of-session, or
-  // if that entry doesn't exist anymore, or if we've reached just past the
-  // last entry. Return whatever `aCallback` returns. Callback should not
-  // store `EntryReader`, because it may become invalid after this call.
+  // the given BlockIndex; The `Maybe` will be `Nothing` if that entry doesn't
+  // exist anymore, or if we've reached just past the last entry. Return
+  // whatever `aCallback` returns.
+  // Callback should not store `EntryReader`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto ReadAt(BlockIndex aBlockIndex, Callback&& aCallback) const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
     MOZ_ASSERT(aBlockIndex <= mNextWriteIndex);
-    Maybe<EntryReader> maybeEntryReader;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer) && aBlockIndex >= mFirstReadIndex &&
-        aBlockIndex < mNextWriteIndex) {
+    Maybe<EntryReader> maybeReader;
+    if (aBlockIndex >= mFirstReadIndex && aBlockIndex < mNextWriteIndex) {
       AssertBlockIndexIsValid(aBlockIndex);
-      maybeEntryReader.emplace(ReaderInBlockAt(aBlockIndex));
+      maybeReader.emplace(ReaderInBlockAt(aBlockIndex));
     }
-    return std::forward<Callback>(aCallback)(std::move(maybeEntryReader));
+    return std::forward<Callback>(aCallback)(std::move(maybeReader));
   }
   class EntryReserver;
@@ -659,8 +567,7 @@ class BlocksRingBuffer {
     EntryWriter(BlocksRingBuffer& aRing, BlockIndex aBlockIndex,
                 Length aEntryBytes)
-        : BufferWriter(aRing.mMaybeUnderlyingBuffer->mBuffer.WriterAt(
-              Index(aBlockIndex))),
+        : BufferWriter(aRing.mBuffer.WriterAt(Index(aBlockIndex))),
           mRing(aRing),
           mEntryBytes(aEntryBytes),
           mEntryStart([&]() {
@@ -700,9 +607,7 @@ class BlocksRingBuffer {
       // Don't allow even half of the buffer length. More than that would
       // probably be unreasonable, and much more would risk having an entry
       // wrapping around and overwriting itself!
-      MOZ_RELEASE_ASSERT(
-          aBytes <
-          mRing->mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() / 2);
+      MOZ_RELEASE_ASSERT(aBytes < mRing->BufferLength().Value() / 2);
       // COmpute block size from the requested entry size.
       const Length blockBytes = EntryWriter::BlockSizeForEntrySize(aBytes);
       // We will put this new block at the end of the current buffer.
@@ -711,22 +616,20 @@ class BlocksRingBuffer {
       const Index blockEnd = Index(blockIndex) + blockBytes;
       // ... which is where the following block will go.
       mRing->mNextWriteIndex = BlockIndex(blockEnd);
-      while (
-          blockEnd >
-          Index(mRing->mFirstReadIndex) +
-              mRing->mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value()) {
+      while (blockEnd >
+             Index(mRing->mFirstReadIndex) + mRing->BufferLength().Value()) {
         // About to trample on an old block.
         EntryReader reader = mRing->ReaderInBlockAt(mRing->mFirstReadIndex);
         // Call provided entry destructor for that entry.
-        if (mRing->mMaybeUnderlyingBuffer->mEntryDestructor) {
-          mRing->mMaybeUnderlyingBuffer->mEntryDestructor(reader);
+        if (mRing->mEntryDestructor) {
+          mRing->mEntryDestructor(reader);
         }
-        mRing->mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mRing->mClearedBlockCount += 1;
         MOZ_ASSERT(reader.CurrentIndex() <= Index(reader.NextBlockIndex()));
         // Move the buffer reading start past this cleared block.
         mRing->mFirstReadIndex = reader.NextBlockIndex();
       }
-      mRing->mMaybeUnderlyingBuffer->mPushedBlockCount += 1;
+      mRing->mPushedBlockCount += 1;
       // Finally, let aCallback write into the entry.
       return std::forward<Callback>(aCallback)(
           EntryWriter(*mRing, blockIndex, aBytes));
@@ -784,12 +687,11 @@ class BlocksRingBuffer {
     NotNull<BlocksRingBuffer*> mRing;
   };
-  // Main function to write entries.
-  // Call `aCallback(Maybe<BlocksRingBuffer::EntryReserver>&&)`, and return
-  // whatever `aCallback` returns. `Maybe` may be `Nothing` when out-of-session.
-  // Callback should not store `EntryReserver`, because it may become invalid
-  // after this call. The `EntryReserver` can then be used to reserve one or
-  // more entries; another callback can then fill each.
+  // Add a new entry, call `aCallback` with a temporary EntryReserver (so that
+  // `aCallback` can reserve an entry or just write something), and return
+  // whatever `aCallback` returns.
+  // Callback should not store `EntryReserver`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto Put(Callback&& aCallback) {
     // Implementation note: We are locking during the whole operation (reserving
@@ -802,41 +704,23 @@ class BlocksRingBuffer {
     // is fully written.
     // TODO: Investigate this potential improvement as part of bug 1562604.
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    Maybe<EntryReserver> maybeEntryReserver;
-    if (MOZ_LIKELY(mMaybeUnderlyingBuffer)) {
-      maybeEntryReserver.emplace(EntryReserver(*this));
-    }
-    return std::forward<Callback>(aCallback)(std::move(maybeEntryReserver));
+    return std::forward<Callback>(aCallback)(EntryReserver(*this));
   }
   // Add a new entry of known size, call `aCallback` with a temporary
-  // EntryWriter, and return whatever `aCallback` returns. Callback should not
-  // store `EntryWriter`, as it may become invalid after this thread-safe call.
+  // EntryWriter, and return whatever `aCallback` returns.
+  // Callback should not store `EntryWriter`, as it may become invalid after
+  // this thread-safe call.
   template <typename Callback>
   auto Put(Length aLength, Callback&& aCallback) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        // We are in-session, with an EntryReserver at the ready.
-        // Reserve the requested space, then invoke the callback with the given
-        // EntryWriter inserted into a Maybe.
-        return aER->Reserve(aLength, [&](EntryWriter aEW) {
-          return std::forward<Callback>(aCallback)(Some(std::move(aEW)));
-        });
-      }
-      // Out-of-session, just invoke the callback with Nothing.
-      return std::forward<Callback>(aCallback)(Maybe<EntryWriter>{});
+    return Put([&](EntryReserver aER) {
+      return aER.Reserve(aLength, std::forward<Callback>(aCallback));
     });
   }
   // Add a new entry copied from the given buffer, return block index.
   BlockIndex PutFrom(const void* aSrc, Length aBytes) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        return std::move(aER)->Write(aSrc, aBytes);
-      }
-      // Out-of-session, return "empty" BlockIndex.
-      return BlockIndex{};
-    });
+    return Put([&](EntryReserver aER) { return aER.Write(aSrc, aBytes); });
   }
   // Add a new entry copied from the given object, return block index.
@@ -844,13 +728,7 @@ class BlocksRingBuffer {
   // TODO: Allow more types (follow-up patches in progress, see bug 1562604).
   template <typename T>
   BlockIndex PutObject(const T& aOb) {
-    return Put([&](Maybe<EntryReserver>&& aER) {
-      if (MOZ_LIKELY(aER)) {
-        return std::move(aER)->WriteObject<T>(aOb);
-      }
-      // Out-of-session, return "empty" BlockIndex.
-      return BlockIndex{};
-    });
+    return Put([&](EntryReserver aER) { return aER.WriteObject<T>(aOb); });
   }
   // Clear all entries, calling entry destructor (if any), and move read index
@@ -865,9 +743,6 @@ class BlocksRingBuffer {
   // cannot be read anymore.
   void ClearBefore(BlockIndex aBlockIndex) {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
     // Don't accept a not-yet-written index. One-past-the-end is ok.
     MOZ_ASSERT(aBlockIndex <= mNextWriteIndex);
     if (aBlockIndex <= mFirstReadIndex) {
@@ -881,15 +756,15 @@ class BlocksRingBuffer {
     }
     // Otherwise we need to clear a subset of entries.
     AssertBlockIndexIsValid(aBlockIndex);
-    if (mMaybeUnderlyingBuffer->mEntryDestructor) {
+    if (mEntryDestructor) {
       // We have an entry destructor, destroy entries before aBlockIndex.
       Reader reader(*this);
       BlockIterator it = reader.begin();
       for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
         MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
         EntryReader reader = *it;
-        mMaybeUnderlyingBuffer->mEntryDestructor(reader);
-        mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mEntryDestructor(reader);
+        mClearedBlockCount += 1;
       }
       MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
     } else {
@@ -898,7 +773,7 @@ class BlocksRingBuffer {
       BlockIterator it = reader.begin();
       for (; it.CurrentBlockIndex() < aBlockIndex; ++it) {
         MOZ_ASSERT(it.CurrentBlockIndex() < reader.end().CurrentBlockIndex());
-        mMaybeUnderlyingBuffer->mClearedBlockCount += 1;
+        mClearedBlockCount += 1;
       }
       MOZ_ASSERT(it.CurrentBlockIndex() == aBlockIndex);
     }
@@ -910,18 +785,12 @@ class BlocksRingBuffer {
 #ifdef DEBUG
   void Dump() const {
     baseprofiler::detail::BaseProfilerAutoLock lock(mMutex);
-    if (!mMaybeUnderlyingBuffer) {
-      printf("empty BlocksRingBuffer\n");
-      return;
-    }
     using ULL = unsigned long long;
     printf("start=%llu (%llu) end=%llu (%llu) - ", ULL(Index(mFirstReadIndex)),
-           ULL(Index(mFirstReadIndex) &
-               (mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() - 1)),
+           ULL(Index(mFirstReadIndex) & (BufferLength().Value() - 1)),
            ULL(Index(mNextWriteIndex)),
-           ULL(Index(mNextWriteIndex) &
-               (mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() - 1)));
-    mMaybeUnderlyingBuffer->mBuffer.Dump();
+           ULL(Index(mNextWriteIndex) & (BufferLength().Value() - 1)));
+    mBuffer.Dump();
   }
 #endif  // DEBUG
@@ -939,13 +808,11 @@ class BlocksRingBuffer {
 #  if 1
     // Quick check that this looks like a valid block start.
    // Read the entry size at the start of the block.
-    BufferReader br =
-        mMaybeUnderlyingBuffer->mBuffer.ReaderAt(Index(aBlockIndex));
+    BufferReader br = mBuffer.ReaderAt(Index(aBlockIndex));
     Length entryBytes = br.ReadULEB128<Length>();
     // It should be between 1 and half of the buffer length max.
     MOZ_ASSERT(entryBytes > 0);
-    MOZ_ASSERT(entryBytes <
-               mMaybeUnderlyingBuffer->mBuffer.BufferLength().Value() / 2);
+    MOZ_ASSERT(entryBytes < BufferLength().Value() / 2);
     // The end of the block should be inside the live buffer range.
     MOZ_ASSERT(Index(aBlockIndex) + BufferReader::ULEB128Size(entryBytes) +
                    entryBytes <=
@@ -977,26 +844,18 @@ class BlocksRingBuffer {
   // destructor or ClearAllEntries.
   void DestroyAllEntries() {
     mMutex.AssertCurrentThreadOwns();
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
-    if (mMaybeUnderlyingBuffer->mEntryDestructor) {
+    if (mEntryDestructor) {
       // We have an entry destructor, destroy all the things!
-      Reader(*this).ForEach([this](EntryReader& aReader) {
-        mMaybeUnderlyingBuffer->mEntryDestructor(aReader);
-      });
+      Reader(*this).ForEach(
+          [this](EntryReader& aReader) { mEntryDestructor(aReader); });
     }
-    mMaybeUnderlyingBuffer->mClearedBlockCount =
-        mMaybeUnderlyingBuffer->mPushedBlockCount;
+    mClearedBlockCount = mPushedBlockCount;
   }
   // Clear all entries, calling entry destructor (if any), and move read index
   // to the end so that these entries cannot be read anymore.
   void ClearAllEntries() {
     mMutex.AssertCurrentThreadOwns();
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
     DestroyAllEntries();
     // Move read index to write index, so there's effectively no more entries
     // that can be read. (Not setting both to 0, in case user is keeping
@@ -1004,90 +863,23 @@ class BlocksRingBuffer {
     mFirstReadIndex = mNextWriteIndex;
   }
-  // If there is an underlying buffer (with optional entry destructor), destroy
-  // all entries, move read index to the end, and discard the buffer and entry
-  // destructor. This BlocksRingBuffer will now gracefully reject all API calls,
-  // and is in a state where a new underlying buffer&entry deleter may be
-  // installed.
-  void ResetUnderlyingBuffer() {
-    if (!mMaybeUnderlyingBuffer) {
-      return;
-    }
-    ClearAllEntries();
-    mMaybeUnderlyingBuffer.reset();
-  }
   // Mutex guarding the following members.
   mutable baseprofiler::detail::BaseProfilerMutex mMutex;
-  struct UnderlyingBuffer {
-    // Create a buffer of the given length.
-    explicit UnderlyingBuffer(PowerOfTwo<Length> aLength) : mBuffer(aLength) {}
-    // Take ownership of an existing buffer.
-    UnderlyingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-                     PowerOfTwo<Length> aLength)
-        : mBuffer(std::move(aExistingBuffer), aLength) {}
-    // Use an externally-owned buffer.
-    UnderlyingBuffer(Buffer::Byte* aExternalBuffer, PowerOfTwo<Length> aLength)
-        : mBuffer(aExternalBuffer, aLength) {}
-    // Create a buffer of the given length.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-    // Take ownership of an existing buffer.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(UniquePtr<Buffer::Byte[]> aExistingBuffer,
-                              PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(std::move(aExistingBuffer), aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-    // Use an externally-owned buffer.
-    template <typename EntryDestructor>
-    explicit UnderlyingBuffer(Buffer::Byte* aExternalBuffer,
-                              PowerOfTwo<Length> aLength,
-                              EntryDestructor&& aEntryDestructor)
-        : mBuffer(aExternalBuffer, aLength),
-          mEntryDestructor(std::forward<EntryDestructor>(aEntryDestructor)) {}
-    // Only allow move-construction.
-    UnderlyingBuffer(UnderlyingBuffer&&) = default;
-    // Copies and move-assignment are explictly disallowed.
-    UnderlyingBuffer(const UnderlyingBuffer&) = delete;
-    UnderlyingBuffer& operator=(const UnderlyingBuffer&) = delete;
-    UnderlyingBuffer& operator=(UnderlyingBuffer&&) = delete;
-    // Underlying circular byte buffer.
-    Buffer mBuffer;
-    // If set, function to call for each entry that is about to be destroyed.
-    std::function<void(EntryReader&)> mEntryDestructor;
-    // Statistics.
-    uint64_t mPushedBlockCount = 0;
-    uint64_t mClearedBlockCount = 0;
-  };
-  // Underlying buffer, with entry destructor and stats.
-  // Only valid during in-session period.
-  Maybe<UnderlyingBuffer> mMaybeUnderlyingBuffer;
+  // Underlying circular byte buffer.
+  Buffer mBuffer;
   // Index to the first block to be read (or cleared). Initialized to 1 because
-  // 0 is reserved for the "empty" BlockIndex value. Kept between sessions, so
-  // that stored indices from one session will be gracefully denied in future
-  // sessions.
+  // 0 is reserved for the "empty" BlockIndex value.
   BlockIndex mFirstReadIndex = BlockIndex(Index(1));
   // Index where the next new block should be allocated. Initialized to 1
-  // because 0 is reserved for the "empty" BlockIndex value. Kept between
-  // sessions, so that stored indices from one session will be gracefully denied
-  // in future sessions.
+  // because 0 is reserved for the "empty" BlockIndex value.
   BlockIndex mNextWriteIndex = BlockIndex(Index(1));
+  // If set, function to call for each entry that is about to be destroyed.
+  std::function<void(EntryReader&)> mEntryDestructor;
+  // Statistics.
+  uint64_t mPushedBlockCount = 0;
+  uint64_t mClearedBlockCount = 0;
 };
 } // namespace mozilla
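
For orientation, here is a minimal writer-side sketch of the always-in-session API restored by this backout, following the declarations in the header diff above. It is an illustration only, not part of the commit: the include name, buffer size, and payload values are assumptions, and MakePowerOfTwo is taken from the profiler's PowerOfTwo utilities as used in the test file below.

// Illustrative sketch only (not part of the commit); include path is assumed.
#include "BlocksRingBuffer.h"
#include <cstdint>

void WriterSketch() {
  using mozilla::BlocksRingBuffer;
  // The buffer length must be a power of two (64 bytes here, arbitrarily).
  BlocksRingBuffer rb(mozilla::MakePowerOfTwo<BlocksRingBuffer::Length, 64>());
  // Put(Length, Callback): after this backout the callback receives an
  // EntryWriter directly, instead of a Maybe<EntryWriter>&&.
  rb.Put(sizeof(uint32_t), [](BlocksRingBuffer::EntryWriter aEW) {
    aEW.WriteObject(uint32_t(123));
  });
  // Convenience helpers copy raw bytes or a whole object and return the
  // BlockIndex of the new entry.
  uint32_t n = 456;
  BlocksRingBuffer::BlockIndex bi = rb.PutFrom(&n, sizeof(n));
  rb.PutObject(uint32_t(789));
  (void)bi;
}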

File 2 of 2: TestBaseProfiler.cpp

@@ -525,9 +525,8 @@ void TestBlocksRingBufferAPI() {
   VERIFY_START_END_DESTROYED(1, 6, 0);
   // Push `2` through EntryReserver, check output BlockIndex.
-  auto bi2 = rb.Put([](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-    MOZ_RELEASE_ASSERT(aER.isSome());
-    return aER->WriteObject(uint32_t(2));
+  auto bi2 = rb.Put([](BlocksRingBuffer::EntryReserver aER) {
+    return aER.WriteObject(uint32_t(2));
   });
   static_assert(
       std::is_same<decltype(bi2), BlocksRingBuffer::BlockIndex>::value,
@@ -602,9 +601,8 @@ void TestBlocksRingBufferAPI() {
   // Push `3` through EntryReserver and then EntryWriter, check writer output
   // is returned to the initial caller.
-  auto put3 = rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-    MOZ_RELEASE_ASSERT(aER.isSome());
-    return aER->Reserve(
+  auto put3 = rb.Put([&](BlocksRingBuffer::EntryReserver aER) {
+    return aER.Reserve(
         sizeof(uint32_t), [&](BlocksRingBuffer::EntryWriter aEW) {
           aEW.WriteObject(uint32_t(3));
           return float(ExtractBlockIndex(aEW.CurrentBlockIndex()));
@@ -670,9 +668,8 @@ void TestBlocksRingBufferAPI() {
   // Push 5 through EntryReserver then EntryWriter, no returns.
   // This will destroy the second entry.
   // Check that the EntryWriter can access bi4 but not bi2.
-  auto bi5_6 = rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aER) {
-    MOZ_RELEASE_ASSERT(aER.isSome());
-    return aER->Reserve(
+  auto bi5_6 = rb.Put([&](BlocksRingBuffer::EntryReserver aER) {
+    return aER.Reserve(
         sizeof(uint32_t), [&](BlocksRingBuffer::EntryWriter aEW) {
           aEW.WriteObject(uint32_t(5));
           MOZ_RELEASE_ASSERT(aEW.GetEntryAt(bi2).isNothing());
@@ -777,217 +774,6 @@ void TestBlocksRingBufferAPI() {
   printf("TestBlocksRingBufferAPI done\n");
 }
-void TestBlocksRingBufferUnderlyingBufferChanges() {
-  printf("TestBlocksRingBufferUnderlyingBufferChanges...\n");
-  // Out-of-session BlocksRingBuffer to start with.
-  BlocksRingBuffer rb;
-  // Block index to read at. Initially "null", but may be changed below.
-  BlocksRingBuffer::BlockIndex bi;
-  // Test all rb APIs when rb is out-of-session and therefore doesn't have an
-  // underlying buffer.
-  auto testOutOfSession = [&]() {
-    MOZ_RELEASE_ASSERT(rb.BufferLength().isNothing());
-    BlocksRingBuffer::State state = rb.GetState();
-    // When out-of-session, range start and ends are the same, and there are no
-    // pushed&cleared blocks.
-    MOZ_RELEASE_ASSERT(state.mRangeStart == state.mRangeEnd);
-    MOZ_RELEASE_ASSERT(state.mPushedBlockCount == 0);
-    MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
-    // `Put()` functions run the callback with `Nothing`.
-    int32_t ran = 0;
-    rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aMaybeEntryReserver) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryReserver.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.Put(1, [&](Maybe<BlocksRingBuffer::EntryWriter>&& aMaybeEntryWriter) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    // `PutFrom` won't do anything, and returns the null BlockIndex.
-    MOZ_RELEASE_ASSERT(rb.PutFrom(&ran, sizeof(ran)) ==
-                       BlocksRingBuffer::BlockIndex{});
-    MOZ_RELEASE_ASSERT(rb.PutObject(ran) == BlocksRingBuffer::BlockIndex{});
-    // `Read()` functions run the callback with `Nothing`.
-    ran = 0;
-    rb.Read([&](Maybe<BlocksRingBuffer::Reader>&& aMaybeReader) {
-      MOZ_RELEASE_ASSERT(aMaybeReader.isNothing());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(BlocksRingBuffer::BlockIndex{},
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(bi,
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    // `ReadEach` shouldn't run the callback (nothing to read).
-    rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-  };
-  // As `testOutOfSession()` attempts to modify the buffer, we run it twice to
-  // make sure one run doesn't influence the next one.
-  testOutOfSession();
-  testOutOfSession();
-  rb.ClearBefore(bi);
-  testOutOfSession();
-  testOutOfSession();
-  rb.Clear();
-  testOutOfSession();
-  testOutOfSession();
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-  constexpr uint32_t MBSize = 32;
-  rb.Set(MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>());
-  constexpr bool EMPTY = true;
-  constexpr bool NOT_EMPTY = false;
-  // Test all rb APIs when rb has an underlying buffer.
-  auto testInSession = [&](bool aExpectEmpty) {
-    MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-    BlocksRingBuffer::State state = rb.GetState();
-    if (aExpectEmpty) {
-      MOZ_RELEASE_ASSERT(state.mRangeStart == state.mRangeEnd);
-      MOZ_RELEASE_ASSERT(state.mPushedBlockCount == 0);
-      MOZ_RELEASE_ASSERT(state.mClearedBlockCount == 0);
-    } else {
-      MOZ_RELEASE_ASSERT(state.mRangeStart < state.mRangeEnd);
-      MOZ_RELEASE_ASSERT(state.mPushedBlockCount > 0);
-      MOZ_RELEASE_ASSERT(state.mClearedBlockCount >= 0);
-    }
-    int32_t ran = 0;
-    rb.Put([&](Maybe<BlocksRingBuffer::EntryReserver>&& aMaybeEntryReserver) {
-      MOZ_RELEASE_ASSERT(aMaybeEntryReserver.isSome());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    // The following three `Put...` will write three int32_t of value 1.
-    bi = rb.Put(sizeof(ran),
-                [&](Maybe<BlocksRingBuffer::EntryWriter>&& aMaybeEntryWriter) {
-                  MOZ_RELEASE_ASSERT(aMaybeEntryWriter.isSome());
-                  ++ran;
-                  aMaybeEntryWriter->WriteObject(ran);
-                  return aMaybeEntryWriter->CurrentBlockIndex();
-                });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    MOZ_RELEASE_ASSERT(rb.PutFrom(&ran, sizeof(ran)) !=
-                       BlocksRingBuffer::BlockIndex{});
-    MOZ_RELEASE_ASSERT(rb.PutObject(ran) != BlocksRingBuffer::BlockIndex{});
-    ran = 0;
-    rb.Read([&](Maybe<BlocksRingBuffer::Reader>&& aMaybeReader) {
-      MOZ_RELEASE_ASSERT(aMaybeReader.isSome());
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadEach([&](BlocksRingBuffer::EntryReader& aEntryReader) {
-      MOZ_RELEASE_ASSERT(aEntryReader.RemainingBytes() == sizeof(ran));
-      MOZ_RELEASE_ASSERT(aEntryReader.ReadObject<decltype(ran)>() == 1);
-      ++ran;
-    });
-    MOZ_RELEASE_ASSERT(ran >= 3);
-    ran = 0;
-    rb.ReadAt(BlocksRingBuffer::BlockIndex{},
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing());
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-    ran = 0;
-    rb.ReadAt(bi,
-              [&](Maybe<BlocksRingBuffer::EntryReader>&& aMaybeEntryReader) {
-                MOZ_RELEASE_ASSERT(aMaybeEntryReader.isNothing() == !bi);
-                ++ran;
-              });
-    MOZ_RELEASE_ASSERT(ran == 1);
-  };
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-  rb.Set(MakePowerOfTwo<BlocksRingBuffer::Length, 32>());
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-  uint8_t buffer[MBSize * 3];
-  for (size_t i = 0; i < MBSize * 3; ++i) {
-    buffer[i] = uint8_t('A' + i);
-  }
-  rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>());
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-  rb.Reset();
-  testOutOfSession();
-  testOutOfSession();
-  int cleared = 0;
-  rb.Set(&buffer[MBSize], MakePowerOfTwo<BlocksRingBuffer::Length, MBSize>(),
-         [&](auto&&) { ++cleared; });
-  MOZ_RELEASE_ASSERT(rb.BufferLength().isSome());
-  rb.ReadEach([](auto&&) { MOZ_RELEASE_ASSERT(false); });
-  testInSession(EMPTY);
-  testInSession(NOT_EMPTY);
-  // Remove the current underlying buffer, this should clear all entries.
-  rb.Reset();
-  // The above should clear all entries (2 tests, three entries each).
-  MOZ_RELEASE_ASSERT(cleared == 2 * 3);
-  // Check that only the provided stack-based sub-buffer was modified.
-  uint32_t changed = 0;
-  for (size_t i = MBSize; i < MBSize * 2; ++i) {
-    changed += (buffer[i] == uint8_t('A' + i)) ? 0 : 1;
-  }
-  // Expect at least 75% changes.
-  MOZ_RELEASE_ASSERT(changed >= MBSize * 6 / 8);
-  // Everything around the sub-buffer should be unchanged.
-  for (size_t i = 0; i < MBSize; ++i) {
-    MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
-  }
-  for (size_t i = MBSize * 2; i < MBSize * 3; ++i) {
-    MOZ_RELEASE_ASSERT(buffer[i] == uint8_t('A' + i));
-  }
-  testOutOfSession();
-  testOutOfSession();
-  printf("TestBlocksRingBufferUnderlyingBufferChanges done\n");
-}
 void TestBlocksRingBufferThreading() {
   printf("TestBlocksRingBufferThreading...\n");
@@ -1041,10 +827,9 @@ void TestBlocksRingBufferThreading() {
           // Reserve as many bytes as the thread number (but at least enough
           // to store an int), and write an increasing int.
           rb.Put(std::max(aThreadNo, int(sizeof(push))),
-                 [&](Maybe<BlocksRingBuffer::EntryWriter>&& aEW) {
-                   MOZ_RELEASE_ASSERT(aEW.isSome());
-                   aEW->WriteObject(aThreadNo * 1000000 + push);
-                   *aEW += aEW->RemainingBytes();
+                 [&](BlocksRingBuffer::EntryWriter aEW) {
+                   aEW.WriteObject(aThreadNo * 1000000 + push);
+                   aEW += aEW.RemainingBytes();
                  });
         }
       },
@@ -1117,7 +902,6 @@ void TestProfiler() {
   TestLEB128();
   TestModuloBuffer();
   TestBlocksRingBufferAPI();
-  TestBlocksRingBufferUnderlyingBufferChanges();
   TestBlocksRingBufferThreading();
   {
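
To complete the picture, a matching reader-side sketch using the calls exercised by TestBlocksRingBufferAPI above. Same caveats as the writer sketch after the header diff: this is an illustration only, and rb and bi are assumed to come from an earlier writer like that sketch.

// Illustrative sketch only (not part of the commit).
void ReaderSketch(mozilla::BlocksRingBuffer& rb,
                  mozilla::BlocksRingBuffer::BlockIndex bi) {
  using mozilla::BlocksRingBuffer;
  // Visit every live entry; the callback receives an EntryReader&.
  rb.ReadEach([](BlocksRingBuffer::EntryReader& aER) {
    uint32_t value = aER.ReadObject<uint32_t>();
    (void)value;
  });
  // Read one entry by index; the Maybe is Nothing if that block has already
  // been overwritten, or if the index is just past the last entry.
  rb.ReadAt(bi, [](mozilla::Maybe<BlocksRingBuffer::EntryReader>&& aMaybeER) {
    if (aMaybeER.isSome()) {
      (void)aMaybeER->ReadObject<uint32_t>();
    }
  });
  // Thread-safe snapshot of range indices and push/clear statistics.
  BlocksRingBuffer::State state = rb.GetState();
  (void)state;
}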