Bug 1747439: netwerk/cache2 cleanup r=necko-reviewers,kershaw

Differential Revision: https://phabricator.services.mozilla.com/D135167
This commit is contained in:
Randell Jesup 2022-01-12 20:38:37 +00:00
parent d23473f33b
commit 6ef1621b21
11 changed files with 145 additions and 114 deletions

View File

@ -146,7 +146,10 @@ CacheEntry::Callback::~Callback() {
MOZ_COUNT_DTOR(CacheEntry::Callback);
}
// We have locks on both this and aEntry
void CacheEntry::Callback::ExchangeEntry(CacheEntry* aEntry) {
aEntry->mLock.AssertCurrentThreadOwns();
mEntry->mLock.AssertCurrentThreadOwns();
if (mEntry == aEntry) return;
// The counter may go from zero to non-null only under the service lock
@ -206,10 +209,10 @@ CacheEntry::CacheEntry(const nsACString& aStorageID, const nsACString& aURI,
mStorageID(aStorageID),
mUseDisk(aUseDisk),
mSkipSizeCheck(aSkipSizeCheck),
mPinned(aPin),
mSecurityInfoLoaded(false),
mPreventCallbacks(false),
mHasData(false),
mPinned(aPin),
mPinningKnown(false),
mCacheEntryId(GetNextId()) {
LOG(("CacheEntry::CacheEntry [this=%p]", this));
@ -285,9 +288,6 @@ nsresult CacheEntry::HashingKey(const nsACString& aStorageID,
void CacheEntry::AsyncOpen(nsICacheEntryOpenCallback* aCallback,
uint32_t aFlags) {
LOG(("CacheEntry::AsyncOpen [this=%p, state=%s, flags=%d, callback=%p]", this,
StateString(mState), aFlags, aCallback));
bool readonly = aFlags & nsICacheStorage::OPEN_READONLY;
bool bypassIfBusy = aFlags & nsICacheStorage::OPEN_BYPASS_IF_BUSY;
bool truncate = aFlags & nsICacheStorage::OPEN_TRUNCATE;
@ -295,9 +295,20 @@ void CacheEntry::AsyncOpen(nsICacheEntryOpenCallback* aCallback,
bool multithread = aFlags & nsICacheStorage::CHECK_MULTITHREADED;
bool secret = aFlags & nsICacheStorage::OPEN_SECRETLY;
MOZ_ASSERT(!readonly || !truncate, "Bad flags combination");
MOZ_ASSERT(!(truncate && mState > LOADING),
"Must not call truncate on already loaded entry");
if (MOZ_LOG_TEST(gCache2Log, LogLevel::Debug)) {
MutexAutoLock lock(mLock);
LOG(("CacheEntry::AsyncOpen [this=%p, state=%s, flags=%d, callback=%p]",
this, StateString(mState), aFlags, aCallback));
}
#ifdef DEBUG
{
// yes, if logging is on in DEBUG we'll take the lock twice in a row
MutexAutoLock lock(mLock);
MOZ_ASSERT(!readonly || !truncate, "Bad flags combination");
MOZ_ASSERT(!(truncate && mState > LOADING),
"Must not call truncate on already loaded entry");
}
#endif
Callback callback(this, aCallback, readonly, multithread, secret);
@ -470,9 +481,9 @@ NS_IMETHODIMP CacheEntry::OnFileReady(nsresult aResult, bool aIsNew) {
mFileStatus = aResult;
mPinned = mFile->IsPinned();
;
mPinningKnown = true;
LOG((" pinning=%d", mPinned));
LOG((" pinning=%d", (bool)mPinned));
if (mState == READY) {
mHasData = true;
@ -558,6 +569,7 @@ already_AddRefed<CacheEntryHandle> CacheEntry::ReopenTruncated(
void CacheEntry::TransferCallbacks(CacheEntry& aFromEntry) {
mozilla::MutexAutoLock lock(mLock);
aFromEntry.mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::TransferCallbacks [entry=%p, from=%p]", this, &aFromEntry));
@ -679,11 +691,10 @@ bool CacheEntry::InvokeCallbacks(bool aReadOnly) {
}
bool CacheEntry::InvokeCallback(Callback& aCallback) {
mLock.AssertCurrentThreadOwns();
LOG(("CacheEntry::InvokeCallback [this=%p, state=%s, cb=%p]", this,
StateString(mState), aCallback.mCallback.get()));
mLock.AssertCurrentThreadOwns();
// When this entry is doomed we want to notify the callback any time
if (!mIsDoomed) {
// When we are here, the entry must be loaded from disk
@ -801,22 +812,21 @@ bool CacheEntry::InvokeCallback(Callback& aCallback) {
}
void CacheEntry::InvokeAvailableCallback(Callback const& aCallback) {
LOG(
("CacheEntry::InvokeAvailableCallback [this=%p, state=%s, cb=%p, r/o=%d, "
"n/w=%d]",
this, StateString(mState), aCallback.mCallback.get(),
aCallback.mReadOnly, aCallback.mNotWanted));
nsresult rv;
uint32_t state;
{
mozilla::MutexAutoLock lock(mLock);
state = mState;
}
LOG(
("CacheEntry::InvokeAvailableCallback [this=%p, state=%s, cb=%p, "
"r/o=%d, "
"n/w=%d]",
this, StateString(mState), aCallback.mCallback.get(),
aCallback.mReadOnly, aCallback.mNotWanted));
// When we are here, the entry must be loaded from disk
MOZ_ASSERT(state > LOADING || mIsDoomed);
// When we are here, the entry must be loaded from disk
MOZ_ASSERT(state > LOADING || mIsDoomed);
}
bool onAvailThread;
rv = aCallback.OnAvailThread(&onAvailThread);
@ -916,12 +926,11 @@ CacheEntryHandle* CacheEntry::NewWriteHandle() {
}
void CacheEntry::OnHandleClosed(CacheEntryHandle const* aHandle) {
mozilla::MutexAutoLock lock(mLock);
LOG(("CacheEntry::OnHandleClosed [this=%p, state=%s, handle=%p]", this,
StateString(mState), aHandle));
mozilla::MutexAutoLock lock(mLock);
if (IsDoomed() && NS_SUCCEEDED(mFileStatus) &&
if (mIsDoomed && NS_SUCCEEDED(mFileStatus) &&
// Note: mHandlesCount is dropped before this method is called
(mHandlesCount == 0 ||
(mHandlesCount == 1 && mWriter && mWriter != aHandle))) {
@ -1090,14 +1099,15 @@ nsresult CacheEntry::SetContentType(uint8_t aContentType) {
nsresult CacheEntry::GetIsForcedValid(bool* aIsForcedValid) {
NS_ENSURE_ARG(aIsForcedValid);
MOZ_ASSERT(mState > LOADING);
#ifdef DEBUG
{
mozilla::MutexAutoLock lock(mLock);
if (mPinned) {
*aIsForcedValid = true;
return NS_OK;
}
MOZ_ASSERT(mState > LOADING);
}
#endif
if (mPinned) {
*aIsForcedValid = true;
return NS_OK;
}
nsAutoCString key;
@ -1454,12 +1464,12 @@ nsresult CacheEntry::MetaDataReady() {
}
nsresult CacheEntry::SetValid() {
LOG(("CacheEntry::SetValid [this=%p, state=%s]", this, StateString(mState)));
nsCOMPtr<nsIOutputStream> outputStream;
{
mozilla::MutexAutoLock lock(mLock);
LOG(("CacheEntry::SetValid [this=%p, state=%s]", this,
StateString(mState)));
MOZ_ASSERT(mState > EMPTY);
@ -1480,9 +1490,8 @@ nsresult CacheEntry::SetValid() {
}
nsresult CacheEntry::Recreate(bool aMemoryOnly, nsICacheEntry** _retval) {
LOG(("CacheEntry::Recreate [this=%p, state=%s]", this, StateString(mState)));
mozilla::MutexAutoLock lock(mLock);
LOG(("CacheEntry::Recreate [this=%p, state=%s]", this, StateString(mState)));
RefPtr<CacheEntryHandle> handle = ReopenTruncated(aMemoryOnly, nullptr);
if (handle) {
@ -1623,9 +1632,8 @@ bool CacheEntry::DeferOrBypassRemovalOnPinStatus(bool aPinned) {
LOG(("CacheEntry::DeferOrBypassRemovalOnPinStatus [this=%p]", this));
mozilla::MutexAutoLock lock(mLock);
if (mPinningKnown) {
LOG((" pinned=%d, caller=%d", mPinned, aPinned));
LOG((" pinned=%d, caller=%d", (bool)mPinned, aPinned));
// Bypass when the pin status of this entry doesn't match the pin status
// caller wants to remove
return mPinned != aPinned;
@ -1888,10 +1896,10 @@ NS_IMETHODIMP CacheOutputCloseListener::Run() {
// Memory reporting
size_t CacheEntry::SizeOfExcludingThis(
mozilla::MallocSizeOf mallocSizeOf) const {
size_t CacheEntry::SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) {
size_t n = 0;
MutexAutoLock lock(mLock);
n += mCallbacks.ShallowSizeOfExcludingThis(mallocSizeOf);
if (mFile) {
n += mFile->SizeOfIncludingThis(mallocSizeOf);
@ -1911,8 +1919,7 @@ size_t CacheEntry::SizeOfExcludingThis(
return n;
}
size_t CacheEntry::SizeOfIncludingThis(
mozilla::MallocSizeOf mallocSizeOf) const {
size_t CacheEntry::SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) {
return mallocSizeOf(this) + SizeOfExcludingThis(mallocSizeOf);
}

View File

@ -157,8 +157,8 @@ class CacheEntry final : public nsIRunnable, public CacheFileListener {
uint32_t(-1)};
// Memory reporting
size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const;
size_t SizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf);
size_t SizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf);
private:
virtual ~CacheEntry();
@ -188,7 +188,8 @@ class CacheEntry final : public nsIRunnable, public CacheFileListener {
void ExchangeEntry(CacheEntry* aEntry);
// Returns true when an entry is about to be "defer" doomed and this is
// a "defer" callback.
// a "defer" callback. The caller must hold a lock (this entry is in the
// caller's mCallback array)
bool DeferDoom(bool* aDoom) const;
// We are raising reference count here to take into account the pending
@ -323,9 +324,10 @@ class CacheEntry final : public nsIRunnable, public CacheFileListener {
// When mFileStatus is read and found success it is ensured there is mFile and
// that it is after a successful call to Init().
Atomic<nsresult, ReleaseAcquire> mFileStatus{NS_ERROR_NOT_INITIALIZED};
nsCString mURI;
nsCString mEnhanceID;
nsCString mStorageID;
// Set in constructor
nsCString const mURI;
nsCString const mEnhanceID;
nsCString const mStorageID;
// mUseDisk, mSkipSizeCheck, mIsDoomed are plain "bool", not "bool:1",
// so as to avoid bitfield races with the byte containing
@ -337,6 +339,8 @@ class CacheEntry final : public nsIRunnable, public CacheFileListener {
bool const mSkipSizeCheck;
// Set when entry is doomed with AsyncDoom() or DoomAlreadyRemoved().
Atomic<bool, Relaxed> mIsDoomed{false};
// The indication of pinning this entry was open with
Atomic<bool, Relaxed> mPinned;
// Following flags are all synchronized with the cache entry lock.
@ -352,8 +356,6 @@ class CacheEntry final : public nsIRunnable, public CacheFileListener {
// false: after load and a new file, or dropped to back to false when a
// writer fails to open an output stream.
bool mHasData : 1;
// The indication of pinning this entry was open with
bool mPinned : 1;
// Whether the pinning state of the entry is known (equals to the actual state
// of the cache file)
bool mPinningKnown : 1;

View File

@ -289,6 +289,16 @@ nsresult CacheFile::Init(const nsACString& aKey, bool aCreateNew,
return NS_OK;
}
// Copies this file's key into aKey; the read of mKey happens under the
// file lock so it is safe against concurrent mutation.
void CacheFile::Key(nsACString& aKey) {
  CacheFileAutoLock autoLock(this);
  aKey = mKey;
}
// Returns the pinning flag, read under the file lock (mPinned can be
// updated on another thread, e.g. when metadata finishes loading).
bool CacheFile::IsPinned() {
  CacheFileAutoLock autoLock(this);
  return mPinned;
}
nsresult CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk* aChunk) {
CacheFileAutoLock lock(this);
@ -452,6 +462,7 @@ nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
bool mAlreadyDoomed;
} autoDoom(aHandle);
RefPtr<CacheFileMetadata> metadata;
nsCOMPtr<CacheFileListener> listener;
bool isNew = false;
nsresult retval = NS_OK;
@ -559,19 +570,23 @@ nsresult CacheFile::OnFileOpened(CacheFileHandle* aHandle, nsresult aResult) {
return NS_OK;
}
}
if (listener) {
lock.Unlock();
listener->OnFileReady(retval, isNew);
return NS_OK;
}
MOZ_ASSERT(NS_SUCCEEDED(aResult));
MOZ_ASSERT(!mMetadata);
MOZ_ASSERT(mListener);
// mMetaData is protected by a lock, but ReadMetaData has to be called
// without the lock. Alternatively we could make a
// "ReadMetaDataLocked", and temporarily unlock to call OnFileReady
metadata = mMetadata =
new CacheFileMetadata(mHandle, mKey, WrapNotNull(mLock));
}
if (listener) {
listener->OnFileReady(retval, isNew);
return NS_OK;
}
MOZ_ASSERT(NS_SUCCEEDED(aResult));
MOZ_ASSERT(!mMetadata);
MOZ_ASSERT(mListener);
mMetadata = new CacheFileMetadata(mHandle, mKey, WrapNotNull(mLock));
mMetadata->ReadMetadata(this);
metadata->ReadMetadata(this);
return NS_OK;
}
@ -588,41 +603,44 @@ nsresult CacheFile::OnDataRead(CacheFileHandle* aHandle, char* aBuf,
}
nsresult CacheFile::OnMetadataRead(nsresult aResult) {
MOZ_ASSERT(mListener);
LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
static_cast<uint32_t>(aResult)));
nsCOMPtr<CacheFileListener> listener;
bool isNew = false;
if (NS_SUCCEEDED(aResult)) {
mPinned = mMetadata->Pinned();
mReady = true;
mDataSize = mMetadata->Offset();
if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
isNew = true;
mMetadata->MarkDirty();
} else {
const char* altData = mMetadata->GetElement(CacheFileUtils::kAltDataKey);
if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
altData, &mAltDataOffset, &mAltDataType)) ||
(mAltDataOffset > mDataSize))) {
// alt-metadata cannot be parsed or alt-data offset is invalid
mMetadata->InitEmptyMetadata();
{
CacheFileAutoLock lock(this);
MOZ_ASSERT(mListener);
LOG(("CacheFile::OnMetadataRead() [this=%p, rv=0x%08" PRIx32 "]", this,
static_cast<uint32_t>(aResult)));
if (NS_SUCCEEDED(aResult)) {
mPinned = mMetadata->Pinned();
mReady = true;
mDataSize = mMetadata->Offset();
if (mDataSize == 0 && mMetadata->ElementsSize() == 0) {
isNew = true;
mAltDataOffset = -1;
mAltDataType.Truncate();
mDataSize = 0;
mMetadata->MarkDirty();
} else {
CacheFileAutoLock lock(this);
PreloadChunks(0);
const char* altData =
mMetadata->GetElement(CacheFileUtils::kAltDataKey);
if (altData && (NS_FAILED(CacheFileUtils::ParseAlternativeDataInfo(
altData, &mAltDataOffset, &mAltDataType)) ||
(mAltDataOffset > mDataSize))) {
// alt-metadata cannot be parsed or alt-data offset is invalid
mMetadata->InitEmptyMetadata();
isNew = true;
mAltDataOffset = -1;
mAltDataType.Truncate();
mDataSize = 0;
} else {
PreloadChunks(0);
}
}
InitIndexEntry();
}
InitIndexEntry();
mListener.swap(listener);
}
nsCOMPtr<CacheFileListener> listener;
mListener.swap(listener);
listener->OnFileReady(aResult, isNew);
return NS_OK;
}
@ -1011,6 +1029,7 @@ nsresult CacheFile::Doom(CacheFileListener* aCallback) {
}
nsresult CacheFile::DoomLocked(CacheFileListener* aCallback) {
AssertOwnsLock();
MOZ_ASSERT(mHandle || mMemoryOnly || mOpeningFile);
LOG(("CacheFile::DoomLocked() [this=%p, listener=%p]", this, aCallback));
@ -1329,6 +1348,7 @@ nsresult CacheFile::GetFetchCount(uint32_t* _retval) {
}
nsresult CacheFile::GetDiskStorageSizeInKB(uint32_t* aDiskStorageSize) {
CacheFileAutoLock lock(this);
if (!mHandle) {
return NS_ERROR_NOT_AVAILABLE;
}
@ -1351,20 +1371,6 @@ nsresult CacheFile::OnFetched() {
return NS_OK;
}
void CacheFile::Lock() {
  // Acquire the mutex held inside the shared mLock wrapper.
  mLock->Lock().Lock();
}
void CacheFile::Unlock() {
// move the elements out of mObjsToRelease
// so that they can be released after we unlock
nsTArray<RefPtr<nsISupports>> objs = std::move(mObjsToRelease);
mLock->Lock().Unlock();
}
// Asserts that the calling thread currently holds this file's mutex.
void CacheFile::AssertOwnsLock() const { mLock->Lock().AssertCurrentThreadOwns(); }
void CacheFile::ReleaseOutsideLock(RefPtr<nsISupports> aObject) {
AssertOwnsLock();
@ -2219,6 +2225,7 @@ nsresult CacheFile::NotifyChunkListeners(uint32_t aIndex, nsresult aResult,
}
bool CacheFile::HaveChunkListeners(uint32_t aIndex) {
AssertOwnsLock();
ChunkListeners* listeners;
mChunkListeners.Get(aIndex, &listeners);
return !!listeners;
@ -2412,7 +2419,6 @@ void CacheFile::WriteMetadataIfNeededLocked(bool aFireAndForget) {
void CacheFile::PostWriteTimer() {
if (mMemoryOnly) return;
LOG(("CacheFile::PostWriteTimer() [this=%p]", this));
CacheFileIOManager::ScheduleMetadataWrite(this);
@ -2481,6 +2487,7 @@ void CacheFile::SetError(nsresult aStatus) {
}
nsresult CacheFile::InitIndexEntry() {
AssertOwnsLock();
MOZ_ASSERT(mHandle);
if (mHandle->IsDoomed()) return NS_OK;

View File

@ -119,9 +119,9 @@ class CacheFile final : public CacheFileChunkListener,
nsresult OnFetched();
bool DataSize(int64_t* aSize);
void Key(nsACString& aKey) { aKey = mKey; }
void Key(nsACString& aKey);
bool IsDoomed();
bool IsPinned() const { return mPinned; }
bool IsPinned();
// Returns true when there is a potentially unfinished write operation.
bool IsWriteInProgress();
bool EntryWouldExceedLimit(int64_t aOffset, int64_t aSize, bool aIsAltData);
@ -140,9 +140,15 @@ class CacheFile final : public CacheFileChunkListener,
virtual ~CacheFile();
void Lock();
void Unlock();
void AssertOwnsLock() const;
// Acquires the mutex held inside the shared mLock wrapper.
void Lock() { mLock->Lock().Lock(); }
// Releases the mutex. Objects queued in mObjsToRelease while the lock was
// held are destroyed only after the mutex is dropped.
void Unlock() {
// move the elements out of mObjsToRelease
// so that they can be released after we unlock
nsTArray<RefPtr<nsISupports>> objs = std::move(mObjsToRelease);
mLock->Lock().Unlock();
// 'objs' is destroyed here, after the mutex has been released.
}
// Asserts that the calling thread currently holds this file's mutex.
void AssertOwnsLock() const { mLock->Lock().AssertCurrentThreadOwns(); }
void ReleaseOutsideLock(RefPtr<nsISupports> aObject);
enum ECallerType { READER = 0, WRITER = 1, PRELOADER = 2 };

View File

@ -412,6 +412,7 @@ nsresult CacheFileChunk::Write(CacheFileHandle* aHandle,
void CacheFileChunk::WaitForUpdate(CacheFileChunkListener* aCallback) {
AssertOwnsLock();
mFile->AssertOwnsLock(); // For thread-safety analysis
LOG(("CacheFileChunk::WaitForUpdate() [this=%p, listener=%p]", this,
aCallback));
@ -507,6 +508,7 @@ uint32_t CacheFileChunk::DataSize() const { return mBuf->DataSize(); }
void CacheFileChunk::UpdateDataSize(uint32_t aOffset, uint32_t aLen) {
AssertOwnsLock();
mFile->AssertOwnsLock(); // For thread-safety analysis
// UpdateDataSize() is called only when we've written some data to the chunk
// and we never write data anymore once some error occurs.

View File

@ -342,6 +342,7 @@ CacheFileInputStream::AsyncWait(nsIInputStreamCallback* aCallback,
NS_IMETHODIMP
CacheFileInputStream::Seek(int32_t whence, int64_t offset) {
CacheFileAutoLock lock(mFile);
mFile->AssertOwnsLock(); // For thread-safety analysis
LOG(("CacheFileInputStream::Seek() [this=%p, whence=%d, offset=%" PRId64 "]",
this, whence, offset));
@ -396,6 +397,7 @@ CacheFileInputStream::SetEOF() {
NS_IMETHODIMP
CacheFileInputStream::Tell(int64_t* _retval) {
CacheFileAutoLock lock(mFile);
mFile->AssertOwnsLock(); // For thread-safety analysis
if (mClosed) {
LOG(("CacheFileInputStream::Tell() - Stream is closed. [this=%p]", this));

View File

@ -82,6 +82,7 @@ NS_IMETHODIMP
CacheFileOutputStream::Write(const char* aBuf, uint32_t aCount,
uint32_t* _retval) {
CacheFileAutoLock lock(mFile);
mFile->AssertOwnsLock(); // For thread-safety analysis
LOG(("CacheFileOutputStream::Write() [this=%p, count=%d]", this, aCount));
@ -246,6 +247,7 @@ CacheFileOutputStream::AsyncWait(nsIOutputStreamCallback* aCallback,
NS_IMETHODIMP
CacheFileOutputStream::Seek(int32_t whence, int64_t offset) {
CacheFileAutoLock lock(mFile);
mFile->AssertOwnsLock(); // For thread-safety analysis
LOG(("CacheFileOutputStream::Seek() [this=%p, whence=%d, offset=%" PRId64 "]",
this, whence, offset));
@ -296,6 +298,7 @@ CacheFileOutputStream::SetEOF() {
NS_IMETHODIMP
CacheFileOutputStream::Tell(int64_t* _retval) {
CacheFileAutoLock lock(mFile);
mFile->AssertOwnsLock(); // For thread-safety analysis
if (mClosed) {
LOG(("CacheFileOutputStream::Tell() - Stream is closed. [this=%p]", this));
@ -347,6 +350,8 @@ void CacheFileOutputStream::NotifyCloseListener() {
}
void CacheFileOutputStream::ReleaseChunk() {
mFile->AssertOwnsLock();
LOG(("CacheFileOutputStream::ReleaseChunk() [this=%p, idx=%d]", this,
mChunk->Index()));

View File

@ -113,6 +113,8 @@ class CacheIOThread final : public nsIThreadObserver {
mozilla::Monitor mMonitor{"CacheIOThread"};
PRThread* mThread{nullptr};
// Only set in Init(), before the thread is started, which reads it but never
// writes
UniquePtr<detail::BlockingIOWatcher> mBlockingIOWatcher;
Atomic<nsIThread*> mXPCOMThread{nullptr};
Atomic<uint32_t, Relaxed> mLowestLevelWaiting{LAST_LEVEL};
@ -126,7 +128,7 @@ class CacheIOThread final : public nsIThreadObserver {
// Raised when nsIEventTarget.Dispatch() is called on this thread
Atomic<bool, Relaxed> mHasXPCOMEvents{false};
// See YieldAndRerun() above
bool mRerunCurrentEvent{false};
bool mRerunCurrentEvent{false}; // Only accessed on the cache thread
// Signal to process all pending events and then shutdown
// Synchronized by mMonitor
bool mShutdown{false};

View File

@ -243,9 +243,7 @@ NS_INTERFACE_MAP_BEGIN(CacheIndex)
NS_INTERFACE_MAP_ENTRY(nsIRunnable)
NS_INTERFACE_MAP_END
CacheIndex::CacheIndex()
{
CacheIndex::CacheIndex() {
sLock.AssertCurrentThreadOwns();
LOG(("CacheIndex::CacheIndex [this=%p]", this));
MOZ_ASSERT(!gInstance, "multiple CacheIndex instances!");

View File

@ -2173,7 +2173,7 @@ void CacheStorageService::TelemetryRecordEntryCreation(
timeStamp, TimeStamp::NowLoRes());
}
void CacheStorageService::TelemetryRecordEntryRemoval(CacheEntry const* entry) {
void CacheStorageService::TelemetryRecordEntryRemoval(CacheEntry* entry) {
MOZ_ASSERT(CacheStorageService::IsOnManagementThread());
// Doomed entries must not be considered, we are only interested in purged

View File

@ -216,7 +216,7 @@ class CacheStorageService final : public nsICacheStorageService,
// These are helpers for telemetry monitoring of the memory pools.
void TelemetryPrune(TimeStamp& now);
void TelemetryRecordEntryCreation(CacheEntry const* entry);
void TelemetryRecordEntryRemoval(CacheEntry const* entry);
void TelemetryRecordEntryRemoval(CacheEntry* entry);
private:
// Following methods are thread safe to call.