Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-24 13:21:05 +00:00)
Bug 1368887 - Crash in mozilla::net::CacheFile::IsKilled, r=valentin, r=honzab
CacheFile::OnChunkRead and CacheFile::OnChunkWritten now correctly handle discarded chunks. It is now also ensured that a chunk which is going to be discarded isn't referenced by any input stream.
parent eaff967268
commit 7652415367
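The crash fix itself is the early return added to OnChunkRead() and OnChunkWritten() in the hunks below: when the completed chunk has already been marked as discarded by Truncate(), the handlers deactivate it and drop it from mDiscardedChunks instead of treating it as a live chunk. A minimal standalone sketch of that pattern, using made-up type and method names (Chunk, File, OnChunkIoFinished, DiscardChunk) rather than the real Gecko classes:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <vector>

// Simplified stand-ins for CacheFileChunk and CacheFile. All names here are
// illustrative only; they are not the real Gecko types or methods.
struct Chunk {
  uint32_t index = 0;
  bool discarded = false;  // set by Truncate() while I/O may still be pending
  bool active = false;     // cleared once the chunk is fully released
};

class File {
public:
  // Completion handler for an async chunk read/write (the role played by
  // CacheFile::OnChunkRead / OnChunkWritten in the patch).
  void OnChunkIoFinished(bool aIoFailed, const std::shared_ptr<Chunk>& aChunk) {
    if (aChunk->discarded) {
      // The chunk was thrown away by Truncate() while its I/O was in flight.
      // Deactivate it, drop it from the discarded list and bail out early;
      // a discarded chunk must not set the file-wide error state.
      aChunk->active = false;
      auto it = std::find(mDiscardedChunks.begin(), mDiscardedChunks.end(), aChunk);
      assert(it != mDiscardedChunks.end());
      mDiscardedChunks.erase(it);
      return;
    }
    if (aIoFailed) {
      mError = true;  // only live chunks may mark the whole file as broken
    }
  }

  // Called by Truncate() for every chunk that falls behind the new end.
  void DiscardChunk(const std::shared_ptr<Chunk>& aChunk) {
    aChunk->discarded = true;
    mDiscardedChunks.push_back(aChunk);
  }

private:
  bool mError = false;
  std::vector<std::shared_ptr<Chunk>> mDiscardedChunks;
};

int main() {
  File file;
  auto chunk = std::make_shared<Chunk>();
  chunk->active = true;
  file.DiscardChunk(chunk);             // truncation happens mid-write
  file.OnChunkIoFinished(true, chunk);  // completion takes the early-return path
  std::printf("active=%d discarded=%d\n", chunk->active, chunk->discarded);
  return 0;
}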
netwerk/cache2/CacheFile.cpp

@@ -332,6 +332,18 @@ CacheFile::OnChunkRead(nsresult aResult, CacheFileChunk *aChunk)
   LOG(("CacheFile::OnChunkRead() [this=%p, rv=0x%08" PRIx32 ", chunk=%p, idx=%u]",
        this, static_cast<uint32_t>(aResult), aChunk, index));
 
+  if (aChunk->mDiscardedChunk) {
+    // We discard only unused chunks, so it must be still unused when reading
+    // data finishes.
+    MOZ_ASSERT(aChunk->mRefCnt == 2);
+    aChunk->mActiveChunk = false;
+    ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
+
+    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
+    MOZ_ASSERT(removed);
+    return NS_OK;
+  }
+
   if (NS_FAILED(aResult)) {
     SetError(aResult);
   }
@@ -365,6 +377,18 @@ CacheFile::OnChunkWritten(nsresult aResult, CacheFileChunk *aChunk)
   MOZ_ASSERT(!mOpeningFile);
   MOZ_ASSERT(mHandle);
 
+  if (aChunk->mDiscardedChunk) {
+    // We discard only unused chunks, so it must be still unused when writing
+    // data finishes.
+    MOZ_ASSERT(aChunk->mRefCnt == 2);
+    aChunk->mActiveChunk = false;
+    ReleaseOutsideLock(RefPtr<CacheFileChunkListener>(aChunk->mFile.forget()).forget());
+
+    DebugOnly<bool> removed = mDiscardedChunks.RemoveElement(aChunk);
+    MOZ_ASSERT(removed);
+    return NS_OK;
+  }
+
   if (NS_FAILED(aResult)) {
     SetError(aResult);
   }
@@ -1903,8 +1927,11 @@ CacheFile::Truncate(int64_t aOffset)
 
   nsresult rv;
 
   MOZ_ASSERT(aOffset <= mDataSize);
+  // If we ever need to truncate on non alt-data boundary, we need to handle
+  // existing input streams.
+  MOZ_ASSERT(aOffset == mAltDataOffset, "Truncating normal data not implemented");
   MOZ_ASSERT(mReady);
   MOZ_ASSERT(!mOutput);
 
   uint32_t lastChunk = 0;
   if (mDataSize > 0) {
@@ -1918,6 +1945,9 @@ CacheFile::Truncate(int64_t aOffset)
 
   uint32_t bytesInNewLastChunk = aOffset - newLastChunk * kChunkSize;
 
+  LOG(("CacheFileTruncate() - lastChunk=%u, newLastChunk=%u, "
+       "bytesInNewLastChunk=%u", lastChunk, newLastChunk, bytesInNewLastChunk));
+
   // Remove all truncated chunks from mCachedChunks
   for (auto iter = mCachedChunks.Iter(); !iter.Done(); iter.Next()) {
     uint32_t idx = iter.Key();
@@ -1929,6 +1959,34 @@ CacheFile::Truncate(int64_t aOffset)
     }
   }
 
+  // We need to make sure no input stream holds a reference to a chunk we're
+  // going to discard. In theory, if alt-data begins at chunk boundary, input
+  // stream for normal data can get the chunk containing only alt-data via
+  // EnsureCorrectChunk() call. The input stream won't read the data from such
+  // chunk, but it will keep the reference until the stream is closed and we
+  // cannot simply discard this chunk.
+  int64_t maxInputChunk = -1;
+  for (uint32_t i = 0; i < mInputs.Length(); ++i) {
+    int64_t inputChunk = mInputs[i]->GetChunkIdx();
+
+    if (maxInputChunk < inputChunk) {
+      maxInputChunk = inputChunk;
+    }
+
+    MOZ_RELEASE_ASSERT(mInputs[i]->GetPosition() <= aOffset);
+  }
+
+  MOZ_RELEASE_ASSERT(maxInputChunk <= newLastChunk + 1);
+  if (maxInputChunk == newLastChunk + 1) {
+    // Truncating must be done at chunk boundary
+    MOZ_RELEASE_ASSERT(bytesInNewLastChunk == kChunkSize);
+    newLastChunk++;
+    bytesInNewLastChunk = 0;
+    LOG(("CacheFileTruncate() - chunk %p is still in use, using newLastChunk=%u"
+         " and bytesInNewLastChunk=%u", mChunks.GetWeak(newLastChunk),
+         newLastChunk, bytesInNewLastChunk));
+  }
+
   // Discard all truncated chunks in mChunks
   for (auto iter = mChunks.Iter(); !iter.Done(); iter.Next()) {
     uint32_t idx = iter.Key();
@@ -1937,6 +1995,11 @@ CacheFile::Truncate(int64_t aOffset)
       RefPtr<CacheFileChunk>& chunk = iter.Data();
       LOG(("CacheFile::Truncate() - discarding chunk [idx=%u, chunk=%p]",
            idx, chunk.get()));
+
+      if (HaveChunkListeners(idx)) {
+        NotifyChunkListeners(idx, NS_ERROR_NOT_AVAILABLE, chunk);
+      }
+
       chunk->mDiscardedChunk = true;
       mDiscardedChunks.AppendElement(chunk);
       iter.Remove();
netwerk/cache2/CacheFileInputStream.cpp

@@ -604,7 +604,14 @@ CacheFileInputStream::CanRead(CacheFileChunkReadHandle *aHandle)
   MOZ_ASSERT(mChunk);
   MOZ_ASSERT(mPos / kChunkSize == mChunk->Index());
 
-  int64_t retval = aHandle->Offset() + aHandle->DataSize() - mPos;
+  int64_t retval = aHandle->Offset() + aHandle->DataSize();
+
+  if (!mAlternativeData && mFile->mAltDataOffset != -1 &&
+      mFile->mAltDataOffset < retval) {
+    retval = mFile->mAltDataOffset;
+  }
+
+  retval -= mPos;
   if (retval <= 0 && NS_FAILED(mChunk->GetStatus())) {
     CloseWithStatusLocked(mChunk->GetStatus());
   }
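The CanRead() change above moves the subtraction of mPos until after the readable range has been clamped at mAltDataOffset, so a normal-data input stream never reports bytes that actually belong to alt-data stored in the same chunk. A small self-contained restatement of that arithmetic, as a hypothetical free function with illustrative parameter names rather than the real member variables:

#include <cstdint>
#include <cstdio>

// Hypothetical restatement of the clamping done in
// CacheFileInputStream::CanRead(); names are illustrative only.
int64_t BytesReadableForNormalData(int64_t aHandleOffset, int64_t aHandleDataSize,
                                   int64_t aAltDataOffset, int64_t aPos) {
  int64_t end = aHandleOffset + aHandleDataSize;
  // A normal-data stream must stop at the alt-data boundary even if the
  // chunk it holds physically contains alt-data bytes as well.
  if (aAltDataOffset != -1 && aAltDataOffset < end) {
    end = aAltDataOffset;
  }
  return end - aPos;
}

int main() {
  // Handle covers bytes [0, 1024), alt-data starts at 600, stream is at 600.
  // Subtracting mPos first (the old code) would report 424 readable bytes of
  // alt-data; the clamped version reports 0 and the stream can stop cleanly.
  std::printf("%lld\n",
              static_cast<long long>(BytesReadableForNormalData(0, 1024, 600, 600)));
  return 0;
}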
netwerk/cache2/CacheFileInputStream.h

@@ -40,6 +40,7 @@ public:
 
   uint32_t GetPosition() const { return mPos; };
   bool IsAlternativeData() const { return mAlternativeData; };
+  int64_t GetChunkIdx() const { return mChunk ? mChunk->Index() : -1; };
 
 private:
   virtual ~CacheFileInputStream();