/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

/**
 * SourceBuffer is a single producer, multiple consumer data structure used for
 * storing image source (compressed) data.
 */

#ifndef mozilla_image_sourcebuffer_h
#define mozilla_image_sourcebuffer_h

#include <algorithm>
#include "mozilla/Maybe.h"
#include "mozilla/MemoryReporting.h"
#include "mozilla/Mutex.h"
#include "mozilla/Move.h"
#include "mozilla/RefPtr.h"
#include "mozilla/RefCounted.h"
#include "mozilla/UniquePtr.h"
#include "nsTArray.h"

class nsIInputStream;

namespace mozilla {
namespace image {

class SourceBuffer;

/**
 * IResumable is an interface for classes that can schedule themselves to resume
 * their work later. An implementation of IResumable generally should post a
 * runnable to some event target which continues the work of the task.
 */
struct IResumable {
  MOZ_DECLARE_REFCOUNTED_TYPENAME(IResumable)

  // Subclasses may or may not be XPCOM classes, so we just require that they
  // implement AddRef and Release.
  NS_INLINE_DECL_PURE_VIRTUAL_REFCOUNTING

  virtual void Resume() = 0;

 protected:
  virtual ~IResumable() {}
};
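
// A minimal sketch of an IResumable implementation (illustrative only; not
// part of this header). `DecoderResumer` is hypothetical; a real consumer
// would repost whatever runnable continues its own work, and could get its
// AddRef/Release from e.g. NS_INLINE_DECL_THREADSAFE_REFCOUNTING.
//
//   class DecoderResumer final : public IResumable {
//    public:
//     NS_INLINE_DECL_THREADSAFE_REFCOUNTING(DecoderResumer, override)
//
//     void Resume() override {
//       // Repost the decoding task to its event target so it continues
//       // reading from the SourceBuffer where it left off.
//     }
//
//    private:
//     ~DecoderResumer() override {}
//   };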

/**
 * SourceBufferIterator is a class that allows consumers of image source data to
 * read the contents of a SourceBuffer sequentially.
 *
 * Consumers can advance through the SourceBuffer by calling
 * AdvanceOrScheduleResume() repeatedly. After every advance, they should check
 * the return value, which will tell them the iterator's new state.
 *
 * If WAITING is returned, AdvanceOrScheduleResume() has arranged
 * to call the consumer's Resume() method later, so the consumer should save its
 * state if needed and stop running.
 *
 * If the iterator's new state is READY, then the consumer can call Data() and
 * Length() to read new data from the SourceBuffer.
 *
 * Finally, in the COMPLETE state the consumer can call CompletionStatus() to
 * get the status passed to SourceBuffer::Complete().
 */
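
// Illustrative sketch (not part of this header) of a typical consumer loop.
// `mIterator`, `mConsumer`, and the Handle*() helpers are hypothetical; only
// the SourceBufferIterator API calls below are declared in this file.
//
//   while (true) {
//     switch (mIterator.AdvanceOrScheduleResume(SIZE_MAX, mConsumer)) {
//       case SourceBufferIterator::READY:
//         HandleData(mIterator.Data(), mIterator.Length());
//         continue;
//       case SourceBufferIterator::WAITING:
//         return;  // mConsumer->Resume() will be called when more data
//                  // arrives; save any needed state before this point.
//       case SourceBufferIterator::COMPLETE:
//         HandleCompletion(mIterator.CompletionStatus());
//         return;
//     }
//   }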

class SourceBufferIterator final {
 public:
  enum State {
    START,    // The iterator is at the beginning of the buffer.
    READY,    // The iterator is pointing to new data.
    WAITING,  // The iterator is blocked and the caller must yield.
    COMPLETE  // The iterator is pointing to the end of the buffer.
  };

  explicit SourceBufferIterator(SourceBuffer* aOwner, size_t aReadLimit)
      : mOwner(aOwner),
        mState(START),
        mChunkCount(0),
        mByteCount(0),
        mRemainderToRead(aReadLimit) {
    MOZ_ASSERT(aOwner);
    mData.mIterating.mChunk = 0;
    mData.mIterating.mData = nullptr;
    mData.mIterating.mOffset = 0;
    mData.mIterating.mAvailableLength = 0;
    mData.mIterating.mNextReadLength = 0;
  }

  SourceBufferIterator(SourceBufferIterator&& aOther)
      : mOwner(std::move(aOther.mOwner)),
        mState(aOther.mState),
        mData(aOther.mData),
        mChunkCount(aOther.mChunkCount),
        mByteCount(aOther.mByteCount),
        mRemainderToRead(aOther.mRemainderToRead) {}

  ~SourceBufferIterator();

  SourceBufferIterator& operator=(SourceBufferIterator&& aOther);

  /**
   * Returns true if there are no more than @aBytes remaining in the
   * SourceBuffer. If the SourceBuffer is not yet complete, returns false.
   */
  bool RemainingBytesIsNoMoreThan(size_t aBytes) const;

  /**
   * Advances the iterator through the SourceBuffer if possible. Advances no
   * more than @aRequestedBytes bytes. (Use SIZE_MAX to advance as much as
   * possible.)
   *
   * This is a wrapper around AdvanceOrScheduleResume() that makes it clearer
   * at the callsite when no resuming is intended.
   *
   * @return State::READY if the iterator was successfully advanced.
   *         State::WAITING if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer, but the SourceBuffer
   *         may still receive additional data.
   *         State::COMPLETE if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer and the SourceBuffer is
   *         marked complete (i.e., it will never receive any additional
   *         data).
   */
  State Advance(size_t aRequestedBytes) {
    return AdvanceOrScheduleResume(aRequestedBytes, nullptr);
  }

  /**
   * Advances the iterator through the SourceBuffer if possible. Advances no
   * more than @aRequestedBytes bytes. (Use SIZE_MAX to advance as much as
   * possible.) If advancing is not possible and @aConsumer is not null,
   * arranges to call the @aConsumer's Resume() method when more data is
   * available.
   *
   * @return State::READY if the iterator was successfully advanced.
   *         State::WAITING if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer, but the SourceBuffer
   *         may still receive additional data. @aConsumer's Resume() method
   *         will be called when additional data is available.
   *         State::COMPLETE if the iterator could not be advanced because it's
   *         at the end of the underlying SourceBuffer and the SourceBuffer is
   *         marked complete (i.e., it will never receive any additional
   *         data).
   */
  State AdvanceOrScheduleResume(size_t aRequestedBytes, IResumable* aConsumer);

  /// If at the end, returns the status passed to SourceBuffer::Complete().
  nsresult CompletionStatus() const {
    MOZ_ASSERT(mState == COMPLETE,
               "Calling CompletionStatus() in the wrong state");
    return mState == COMPLETE ? mData.mAtEnd.mStatus : NS_OK;
  }

  /// If we're ready to read, returns a pointer to the new data.
  const char* Data() const {
    MOZ_ASSERT(mState == READY, "Calling Data() in the wrong state");
    return mState == READY ? mData.mIterating.mData + mData.mIterating.mOffset
                           : nullptr;
  }

  /// If we're ready to read, returns the length of the new data.
  size_t Length() const {
    MOZ_ASSERT(mState == READY, "Calling Length() in the wrong state");
    return mState == READY ? mData.mIterating.mNextReadLength : 0;
  }

  /// If we're ready to read, returns whether or not everything available thus
  /// far has been in the same contiguous buffer.
  bool IsContiguous() const {
    MOZ_ASSERT(mState == READY, "Calling IsContiguous() in the wrong state");
    return mState == READY ? mData.mIterating.mChunk == 0 : false;
  }

  /// @return a count of the chunks we've advanced through.
  uint32_t ChunkCount() const { return mChunkCount; }

  /// @return a count of the bytes in all chunks we've advanced through.
  size_t ByteCount() const { return mByteCount; }

  /// @return the source buffer which owns the iterator.
  SourceBuffer* Owner() const {
    MOZ_ASSERT(mOwner);
    return mOwner;
  }

  /// @return the current offset from the beginning of the buffer.
  size_t Position() const {
    return mByteCount - mData.mIterating.mAvailableLength;
  }

 private:
  friend class SourceBuffer;

  SourceBufferIterator(const SourceBufferIterator&) = delete;
  SourceBufferIterator& operator=(const SourceBufferIterator&) = delete;

  bool HasMore() const { return mState != COMPLETE; }

  State AdvanceFromLocalBuffer(size_t aRequestedBytes) {
    MOZ_ASSERT(mState == READY, "Advancing in the wrong state");
    MOZ_ASSERT(mData.mIterating.mAvailableLength > 0,
               "The local buffer shouldn't be empty");
    MOZ_ASSERT(mData.mIterating.mNextReadLength == 0,
               "Advancing without consuming previous data");

    mData.mIterating.mNextReadLength =
        std::min(mData.mIterating.mAvailableLength, aRequestedBytes);

    return READY;
  }

  State SetReady(uint32_t aChunk, const char* aData, size_t aOffset,
                 size_t aAvailableLength, size_t aRequestedBytes) {
    MOZ_ASSERT(mState != COMPLETE);
    mState = READY;

    // Prevent the iterator from reporting more data than it is allowed to
    // read.
    if (aAvailableLength > mRemainderToRead) {
      aAvailableLength = mRemainderToRead;
    }

    // Update state.
    mData.mIterating.mChunk = aChunk;
    mData.mIterating.mData = aData;
    mData.mIterating.mOffset = aOffset;
    mData.mIterating.mAvailableLength = aAvailableLength;

    // Update metrics.
    mChunkCount++;
    mByteCount += aAvailableLength;

    // Attempt to advance by the requested number of bytes.
    return AdvanceFromLocalBuffer(aRequestedBytes);
  }

  State SetWaiting(bool aHasConsumer) {
    MOZ_ASSERT(mState != COMPLETE);
    // Without a consumer, we won't know when to wake up precisely. Caller
    // convention should mean that we don't try to advance unless we have
    // written new data, but that doesn't mean we got enough.
    MOZ_ASSERT(mState != WAITING || !aHasConsumer,
               "Did we get a spurious wakeup somehow?");
    return mState = WAITING;
  }

  State SetComplete(nsresult aStatus) {
    mData.mAtEnd.mStatus = aStatus;
    return mState = COMPLETE;
  }

  RefPtr<SourceBuffer> mOwner;

  State mState;

  /**
   * This union contains our iteration state if we're still iterating (for
   * states START, READY, and WAITING) and the status the SourceBuffer was
   * completed with if we're in state COMPLETE.
   */
  union {
    struct {
      uint32_t mChunk;    // Index of the chunk in SourceBuffer.
      const char* mData;  // Pointer to the start of the chunk.
      size_t mOffset;     // Current read position of the iterator relative to
                          // mData.
      size_t mAvailableLength;  // How many bytes remain unread in the chunk,
                                // relative to mOffset.
      size_t
          mNextReadLength;  // How many bytes the last iterator advance
                            // requested to be read, so that we know how much
                            // to increase mOffset and reduce mAvailableLength
                            // by when the next advance is requested.
    } mIterating;  // Cached info for the chunk we are currently iterating
                   // over.
    struct {
      nsresult mStatus;  // Status code indicating if we read all the data.
    } mAtEnd;            // State info after the iterator is complete.
  } mData;

  uint32_t mChunkCount;  // Count of chunks observed, including current chunk.
  size_t mByteCount;     // Count of readable bytes observed, including unread
                         // bytes from the current chunk.
  size_t mRemainderToRead;  // Count of bytes left to read if there is a maximum
                            // imposed by the caller. SIZE_MAX if unlimited.
};

/**
 * SourceBuffer is a parallel data structure used for storing image source
 * (compressed) data.
 *
 * SourceBuffer is a single producer, multiple consumer data structure. The
 * single producer calls Append() to append data to the buffer. In parallel,
 * multiple consumers can call Iterator(), which returns a SourceBufferIterator
 * that they can use to iterate through the buffer. The SourceBufferIterator
 * returns a series of pointers which remain stable for the lifetime of the
 * SourceBuffer, and the data they point to is immutable, ensuring that the
 * producer never interferes with the consumers.
 *
 * In order to avoid blocking, SourceBuffer works with SourceBufferIterator to
 * keep a list of consumers which are waiting for new data, and to resume them
 * when the producer appends more. All consumers must implement the IResumable
 * interface to make this possible.
 */
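
// Illustrative sketch (not part of this header) of the producer side. The
// buffer variable, data pointer, and byte counts here are hypothetical; only
// the SourceBuffer methods shown are declared in this file.
//
//   RefPtr<SourceBuffer> buffer = new SourceBuffer();
//   buffer->ExpectLength(totalLength);     // Optional preallocation hint.
//   buffer->Append(someData, someLength);  // Repeat as data arrives.
//   buffer->Complete(NS_OK);               // No further Append() calls.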

class SourceBuffer final {
 public:
  MOZ_DECLARE_REFCOUNTED_TYPENAME(image::SourceBuffer)
  NS_INLINE_DECL_THREADSAFE_REFCOUNTING(image::SourceBuffer)

  SourceBuffer();

  //////////////////////////////////////////////////////////////////////////////
  // Producer methods.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * If the producer knows how long the source data will be, it should call
   * ExpectLength, which enables SourceBuffer to preallocate its buffer.
   */
  nsresult ExpectLength(size_t aExpectedLength);

  /// Append the provided data to the buffer.
  nsresult Append(const char* aData, size_t aLength);

  /// Append the data available on the provided nsIInputStream to the buffer.
  nsresult AppendFromInputStream(nsIInputStream* aInputStream, uint32_t aCount);

  /**
   * Mark the buffer complete, with a status that will be available to
   * consumers. Further calls to Append() are forbidden after Complete().
   */
  void Complete(nsresult aStatus);

  /// Returns true if the buffer is complete.
  bool IsComplete();

  /// Memory reporting.
  size_t SizeOfIncludingThisWithComputedFallback(MallocSizeOf) const;

  //////////////////////////////////////////////////////////////////////////////
  // Consumer methods.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * Returns an iterator to this SourceBuffer, which cannot read more than the
   * given length.
   */
  SourceBufferIterator Iterator(size_t aReadLength = SIZE_MAX);

  //////////////////////////////////////////////////////////////////////////////
  // Constants.
  //////////////////////////////////////////////////////////////////////////////

  /**
   * The minimum chunk capacity we'll allocate, if we don't know the correct
   * capacity (which would happen because ExpectLength() wasn't called or gave
   * us the wrong value). This is only exposed for use by tests; if normal code
   * is using this, it's doing something wrong.
   */
  static const size_t MIN_CHUNK_CAPACITY = 4096;

  /**
   * The maximum chunk capacity we'll allocate. This was historically the
   * maximum we would preallocate based on the network size. We may adjust it
   * in the future based on the IMAGE_DECODE_CHUNKS telemetry to ensure most
   * images remain in a single chunk.
   */
  static const size_t MAX_CHUNK_CAPACITY = 20 * 1024 * 1024;

 private:
  friend class SourceBufferIterator;

  ~SourceBuffer();

  //////////////////////////////////////////////////////////////////////////////
  // Chunk type and chunk-related methods.
  //////////////////////////////////////////////////////////////////////////////

  class Chunk final {
   public:
    explicit Chunk(size_t aCapacity) : mCapacity(aCapacity), mLength(0) {
      MOZ_ASSERT(aCapacity > 0, "Creating zero-capacity chunk");
      mData = static_cast<char*>(malloc(mCapacity));
    }

    ~Chunk() { free(mData); }

    Chunk(Chunk&& aOther)
        : mCapacity(aOther.mCapacity),
          mLength(aOther.mLength),
          mData(aOther.mData) {
      aOther.mCapacity = aOther.mLength = 0;
      aOther.mData = nullptr;
    }

    Chunk& operator=(Chunk&& aOther) {
      free(mData);
      mCapacity = aOther.mCapacity;
      mLength = aOther.mLength;
      mData = aOther.mData;
      aOther.mCapacity = aOther.mLength = 0;
      aOther.mData = nullptr;
      return *this;
    }

    bool AllocationFailed() const { return !mData; }
    size_t Capacity() const { return mCapacity; }
    size_t Length() const { return mLength; }

    char* Data() const {
      MOZ_ASSERT(mData, "Allocation failed but nobody checked for it");
      return mData;
    }

    void AddLength(size_t aAdditionalLength) {
      MOZ_ASSERT(mLength + aAdditionalLength <= mCapacity);
      mLength += aAdditionalLength;
    }

    bool SetCapacity(size_t aCapacity) {
      MOZ_ASSERT(mData, "Allocation failed but nobody checked for it");
      char* data = static_cast<char*>(realloc(mData, aCapacity));
      if (!data) {
        return false;
      }

      mData = data;
      mCapacity = aCapacity;
      return true;
    }

   private:
    Chunk(const Chunk&) = delete;
    Chunk& operator=(const Chunk&) = delete;

    size_t mCapacity;
    size_t mLength;
    char* mData;
  };

  nsresult AppendChunk(Maybe<Chunk>&& aChunk);
  Maybe<Chunk> CreateChunk(size_t aCapacity, size_t aExistingCapacity = 0,
                           bool aRoundUp = true);
  nsresult Compact();
  static size_t RoundedUpCapacity(size_t aCapacity);
  size_t FibonacciCapacityWithMinimum(size_t aMinCapacity);

  //////////////////////////////////////////////////////////////////////////////
  // Iterator / consumer methods.
  //////////////////////////////////////////////////////////////////////////////

  void AddWaitingConsumer(IResumable* aConsumer);
  void ResumeWaitingConsumers();

  typedef SourceBufferIterator::State State;

  State AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                        size_t aRequestedBytes,
                                        IResumable* aConsumer);
  bool RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                  size_t aBytes) const;

  void OnIteratorRelease();

  //////////////////////////////////////////////////////////////////////////////
  // Helper methods.
  //////////////////////////////////////////////////////////////////////////////

  nsresult HandleError(nsresult aError);
  bool IsEmpty();
  bool IsLastChunk(uint32_t aChunk);

  //////////////////////////////////////////////////////////////////////////////
  // Member variables.
  //////////////////////////////////////////////////////////////////////////////

  /// All private members are protected by mMutex.
  mutable Mutex mMutex;

  /// The data in this SourceBuffer, stored as a series of Chunks.
  AutoTArray<Chunk, 1> mChunks;

  /// Consumers which are waiting to be notified when new data is available.
  nsTArray<RefPtr<IResumable>> mWaitingConsumers;

  /// If present, marks this SourceBuffer complete with the given final status.
  Maybe<nsresult> mStatus;

  /// Count of active consumers.
  uint32_t mConsumerCount;

  /// True if compacting has been performed.
  bool mCompacted;
};

} // namespace image
} // namespace mozilla

#endif // mozilla_image_sourcebuffer_h