Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-23 12:51:06 +00:00.
Bug 1530774 - Part 3. Remove decoder support for producing paletted frames. r=tnikkel
Differential Revision: https://phabricator.services.mozilla.com/D23716
Parent: 5e76ee78d0
Commit: 3befab3ee9
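At a glance, the bulk of this patch is a mechanical narrowing of the frame-allocation API: the frame rect and palette depth parameters disappear, and every decoded frame becomes a full-size BGRA/X surface. A condensed before/after sketch of Decoder::AllocateFrame, assembled from the hunks below (not an exhaustive list of the changed call sites):

// Before this patch: callers could request a paletted, partial-rect frame.
nsresult AllocateFrame(const gfx::IntSize& aOutputSize,
                       const gfx::IntRect& aFrameRect,
                       gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth = 0,
                       const Maybe<AnimationParams>& aAnimParams = Nothing());

// After this patch: only the output size, format, and optional animation
// parameters remain; paletted and partial frames are no longer produced.
nsresult AllocateFrame(const gfx::IntSize& aOutputSize,
                       gfx::SurfaceFormat aFormat,
                       const Maybe<AnimationParams>& aAnimParams = Nothing());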
@@ -33,6 +33,15 @@ enum class DisposalMethod : int8_t {
};

struct AnimationParams {
  AnimationParams(const gfx::IntRect& aBlendRect, const FrameTimeout& aTimeout,
                  uint32_t aFrameNum, BlendMethod aBlendMethod,
                  DisposalMethod aDisposalMethod)
      : mBlendRect(aBlendRect),
        mTimeout(aTimeout),
        mFrameNum(aFrameNum),
        mBlendMethod(aBlendMethod),
        mDisposalMethod(aDisposalMethod) {}

  gfx::IntRect mBlendRect;
  FrameTimeout mTimeout;
  uint32_t mFrameNum;
@@ -31,19 +31,12 @@ AnimationSurfaceProvider::AnimationSurfaceProvider(
  MOZ_ASSERT(!mDecoder->IsFirstFrameDecode(),
             "Use DecodedSurfaceProvider for single-frame image decodes");

  // We may produce paletted surfaces for GIF which means the frames are smaller
  // than one would expect.
  size_t pixelSize = !aDecoder->ShouldBlendAnimation() &&
                             aDecoder->GetType() == DecoderType::GIF
                         ? sizeof(uint8_t)
                         : sizeof(uint32_t);

  // Calculate how many frames we need to decode in this animation before we
  // enter decode-on-demand mode.
  IntSize frameSize = aSurfaceKey.Size();
  size_t threshold =
      (size_t(gfxPrefs::ImageAnimatedDecodeOnDemandThresholdKB()) * 1024) /
      (pixelSize * frameSize.width * frameSize.height);
      (sizeof(uint32_t) * frameSize.width * frameSize.height);
  size_t batch = gfxPrefs::ImageAnimatedDecodeOnDemandBatchSize();

  mFrames.reset(
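With the paletted GIF special case gone, the decode-on-demand threshold above is always computed at 4 bytes per pixel. A minimal worked example of that formula; the pref value and frame size below are assumptions chosen for illustration, not values taken from this patch:

#include <cstddef>
#include <cstdint>

// Assumed values: a 20 MiB (20480 KiB) threshold pref and a 500x500 animation.
constexpr size_t kThresholdKB = 20 * 1024;
constexpr int32_t kWidth = 500;
constexpr int32_t kHeight = 500;

// Same arithmetic as the patched constructor: the byte budget divided by the
// BGRA footprint of one frame.
constexpr size_t kThreshold =
    (kThresholdKB * 1024) / (sizeof(uint32_t) * kWidth * kHeight);
static_assert(kThreshold == 20,
              "about 20 frames decoded before decode-on-demand kicks in");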
@@ -413,13 +406,8 @@ void AnimationSurfaceProvider::RequestFrameDiscarding() {
  auto oldFrameQueue =
      static_cast<AnimationFrameRetainedBuffer*>(mFrames.get());

  // We only recycle if it is a full frame. Partial frames may be sized
  // differently from each other. We do not support recycling with WebRender
  // and shared surfaces at this time as there is additional synchronization
  // required to know when it is safe to recycle.
  MOZ_ASSERT(!mDecoder->GetFrameRecycler());
  if (gfxPrefs::ImageAnimatedDecodeOnDemandRecycle() &&
      mDecoder->ShouldBlendAnimation()) {
  if (gfxPrefs::ImageAnimatedDecodeOnDemandRecycle()) {
    mFrames.reset(new AnimationFrameRecyclingQueue(std::move(*oldFrameQueue)));
    mDecoder->SetFrameRecycler(this);
  } else {
@@ -46,8 +46,6 @@ class MOZ_STACK_CLASS AutoRecordDecoderTelemetry final {
Decoder::Decoder(RasterImage* aImage)
    : mImageData(nullptr),
      mImageDataLength(0),
      mColormap(nullptr),
      mColormapSize(0),
      mImage(aImage),
      mFrameRecycler(nullptr),
      mProgress(NoProgress),

@@ -253,20 +251,16 @@ DecoderTelemetry Decoder::Telemetry() const {
}

nsresult Decoder::AllocateFrame(const gfx::IntSize& aOutputSize,
                                const gfx::IntRect& aFrameRect,
                                gfx::SurfaceFormat aFormat,
                                uint8_t aPaletteDepth,
                                const Maybe<AnimationParams>& aAnimParams) {
  mCurrentFrame =
      AllocateFrameInternal(aOutputSize, aFrameRect, aFormat, aPaletteDepth,
                            aAnimParams, std::move(mCurrentFrame));
  mCurrentFrame = AllocateFrameInternal(aOutputSize, aFormat, aAnimParams,
                                        std::move(mCurrentFrame));

  if (mCurrentFrame) {
    mHasFrameToTake = true;

    // Gather the raw pointers the decoders will use.
    mCurrentFrame->GetImageData(&mImageData, &mImageDataLength);
    mCurrentFrame->GetPaletteData(&mColormap, &mColormapSize);

    // We should now be on |aFrameNum|. (Note that we're comparing the frame
    // number, which is zero-based, with the frame count, which is one-based.)
@@ -284,8 +278,7 @@ nsresult Decoder::AllocateFrame(const gfx::IntSize& aOutputSize,
}

RawAccessFrameRef Decoder::AllocateFrameInternal(
    const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect,
    SurfaceFormat aFormat, uint8_t aPaletteDepth,
    const gfx::IntSize& aOutputSize, SurfaceFormat aFormat,
    const Maybe<AnimationParams>& aAnimParams,
    RawAccessFrameRef&& aPreviousFrame) {
  if (HasError()) {

@@ -298,8 +291,7 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(
    return RawAccessFrameRef();
  }

  if (aOutputSize.width <= 0 || aOutputSize.height <= 0 ||
      aFrameRect.Width() <= 0 || aFrameRect.Height() <= 0) {
  if (aOutputSize.width <= 0 || aOutputSize.height <= 0) {
    NS_WARNING("Trying to add frame with zero or negative size");
    return RawAccessFrameRef();
  }
@@ -310,23 +302,21 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(
  }

  if (frameNum > 0) {
    if (ShouldBlendAnimation()) {
      if (aPreviousFrame->GetDisposalMethod() !=
          DisposalMethod::RESTORE_PREVIOUS) {
        // If the new restore frame is the direct previous frame, then we know
        // the dirty rect is composed only of the current frame's blend rect and
        // the restore frame's clear rect (if applicable) which are handled in
        // filters.
        mRestoreFrame = std::move(aPreviousFrame);
        mRestoreDirtyRect.SetBox(0, 0, 0, 0);
      } else {
        // We only need the previous frame's dirty rect, because while there may
        // have been several frames between us and mRestoreFrame, the only areas
        // that changed are the restore frame's clear rect, the current frame
        // blending rect, and the previous frame's blending rect. All else is
        // forgotten due to us restoring the same frame again.
        mRestoreDirtyRect = aPreviousFrame->GetBoundedBlendRect();
      }
    if (aPreviousFrame->GetDisposalMethod() !=
        DisposalMethod::RESTORE_PREVIOUS) {
      // If the new restore frame is the direct previous frame, then we know
      // the dirty rect is composed only of the current frame's blend rect and
      // the restore frame's clear rect (if applicable) which are handled in
      // filters.
      mRestoreFrame = std::move(aPreviousFrame);
      mRestoreDirtyRect.SetBox(0, 0, 0, 0);
    } else {
      // We only need the previous frame's dirty rect, because while there may
      // have been several frames between us and mRestoreFrame, the only areas
      // that changed are the restore frame's clear rect, the current frame
      // blending rect, and the previous frame's blending rect. All else is
      // forgotten due to us restoring the same frame again.
      mRestoreDirtyRect = aPreviousFrame->GetBoundedBlendRect();
    }
  }
@@ -337,10 +327,7 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(
  // memory footprint, then the recycler will allow us to reuse the buffers.
  // Each frame should be the same size and have mostly the same properties.
  if (mFrameRecycler) {
    MOZ_ASSERT(ShouldBlendAnimation());
    MOZ_ASSERT(aPaletteDepth == 0);
    MOZ_ASSERT(aAnimParams);
    MOZ_ASSERT(aFrameRect.IsEqualEdges(IntRect(IntPoint(0, 0), aOutputSize)));

    ref = mFrameRecycler->RecycleFrame(mRecycleRect);
    if (ref) {

@@ -368,9 +355,8 @@ RawAccessFrameRef Decoder::AllocateFrameInternal(

  bool nonPremult = bool(mSurfaceFlags & SurfaceFlags::NO_PREMULTIPLY_ALPHA);
  auto frame = MakeNotNull<RefPtr<imgFrame>>();
  if (NS_FAILED(frame->InitForDecoder(
          aOutputSize, aFrameRect, aFormat, aPaletteDepth, nonPremult,
          aAnimParams, ShouldBlendAnimation(), bool(mFrameRecycler)))) {
  if (NS_FAILED(frame->InitForDecoder(aOutputSize, aFormat, nonPremult,
                                      aAnimParams, bool(mFrameRecycler)))) {
    NS_WARNING("imgFrame::Init should succeed");
    return RawAccessFrameRef();
  }
@@ -271,14 +271,6 @@ class Decoder {
    return bool(mDecoderFlags & DecoderFlags::FIRST_FRAME_ONLY);
  }

  /**
   * Should blend the current frame with the previous frames to produce a
   * complete frame instead of a partial frame for animated images.
   */
  bool ShouldBlendAnimation() const {
    return bool(mDecoderFlags & DecoderFlags::BLEND_ANIMATION);
  }

  /**
   * @return the number of complete animation frames which have been decoded so
   * far, if it has changed since the last call to TakeCompleteFrameCount();

@@ -418,20 +410,11 @@ class Decoder {
   * For use during decoding only. Allows the BlendAnimationFilter to get the
   * frame it should be pulling the previous frame data from.
   */
  const RawAccessFrameRef& GetRestoreFrameRef() const {
    MOZ_ASSERT(ShouldBlendAnimation());
    return mRestoreFrame;
  }
  const RawAccessFrameRef& GetRestoreFrameRef() const { return mRestoreFrame; }

  const gfx::IntRect& GetRestoreDirtyRect() const {
    MOZ_ASSERT(ShouldBlendAnimation());
    return mRestoreDirtyRect;
  }
  const gfx::IntRect& GetRestoreDirtyRect() const { return mRestoreDirtyRect; }

  const gfx::IntRect& GetRecycleRect() const {
    MOZ_ASSERT(ShouldBlendAnimation());
    return mRecycleRect;
  }
  const gfx::IntRect& GetRecycleRect() const { return mRecycleRect; }

  const gfx::IntRect& GetFirstFrameRefreshArea() const {
    return mFirstFrameRefreshArea;
@@ -542,12 +525,9 @@ class Decoder {

  /**
   * Allocates a new frame, making it our current frame if successful.
   *
   * If a non-paletted frame is desired, pass 0 for aPaletteDepth.
   */
  nsresult AllocateFrame(const gfx::IntSize& aOutputSize,
                         const gfx::IntRect& aFrameRect,
                         gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth = 0,
                         gfx::SurfaceFormat aFormat,
                         const Maybe<AnimationParams>& aAnimParams = Nothing());

 private:

@@ -572,18 +552,15 @@ class Decoder {
  }

  RawAccessFrameRef AllocateFrameInternal(
      const gfx::IntSize& aOutputSize, const gfx::IntRect& aFrameRect,
      gfx::SurfaceFormat aFormat, uint8_t aPaletteDepth,
      const gfx::IntSize& aOutputSize, gfx::SurfaceFormat aFormat,
      const Maybe<AnimationParams>& aAnimParams,
      RawAccessFrameRef&& aPreviousFrame);

 protected:
  Maybe<Downscaler> mDownscaler;

  uint8_t* mImageData;  // Pointer to image data in either Cairo or 8bit format
  uint8_t* mImageData;  // Pointer to image data in BGRA/X
  uint32_t mImageDataLength;
  uint32_t* mColormap;  // Current colormap to be used in Cairo format
  uint32_t mColormapSize;

 private:
  RefPtr<RasterImage> mImage;
@@ -31,14 +31,6 @@ enum class DecoderFlags : uint8_t {
   * set.
   */
  CANNOT_SUBSTITUTE = 1 << 4,

  /**
   * By default, an animation decoder will produce partial frames that need to
   * be combined with the previously displayed/composited frame by FrameAnimator
   * to produce a complete frame. If this flag is set, the decoder will perform
   * this blending at decode time, and the frames produced are complete.
   */
  BLEND_ANIMATION = 1 << 5
};
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(DecoderFlags)
@@ -1196,8 +1196,6 @@ bool RasterImage::Decode(const IntSize& aSize, uint32_t aFlags,
  nsresult rv;
  bool animated = mAnimationState && aPlaybackType == PlaybackType::eAnimated;
  if (animated) {
    decoderFlags |= DecoderFlags::BLEND_ANIMATION;

    size_t currentFrame = mAnimationState->GetCurrentAnimationFrameIndex();
    rv = DecoderFactory::CreateAnimationDecoder(
        mDecoderType, WrapNotNull(this), mSourceBuffer, mSize, decoderFlags,

@@ -1348,7 +1346,7 @@ ImgDrawResult RasterImage::DrawInternal(DrawableSurface&& aSurface,

  // By now we may have a frame with the requested size. If not, we need to
  // adjust the drawing parameters accordingly.
  IntSize finalSize = aSurface->GetImageSize();
  IntSize finalSize = aSurface->GetSize();
  bool couldRedecodeForBetterFrame = false;
  if (finalSize != aSize) {
    gfx::Size scale(double(aSize.width) / finalSize.width,
@@ -360,11 +360,6 @@ class BlendAnimationFilter final : public SurfaceFilter {
      return rv;
    }

    if (!aConfig.mDecoder || !aConfig.mDecoder->ShouldBlendAnimation()) {
      MOZ_ASSERT_UNREACHABLE("Expected image decoder that is blending!");
      return NS_ERROR_INVALID_ARG;
    }

    imgFrame* currentFrame = aConfig.mDecoder->GetCurrentFrame();
    if (!currentFrame) {
      MOZ_ASSERT_UNREACHABLE("Decoder must have current frame!");

@@ -418,7 +413,7 @@ class BlendAnimationFilter final : public SurfaceFilter {
    const RawAccessFrameRef& restoreFrame =
        aConfig.mDecoder->GetRestoreFrameRef();
    if (restoreFrame) {
      MOZ_ASSERT(restoreFrame->GetImageSize() == outputSize);
      MOZ_ASSERT(restoreFrame->GetSize() == outputSize);
      MOZ_ASSERT(restoreFrame->IsFinished());

      // We can safely use this pointer without holding a RawAccessFrameRef
@@ -54,20 +54,14 @@ uint8_t* AbstractSurfaceSink::DoAdvanceRow() {
}

nsresult SurfaceSink::Configure(const SurfaceConfig& aConfig) {
  // For non-paletted surfaces, the surface size is just the output size.
  IntSize surfaceSize = aConfig.mOutputSize;

  // Non-paletted surfaces should not have frame rects, so we just pass
  // AllocateFrame() a frame rect which covers the entire surface.
  IntRect frameRect(0, 0, surfaceSize.width, surfaceSize.height);

  // Allocate the frame.
  // XXX(seth): Once every Decoder subclass uses SurfacePipe, we probably want
  // to allocate the frame directly here and get rid of Decoder::AllocateFrame
  // altogether.
  nsresult rv = aConfig.mDecoder->AllocateFrame(
      surfaceSize, frameRect, aConfig.mFormat,
      /* aPaletteDepth */ 0, aConfig.mAnimParams);
  nsresult rv = aConfig.mDecoder->AllocateFrame(surfaceSize, aConfig.mFormat,
                                                aConfig.mAnimParams);
  if (NS_FAILED(rv)) {
    return rv;
  }
@@ -59,10 +59,6 @@ enum class SurfacePipeFlags {
                            // result in a better user experience for
                            // progressive display but which may be more
                            // computationally expensive.

  BLEND_ANIMATION = 1 << 4  // If set, produce the next full frame of an
                            // animation instead of a partial frame to be
                            // blended later.
};
MOZ_MAKE_ENUM_CLASS_BITWISE_OPERATORS(SurfacePipeFlags)

@@ -102,8 +98,7 @@ class SurfacePipeFactory {
    const bool downscale = aInputSize != aOutputSize;
    const bool removeFrameRect = !aFrameRect.IsEqualEdges(
        nsIntRect(0, 0, aInputSize.width, aInputSize.height));
    const bool blendAnimation =
        bool(aFlags & SurfacePipeFlags::BLEND_ANIMATION);
    const bool blendAnimation = aAnimParams.isSome();

    // Don't interpolate if we're sure we won't show this surface to the user
    // until it's completely decoded. The final pass of an ADAM7 image doesn't

@@ -118,8 +113,6 @@ class SurfacePipeFactory {
      return Nothing();
    }

    MOZ_ASSERT_IF(blendAnimation, aAnimParams);

    // Construct configurations for the SurfaceFilters. Note that the order of
    // these filters is significant. We want to deinterlace or interpolate raw
    // input rows, before any other transformations, and we want to remove the
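Worth noting for the decoder hunks that follow: with SurfacePipeFlags::BLEND_ANIMATION gone, whether a pipe blends animation frames is implied purely by whether animation parameters are supplied. A rough sketch of the caller pattern the GIF and WebP hunks below converge on (the local variable names here are placeholders, not the decoders' exact members):

// Leaving animParams as Nothing() means a single-frame decode with no blending.
Maybe<AnimationParams> animParams;
if (!IsFirstFrameDecode()) {
  // Placeholder arguments; each decoder fills these in from its own state.
  animParams.emplace(frameRect, timeout, frameNum, blendMethod, disposalMethod);
}
Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
    this, Size(), OutputSize(), frameRect, format, animParams, pipeFlags);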
@@ -657,9 +657,9 @@ LexerTransition<nsBMPDecoder::State> nsBMPDecoder::ReadBitfields(
  }

  MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
  nsresult rv = AllocateFrame(
      OutputSize(), FullOutputFrame(),
      mMayHaveTransparency ? SurfaceFormat::B8G8R8A8 : SurfaceFormat::B8G8R8X8);
  nsresult rv = AllocateFrame(OutputSize(), mMayHaveTransparency
                                                ? SurfaceFormat::B8G8R8A8
                                                : SurfaceFormat::B8G8R8X8);
  if (NS_FAILED(rv)) {
    return Transition::TerminateFailure();
  }
@@ -86,6 +86,8 @@ nsGIFDecoder2::nsGIFDecoder2(RasterImage* aImage)
      mOldColor(0),
      mCurrentFrameIndex(-1),
      mColorTablePos(0),
      mColormap(nullptr),
      mColormapSize(0),
      mColorMask('\0'),
      mGIFOpen(false),
      mSawTransparency(false) {

@@ -164,15 +166,17 @@ nsresult nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect,
  MOZ_ASSERT(HasSize());

  bool hasTransparency = CheckForTransparency(aFrameRect);
  bool blendAnimation = ShouldBlendAnimation();

  // Make sure there's no animation if we're downscaling.
  MOZ_ASSERT_IF(Size() != OutputSize(), !GetImageMetadata().HasAnimation());

  AnimationParams animParams{
      aFrameRect, FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time),
      uint32_t(mGIFStruct.images_decoded), BlendMethod::OVER,
      DisposalMethod(mGIFStruct.disposal_method)};
  Maybe<AnimationParams> animParams;
  if (!IsFirstFrameDecode()) {
    animParams.emplace(aFrameRect,
                       FrameTimeout::FromRawMilliseconds(mGIFStruct.delay_time),
                       uint32_t(mGIFStruct.images_decoded), BlendMethod::OVER,
                       DisposalMethod(mGIFStruct.disposal_method));
  }

  SurfacePipeFlags pipeFlags =
      aIsInterlaced ? SurfacePipeFlags::DEINTERLACE : SurfacePipeFlags();

@@ -188,30 +192,8 @@ nsresult nsGIFDecoder2::BeginImageFrame(const IntRect& aFrameRect,
    format = SurfaceFormat::B8G8R8A8;
  }

  if (blendAnimation) {
    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
  }

  Maybe<SurfacePipe> pipe;
  if (mGIFStruct.images_decoded == 0 || blendAnimation) {
    // The first frame is always decoded into an RGB surface.
    pipe = SurfacePipeFactory::CreateSurfacePipe(this, Size(), OutputSize(),
                                                 aFrameRect, format,
                                                 Some(animParams), pipeFlags);
  } else {
    // This is an animation frame (and not the first). To minimize the memory
    // usage of animations, the image data is stored in paletted form.
    //
    // We should never use paletted surfaces with a draw target directly, so
    // the only practical difference between B8G8R8A8 and B8G8R8X8 is the
    // cleared pixel value if we get truncated. We want 0 in that case to
    // ensure it is an acceptable value for the color map as was the case
    // historically.
    MOZ_ASSERT(Size() == OutputSize());
    pipe = SurfacePipeFactory::CreatePalettedSurfacePipe(
        this, Size(), aFrameRect, format, aDepth, Some(animParams), pipeFlags);
  }

  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
      this, Size(), OutputSize(), aFrameRect, format, animParams, pipeFlags);
  mCurrentFrameIndex = mGIFStruct.images_decoded;

  if (!pipe) {
@@ -255,6 +237,8 @@ void nsGIFDecoder2::EndImageFrame() {
    mOldColor = 0;
  }

  mColormap = nullptr;
  mColormapSize = 0;
  mCurrentFrameIndex = -1;
}

@@ -874,11 +858,6 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::FinishImageDescriptor(
  mGIFStruct.local_colormap_size = 1 << depth;

  if (!mColormap) {
    // Allocate a buffer to store the local color tables. This could be if the
    // first frame has a local color table, or for subsequent frames when
    // blending the animation during decoding.
    MOZ_ASSERT(mGIFStruct.images_decoded == 0 || ShouldBlendAnimation());

    // Ensure our current colormap buffer is large enough to hold the new one.
    mColormapSize = sizeof(uint32_t) << realDepth;
    if (mGIFStruct.local_colormap_buffer_size < mColormapSize) {

@@ -1019,18 +998,11 @@ LexerTransition<nsGIFDecoder2::State> nsGIFDecoder2::ReadLZWData(
         (length > 0 || mGIFStruct.bits >= mGIFStruct.codesize)) {
    size_t bytesRead = 0;

    auto result =
        mGIFStruct.images_decoded == 0 || ShouldBlendAnimation()
            ? mPipe.WritePixelBlocks<uint32_t>(
                  [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
                    return YieldPixels<uint32_t>(data, length, &bytesRead,
                                                 aPixelBlock, aBlockSize);
                  })
            : mPipe.WritePixelBlocks<uint8_t>(
                  [&](uint8_t* aPixelBlock, int32_t aBlockSize) {
                    return YieldPixels<uint8_t>(data, length, &bytesRead,
                                                aPixelBlock, aBlockSize);
                  });
    auto result = mPipe.WritePixelBlocks<uint32_t>(
        [&](uint32_t* aPixelBlock, int32_t aBlockSize) {
          return YieldPixels<uint32_t>(data, length, &bytesRead, aPixelBlock,
                                       aBlockSize);
        });

    if (MOZ_UNLIKELY(bytesRead > length)) {
      MOZ_ASSERT_UNREACHABLE("Overread?");
@@ -143,6 +143,8 @@ class nsGIFDecoder2 : public Decoder {
  // current position - i.e., the offset into which the next byte should be
  // written.
  size_t mColorTablePos;
  uint32_t* mColormap;  // Current colormap to be used in Cairo format
  uint32_t mColormapSize;

  uint8_t mColorMask;  // Apply this to the pixel to keep within colormap
  bool mGIFOpen;
@@ -385,8 +385,7 @@ LexerTransition<nsJPEGDecoder::State> nsJPEGDecoder::ReadJPEGData(
      jpeg_calc_output_dimensions(&mInfo);

      MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
      nsresult rv = AllocateFrame(OutputSize(), FullOutputFrame(),
                                  SurfaceFormat::B8G8R8X8);
      nsresult rv = AllocateFrame(OutputSize(), SurfaceFormat::B8G8R8X8);
      if (NS_FAILED(rv)) {
        mState = JPEG_ERROR;
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
@@ -195,7 +195,7 @@ nsresult nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) {

  Maybe<AnimationParams> animParams;
#ifdef PNG_APNG_SUPPORTED
  if (png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
  if (!IsFirstFrameDecode() && png_get_valid(mPNG, mInfo, PNG_INFO_acTL)) {
    mAnimInfo = AnimFrameInfo(mPNG, mInfo);

    if (mAnimInfo.mDispose == DisposalMethod::CLEAR) {

@@ -222,10 +222,6 @@ nsresult nsPNGDecoder::CreateFrame(const FrameInfo& aFrameInfo) {
    pipeFlags |= SurfacePipeFlags::PROGRESSIVE_DISPLAY;
  }

  if (ShouldBlendAnimation()) {
    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
  }

  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
      this, Size(), OutputSize(), aFrameInfo.mFrameRect, mFormat, animParams,
      pipeFlags);

@@ -539,7 +535,8 @@ void nsPNGDecoder::info_callback(png_structp png_ptr, png_infop info_ptr) {
      static_cast<nsPNGDecoder*>(png_get_progressive_ptr(png_ptr));

  if (decoder->mGotInfoCallback) {
    MOZ_LOG(sPNGLog, LogLevel::Warning, ("libpng called info_callback more than once\n"));
    MOZ_LOG(sPNGLog, LogLevel::Warning,
            ("libpng called info_callback more than once\n"));
    return;
  }
@@ -231,16 +231,13 @@ nsresult nsWebPDecoder::CreateFrame(const nsIntRect& aFrameRect) {

  SurfacePipeFlags pipeFlags = SurfacePipeFlags();

  if (ShouldBlendAnimation()) {
    pipeFlags |= SurfacePipeFlags::BLEND_ANIMATION;
  Maybe<AnimationParams> animParams;
  if (!IsFirstFrameDecode()) {
    animParams.emplace(aFrameRect, mTimeout, mCurrentFrame, mBlend, mDisposal);
  }

  AnimationParams animParams{aFrameRect, mTimeout, mCurrentFrame, mBlend,
                             mDisposal};

  Maybe<SurfacePipe> pipe = SurfacePipeFactory::CreateSurfacePipe(
      this, Size(), OutputSize(), aFrameRect, mFormat, Some(animParams),
      pipeFlags);
      this, Size(), OutputSize(), aFrameRect, mFormat, animParams, pipeFlags);
  if (!pipe) {
    MOZ_LOG(sWebPLog, LogLevel::Error,
            ("[this=%p] nsWebPDecoder::CreateFrame -- no pipe\n", this));
@@ -96,12 +96,10 @@ static bool ShouldUseHeap(const IntSize& aSize, int32_t aStride,
}

static already_AddRefed<DataSourceSurface> AllocateBufferForImage(
    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false,
    bool aIsFullFrame = true) {
    const IntSize& size, SurfaceFormat format, bool aIsAnimated = false) {
  int32_t stride = VolatileSurfaceStride(size, format);

  if (gfxVars::GetUseWebRenderOrDefault() && gfxPrefs::ImageMemShared() &&
      aIsFullFrame) {
  if (gfxVars::GetUseWebRenderOrDefault() && gfxPrefs::ImageMemShared()) {
    RefPtr<SourceSurfaceSharedData> newSurf = new SourceSurfaceSharedData();
    if (newSurf->Init(size, stride, format)) {
      return newSurf.forget();

@@ -176,21 +174,6 @@ static bool ClearSurface(DataSourceSurface* aSurface, const IntSize& aSize,
  return true;
}

static bool AllowedImageAndFrameDimensions(const nsIntSize& aImageSize,
                                           const nsIntRect& aFrameRect) {
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    return false;
  }
  if (!SurfaceCache::IsLegalSize(aFrameRect.Size())) {
    return false;
  }
  nsIntRect imageRect(0, 0, aImageSize.width, aImageSize.height);
  if (!imageRect.Contains(aFrameRect)) {
    NS_WARNING("Animated image frame does not fit inside bounds of image");
  }
  return true;
}

imgFrame::imgFrame()
    : mMonitor("imgFrame"),
      mDecoded(0, 0, 0, 0),
@@ -204,10 +187,7 @@ imgFrame::imgFrame()
      mDisposalMethod(DisposalMethod::NOT_SPECIFIED),
      mBlendMethod(BlendMethod::OVER),
      mFormat(SurfaceFormat::UNKNOWN),
      mPalettedImageData(nullptr),
      mPaletteDepth(0),
      mNonPremult(false),
      mIsFullFrame(false) {}
      mNonPremult(false) {}

imgFrame::~imgFrame() {
#ifdef DEBUG
@@ -215,142 +195,96 @@ imgFrame::~imgFrame() {
  MOZ_ASSERT(mAborted || AreAllPixelsWritten());
  MOZ_ASSERT(mAborted || mFinished);
#endif

  free(mPalettedImageData);
  mPalettedImageData = nullptr;
}

nsresult imgFrame::InitForDecoder(const nsIntSize& aImageSize,
                                  const nsIntRect& aRect, SurfaceFormat aFormat,
                                  uint8_t aPaletteDepth, bool aNonPremult,
                                  SurfaceFormat aFormat, bool aNonPremult,
                                  const Maybe<AnimationParams>& aAnimParams,
                                  bool aIsFullFrame, bool aShouldRecycle) {
                                  bool aShouldRecycle) {
  // Assert for properties that should be verified by decoders,
  // warn for properties related to bad content.
  if (!AllowedImageAndFrameDimensions(aImageSize, aRect)) {
  if (!SurfaceCache::IsLegalSize(aImageSize)) {
    NS_WARNING("Should have legal image size");
    mAborted = true;
    return NS_ERROR_FAILURE;
  }

  mImageSize = aImageSize;
  mFrameRect = aRect;

  // May be updated shortly after InitForDecoder by BlendAnimationFilter
  // because it needs to take into consideration the previous frames to
  // properly calculate. We start with the whole frame as dirty.
  mDirtyRect = aRect;
  mDirtyRect = GetRect();

  if (aAnimParams) {
    mBlendRect = aAnimParams->mBlendRect;
    mTimeout = aAnimParams->mTimeout;
    mBlendMethod = aAnimParams->mBlendMethod;
    mDisposalMethod = aAnimParams->mDisposalMethod;
    mIsFullFrame = aAnimParams->mFrameNum == 0 || aIsFullFrame;
  } else {
    mBlendRect = aRect;
    mIsFullFrame = true;
  }

  // We only allow a non-trivial frame rect (i.e., a frame rect that doesn't
  // cover the entire image) for paletted animation frames. We never draw those
  // frames directly; we just use FrameAnimator to composite them and produce a
  // BGRA surface that we actually draw. We enforce this here to make sure that
  // imgFrame::Draw(), which is responsible for drawing all other kinds of
  // frames, never has to deal with a non-trivial frame rect.
  if (aPaletteDepth == 0 &&
      !mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize))) {
    MOZ_ASSERT_UNREACHABLE(
        "Creating a non-paletted imgFrame with a "
        "non-trivial frame rect");
    return NS_ERROR_FAILURE;
    mBlendRect = GetRect();
  }

  if (aShouldRecycle) {
    // If we are recycling then we should always use BGRA for the underlying
    // surface because if we use BGRX, the next frame composited into the
    // surface could be BGRA and cause rendering problems.
    MOZ_ASSERT(mIsFullFrame);
    MOZ_ASSERT(aPaletteDepth == 0);
    MOZ_ASSERT(aAnimParams);
    mFormat = SurfaceFormat::B8G8R8A8;
  } else {
    mFormat = aFormat;
  }

  mPaletteDepth = aPaletteDepth;
  mNonPremult = aNonPremult;
  mShouldRecycle = aShouldRecycle;

  if (aPaletteDepth != 0) {
    // We're creating for a paletted image.
    if (aPaletteDepth > 8) {
      NS_WARNING("Should have legal palette depth");
      NS_ERROR("This Depth is not supported");
      mAborted = true;
      return NS_ERROR_FAILURE;
    }
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");

    // Use the fallible allocator here. Paletted images always use 1 byte per
    // pixel, so calculating the amount of memory we need is straightforward.
    size_t dataSize = PaletteDataLength() + mFrameRect.Area();
    mPalettedImageData =
        static_cast<uint8_t*>(calloc(dataSize, sizeof(uint8_t)));
    if (!mPalettedImageData) {
      NS_WARNING("Call to calloc for paletted image data should succeed");
    }
    NS_ENSURE_TRUE(mPalettedImageData, NS_ERROR_OUT_OF_MEMORY);
  } else {
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitForDecoder() twice?");
  bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
  mRawSurface = AllocateBufferForImage(mImageSize, mFormat, postFirstFrame);
  if (!mRawSurface) {
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

    bool postFirstFrame = aAnimParams && aAnimParams->mFrameNum > 0;
    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat,
                                         postFirstFrame, mIsFullFrame);
    if (!mRawSurface) {
  if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
      aAnimParams) {
    mBlankRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mBlankRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

    if (StaticPrefs::browser_measurement_render_anims_and_video_solid() &&
        aAnimParams) {
      mBlankRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
      if (!mBlankRawSurface) {
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }
  mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
  if (!mLockedSurface) {
    NS_WARNING("Failed to create LockedSurface");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

    mLockedSurface =
        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
  if (mBlankRawSurface) {
    mBlankLockedSurface =
        CreateLockedSurface(mBlankRawSurface, mImageSize, mFormat);
    if (!mBlankLockedSurface) {
      NS_WARNING("Failed to create BlankLockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }
  }

    if (mBlankRawSurface) {
      mBlankLockedSurface =
          CreateLockedSurface(mBlankRawSurface, mFrameRect.Size(), mFormat);
      if (!mBlankLockedSurface) {
        NS_WARNING("Failed to create BlankLockedSurface");
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }
  if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
    NS_WARNING("Could not clear allocated buffer");
    mAborted = true;
    return NS_ERROR_OUT_OF_MEMORY;
  }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
  if (mBlankRawSurface) {
    if (!GreenSurface(mBlankRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated blank buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (mBlankRawSurface) {
      if (!GreenSurface(mBlankRawSurface, mFrameRect.Size(), mFormat)) {
        NS_WARNING("Could not clear allocated blank buffer");
        mAborted = true;
        return NS_ERROR_OUT_OF_MEMORY;
      }
    }
  }

  return NS_OK;
@@ -361,7 +295,6 @@ nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  // done with it in a timely manner. Let's ensure they are done with it first.
  MonitorAutoLock lock(mMonitor);

  MOZ_ASSERT(mIsFullFrame);
  MOZ_ASSERT(mLockCount > 0);
  MOZ_ASSERT(mLockedSurface);

@@ -417,7 +350,7 @@ nsresult imgFrame::InitForDecoderRecycle(const AnimationParams& aAnimParams) {
  mTimeout = aAnimParams.mTimeout;
  mBlendMethod = aAnimParams.mBlendMethod;
  mDisposalMethod = aAnimParams.mDisposalMethod;
  mDirtyRect = mFrameRect;
  mDirtyRect = GetRect();

  return NS_OK;
}

@@ -435,10 +368,7 @@ nsresult imgFrame::InitWithDrawable(
  }

  mImageSize = aSize;
  mFrameRect = IntRect(IntPoint(0, 0), aSize);

  mFormat = aFormat;
  mPaletteDepth = 0;

  RefPtr<DrawTarget> target;
@@ -448,28 +378,27 @@ nsresult imgFrame::InitWithDrawable(
    // get away with using volatile buffers.
    MOZ_ASSERT(!mLockedSurface, "Called imgFrame::InitWithDrawable() twice?");

    mRawSurface = AllocateBufferForImage(mFrameRect.Size(), mFormat);
    mRawSurface = AllocateBufferForImage(mImageSize, mFormat);
    if (!mRawSurface) {
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    mLockedSurface =
        CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
    mLockedSurface = CreateLockedSurface(mRawSurface, mImageSize, mFormat);
    if (!mLockedSurface) {
      NS_WARNING("Failed to create LockedSurface");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    if (!ClearSurface(mRawSurface, mFrameRect.Size(), mFormat)) {
    if (!ClearSurface(mRawSurface, mImageSize, mFormat)) {
      NS_WARNING("Could not clear allocated buffer");
      mAborted = true;
      return NS_ERROR_OUT_OF_MEMORY;
    }

    target = gfxPlatform::CreateDrawTargetForData(
        mLockedSurface->GetData(), mFrameRect.Size(), mLockedSurface->Stride(),
        mLockedSurface->GetData(), mImageSize, mLockedSurface->Stride(),
        mFormat);
  } else {
    // We can't use data surfaces for content, so we'll create an offscreen

@@ -479,14 +408,14 @@ nsresult imgFrame::InitWithDrawable(
    MOZ_ASSERT(!mOptSurface, "Called imgFrame::InitWithDrawable() twice?");

    if (aTargetDT && !gfxVars::UseWebRender()) {
      target = aTargetDT->CreateSimilarDrawTarget(mFrameRect.Size(), mFormat);
      target = aTargetDT->CreateSimilarDrawTarget(mImageSize, mFormat);
    } else {
      if (gfxPlatform::GetPlatform()->SupportsAzureContentForType(aBackend)) {
        target = gfxPlatform::GetPlatform()->CreateDrawTargetForBackend(
            aBackend, mFrameRect.Size(), mFormat);
            aBackend, mImageSize, mFormat);
      } else {
        target = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(
            mFrameRect.Size(), mFormat);
            mImageSize, mFormat);
      }
    }
  }
@@ -499,8 +428,8 @@ nsresult imgFrame::InitWithDrawable(
  // Draw using the drawable the caller provided.
  RefPtr<gfxContext> ctx = gfxContext::CreateOrNull(target);
  MOZ_ASSERT(ctx);  // Already checked the draw target above.
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mFrameRect.Size()),
                             ImageRegion::Create(ThebesRect(mFrameRect)),
  gfxUtils::DrawPixelSnapped(ctx, aDrawable, SizeDouble(mImageSize),
                             ImageRegion::Create(ThebesRect(GetRect())),
                             mFormat, aSamplingFilter, aImageFlags);

  if (canUseDataSurface && !mLockedSurface) {

@@ -557,7 +486,7 @@ nsresult imgFrame::Optimize(DrawTarget* aTarget) {
    return NS_OK;
  }

  if (mPalettedImageData || mOptSurface) {
  if (mOptSurface) {
    return NS_OK;
  }

@@ -660,13 +589,6 @@ bool imgFrame::Draw(gfxContext* aContext, const ImageRegion& aRegion,
  NS_ASSERTION(!aRegion.IsRestricted() ||
                   !aRegion.Rect().Intersect(aRegion.Restriction()).IsEmpty(),
               "We must be allowed to sample *some* source pixels!");
  MOZ_ASSERT(mFrameRect.IsEqualEdges(IntRect(IntPoint(), mImageSize)),
             "Directly drawing an image with a non-trivial frame rect!");

  if (mPalettedImageData) {
    MOZ_ASSERT_UNREACHABLE("Directly drawing a paletted image!");
    return false;
  }

  // Perform the draw and freeing of the surface outside the lock. We want to
  // avoid contention with the decoder if we can. The surface may also attempt
@@ -733,18 +655,13 @@ nsresult imgFrame::ImageUpdatedInternal(const nsIntRect& aUpdateRect) {

  // Clamp to the frame rect to ensure that decoder bugs don't result in a
  // decoded rect that extends outside the bounds of the frame rect.
  IntRect updateRect = mFrameRect.Intersect(aUpdateRect);
  IntRect updateRect = aUpdateRect.Intersect(GetRect());
  if (updateRect.IsEmpty()) {
    return NS_OK;
  }

  mDecoded.UnionRect(mDecoded, updateRect);

  // Paletted images cannot invalidate.
  if (mPalettedImageData) {
    return NS_OK;
  }

  // Update our invalidation counters for any consumers watching for changes
  // in the surface.
  if (mRawSurface) {

@@ -761,27 +678,26 @@ void imgFrame::Finish(Opacity aFrameOpacity /* = Opacity::SOME_TRANSPARENCY */,
  MonitorAutoLock lock(mMonitor);
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");

  if (mPalettedImageData) {
    ImageUpdatedInternal(mFrameRect);
  } else if (!mDecoded.IsEqualEdges(mFrameRect)) {
  IntRect frameRect(GetRect());
  if (!mDecoded.IsEqualEdges(frameRect)) {
    // The decoder should have produced rows starting from either the bottom or
    // the top of the image. We need to calculate the region for which we have
    // not yet invalidated.
    IntRect delta(0, 0, mFrameRect.width, 0);
    IntRect delta(0, 0, frameRect.width, 0);
    if (mDecoded.y == 0) {
      delta.y = mDecoded.height;
      delta.height = mFrameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == mFrameRect.height) {
      delta.height = mFrameRect.height - mDecoded.y;
      delta.height = frameRect.height - mDecoded.height;
    } else if (mDecoded.y + mDecoded.height == frameRect.height) {
      delta.height = frameRect.height - mDecoded.y;
    } else {
      MOZ_ASSERT_UNREACHABLE("Decoder only updated middle of image!");
      delta = mFrameRect;
      delta = frameRect;
    }

    ImageUpdatedInternal(delta);
  }

  MOZ_ASSERT(mDecoded.IsEqualEdges(mFrameRect));
  MOZ_ASSERT(mDecoded.IsEqualEdges(frameRect));

  if (aFinalize) {
    FinalizeSurfaceInternal();
@@ -797,18 +713,14 @@ uint32_t imgFrame::GetImageBytesPerRow() const {
  mMonitor.AssertCurrentThreadOwns();

  if (mRawSurface) {
    return mFrameRect.Width() * BytesPerPixel(mFormat);
  }

  if (mPaletteDepth) {
    return mFrameRect.Width();
    return mImageSize.width * BytesPerPixel(mFormat);
  }

  return 0;
}

uint32_t imgFrame::GetImageDataLength() const {
  return GetImageBytesPerRow() * mFrameRect.Height();
  return GetImageBytesPerRow() * mImageSize.height;
}

void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {

@@ -819,6 +731,7 @@ void imgFrame::GetImageData(uint8_t** aData, uint32_t* aLength) const {
void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
  mMonitor.AssertCurrentThreadOwns();
  MOZ_ASSERT(mLockCount > 0, "Image data should be locked");
  MOZ_ASSERT(mLockedSurface);

  if (mLockedSurface) {
    // TODO: This is okay for now because we only realloc shared surfaces on

@@ -828,15 +741,7 @@ void imgFrame::GetImageDataInternal(uint8_t** aData, uint32_t* aLength) const {
    MOZ_ASSERT(
        *aData,
        "mLockedSurface is non-null, but GetData is null in GetImageData");
  } else if (mPalettedImageData) {
    *aData = mPalettedImageData + PaletteDataLength();
    MOZ_ASSERT(
        *aData,
        "mPalettedImageData is non-null, but result is null in GetImageData");
  } else {
    MOZ_ASSERT(
        false,
        "Have neither mLockedSurface nor mPalettedImageData in GetImageData");
    *aData = nullptr;
  }
@@ -850,27 +755,6 @@ uint8_t* imgFrame::GetImageData() const {
  return data;
}

bool imgFrame::GetIsPaletted() const { return mPalettedImageData != nullptr; }

void imgFrame::GetPaletteData(uint32_t** aPalette, uint32_t* length) const {
  AssertImageDataLocked();

  if (!mPalettedImageData) {
    *aPalette = nullptr;
    *length = 0;
  } else {
    *aPalette = (uint32_t*)mPalettedImageData;
    *length = PaletteDataLength();
  }
}

uint32_t* imgFrame::GetPaletteData() const {
  uint32_t* data;
  uint32_t length;
  GetPaletteData(&data, &length);
  return data;
}

uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  MonitorAutoLock lock(mMonitor);

@@ -880,9 +764,7 @@ uint8_t* imgFrame::LockImageData(bool aOnlyFinished) {
  }

  uint8_t* data;
  if (mPalettedImageData) {
    data = mPalettedImageData;
  } else if (mLockedSurface) {
  if (mLockedSurface) {
    data = mLockedSurface->GetData();
  } else {
    data = nullptr;

@@ -999,7 +881,7 @@ already_AddRefed<SourceSurface> imgFrame::GetSourceSurfaceInternal(
    return nullptr;
  }

  return CreateLockedSurface(mRawSurface, mFrameRect.Size(), mFormat);
  return CreateLockedSurface(mRawSurface, mImageSize, mFormat);
}

void imgFrame::Abort() {

@@ -1037,7 +919,7 @@ void imgFrame::WaitUntilFinished() const {

bool imgFrame::AreAllPixelsWritten() const {
  mMonitor.AssertCurrentThreadOwns();
  return mDecoded.IsEqualInterior(mFrameRect);
  return mDecoded.IsEqualInterior(GetRect());
}

void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
@@ -1045,9 +927,6 @@ void imgFrame::AddSizeOfExcludingThis(MallocSizeOf aMallocSizeOf,
  MonitorAutoLock lock(mMonitor);

  AddSizeOfCbData metadata;
  if (mPalettedImageData) {
    metadata.heap += aMallocSizeOf(mPalettedImageData);
  }
  if (mLockedSurface) {
    metadata.heap += aMallocSizeOf(mLockedSurface);
  }
@@ -50,11 +50,10 @@ class imgFrame {
   * when drawing content into an imgFrame, as it may use a different graphics
   * backend than normal content drawing.
   */
  nsresult InitForDecoder(const nsIntSize& aImageSize, const nsIntRect& aRect,
                          SurfaceFormat aFormat, uint8_t aPaletteDepth,
  nsresult InitForDecoder(const nsIntSize& aImageSize, SurfaceFormat aFormat,
                          bool aNonPremult,
                          const Maybe<AnimationParams>& aAnimParams,
                          bool aIsFullFrame, bool aShouldRecycle);
                          bool aShouldRecycle);

  /**
   * Reinitialize this imgFrame with the new parameters, but otherwise retain

@@ -161,14 +160,13 @@ class imgFrame {
   * changes caused by Optimize(), since an imgFrame is not optimized throughout
   * its lifetime.
   */
  uint32_t GetBytesPerPixel() const { return GetIsPaletted() ? 1 : 4; }
  uint32_t GetBytesPerPixel() const { return 4; }

  const IntSize& GetImageSize() const { return mImageSize; }
  const IntRect& GetRect() const { return mFrameRect; }
  IntSize GetSize() const { return mFrameRect.Size(); }
  const IntSize& GetSize() const { return mImageSize; }
  IntRect GetRect() const { return IntRect(IntPoint(0, 0), mImageSize); }
  const IntRect& GetBlendRect() const { return mBlendRect; }
  IntRect GetBoundedBlendRect() const {
    return mBlendRect.Intersect(mFrameRect);
    return mBlendRect.Intersect(GetRect());
  }
  FrameTimeout GetTimeout() const { return mTimeout; }
  BlendMethod GetBlendMethod() const { return mBlendMethod; }

@@ -177,16 +175,9 @@ class imgFrame {
  void GetImageData(uint8_t** aData, uint32_t* length) const;
  uint8_t* GetImageData() const;

  bool GetIsPaletted() const;
  void GetPaletteData(uint32_t** aPalette, uint32_t* length) const;
  uint32_t* GetPaletteData() const;
  uint8_t GetPaletteDepth() const { return mPaletteDepth; }

  const IntRect& GetDirtyRect() const { return mDirtyRect; }
  void SetDirtyRect(const IntRect& aDirtyRect) { mDirtyRect = aDirtyRect; }

  bool IsFullFrame() const { return mIsFullFrame; }

  void SetOptimizable();

  void FinalizeSurface();
@@ -240,10 +231,6 @@ class imgFrame {
   */
  already_AddRefed<SourceSurface> GetSourceSurfaceInternal(bool aTemporary);

  uint32_t PaletteDataLength() const {
    return mPaletteDepth ? (size_t(1) << mPaletteDepth) * sizeof(uint32_t) : 0;
  }

  struct SurfaceWithFormat {
    RefPtr<gfxDrawable> mDrawable;
    SurfaceFormat mFormat;

@@ -320,17 +307,6 @@ class imgFrame {
  //! The size of the buffer we are decoding to.
  IntSize mImageSize;

  //! XXX(aosmond): This means something different depending on the context. We
  //! should correct this.
  //!
  //! There are several different contexts for mFrameRect:
  //! - If for non-animated image, it will be originate at (0, 0) and matches
  //!   the dimensions of mImageSize.
  //! - If for an APNG, it also matches the above.
  //! - If for a GIF which is producing full frames, it matches the above.
  //! - If for a GIF which is producing partial frames, it matches mBlendRect.
  IntRect mFrameRect;

  //! The contents for the frame, as represented in the encoded image. This may
  //! differ from mImageSize because it may be a partial frame. For the first
  //! frame, this means we need to shift the data in place, and for animated

@@ -350,18 +326,7 @@ class imgFrame {
  BlendMethod mBlendMethod;
  SurfaceFormat mFormat;

  // The palette and image data for images that are paletted, since Cairo
  // doesn't support these images.
  // The paletted data comes first, then the image data itself.
  // Total length is PaletteDataLength() + GetImageDataLength().
  uint8_t* mPalettedImageData;
  uint8_t mPaletteDepth;

  bool mNonPremult;

  //! True if the frame has all of the data stored in it, false if it needs to
  //! be combined with another frame (e.g. the previous frame) to be complete.
  bool mIsFullFrame;
};

/**
@@ -378,7 +343,6 @@ class DrawableFrameRef final {
  explicit DrawableFrameRef(imgFrame* aFrame) : mFrame(aFrame) {
    MOZ_ASSERT(aFrame);
    MonitorAutoLock lock(aFrame->mMonitor);
    MOZ_ASSERT(!aFrame->GetIsPaletted(), "Paletted must use RawAccessFrameRef");

    if (aFrame->mRawSurface) {
      mRef.emplace(aFrame->mRawSurface, DataSourceSurface::READ);

@@ -432,10 +396,10 @@ class DrawableFrameRef final {
/**
 * A reference to an imgFrame that holds the imgFrame's surface in memory in a
 * format appropriate for access as raw data. If you have a RawAccessFrameRef
 * |ref| and |if (ref)| is true, then calls to GetImageData() and
 * GetPaletteData() are guaranteed to succeed. This guarantee is stronger than
 * DrawableFrameRef, so everything that a valid DrawableFrameRef guarantees is
 * also guaranteed by a valid RawAccessFrameRef.
 * |ref| and |if (ref)| is true, then calls to GetImageData() is guaranteed to
 * succeed. This guarantee is stronger than DrawableFrameRef, so everything that
 * a valid DrawableFrameRef guarantees is also guaranteed by a valid
 * RawAccessFrameRef.
 *
 * This may be considerably more expensive than is necessary just for drawing,
 * so only use this when you need to read or write the raw underlying image data

@@ -507,7 +471,6 @@ class RawAccessFrameRef final {
  }

  uint8_t* Data() const { return mData; }
  uint32_t PaletteDataLength() const { return mFrame->PaletteDataLength(); }

 private:
  RawAccessFrameRef(const RawAccessFrameRef& aOther) = delete;
@@ -355,7 +355,7 @@ void CheckGeneratedSurface(SourceSurface* aSurface, const IntRect& aRect,

void CheckGeneratedPalettedImage(Decoder* aDecoder, const IntRect& aRect) {
  RawAccessFrameRef currentFrame = aDecoder->GetCurrentFrameRef();
  IntSize imageSize = currentFrame->GetImageSize();
  IntSize imageSize = currentFrame->GetSize();

  // This diagram shows how the surface is divided into regions that the code
  // below tests for the correct content. The output rect is the bounds of the

@@ -18,9 +18,8 @@ static already_AddRefed<imgFrame> CreateEmptyFrame(
  AnimationParams animParams{aFrameRect, FrameTimeout::Forever(),
                             /* aFrameNum */ 1, BlendMethod::OVER,
                             DisposalMethod::NOT_SPECIFIED};
  nsresult rv = frame->InitForDecoder(aSize, IntRect(IntPoint(0, 0), aSize),
                                      SurfaceFormat::B8G8R8A8, 0, false,
                                      Some(animParams), true, aCanRecycle);
  nsresult rv = frame->InitForDecoder(aSize, SurfaceFormat::B8G8R8A8, false,
                                      Some(animParams), aCanRecycle);
  EXPECT_TRUE(NS_SUCCEEDED(rv));
  RawAccessFrameRef frameRef = frame->RawAccessRef();
  frame->SetRawAccessOnly();
@@ -22,7 +22,7 @@ using namespace mozilla::image;

static already_AddRefed<Decoder> CreateTrivialBlendingDecoder() {
  gfxPrefs::GetSingleton();
  DecoderType decoderType = DecoderFactory::GetDecoderType("image/gif");
  DecoderFlags decoderFlags = DecoderFlags::BLEND_ANIMATION;
  DecoderFlags decoderFlags = DefaultDecoderFlags();
  SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
  auto sourceBuffer = MakeNotNull<RefPtr<SourceBuffer>>();
  return DecoderFactory::CreateAnonymousDecoder(

@@ -345,7 +345,7 @@ static void WithSingleChunkAnimationDecode(const ImageTestCase& aTestCase,
  task->Run();

  // Create a decoder.
  DecoderFlags decoderFlags = DecoderFlags::BLEND_ANIMATION;
  DecoderFlags decoderFlags = DefaultDecoderFlags();
  SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
  RefPtr<Decoder> decoder = DecoderFactory::CreateAnonymousDecoder(
      decoderType, sourceBuffer, Nothing(), decoderFlags, surfaceFlags);
@@ -42,7 +42,7 @@ static void CheckFrameAnimatorBlendResults(const ImageTestCase& aTestCase,

template <typename Func>
static void WithFrameAnimatorDecode(const ImageTestCase& aTestCase,
                                    bool aBlendFilter, Func aResultChecker) {
                                    Func aResultChecker) {
  // Create an image.
  RefPtr<Image> image = ImageFactory::CreateAnonymousImage(
      nsDependentCString(aTestCase.mMimeType));

@@ -80,9 +80,6 @@ static void WithFrameAnimatorDecode(const ImageTestCase& aTestCase,
  // Create an AnimationSurfaceProvider which will manage the decoding process
  // and make this decoder's output available in the surface cache.
  DecoderFlags decoderFlags = DefaultDecoderFlags();
  if (aBlendFilter) {
    decoderFlags |= DecoderFlags::BLEND_ANIMATION;
  }
  SurfaceFlags surfaceFlags = DefaultSurfaceFlags();
  rv = DecoderFactory::CreateAnimationDecoder(
      decoderType, rasterImage, sourceBuffer, aTestCase.mSize, decoderFlags,

@@ -97,9 +94,8 @@ static void WithFrameAnimatorDecode(const ImageTestCase& aTestCase,
  aResultChecker(rasterImage.get());
}

static void CheckFrameAnimatorBlend(const ImageTestCase& aTestCase,
                                    bool aBlendFilter) {
  WithFrameAnimatorDecode(aTestCase, aBlendFilter, [&](RasterImage* aImage) {
static void CheckFrameAnimatorBlend(const ImageTestCase& aTestCase) {
  WithFrameAnimatorDecode(aTestCase, [&](RasterImage* aImage) {
    CheckFrameAnimatorBlendResults(aTestCase, aImage);
  });
}

@@ -109,27 +105,14 @@ class ImageFrameAnimator : public ::testing::Test {
  AutoInitializeImageLib mInit;
};

TEST_F(ImageFrameAnimator, BlendGIFWithAnimator) {
  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase(), /* aBlendFilter */ false);
}

TEST_F(ImageFrameAnimator, BlendGIFWithFilter) {
  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase(), /* aBlendFilter */ true);
}

TEST_F(ImageFrameAnimator, BlendPNGWithAnimator) {
  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase(), /* aBlendFilter */ false);
  CheckFrameAnimatorBlend(BlendAnimatedGIFTestCase());
}

TEST_F(ImageFrameAnimator, BlendPNGWithFilter) {
  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase(), /* aBlendFilter */ true);
}

TEST_F(ImageFrameAnimator, BlendWebPWithAnimator) {
  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase(),
                          /* aBlendFilter */ false);
  CheckFrameAnimatorBlend(BlendAnimatedPNGTestCase());
}

TEST_F(ImageFrameAnimator, BlendWebPWithFilter) {
  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase(), /* aBlendFilter */ true);
  CheckFrameAnimatorBlend(BlendAnimatedWebPTestCase());
}