Backed out 6 changesets (bug 1774306) for causing leak failures CLOSED TREE

Backed out changeset aebaccb21fac (bug 1774306)
Backed out changeset 718fc08ac6a0 (bug 1774306)
Backed out changeset 3b5616ad1231 (bug 1774306)
Backed out changeset c2bae45b50fb (bug 1774306)
Backed out changeset 836597b2894b (bug 1774306)
Backed out changeset 51ee6beb36f8 (bug 1774306)
This commit is contained in:
Noemi Erli 2022-12-14 08:27:50 +02:00
parent 7ac1460c9f
commit 769b7ebb72
7 changed files with 209 additions and 237 deletions

View File

@ -399,7 +399,7 @@ void StructuredCloneHolder::Read(nsIGlobalObject* aGlobal, JSContext* aCx,
mWasmModuleArray.Clear();
mClonedSurfaces.Clear();
mInputStreamArray.Clear();
mVideoFrames.Clear();
mVideoFrameImages.Clear();
Clear();
}
}
@ -1029,7 +1029,7 @@ JSObject* StructuredCloneHolder::CustomReadHandler(
aTag == SCTAG_DOM_VIDEOFRAME &&
CloneScope() == StructuredCloneScope::SameProcess) {
return VideoFrame::ReadStructuredClone(aCx, mGlobal, aReader,
VideoFrames()[aIndex]);
VideoFrameImages()[aIndex]);
}
return ReadFullySerializableObjects(aCx, aReader, aTag);
@ -1279,28 +1279,6 @@ StructuredCloneHolder::CustomReadTransferHandler(
aReturnObject);
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_VIDEOFRAME &&
CloneScope() == StructuredCloneScope::SameProcess) {
MOZ_ASSERT(aContent);
VideoFrame::TransferredData* data =
static_cast<VideoFrame::TransferredData*>(aContent);
nsCOMPtr<nsIGlobalObject> global = mGlobal;
// aContent will be deallocated here if frame is non-null. Otherwise, it
// will be done in CustomFreeTransferHandler instead.
if (RefPtr<VideoFrame> frame =
VideoFrame::FromTransferred(global.get(), data)) {
delete data;
JS::Rooted<JS::Value> value(aCx);
if (!GetOrCreateDOMReflector(aCx, frame, &value)) {
JS_ClearPendingException(aCx);
return false;
}
aReturnObject.set(&value.toObject());
return true;
}
}
return false;
}
@ -1379,27 +1357,6 @@ StructuredCloneHolder::CustomWriteTransferHandler(
return true;
}
if (StaticPrefs::dom_media_webcodecs_enabled()) {
VideoFrame* videoFrame = nullptr;
rv = UNWRAP_OBJECT(VideoFrame, &obj, videoFrame);
if (NS_SUCCEEDED(rv)) {
MOZ_ASSERT(videoFrame);
*aExtraData = 0;
*aTag = SCTAG_DOM_VIDEOFRAME;
*aOwnership = JS::SCTAG_TMO_CUSTOM;
*aContent = nullptr;
UniquePtr<VideoFrame::TransferredData> data = videoFrame->Transfer();
if (!data) {
return false;
}
*aContent = data.release();
MOZ_ASSERT(*aContent);
return true;
}
}
}
if (StaticPrefs::dom_streams_transferable_enabled()) {
@ -1528,16 +1485,6 @@ void StructuredCloneHolder::CustomFreeTransferHandler(
MessagePort::ForceClose(mPortIdentifiers[aExtraData + 1]);
return;
}
if (StaticPrefs::dom_media_webcodecs_enabled() &&
aTag == SCTAG_DOM_VIDEOFRAME &&
CloneScope() == StructuredCloneScope::SameProcess) {
MOZ_ASSERT(aContent);
VideoFrame::TransferredData* data =
static_cast<VideoFrame::TransferredData*>(aContent);
delete data;
return;
}
}
bool StructuredCloneHolder::CustomCanTransferHandler(
@ -1613,15 +1560,6 @@ bool StructuredCloneHolder::CustomCanTransferHandler(
}
}
if (StaticPrefs::dom_media_webcodecs_enabled()) {
VideoFrame* videoframe = nullptr;
nsresult rv = UNWRAP_OBJECT(VideoFrame, &obj, videoframe);
if (NS_SUCCEEDED(rv)) {
SameProcessScopeRequired(aSameProcessScopeRequired);
return CloneScope() == StructuredCloneScope::SameProcess;
}
}
return false;
}

View File

@ -166,7 +166,7 @@ class StructuredCloneHolderBase {
class BlobImpl;
class MessagePort;
class MessagePortIdentifier;
struct VideoFrameSerializedData;
struct VideoFrameImageData;
class StructuredCloneHolder : public StructuredCloneHolderBase {
public:
@ -210,7 +210,7 @@ class StructuredCloneHolder : public StructuredCloneHolderBase {
bool HasClonedDOMObjects() const {
return !mBlobImplArray.IsEmpty() || !mWasmModuleArray.IsEmpty() ||
!mClonedSurfaces.IsEmpty() || !mInputStreamArray.IsEmpty() ||
!mVideoFrames.IsEmpty();
!mVideoFrameImages.IsEmpty();
}
nsTArray<RefPtr<BlobImpl>>& BlobImpls() {
@ -266,7 +266,9 @@ class StructuredCloneHolder : public StructuredCloneHolderBase {
return mClonedSurfaces;
}
nsTArray<VideoFrameSerializedData>& VideoFrames() { return mVideoFrames; }
nsTArray<VideoFrameImageData>& VideoFrameImages() {
return mVideoFrameImages;
}
// Implementations of the virtual methods to allow cloning of objects which
// JS engine itself doesn't clone.
@ -366,7 +368,7 @@ class StructuredCloneHolder : public StructuredCloneHolderBase {
nsTArray<RefPtr<gfx::DataSourceSurface>> mClonedSurfaces;
// Used for cloning VideoFrame in the structured cloning algorithm.
nsTArray<VideoFrameSerializedData> mVideoFrames;
nsTArray<VideoFrameImageData> mVideoFrameImages;
// This raw pointer is only set within ::Read() and is unset by the end.
nsIGlobalObject* MOZ_NON_OWNING_REF mGlobal;

View File

@ -1051,10 +1051,10 @@ static Result<RefPtr<VideoFrame>, nsCString> CreateVideoFrameFromBuffer(
// TODO: Spec should assign aInit.mFormat to inner format value:
// https://github.com/w3c/webcodecs/issues/509.
// This comment should be removed once the issue is resolved.
return MakeRefPtr<VideoFrame>(aGlobal, data, aInit.mFormat, codedSize,
parsedRect,
displaySize ? *displaySize : parsedRect.Size(),
duration, aInit.mTimestamp, colorSpace);
return MakeRefPtr<VideoFrame>(
aGlobal, data, aInit.mFormat, codedSize, parsedRect,
displaySize ? *displaySize : parsedRect.Size(), std::move(duration),
aInit.mTimestamp, colorSpace);
}
template <class T>
@ -1137,26 +1137,46 @@ InitializeFrameWithResourceAndSize(
const VideoColorSpaceInit colorSpace{};
return MakeAndAddRef<VideoFrame>(aGlobal, image, format->PixelFormat(),
image->GetSize(), visibleRect.value(),
displaySize.value(), duration,
displaySize.value(), std::move(duration),
aInit.mTimestamp.Value(), colorSpace);
}
// https://w3c.github.io/webcodecs/#videoframe-initialize-frame-from-other-frame
struct VideoFrameData {
VideoFrameData(layers::Image* aImage, const VideoPixelFormat& aFormat,
gfx::IntRect aVisibleRect, gfx::IntSize aDisplaySize,
Maybe<uint64_t> aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace)
: mImage(aImage),
mFormat(aFormat),
mVisibleRect(aVisibleRect),
mDisplaySize(aDisplaySize),
mDuration(aDuration),
mTimestamp(aTimestamp),
mColorSpace(aColorSpace) {}
RefPtr<layers::Image> mImage;
VideoFrame::Format mFormat;
const gfx::IntRect mVisibleRect;
const gfx::IntSize mDisplaySize;
const Maybe<uint64_t> mDuration;
const int64_t mTimestamp;
const VideoColorSpaceInit mColorSpace;
};
static Result<already_AddRefed<VideoFrame>, nsCString>
InitializeFrameFromOtherFrame(nsIGlobalObject* aGlobal, VideoFrameData&& aData,
const VideoFrameInit& aInit) {
MOZ_ASSERT(aGlobal);
MOZ_ASSERT(aData.mImage);
VideoFrame::Format format(aData.mFormat);
if (aInit.mAlpha == AlphaOption::Discard) {
format.MakeOpaque();
aData.mFormat.MakeOpaque();
// Keep the alpha data in image for now until it's being rendered.
}
Tuple<Maybe<gfx::IntRect>, Maybe<gfx::IntSize>> init;
MOZ_TRY_VAR(init,
ValidateVideoFrameInit(aInit, format, aData.mImage->GetSize()));
MOZ_TRY_VAR(init, ValidateVideoFrameInit(aInit, aData.mFormat,
aData.mImage->GetSize()));
Maybe<gfx::IntRect> visibleRect = Get<0>(init);
Maybe<gfx::IntSize> displaySize = Get<1>(init);
@ -1170,8 +1190,9 @@ InitializeFrameFromOtherFrame(nsIGlobalObject* aGlobal, VideoFrameData&& aData,
: aData.mTimestamp;
return MakeAndAddRef<VideoFrame>(
aGlobal, aData.mImage, format.PixelFormat(), aData.mImage->GetSize(),
*visibleRect, *displaySize, duration, timestamp, aData.mColorSpace);
aGlobal, aData.mImage, aData.mFormat.PixelFormat(),
aData.mImage->GetSize(), *visibleRect, *displaySize, std::move(duration),
timestamp, aData.mColorSpace);
}
/*
@ -1182,14 +1203,14 @@ VideoFrame::VideoFrame(nsIGlobalObject* aParent,
const RefPtr<layers::Image>& aImage,
const VideoPixelFormat& aFormat, gfx::IntSize aCodedSize,
gfx::IntRect aVisibleRect, gfx::IntSize aDisplaySize,
const Maybe<uint64_t>& aDuration, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace)
: mParent(aParent),
mResource(Some(Resource(aImage, VideoFrame::Format(aFormat)))),
mCodedSize(aCodedSize),
mVisibleRect(aVisibleRect),
mDisplaySize(aDisplaySize),
mDuration(aDuration),
mDuration(std::move(aDuration)),
mTimestamp(aTimestamp),
mColorSpace(aColorSpace) {
MOZ_ASSERT(mParent);
@ -1548,6 +1569,8 @@ already_AddRefed<VideoFrame> VideoFrame::Constructor(
}
// Check the usability.
// TODO: aRv.Throw(NS_ERROR_DOM_INVALID_STATE_ERR) if this is _detached_ (bug
// 1774306).
if (!aVideoFrame.mResource) {
aRv.ThrowInvalidStateError(
"The VideoFrame is closed or no image found there");
@ -1602,6 +1625,7 @@ already_AddRefed<VideoFrame> VideoFrame::Constructor(
Nullable<VideoPixelFormat> VideoFrame::GetFormat() const {
AssertIsOnOwningThread();
// TODO: Return Nullable<T>() if this is _detached_ (bug 1774306).
return mResource
? Nullable<VideoPixelFormat>(mResource->mFormat.PixelFormat())
: Nullable<VideoPixelFormat>();
@ -1625,6 +1649,8 @@ uint32_t VideoFrame::CodedHeight() const {
already_AddRefed<DOMRectReadOnly> VideoFrame::GetCodedRect() const {
AssertIsOnOwningThread();
// TODO: Return nullptr if this is _detached_ instead of checking resource
// (bug 1774306).
return mResource
? MakeAndAddRef<DOMRectReadOnly>(
mParent, 0.0f, 0.0f, static_cast<double>(mCodedSize.Width()),
@ -1636,6 +1662,8 @@ already_AddRefed<DOMRectReadOnly> VideoFrame::GetCodedRect() const {
already_AddRefed<DOMRectReadOnly> VideoFrame::GetVisibleRect() const {
AssertIsOnOwningThread();
// TODO: Return nullptr if this is _detached_ instead of checking resource
// (bug 1774306).
return mResource ? MakeAndAddRef<DOMRectReadOnly>(
mParent, static_cast<double>(mVisibleRect.X()),
static_cast<double>(mVisibleRect.Y()),
@ -1684,6 +1712,8 @@ uint32_t VideoFrame::AllocationSize(const VideoFrameCopyToOptions& aOptions,
ErrorResult& aRv) {
AssertIsOnOwningThread();
// TODO: Throw error if this is _detached_ instead of checking resource (bug
// 1774306).
if (!mResource) {
aRv.ThrowInvalidStateError("No media resource in VideoFrame");
return 0;
@ -1707,6 +1737,8 @@ already_AddRefed<Promise> VideoFrame::CopyTo(
const VideoFrameCopyToOptions& aOptions, ErrorResult& aRv) {
AssertIsOnOwningThread();
// TODO: Throw error if this is _detached_ instead of checking resource (bug
// 1774306).
if (!mResource) {
aRv.ThrowInvalidStateError("No media resource in VideoFrame");
return nullptr;
@ -1790,6 +1822,8 @@ already_AddRefed<Promise> VideoFrame::CopyTo(
already_AddRefed<VideoFrame> VideoFrame::Clone(ErrorResult& aRv) {
AssertIsOnOwningThread();
// TODO: Throw error if this is _detached_ instead of checking resource (bug
// 1774306).
if (!mResource) {
aRv.ThrowInvalidStateError("No media resource in the VideoFrame now");
return nullptr;
@ -1803,6 +1837,7 @@ already_AddRefed<VideoFrame> VideoFrame::Clone(ErrorResult& aRv) {
void VideoFrame::Close() {
AssertIsOnOwningThread();
// TODO: Set _detached_ to `true` (bug 1774306).
mResource.reset();
mCodedSize = gfx::IntSize();
mVisibleRect = gfx::IntRect();
@ -1812,13 +1847,88 @@ void VideoFrame::Close() {
// https://w3c.github.io/webcodecs/#ref-for-deserialization-steps%E2%91%A0
/* static */
JSObject* VideoFrame::ReadStructuredClone(
JSContext* aCx, nsIGlobalObject* aGlobal, JSStructuredCloneReader* aReader,
const VideoFrameSerializedData& aData) {
if (!IsSameOrigin(aGlobal, aData.mPrincipalURI.get())) {
JSObject* VideoFrame::ReadStructuredClone(JSContext* aCx,
nsIGlobalObject* aGlobal,
JSStructuredCloneReader* aReader,
const VideoFrameImageData& aData) {
if (!IsSameOrigin(aGlobal, aData.mURI.get())) {
return nullptr;
}
VideoPixelFormat format;
if (NS_WARN_IF(!JS_ReadBytes(aReader, &format, 1))) {
return nullptr;
}
uint32_t codedWidth = 0;
uint32_t codedHeight = 0;
if (NS_WARN_IF(!JS_ReadUint32Pair(aReader, &codedWidth, &codedHeight))) {
return nullptr;
}
uint32_t visibleX = 0;
uint32_t visibleY = 0;
uint32_t visibleWidth = 0;
uint32_t visibleHeight = 0;
if (NS_WARN_IF(!JS_ReadUint32Pair(aReader, &visibleX, &visibleY)) ||
NS_WARN_IF(!JS_ReadUint32Pair(aReader, &visibleWidth, &visibleHeight))) {
return nullptr;
}
uint32_t displayWidth = 0;
uint32_t displayHeight = 0;
if (NS_WARN_IF(!JS_ReadUint32Pair(aReader, &displayWidth, &displayHeight))) {
return nullptr;
}
uint8_t hasDuration = 0;
uint32_t durationLow = 0;
uint32_t durationHigh = 0;
if (NS_WARN_IF(!JS_ReadBytes(aReader, &hasDuration, 1)) ||
NS_WARN_IF(!JS_ReadUint32Pair(aReader, &durationLow, &durationHigh))) {
return nullptr;
}
Maybe<uint64_t> duration =
hasDuration ? Some(uint64_t(durationHigh) << 32 | durationLow)
: Nothing();
uint32_t timestampLow = 0;
uint32_t timestampHigh = 0;
if (NS_WARN_IF(!JS_ReadUint32Pair(aReader, &timestampLow, &timestampHigh))) {
return nullptr;
}
int64_t timestamp = int64_t(timestampHigh) << 32 | timestampLow;
uint8_t colorSpaceFullRange = 0;
uint8_t colorSpaceMatrix = 0;
uint8_t colorSpacePrimaries = 0;
uint8_t colorSpaceTransfer = 0;
if (NS_WARN_IF(!JS_ReadBytes(aReader, &colorSpaceFullRange, 1)) ||
NS_WARN_IF(!JS_ReadBytes(aReader, &colorSpaceMatrix, 1)) ||
NS_WARN_IF(!JS_ReadBytes(aReader, &colorSpacePrimaries, 1)) ||
NS_WARN_IF(!JS_ReadBytes(aReader, &colorSpaceTransfer, 1))) {
return nullptr;
}
VideoColorSpaceInit colorSpace{};
if (colorSpaceFullRange < 2) {
colorSpace.mFullRange.Construct(colorSpaceFullRange > 0);
}
if (colorSpaceMatrix <
static_cast<uint8_t>(VideoMatrixCoefficients::EndGuard_)) {
colorSpace.mMatrix.Construct(
static_cast<VideoMatrixCoefficients>(colorSpaceMatrix));
}
if (colorSpacePrimaries <
static_cast<uint8_t>(VideoColorPrimaries::EndGuard_)) {
colorSpace.mPrimaries.Construct(
static_cast<VideoColorPrimaries>(colorSpacePrimaries));
}
if (colorSpaceTransfer <
static_cast<uint8_t>(VideoTransferCharacteristics::EndGuard_)) {
colorSpace.mTransfer.Construct(
static_cast<VideoTransferCharacteristics>(colorSpaceTransfer));
}
JS::Rooted<JS::Value> value(aCx, JS::NullValue());
// To avoid a rooting hazard error from returning a raw JSObject* before
// running the RefPtr destructor, RefPtr needs to be destructed before
@ -1827,9 +1937,10 @@ JSObject* VideoFrame::ReadStructuredClone(
// be safely destructed while the unrooted return JSObject* is on the stack.
{
RefPtr<VideoFrame> frame = MakeAndAddRef<VideoFrame>(
aGlobal, aData.mImage, aData.mFormat, aData.mCodedSize,
aData.mVisibleRect, aData.mDisplaySize, aData.mDuration,
aData.mTimestamp, aData.mColorSpace);
aGlobal, aData.mImage, format, gfx::IntSize(codedWidth, codedHeight),
gfx::IntRect(visibleX, visibleY, visibleWidth, visibleHeight),
gfx::IntSize(displayWidth, displayHeight), std::move(duration),
timestamp, colorSpace);
if (!GetOrCreateDOMReflector(aCx, frame, &value) || !value.isObject()) {
return nullptr;
}
@ -1842,61 +1953,69 @@ bool VideoFrame::WriteStructuredClone(JSStructuredCloneWriter* aWriter,
StructuredCloneHolder* aHolder) const {
AssertIsOnOwningThread();
// TODO: Throw error if this is _detached_ instead of checking resource (bug
// 1774306).
if (!mResource) {
return false;
}
const uint8_t format = BitwiseCast<uint8_t>(mResource->mFormat.PixelFormat());
const uint32_t codedWidth = BitwiseCast<uint32_t>(mCodedSize.Width());
const uint32_t codedHeight = BitwiseCast<uint32_t>(mCodedSize.Height());
const uint32_t visibleX = BitwiseCast<uint32_t>(mVisibleRect.X());
const uint32_t visibleY = BitwiseCast<uint32_t>(mVisibleRect.Y());
const uint32_t visibleWidth = BitwiseCast<uint32_t>(mVisibleRect.Width());
const uint32_t visibleHeight = BitwiseCast<uint32_t>(mVisibleRect.Height());
const uint32_t displayWidth = BitwiseCast<uint32_t>(mDisplaySize.Width());
const uint32_t displayHeight = BitwiseCast<uint32_t>(mDisplaySize.Height());
const uint8_t hasDuration = mDuration ? 1 : 0;
const uint32_t durationLow = mDuration ? uint32_t(*mDuration) : 0;
const uint32_t durationHigh = mDuration ? uint32_t(*mDuration >> 32) : 0;
const uint32_t timestampLow = uint32_t(mTimestamp);
const uint32_t timestampHigh = uint32_t(mTimestamp >> 32);
const uint8_t colorSpaceFullRange =
mColorSpace.mFullRange.WasPassed() ? mColorSpace.mFullRange.Value() : 2;
const uint8_t colorSpaceMatrix = BitwiseCast<uint8_t>(
mColorSpace.mMatrix.WasPassed() ? mColorSpace.mMatrix.Value()
: VideoMatrixCoefficients::EndGuard_);
const uint8_t colorSpacePrimaries = BitwiseCast<uint8_t>(
mColorSpace.mPrimaries.WasPassed() ? mColorSpace.mPrimaries.Value()
: VideoColorPrimaries::EndGuard_);
const uint8_t colorSpaceTransfer =
BitwiseCast<uint8_t>(mColorSpace.mTransfer.WasPassed()
? mColorSpace.mTransfer.Value()
: VideoTransferCharacteristics::EndGuard_);
// Indexing the image and send the index to the receiver.
const uint32_t index = aHolder->VideoFrames().Length();
const uint32_t index = aHolder->VideoFrameImages().Length();
RefPtr<layers::Image> image(mResource->mImage.get());
// The serialization is limited to the same process scope so it's ok to
// serialize a reference instead of a copy.
nsCOMPtr<nsIURI> uri = GetPrincipalURI();
aHolder->VideoFrames().AppendElement(VideoFrameSerializedData(
image.get(), mResource->mFormat.PixelFormat(), mCodedSize, mVisibleRect,
mDisplaySize, mDuration, mTimestamp, mColorSpace, uri.get()));
return !NS_WARN_IF(!JS_WriteUint32Pair(aWriter, SCTAG_DOM_VIDEOFRAME, index));
}
// https://w3c.github.io/webcodecs/#ref-for-transfer-steps%E2%91%A0
UniquePtr<VideoFrame::TransferredData> VideoFrame::Transfer() {
AssertIsOnOwningThread();
if (!mResource) {
return nullptr;
}
nsCOMPtr<nsIURI> uri = GetPrincipalURI();
Resource r = mResource.extract();
auto frame = MakeUnique<TransferredData>(
r.mImage.get(), r.mFormat.PixelFormat(), mCodedSize, mVisibleRect,
mDisplaySize, mDuration, mTimestamp, mColorSpace, uri.get());
Close();
return frame;
}
// https://w3c.github.io/webcodecs/#ref-for-transfer-receiving-steps%E2%91%A0
/* static */
already_AddRefed<VideoFrame> VideoFrame::FromTransferred(
nsIGlobalObject* aGlobal, TransferredData* aData) {
MOZ_ASSERT(aData);
if (!IsSameOrigin(aGlobal, aData->mPrincipalURI.get())) {
return nullptr;
}
return MakeAndAddRef<VideoFrame>(aGlobal, aData->mImage, aData->mFormat,
aData->mCodedSize, aData->mVisibleRect,
aData->mDisplaySize, aData->mDuration,
aData->mTimestamp, aData->mColorSpace);
}
nsCOMPtr<nsIURI> VideoFrame::GetPrincipalURI() const {
AssertIsOnOwningThread();
nsIPrincipal* principal = mParent->PrincipalOrNull();
return principal ? principal->GetURI() : nullptr;
nsCOMPtr<nsIURI> uri = principal ? principal->GetURI() : nullptr;
aHolder->VideoFrameImages().AppendElement(
VideoFrameImageData{image.forget(), uri});
return !(
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, SCTAG_DOM_VIDEOFRAME, index)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &format, 1)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, codedWidth, codedHeight)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, visibleX, visibleY)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, visibleWidth, visibleHeight)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, displayWidth, displayHeight)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &hasDuration, 1)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, durationLow, durationHigh)) ||
NS_WARN_IF(!JS_WriteUint32Pair(aWriter, timestampLow, timestampHigh)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &colorSpaceFullRange, 1)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &colorSpaceMatrix, 1)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &colorSpacePrimaries, 1)) ||
NS_WARN_IF(!JS_WriteBytes(aWriter, &colorSpaceTransfer, 1)));
}
/*

View File

@ -54,42 +54,9 @@ struct VideoFrameCopyToOptions;
namespace mozilla::dom {
struct VideoFrameData {
VideoFrameData(layers::Image* aImage, const VideoPixelFormat& aFormat,
gfx::IntRect aVisibleRect, gfx::IntSize aDisplaySize,
Maybe<uint64_t> aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace)
: mImage(aImage),
mFormat(aFormat),
mVisibleRect(aVisibleRect),
mDisplaySize(aDisplaySize),
mDuration(aDuration),
mTimestamp(aTimestamp),
mColorSpace(aColorSpace) {}
struct VideoFrameImageData {
const RefPtr<layers::Image> mImage;
const VideoPixelFormat mFormat;
const gfx::IntRect mVisibleRect;
const gfx::IntSize mDisplaySize;
const Maybe<uint64_t> mDuration;
const int64_t mTimestamp;
const VideoColorSpaceInit mColorSpace;
};
struct VideoFrameSerializedData : VideoFrameData {
VideoFrameSerializedData(layers::Image* aImage,
const VideoPixelFormat& aFormat,
gfx::IntSize aCodedSize, gfx::IntRect aVisibleRect,
gfx::IntSize aDisplaySize, Maybe<uint64_t> aDuration,
int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace,
nsIURI* aPrincipalURI)
: VideoFrameData(aImage, aFormat, aVisibleRect, aDisplaySize, aDuration,
aTimestamp, aColorSpace),
mCodedSize(aCodedSize),
mPrincipalURI(aPrincipalURI) {}
const gfx::IntSize mCodedSize;
const nsCOMPtr<nsIURI> mPrincipalURI;
const nsCOMPtr<nsIURI> mURI;
};
class VideoFrame final : public nsISupports, public nsWrapperCache {
@ -101,7 +68,7 @@ class VideoFrame final : public nsISupports, public nsWrapperCache {
VideoFrame(nsIGlobalObject* aParent, const RefPtr<layers::Image>& aImage,
const VideoPixelFormat& aFormat, gfx::IntSize aCodedSize,
gfx::IntRect aVisibleRect, gfx::IntSize aDisplaySize,
const Maybe<uint64_t>& aDuration, int64_t aTimestamp,
Maybe<uint64_t>&& aDuration, int64_t aTimestamp,
const VideoColorSpaceInit& aColorSpace);
VideoFrame(const VideoFrame& aOther);
@ -179,19 +146,11 @@ class VideoFrame final : public nsISupports, public nsWrapperCache {
// [Serializable] implementations: {Read, Write}StructuredClone
static JSObject* ReadStructuredClone(JSContext* aCx, nsIGlobalObject* aGlobal,
JSStructuredCloneReader* aReader,
const VideoFrameSerializedData& aData);
const VideoFrameImageData& aImage);
bool WriteStructuredClone(JSStructuredCloneWriter* aWriter,
StructuredCloneHolder* aHolder) const;
// [Transferable] implementations: Transfer, FromTransferred
using TransferredData = VideoFrameSerializedData;
UniquePtr<TransferredData> Transfer();
static already_AddRefed<VideoFrame> FromTransferred(nsIGlobalObject* aGlobal,
TransferredData* aData);
public:
// A VideoPixelFormat wrapper providing utilities for VideoFrame.
class Format final {
@ -221,8 +180,6 @@ class VideoFrame final : public nsISupports, public nsWrapperCache {
// VideoFrame can run on either main thread or worker thread.
void AssertIsOnOwningThread() const { NS_ASSERT_OWNINGTHREAD(VideoFrame); }
nsCOMPtr<nsIURI> GetPrincipalURI() const;
// A class representing the VideoFrame's data.
class Resource final {
public:
@ -241,7 +198,6 @@ class VideoFrame final : public nsISupports, public nsWrapperCache {
nsCOMPtr<nsIGlobalObject> mParent;
// Use Maybe instead of UniquePtr to allow copy ctor.
// The mResource's existence is used as the [[Detached]] for [Transferable].
Maybe<const Resource> mResource; // Nothing() after `Close()`d
// TODO: Replace this by mResource->mImage->GetSize()?

View File

@ -12,8 +12,8 @@ enum AlphaOption {
"discard",
};
// [Serializable, Transferable] are implemented without adding attributes here.
[Exposed=(Window,DedicatedWorker), Pref="dom.media.webcodecs.enabled"]
// [Serializable] is implemented without adding attribute here.
[Exposed=(Window,DedicatedWorker) /*, Transferable (bug 1774306) */, Pref="dom.media.webcodecs.enabled"]
interface VideoFrame {
// The constructors should be shorten to:
// ```

View File

@ -2,6 +2,8 @@
prefs: [dom.media.webcodecs.enabled:true]
expected:
if (os == "android") and fission: [OK, TIMEOUT]
[Verify transferring frames closes them.]
expected: FAIL
[Verify closing a frame doesn't affect its clones.]
expected: FAIL
@ -11,6 +13,8 @@
prefs: [dom.media.webcodecs.enabled:true]
expected:
if (os == "android") and fission: [OK, TIMEOUT]
[Verify transferring frames closes them.]
expected: FAIL
[Verify closing a frame doesn't affect its clones.]
expected: FAIL

View File

@ -4,15 +4,9 @@
<script src='/resources/testharness.js'></script>
<script src='/resources/testharnessreport.js'></script>
<script src='/common/get-host-info.sub.js'></script>
<script src='/webcodecs/utils.js'></script>
<script id='workerCode' type='javascript/worker'>
self.onmessage = (e) => {
let frame = e.data.frame;
if (e.data.transfer) {
postMessage(frame, [frame]);
} else {
postMessage(frame);
}
postMessage(e.data);
};
</script>
</head>
@ -27,13 +21,13 @@ const CROSSORIGIN_HELPER = CROSSORIGIN_BASE + HELPER;
promise_test(async () => {
const target = (await appendIframe(SAMEORIGIN_HELPER)).contentWindow;
let frame = createVideoFrame(10);
assert_true(await canSerializeVideoFrame(target, frame));
assert_true(await canSendVideoFrame(target, frame));
}, 'Verify frames can be passed within the same agent clusters');
promise_test(async () => {
const target = (await appendIframe(CROSSORIGIN_HELPER)).contentWindow;
let frame = createVideoFrame(20);
assert_false(await canSerializeVideoFrame(target, frame));
assert_false(await canSendVideoFrame(target, frame));
}, 'Verify frames cannot be passed across the different agent clusters');
promise_test(async () => {
@ -42,7 +36,7 @@ promise_test(async () => {
});
const worker = new Worker(window.URL.createObjectURL(blob));
let frame = createVideoFrame(30);
worker.postMessage({frame: frame, transfer: false});
worker.postMessage(frame);
const received = await new Promise(resolve => worker.onmessage = e => {
resolve(e.data);
});
@ -50,33 +44,6 @@ promise_test(async () => {
assert_equals(received.timestamp, 30);
}, 'Verify frames can be passed back and forth between main and worker');
promise_test(async () => {
const target = (await appendIframe(SAMEORIGIN_HELPER)).contentWindow;
let frame = createVideoFrame(10);
assert_true(await canTransferVideoFrame(target, frame));
assert_true(isFrameClosed(frame));
}, 'Verify frames can be transferred within the same agent clusters');
promise_test(async () => {
const target = (await appendIframe(CROSSORIGIN_HELPER)).contentWindow;
let frame = createVideoFrame(20);
assert_false(await canTransferVideoFrame(target, frame));
}, 'Verify frames cannot be transferred across the different agent clusters');
promise_test(async () => {
const blob = new Blob([document.querySelector('#workerCode').textContent], {
type: 'text/javascript',
});
const worker = new Worker(window.URL.createObjectURL(blob));
let frame = createVideoFrame(30);
worker.postMessage({frame: frame, transfer: true}, [frame]);
const received = await new Promise(resolve => worker.onmessage = e => {
resolve(e.data);
});
assert_equals(received.toString(), '[object VideoFrame]');
assert_equals(received.timestamp, 30);
}, 'Verify frames can be transferred back and forth between main and worker');
function appendIframe(src) {
const frame = document.createElement('iframe');
document.body.appendChild(frame);
@ -97,22 +64,8 @@ function createVideoFrame(ts) {
});
}
function canSerializeVideoFrame(target, vf, transfer) {
return canPostVideoFrame(target, vf, false);
};
function canTransferVideoFrame(target, vf, transfer) {
return canPostVideoFrame(target, vf, true);
};
function canPostVideoFrame(target, vf, transfer) {
if (transfer) {
target.postMessage(vf, '*', [vf]);
assert_true(isFrameClosed(vf));
} else {
target.postMessage(vf, '*');
}
// vf.timestamp doesn't change after vf is closed, so it's fine to use it.
function canSendVideoFrame(target, vf) {
target.postMessage(vf, '*');
target.postMessage({'id': vf.timestamp}, '*');
return new Promise(resolve => window.onmessage = e => {
resolve(e.data == 'RECEIVED');