Bug 1881647 - Enable WMFMediaDataEncoder to encode RGBA images r=media-playback-reviewers,padenot
This patch extends the capabilities of WMFMediaDataEncoder to support the encoding of RGBA images. It achieves this by integrating a new function, ConvertToNV12, which converts RGB* images to the NV12 format, making the encoding of RGBA content on Windows possible.

Depends on D208168

Differential Revision: https://phabricator.services.mozilla.com/D207899
This commit is contained in:
parent
a5f7e89e5b
commit
d6b0361f8d
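Note on what the new conversion entails: NV12 stores a full-resolution Y (luma) plane followed by a half-resolution plane of interleaved U/V (chroma) pairs, so a frame needs width * height * 3 / 2 bytes. The patch routes BGR* surfaces through libyuv::ARGBToNV12; the scalar sketch below is illustrative only (it assumes BGRA byte order, even dimensions, BT.601 limited-range coefficients, and chroma sampled from the top-left pixel of each 2x2 block) and is not the code the patch adds.

#include <cstdint>

// Illustrative-only conversion of a packed BGRA buffer to NV12.
static void BgraToNV12(const uint8_t* aSrc, int aSrcStride, int aWidth,
                       int aHeight, uint8_t* aDestY, int aDestStrideY,
                       uint8_t* aDestUV, int aDestStrideUV) {
  for (int y = 0; y < aHeight; ++y) {
    for (int x = 0; x < aWidth; ++x) {
      const uint8_t* p = aSrc + y * aSrcStride + x * 4;  // bytes: B, G, R, A
      int b = p[0], g = p[1], r = p[2];
      // One luma sample per pixel (BT.601 limited range, fixed point).
      aDestY[y * aDestStrideY + x] =
          static_cast<uint8_t>(((66 * r + 129 * g + 25 * b + 128) >> 8) + 16);
      // One interleaved U/V pair per 2x2 block, taken from its top-left pixel.
      if ((x % 2 == 0) && (y % 2 == 0)) {
        uint8_t* uv = aDestUV + (y / 2) * aDestStrideUV + x;
        uv[0] = static_cast<uint8_t>(
            ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128);  // U (Cb)
        uv[1] = static_cast<uint8_t>(
            ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128);   // V (Cr)
      }
    }
  }
}

Because each 2x2 block shares a single U/V pair, the UV plane is half the height of the Y plane but the same width in bytes, which is where the 3/2 sizing factor used later in the patch comes from.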
@@ -3,10 +3,11 @@
  * License, v. 2.0. If a copy of the MPL was not distributed with this file,
  * You can obtain one at http://mozilla.org/MPL/2.0/. */
 
-#include "ImageToI420.h"
+#include "ImageConversion.h"
 
 #include "ImageContainer.h"
 #include "libyuv/convert.h"
+#include "libyuv/convert_from_argb.h"
 #include "mozilla/dom/ImageBitmapBinding.h"
 #include "mozilla/dom/ImageUtils.h"
 #include "mozilla/gfx/Point.h"
@@ -151,4 +152,56 @@ nsresult ConvertToI420(Image* aImage, uint8_t* aDestY, int aDestStrideY,
   }
 }
 
+nsresult ConvertToNV12(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
+                       uint8_t* aDestUV, int aDestStrideUV) {
+  if (!aImage->IsValid()) {
+    return NS_ERROR_INVALID_ARG;
+  }
+
+  if (const PlanarYCbCrData* data = GetPlanarYCbCrData(aImage)) {
+    const ImageUtils imageUtils(aImage);
+    Maybe<dom::ImageBitmapFormat> format = imageUtils.GetFormat();
+    if (format.isNothing()) {
+      MOZ_ASSERT_UNREACHABLE("YUV format conversion not implemented");
+      return NS_ERROR_NOT_IMPLEMENTED;
+    }
+
+    if (format.value() != ImageBitmapFormat::YUV420P) {
+      NS_WARNING("ConvertToNV12: Convert YUV data in I420 only");
+      return NS_ERROR_NOT_IMPLEMENTED;
+    }
+
+    return MapRv(libyuv::I420ToNV12(
+        data->mYChannel, data->mYStride, data->mCbChannel, data->mCbCrStride,
+        data->mCrChannel, data->mCbCrStride, aDestY, aDestStrideY, aDestUV,
+        aDestStrideUV, aImage->GetSize().width, aImage->GetSize().height));
+  }
+
+  RefPtr<SourceSurface> surf = GetSourceSurface(aImage);
+  if (!surf) {
+    return NS_ERROR_FAILURE;
+  }
+
+  RefPtr<DataSourceSurface> data = surf->GetDataSurface();
+  if (!data) {
+    return NS_ERROR_FAILURE;
+  }
+
+  DataSourceSurface::ScopedMap map(data, DataSourceSurface::READ);
+  if (!map.IsMapped()) {
+    return NS_ERROR_FAILURE;
+  }
+
+  if (surf->GetFormat() != SurfaceFormat::B8G8R8A8 &&
+      surf->GetFormat() != SurfaceFormat::B8G8R8X8) {
+    NS_WARNING("ConvertToNV12: Convert SurfaceFormat in BGR* only");
+    return NS_ERROR_NOT_IMPLEMENTED;
+  }
+
+  return MapRv(
+      libyuv::ARGBToNV12(static_cast<uint8_t*>(map.GetData()), map.GetStride(),
+                         aDestY, aDestStrideY, aDestUV, aDestStrideUV,
+                         aImage->GetSize().width, aImage->GetSize().height));
+}
+
 } // namespace mozilla
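For readers unfamiliar with the two pixel layouts handled above: the I420 branch delegates to libyuv::I420ToNV12, whose job is essentially to copy the Y plane and weave the two separate Cb/Cr planes into NV12's single interleaved UV plane. The sketch below spells out that interleaving only; it is illustrative and not code from the patch.

#include <cstdint>

// Illustrative-only: interleave I420's separate Cb/Cr planes into an NV12 UV plane.
static void InterleaveI420ChromaToNV12(const uint8_t* aCb, int aCbStride,
                                       const uint8_t* aCr, int aCrStride,
                                       uint8_t* aDestUV, int aDestStrideUV,
                                       int aChromaWidth, int aChromaHeight) {
  for (int row = 0; row < aChromaHeight; ++row) {
    const uint8_t* cb = aCb + row * aCbStride;
    const uint8_t* cr = aCr + row * aCrStride;
    uint8_t* uv = aDestUV + row * aDestStrideUV;
    for (int col = 0; col < aChromaWidth; ++col) {
      uv[2 * col] = cb[col];      // U (Cb) occupies the even bytes
      uv[2 * col + 1] = cr[col];  // V (Cr) occupies the odd bytes
    }
  }
}

This is also why the destination UV stride passed to I420ToNV12 above is mCbCrStride * 2: each chroma row doubles in byte width once U and V share a plane.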
@@ -21,6 +21,12 @@ nsresult ConvertToI420(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
                        uint8_t* aDestU, int aDestStrideU, uint8_t* aDestV,
                        int aDestStrideV);
 
+/**
+ * Converts aImage to an NV12 image and writes it to the given buffers.
+ */
+nsresult ConvertToNV12(layers::Image* aImage, uint8_t* aDestY, int aDestStrideY,
+                       uint8_t* aDestUV, int aDestStrideUV);
+
 } // namespace mozilla
 
 #endif /* ImageToI420Converter_h */
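A hypothetical caller, to show how the new entry point is meant to be used: allocate a packed NV12 buffer (width * height luma bytes plus width * height / 2 chroma bytes) and pass the two plane pointers with their strides. EncodeAsNV12 and everything around it is invented for illustration; only ConvertToNV12 comes from the patch, and even dimensions are assumed.

#include "ImageConversion.h"    // mozilla::ConvertToNV12 (added by this patch)
#include "ImageContainer.h"     // mozilla::layers::Image
#include "mozilla/UniquePtr.h"

// Hypothetical helper, not part of the patch.
nsresult EncodeAsNV12(mozilla::layers::Image* aImage) {
  const auto size = aImage->GetSize();  // assumes even width and height
  const int yStride = size.width;       // tightly packed luma rows
  const int uvStride = size.width;      // one interleaved U/V byte pair per 2 pixels
  const size_t yLength = size_t(yStride) * size.height;
  const size_t uvLength = size_t(uvStride) * (size.height / 2);

  auto buffer = mozilla::MakeUnique<uint8_t[]>(yLength + uvLength);
  uint8_t* destY = buffer.get();
  uint8_t* destUV = buffer.get() + yLength;

  nsresult rv =
      mozilla::ConvertToNV12(aImage, destY, yStride, destUV, uvStride);
  if (NS_FAILED(rv)) {
    return rv;
  }
  // ... hand the NV12 planes to the platform encoder ...
  return NS_OK;
}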
@@ -7,7 +7,7 @@
 #define VideoFrameConverter_h
 
 #include "ImageContainer.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
 #include "Pacer.h"
 #include "PerformanceRecorder.h"
 #include "VideoSegment.h"
@@ -9,7 +9,7 @@
 #include <vpx/vpx_encoder.h>
 
 #include "DriftCompensation.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
 #include "mozilla/gfx/2D.h"
 #include "prsystem.h"
 #include "VideoSegment.h"
@@ -157,7 +157,7 @@ EXPORTS += [
     "FileBlockCache.h",
     "ForwardedInputTrack.h",
     "FrameStatistics.h",
-    "ImageToI420.h",
+    "ImageConversion.h",
     "Intervals.h",
     "MediaCache.h",
     "MediaContainerType.h",
@@ -282,7 +282,7 @@ UNIFIED_SOURCES += [
     "GetUserMediaRequest.cpp",
     "GraphDriver.cpp",
     "GraphRunner.cpp",
-    "ImageToI420.cpp",
+    "ImageConversion.cpp",
     "MediaCache.cpp",
     "MediaContainerType.cpp",
     "MediaDecoder.cpp",
@@ -15,7 +15,7 @@
 #include "libavutil/pixfmt.h"
 #include "mozilla/dom/ImageUtils.h"
 #include "nsPrintfCString.h"
-#include "ImageToI420.h"
+#include "ImageConversion.h"
 #include "libyuv.h"
 #include "FFmpegRuntimeLinker.h"
 
@@ -7,6 +7,7 @@
 #include "WMFMediaDataEncoder.h"
 
 #include "ImageContainer.h"
+#include "ImageConversion.h"
 #include "MFTEncoder.h"
 #include "PlatformEncoderModule.h"
 #include "TimeUnits.h"
@@ -37,8 +38,7 @@ RefPtr<InitPromise> WMFMediaDataEncoder::Init() {
   return InvokeAsync(mTaskQueue, this, __func__,
                      &WMFMediaDataEncoder::ProcessInit);
 }
-RefPtr<EncodePromise> WMFMediaDataEncoder::Encode(
-    const MediaData* aSample) {
+RefPtr<EncodePromise> WMFMediaDataEncoder::Encode(const MediaData* aSample) {
   MOZ_ASSERT(aSample);
 
   RefPtr<const VideoData> sample(aSample->As<const VideoData>());
@@ -67,8 +67,7 @@ RefPtr<ShutdownPromise> WMFMediaDataEncoder::Shutdown() {
     return ShutdownPromise::CreateAndResolve(true, __func__);
   });
 }
-RefPtr<GenericPromise> WMFMediaDataEncoder::SetBitrate(
-    uint32_t aBitsPerSec) {
+RefPtr<GenericPromise> WMFMediaDataEncoder::SetBitrate(uint32_t aBitsPerSec) {
   return InvokeAsync(
       mTaskQueue, __func__,
       [self = RefPtr<WMFMediaDataEncoder>(this), aBitsPerSec]() {
@@ -162,7 +161,8 @@ void WMFMediaDataEncoder::FillConfigData() {
                  : nullptr;
 }
 
-RefPtr<EncodePromise> WMFMediaDataEncoder::ProcessEncode(RefPtr<const VideoData>&& aSample) {
+RefPtr<EncodePromise> WMFMediaDataEncoder::ProcessEncode(
+    RefPtr<const VideoData>&& aSample) {
   AssertOnTaskQueue();
   MOZ_ASSERT(mEncoder);
   MOZ_ASSERT(aSample);
@@ -196,43 +196,110 @@ already_AddRefed<IMFSample> WMFMediaDataEncoder::ConvertToNV12InputSample(
   AssertOnTaskQueue();
   MOZ_ASSERT(mEncoder);
 
-  const layers::PlanarYCbCrImage* image = aData->mImage->AsPlanarYCbCrImage();
-  // TODO: Take care non planar Y-Cb-Cr image (Bug 1881647).
-  NS_ENSURE_TRUE(image, nullptr);
+  struct NV12Info {
+    int32_t mYStride = 0;
+    int32_t mUVStride = 0;
+    size_t mYLength = 0;
+    size_t mBufferLength = 0;
+  } info;
 
-  const layers::PlanarYCbCrData* yuv = image->GetData();
-  auto ySize = yuv->YDataSize();
-  auto cbcrSize = yuv->CbCrDataSize();
-  size_t yLength = yuv->mYStride * ySize.height;
-  size_t length = yLength + (yuv->mCbCrStride * cbcrSize.height * 2);
+  if (const layers::PlanarYCbCrImage* image =
+          aData->mImage->AsPlanarYCbCrImage()) {
+    // Assume this is I420. If it's not, the whole process fails in
+    // ConvertToNV12 below.
+    const layers::PlanarYCbCrData* yuv = image->GetData();
+    info.mYStride = yuv->mYStride;
+    info.mUVStride = yuv->mCbCrStride * 2;
+    info.mYLength = info.mYStride * yuv->YDataSize().height;
+    info.mBufferLength =
+        info.mYLength + (info.mUVStride * yuv->CbCrDataSize().height);
+  } else {
+    info.mYStride = aData->mImage->GetSize().width;
+    info.mUVStride = info.mYStride;
+
+    const int32_t yHeight = aData->mImage->GetSize().height;
+    const int32_t uvHeight = yHeight / 2;
+
+    CheckedInt<size_t> yLength(info.mYStride);
+    yLength *= yHeight;
+    if (!yLength.isValid()) {
+      WMF_ENC_LOGE("yLength overflows");
+      return nullptr;
+    }
+    info.mYLength = yLength.value();
+
+    CheckedInt<size_t> uvLength(info.mUVStride);
+    uvLength *= uvHeight;
+    if (!uvLength.isValid()) {
+      WMF_ENC_LOGE("uvLength overflows");
+      return nullptr;
+    }
+
+    CheckedInt<size_t> length(yLength);
+    length += uvLength;
+    if (!length.isValid()) {
+      WMF_ENC_LOGE("length overflows");
+      return nullptr;
+    }
+    info.mBufferLength = length.value();
+  }
 
   RefPtr<IMFSample> input;
-  HRESULT hr = mEncoder->CreateInputSample(&input, length);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  HRESULT hr = mEncoder->CreateInputSample(&input, info.mBufferLength);
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("CreateInputSample: error = 0x%lX, %ls", hr,
+                 error.ErrorMessage());
+    return nullptr;
+  }
 
   RefPtr<IMFMediaBuffer> buffer;
   hr = input->GetBufferByIndex(0, getter_AddRefs(buffer));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("GetBufferByIndex: error = 0x%lX, %ls", hr,
+                 error.ErrorMessage());
+    return nullptr;
+  }
 
-  hr = buffer->SetCurrentLength(length);
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  hr = buffer->SetCurrentLength(info.mBufferLength);
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("SetCurrentLength: error = 0x%lX, %ls", hr,
+                 error.ErrorMessage());
+    return nullptr;
+  }
 
   LockBuffer lockBuffer(buffer);
-  NS_ENSURE_TRUE(SUCCEEDED(lockBuffer.Result()), nullptr);
+  hr = lockBuffer.Result();
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("LockBuffer: error = 0x%lX, %ls", hr, error.ErrorMessage());
+    return nullptr;
+  }
 
-  // TODO: Take care non I420 image (Bug 1881647).
-  bool ok = libyuv::I420ToNV12(
-                yuv->mYChannel, yuv->mYStride, yuv->mCbChannel,
-                yuv->mCbCrStride, yuv->mCrChannel, yuv->mCbCrStride,
-                lockBuffer.Data(), yuv->mYStride, lockBuffer.Data() + yLength,
-                yuv->mCbCrStride * 2, ySize.width, ySize.height) == 0;
-  NS_ENSURE_TRUE(ok, nullptr);
+  nsresult rv =
+      ConvertToNV12(aData->mImage, lockBuffer.Data(), info.mYStride,
+                    lockBuffer.Data() + info.mYLength, info.mUVStride);
+  if (NS_FAILED(rv)) {
+    WMF_ENC_LOGE("Failed to convert to NV12");
+    return nullptr;
+  }
 
   hr = input->SetSampleTime(UsecsToHNs(aData->mTime.ToMicroseconds()));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("SetSampleTime: error = 0x%lX, %ls", hr, error.ErrorMessage());
+    return nullptr;
+  }
 
   hr = input->SetSampleDuration(UsecsToHNs(aData->mDuration.ToMicroseconds()));
-  NS_ENSURE_TRUE(SUCCEEDED(hr), nullptr);
+  if (FAILED(hr)) {
+    _com_error error(hr);
+    WMF_ENC_LOGE("SetSampleDuration: error = 0x%lX, %ls", hr,
+                 error.ErrorMessage());
+    return nullptr;
+  }
 
   return input.forget();
 }
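To make the fallback sizing above concrete, here is a small worked example with the same CheckedInt overflow guards; the helper and the 1280x720 frame size are hypothetical, chosen only for illustration. A packed RGBA frame of that size yields 1280 * 720 = 921600 luma bytes plus 1280 * 360 = 460800 interleaved chroma bytes, i.e. width * height * 3 / 2 = 1382400 bytes.

#include "mozilla/CheckedInt.h"
#include <cstddef>
#include <cstdint>

// Hypothetical helper mirroring the NV12 sizing of the non-planar branch above.
static size_t Nv12BufferSizeExample() {
  constexpr int32_t kWidth = 1280;  // illustrative frame size
  constexpr int32_t kHeight = 720;

  mozilla::CheckedInt<size_t> yLength(kWidth);   // Y stride == width for packed input
  yLength *= kHeight;                            // 921600 luma bytes
  mozilla::CheckedInt<size_t> uvLength(kWidth);  // UV stride == width (U/V interleaved)
  uvLength *= kHeight / 2;                       // 460800 chroma bytes
  mozilla::CheckedInt<size_t> total = yLength + uvLength;  // 1382400 bytes in total
  return total.isValid() ? total.value() : 0;
}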
@@ -1,7 +1,7 @@
 [video-encoder-h264.https.any.html?main]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED
       if os == "mac": PASS
@@ -9,7 +9,7 @@
 [video-encoder-h264.https.any.html?baseline]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "mac": PASS
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED
@@ -17,7 +17,7 @@
 [video-encoder-h264.https.any.worker.html?baseline]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "mac": PASS
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED
@@ -25,7 +25,7 @@
 [video-encoder-h264.https.any.html?high]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "mac": PASS
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED
@@ -33,7 +33,7 @@
 [video-encoder-h264.https.any.worker.html?main]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "mac": PASS
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED
@@ -41,7 +41,7 @@
 [video-encoder-h264.https.any.worker.html?high]
   [Test that encoding with a specific H264 profile actually produces that profile.]
     expected:
-      if os == "win": FAIL
+      if os == "win": [FAIL, PASS]
       if os == "android": PRECONDITION_FAILED
       if os == "mac": PASS
       if os == "linux" and version == "Ubuntu 18.04": PRECONDITION_FAILED