Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-11-27 06:43:32 +00:00
Backed out changeset 318f32313091 (bug 1459526) for causing wrench bustages. CLOSED TREE
This commit is contained in: parent 72024fcc23, commit 367d829107
@@ -72,7 +72,6 @@ already_AddRefed<Image> RemoteImageHolder::DeserializeImage(
      pData.mStereoMode = descriptor.stereoMode();
      pData.mColorDepth = descriptor.colorDepth();
      pData.mYUVColorSpace = descriptor.yUVColorSpace();
      pData.mColorRange = descriptor.colorRange();
      pData.mYChannel = ImageDataSerializer::GetYChannel(buffer, descriptor);
      pData.mCbChannel = ImageDataSerializer::GetCbChannel(buffer, descriptor);
      pData.mCrChannel = ImageDataSerializer::GetCrChannel(buffer, descriptor);
Binary files not shown (31 files, including one image previously 8.5 KiB).
@@ -1,71 +0,0 @@
# Reference image generated via https://jdashg.github.io/misc/colors/color-quads-16-127-235.html
# Test videos encoded via ../gen_combos.py --write color_quads/720p.png

# We're sort of testing two things here:
# 1. Does an av1.webm video decode into the actual values we expect?
# 2. Do other similar videos decode the same way as av1.webm?
# We have this split because while each platform/compositor has its own inaccuracies,
# each platform/compositor will have the *same* inaccuracies regardless of video.
# So, we just need to first check if e.g. av1.webm decodes to what we expect,
# and then we can generally trivially compare other codecs/containers to that.


# -
# yuv420p

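# (Illustrative note, not part of the original manifest: each entry below has the
# form "[skip-if(cond)] [fuzzy(maxDiff,pixelCount)] == testPage referencePage".
# fuzzy() widens the allowed per-channel difference and differing-pixel count,
# and the -if(...) variants apply their annotation only when the condition holds.)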
fuzzy(16-50,5234-5621) fuzzy-if(swgl,32-38,5462-91746) fuzzy-if(!webrender,16-38,5234-94640) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
fuzzy-if(Android,254-255,273680-273807) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
== ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy(1-2,75-225) fuzzy-if(Android,254-255,273680-273807) fuzzy-if(!Android&&!webrender,1-2,75-94070) fuzzy-if(OSX&&webrender,32-32,187407-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm
fuzzy-if(Android,254-255,273680-273807) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p.av1.webm

skip-if(!webrender||Android) fuzzy(16-48,8349-8818) fuzzy-if(winWidget&&swgl,38-38,184080-184080) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(!webrender||Android) fuzzy-if(Android,255-255,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(!webrender||Android) skip-if(winWidget&&swgl) fuzzy-if(Android,255-255,273726-273726) fuzzy-if(OSX||winWidget,2-34,184281-187407) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm
skip-if(!webrender||Android) fuzzy-if(Android,255-255,273726-273726) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p.av1.webm

# -
# yuv420p10

skip-if(!webrender||Android) fuzzy(33-49,2499-2579) fuzzy-if(swgl,34-52,270527-270528) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
#[2] skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.yuv420p10.av1.webm

skip-if(!webrender||Android) fuzzy(33-49,174932-175092) fuzzy-if(swgl&&!winWidget,37-52,11553-11554) fuzzy-if(swgl&&winWidget,40-40,187200-187200) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
#[2] skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm
skip-if(!webrender||Android) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.yuv420p10.av1.webm

# Android is really broken in a variety of ways for p10.
#[2]: yuv420p10 broken in h264.mp4: https://bugzilla.mozilla.org/show_bug.cgi?id=1711812


# -
# gbrp
# Note: tv-gbrp doesn't really make sense, and we should consider dropping it.
# Specifically, we should probably do (gbrp, ...(tv,pc)x(yuv,yuv10)) instead of (tv,pc)x(gbrp,yuv,yuv10)
# That said, we should probably test a couple combos, at least. (But then again, why not all!)
# !webrender does not support gbr

skip-if(!webrender) skip-if(winWidget&&swgl) fuzzy(0-1,0-3600) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.av1.webm ../reftest_img.html?src=color_quads/720p.png
skip-if(!webrender) skip-if(winWidget&&swgl) fuzzy(0-1,0-7200) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.av1.webm ../reftest_img.html?src=color_quads/720p.png

skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.av1.webm
skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.av1.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.av1.webm

# Our h264.mp4 doesn't handle gbrp, but *also* doesn't error properly.
skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.h264.mp4 ../reftest_video.html?src=timeout
skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.h264.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.h264.mp4

# Our vp9 support doesn't handle gbrp
skip-if(!webrender||OSX||winWidget) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm ../reftest_video.html?src=timeout
skip-if(!webrender||!OSX) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm ../reftest_video.html?src=none
skip-if(!webrender||!winWidget) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm ../reftest_video.html?src=none

skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.vp9.webm ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm
skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm
skip-if(!webrender) == ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.pc.gbrp.vp9.mp4 ../reftest_video.html?src=color_quads/720p.png.bt709.bt709.tv.gbrp.vp9.webm
@@ -1,250 +0,0 @@
#!/usr/bin/env python3

import concurrent.futures
import pathlib
import subprocess
import sys

ARGS = sys.argv
SRC_PATH = pathlib.Path(ARGS.pop())
DIR = SRC_PATH.parent


# crossCombine([{a:false},{a:true}], [{},{b:5}])
# [{a:false}, {a:true}, {a:false,b:5}, {a:true,b:5}]
def cross_combine(*args):
    args = list(args)

    def cross_combine2(listA, listB):
        listC = []
        for a in listA:
            for b in listB:
                c = dict()
                c.update(a)
                c.update(b)
                listC.append(c)
        return listC

    res = [dict()]
    while True:
        try:
            next = args.pop(0)
        except IndexError:
            break
        res = cross_combine2(res, next)
    return res

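# Illustrative usage (not part of the original script): cross_combine merges
# every dict from each argument list with every dict from the others, e.g.
#   cross_combine([{"ext": "webm"}], [{"vcodec": "av1"}, {"vcodec": "vp9"}])
#   == [{"ext": "webm", "vcodec": "av1"}, {"ext": "webm", "vcodec": "vp9"}]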
def keyed_combiner(key, vals):
    res = []
    for v in vals:
        d = dict()
        d[key] = v
        res.append(d)
    return res


# -


def eprint(*args, **kwargs):
    print(*args, file=sys.stderr, **kwargs)


# -

OGG = []
WEBM_CODECS = ["av1", "vp9"]

if "--all" in ARGS:
    OGG = cross_combine(
        [{"ext": "ogg"}], keyed_combiner("vcodec", ["theora", "vp8", "vp9"])
    )
    WEBM_CODECS += ["vp8"]

MP4 = cross_combine([{"ext": "mp4"}], keyed_combiner("vcodec", ["av1", "h264", "vp9"]))

WEBM = cross_combine([{"ext": "webm"}], keyed_combiner("vcodec", WEBM_CODECS))

# -

FORMAT_LIST = set(
    [
        "yuv420p",
        "yuv420p10",
        # 'yuv420p12',
        # 'yuv420p16be',
        # 'yuv420p16le',
        "gbrp",
    ]
)

if "--all" in ARGS:
    FORMAT_LIST |= set(
        [
            "yuv420p",
            "yuv420p10",
            "yuv420p12",
            "yuv420p16be",
            "yuv420p16le",
            "yuv422p",
            "yuv422p10",
            "yuv422p12",
            "yuv422p16be",
            "yuv422p16le",
            "yuv444p",
            "yuv444p10",
            "yuv444p12",
            "yuv444p16be",
            "yuv444p16le",
            "yuv411p",
            "yuv410p",
            "yuyv422",
            "uyvy422",
            "rgb24",
            "bgr24",
            "rgb8",
            "bgr8",
            "rgb444be",
            "rgb444le",
            "bgr444be",
            "bgr444le",
            # 'nv12', # Encoding not different than yuv420p?
            # 'nv21', # Encoding not different than yuv420p?
            "gbrp",
            "gbrp9be",
            "gbrp9le",
            "gbrp10be",
            "gbrp10le",
            "gbrp12be",
            "gbrp12le",
            "gbrp14be",
            "gbrp14le",
            "gbrp16be",
            "gbrp16le",
        ]
    )

FORMATS = keyed_combiner("format", list(FORMAT_LIST))

RANGE = keyed_combiner("range", ["tv", "pc"])

CSPACE_LIST = set(
    [
        "bt709",
        # 'bt2020',
    ]
)

if "--all" in ARGS:
    CSPACE_LIST |= set(
        [
            "bt709",
            "bt2020",
            "bt601-6-525",  # aka smpte170m NTSC
            "bt601-6-625",  # aka bt470bg PAL
        ]
    )
CSPACE_LIST = list(CSPACE_LIST)

# -

COMBOS = cross_combine(
    WEBM + MP4 + OGG,
    FORMATS,
    RANGE,
    keyed_combiner("src_cspace", CSPACE_LIST),
    keyed_combiner("dst_cspace", CSPACE_LIST),
)

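# Illustrative note (not part of the original script): with the default
# (non --all) lists above, each entry of COMBOS is a dict such as
#   {"ext": "webm", "vcodec": "av1", "format": "yuv420p", "range": "tv",
#    "src_cspace": "bt709", "dst_cspace": "bt709"}
# which is turned into one ffmpeg invocation below.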
# -

print(f"{len(COMBOS)} combinations...")

todo = []
for c in COMBOS:
    dst_name = ".".join(
        [
            SRC_PATH.name,
            c["src_cspace"],
            c["dst_cspace"],
            c["range"],
            c["format"],
            c["vcodec"],
            c["ext"],
        ]
    )

    src_cspace = c["src_cspace"]

    vf = f"scale=out_range={c['range']}"
    vf += f",colorspace=all={c['dst_cspace']}"
    vf += f":iall={src_cspace}"
    args = [
        "ffmpeg",
        "-y",
        # For input:
        "-color_primaries",
        src_cspace,
        "-color_trc",
        src_cspace,
        "-colorspace",
        src_cspace,
        "-i",
        SRC_PATH.as_posix(),
        # For output:
        "-bitexact",  # E.g. don't use true random uuids
        "-vf",
        vf,
        "-pix_fmt",
        c["format"],
        "-vcodec",
        c["vcodec"],
        "-crf",
        "1",  # Not-quite-lossless
        (DIR / dst_name).as_posix(),
    ]
    if "-v" in ARGS or "-vv" in ARGS:
        print("$ " + " ".join(args))
    else:
        print(" " + args[-1])

    todo.append(args)

# -

with open(DIR / "reftest.list", "r") as f:
    reftest_list_text = f.read()

for args in todo:
    vid_name = pathlib.Path(args[-1]).name
    if vid_name not in reftest_list_text:
        print(f"WARNING: Not in reftest.list: {vid_name}")

# -

if "--write" not in ARGS:
    print("Use --write to write. Exiting...")
    exit(0)

# -


def run_cmd(args):
    dest = None
    if "-vv" not in ARGS:
        dest = subprocess.DEVNULL
    subprocess.run(args, stderr=dest)


with concurrent.futures.ThreadPoolExecutor() as pool:
    fs = []
    for cur_args in todo:
        f = pool.submit(run_cmd, cur_args)
        fs.append(f)

    done = 0
    for f in concurrent.futures.as_completed(fs):
        f.result()  # Raise if it raised
        done += 1
        sys.stdout.write(f"\rEncoded {done}/{len(todo)}")
@@ -15,8 +15,5 @@ window.addEventListener("MozReftestInvalidate", doTest);
</head>
<body>
  <video id="v1" style="position:absolute; left:0; top:0; filter:hue-rotate(90deg);"></video>
  <script>
    //doTest();
  </script>
</body>
</html>
@@ -15,8 +15,5 @@ window.addEventListener("MozReftestInvalidate", doTest);
</head>
<body>
  <video id="v1" style="position:absolute; left:0; top:0; filter:hue-rotate(90deg);"></video>
  <script>
    //doTest();
  </script>
</body>
</html>
@@ -1,9 +1,9 @@
skip-if(Android) fuzzy-if(OSX,0-80,0-76800) fuzzy-if(appleSilicon,92-92,76799-76799) fuzzy-if(winWidget,0-62,0-76799) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-70,0-2032) fuzzy-if(swgl,62-69,588-76737) HTTP(..) == short.mp4.firstframe.html short.mp4.firstframe-ref.html
skip-if(Android) fuzzy-if(OSX,0-87,0-76797) fuzzy-if(appleSilicon,83-83,76797-76797) fuzzy-if(winWidget,0-60,0-76797) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-60,0-6070) fuzzy-if(swgl,55-76,1698-76545) HTTP(..) == short.mp4.lastframe.html short.mp4.lastframe-ref.html
skip-if(Android) skip-if(cocoaWidget) skip-if(winWidget) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-57,0-4282) fuzzy-if(OSX,55-80,4173-4417) fuzzy-if(swgl,54-54,31270-31270) HTTP(..) == bipbop_300_215kbps.mp4.lastframe.html bipbop_300_215kbps.mp4.lastframe-ref.html
skip-if(Android) fuzzy-if(OSX,0-80,0-76800) fuzzy-if(appleSilicon,92-92,76799-76799) fuzzy-if(winWidget,0-62,0-76799) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-70,0-644) HTTP(..) == short.mp4.firstframe.html short.mp4.firstframe-ref.html
skip-if(Android) fuzzy-if(OSX,0-87,0-76797) fuzzy-if(appleSilicon,83-83,76797-76797) fuzzy-if(winWidget,0-60,0-76797) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-60,0-1810) HTTP(..) == short.mp4.lastframe.html short.mp4.lastframe-ref.html
skip-if(Android) skip-if(cocoaWidget) skip-if(winWidget) fuzzy-if(gtkWidget&&layersGPUAccelerated,0-57,0-4281) fuzzy-if(OSX,55-80,4173-4417) HTTP(..) == bipbop_300_215kbps.mp4.lastframe.html bipbop_300_215kbps.mp4.lastframe-ref.html
skip-if(Android) fuzzy-if(OSX,0-25,0-175921) fuzzy-if(appleSilicon,49-49,176063-176063) fuzzy-if(winWidget,0-71,0-179198) fuzzy-if((/^Windows\x20NT\x2010\.0/.test(http.oscpu))&&(/^aarch64-msvc/.test(xulRuntime.XPCOMABI)),0-255,0-179500) HTTP(..) == gizmo.mp4.seek.html gizmo.mp4.55thframe-ref.html
skip-if(Android) skip-if(MinGW) skip-if((/^Windows\x20NT\x2010\.0/.test(http.oscpu))&&(/^aarch64-msvc/.test(xulRuntime.XPCOMABI))) fuzzy(0-10,0-778236) == image-10bits-rendering-video.html image-10bits-rendering-ref.html
skip-if(Android) skip-if(MinGW) skip-if((/^Windows\x20NT\x2010\.0/.test(http.oscpu))&&(/^aarch64-msvc/.test(xulRuntime.XPCOMABI))) fuzzy(0-10,0-778536) == image-10bits-rendering-90-video.html image-10bits-rendering-90-ref.html
skip-if(Android) fuzzy(0-27,0-573106) fuzzy-if(appleSilicon,46-46,575885-575885) == image-10bits-rendering-720-video.html image-10bits-rendering-720-ref.html
skip-if(Android) fuzzy(0-26,0-567562) fuzzy-if(appleSilicon,46-46,575885-575885) == image-10bits-rendering-720-video.html image-10bits-rendering-720-ref.html
skip-if(Android) fuzzy(0-31,0-573249) == image-10bits-rendering-720-90-video.html image-10bits-rendering-720-90-ref.html
skip-if(Android) skip-if(/^Windows\x20NT\x206\.1/.test(http.oscpu)) fuzzy(0-84,0-771156) fails-if(useDrawSnapshot) == uneven_frame_duration_video.html uneven_frame_duration_video-ref.html # Skip on Windows 7 as the resolution of the video is too high for test machines and will fail in the decoder.
@@ -1,20 +0,0 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<head>
  <meta charset='utf-8'>
</head>
<body>
  <img id="e_img" style="position:absolute; left:0; top:0; max-width:100%">
  <script>
    (async () => {
      const params = new URLSearchParams(window.location.search);
      const src = params.get('src');
      src.defined;

      e_img.src = src;
      await e_img.decode()
      document.documentElement.removeAttribute('class');
    })();
  </script>
</body>
</html>
@@ -1,64 +0,0 @@
<!DOCTYPE HTML>
<html class="reftest-wait">
<head>
  <meta charset='utf-8'>
</head>
<body>
  <video id="e_video" style="position:absolute; left:0; top:0; max-width:100%">
  <script>

    const TIMEOUT_MS = 2000;

    // -

    function sleepPromise(ms) {
      return new Promise(go => {
        setTimeout(go, ms);
      });
    }

    (async () => {
      await sleepPromise(TIMEOUT_MS);
      if (!document.documentElement.hasAttribute('class')) return;

      const div = document.body.appendChild(document.createElement('div'));
      div.textContent = `Timed out after ${TIMEOUT_MS}ms`;
      console.log(div.textContent);

      document.documentElement.removeAttribute('class');
    })();

    // -
    // Test

    (async () => {
      const params = new URLSearchParams(window.location.search);
      const src = params.get('src');
      src.defined;
      if (src == 'none') {
        console.log('Show blank.');
        document.documentElement.removeAttribute('class');
        return;
      }
      if (src == 'timeout') {
        console.log('Deliberate timeout.');
        return;
      }

      e_video.src = src;
      e_video.muted = true;
      const p = e_video.play();
      p.defined;
      try {
        await p;
        console.log('e_video.play() accepted');
      } catch (e) {
        const div = document.body.appendChild(document.createElement('div'));
        div.textContent = `Error: ${JSON.stringify(e)}`;
        console.log(div.textContent);
      }
      document.documentElement.removeAttribute('class');
    })();
  </script>
</body>
</html>
@@ -111,10 +111,6 @@ class MacIOSurface final
  bool IsFullRange() const {
    return GetPixelFormat() == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  }
  mozilla::gfx::ColorRange GetColorRange() const {
    if (IsFullRange()) return mozilla::gfx::ColorRange::FULL;
    return mozilla::gfx::ColorRange::LIMITED;
  }

  // We would like to forward declare NSOpenGLContext, but it is an @interface
  // and this file is also used from c++, so we use a void *.
@@ -225,87 +225,6 @@ enum class ColorRange : uint8_t {
  _Last = FULL,
};

// Really "YcbcrColorSpace"
enum class YUVRangedColorSpace : uint8_t {
  BT601_Narrow = 0,
  BT601_Full,
  BT709_Narrow,
  BT709_Full,
  BT2020_Narrow,
  BT2020_Full,
  GbrIdentity,

  _First = BT601_Narrow,
  _Last = GbrIdentity,
  Default = BT709_Narrow,
};

struct FromYUVRangedColorSpaceT final {
  const YUVColorSpace space;
  const ColorRange range;
};

inline FromYUVRangedColorSpaceT FromYUVRangedColorSpace(
    const YUVRangedColorSpace s) {
  switch (s) {
    case YUVRangedColorSpace::BT601_Narrow:
      return {YUVColorSpace::BT601, ColorRange::LIMITED};
    case YUVRangedColorSpace::BT601_Full:
      return {YUVColorSpace::BT601, ColorRange::FULL};

    case YUVRangedColorSpace::BT709_Narrow:
      return {YUVColorSpace::BT709, ColorRange::LIMITED};
    case YUVRangedColorSpace::BT709_Full:
      return {YUVColorSpace::BT709, ColorRange::FULL};

    case YUVRangedColorSpace::BT2020_Narrow:
      return {YUVColorSpace::BT2020, ColorRange::LIMITED};
    case YUVRangedColorSpace::BT2020_Full:
      return {YUVColorSpace::BT2020, ColorRange::FULL};

    case YUVRangedColorSpace::GbrIdentity:
      return {YUVColorSpace::Identity, ColorRange::FULL};
  }
  MOZ_CRASH("bad YUVRangedColorSpace");
}

// Todo: This should go in the CPP.
inline YUVRangedColorSpace ToYUVRangedColorSpace(const YUVColorSpace space,
                                                 const ColorRange range) {
  bool narrow;
  switch (range) {
    case ColorRange::FULL:
      narrow = false;
      break;
    case ColorRange::LIMITED:
      narrow = true;
      break;
  }

  switch (space) {
    case YUVColorSpace::Identity:
      MOZ_ASSERT(range == ColorRange::FULL);
      return YUVRangedColorSpace::GbrIdentity;

    case YUVColorSpace::BT601:
      return narrow ? YUVRangedColorSpace::BT601_Narrow
                    : YUVRangedColorSpace::BT601_Full;

    case YUVColorSpace::BT709:
      return narrow ? YUVRangedColorSpace::BT709_Narrow
                    : YUVRangedColorSpace::BT709_Full;

    case YUVColorSpace::BT2020:
      return narrow ? YUVRangedColorSpace::BT2020_Narrow
                    : YUVRangedColorSpace::BT2020_Full;
  }
}

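// Illustrative example (not part of the patch): with the helpers above,
//   ToYUVRangedColorSpace(YUVColorSpace::BT709, ColorRange::LIMITED)
// yields YUVRangedColorSpace::BT709_Narrow, and FromYUVRangedColorSpace()
// maps that back to {YUVColorSpace::BT709, ColorRange::LIMITED}.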
template <typename DescriptorT>
inline YUVRangedColorSpace GetYUVRangedColorSpace(const DescriptorT& d) {
  return ToYUVRangedColorSpace(d.yUVColorSpace(), d.colorRange());
}

static inline SurfaceFormat SurfaceFormatForColorDepth(ColorDepth aColorDepth) {
  SurfaceFormat format = SurfaceFormat::A8;
  switch (aColorDepth) {
@@ -738,13 +738,6 @@ struct ParamTraits<mozilla::gfx::YUVColorSpace>
          mozilla::gfx::YUVColorSpace, mozilla::gfx::YUVColorSpace::_First,
          mozilla::gfx::YUVColorSpace::_Last> {};

template <>
struct ParamTraits<mozilla::gfx::YUVRangedColorSpace>
    : public ContiguousEnumSerializerInclusive<
          mozilla::gfx::YUVRangedColorSpace,
          mozilla::gfx::YUVRangedColorSpace::_First,
          mozilla::gfx::YUVRangedColorSpace::_Last> {};

template <>
struct ParamTraits<mozilla::StereoMode>
    : public ContiguousEnumSerializer<mozilla::StereoMode,
@@ -757,7 +757,6 @@ bool DCSurfaceVideo::CreateVideoSwapChain(RenderTextureHost* aTexture) {
  return true;
}

// TODO: Replace with YUVRangedColorSpace
static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace(
    const gfx::YUVColorSpace aYUVColorSpace,
    const gfx::ColorRange aColorRange) {
@@ -786,20 +785,14 @@ static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace(
  return Nothing();
}

static Maybe<DXGI_COLOR_SPACE_TYPE> GetSourceDXGIColorSpace(
    const gfx::YUVRangedColorSpace aYUVColorSpace) {
  const auto info = FromYUVRangedColorSpace(aYUVColorSpace);
  return GetSourceDXGIColorSpace(info.space, info.range);
}

bool DCSurfaceVideo::CallVideoProcessorBlt(RenderTextureHost* aTexture) {
  HRESULT hr;
  const auto videoDevice = mDCLayerTree->GetVideoDevice();
  const auto videoContext = mDCLayerTree->GetVideoContext();
  const auto texture = aTexture->AsRenderDXGITextureHost();

  Maybe<DXGI_COLOR_SPACE_TYPE> sourceColorSpace =
      GetSourceDXGIColorSpace(texture->GetYUVColorSpace());
  Maybe<DXGI_COLOR_SPACE_TYPE> sourceColorSpace = GetSourceDXGIColorSpace(
      texture->GetYUVColorSpace(), texture->GetColorRange());
  if (sourceColorSpace.isNothing()) {
    gfxCriticalNote << "Unsupported color space";
    return false;
@@ -46,6 +46,9 @@ class RenderAndroidSurfaceTextureHost final : public RenderTextureHostSWGL {
  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
  void UnmapPlanes() override;
  gfx::YUVColorSpace GetYUVColorSpace() const override {
    return gfx::YUVColorSpace::Default;
  }

  RenderAndroidSurfaceTextureHost* AsRenderAndroidSurfaceTextureHost()
      override {
@@ -183,12 +183,12 @@ gfx::ColorDepth RenderBufferTextureHost::GetColorDepth() const {
  }
}

gfx::YUVRangedColorSpace RenderBufferTextureHost::GetYUVColorSpace() const {
gfx::YUVColorSpace RenderBufferTextureHost::GetYUVColorSpace() const {
  switch (mDescriptor.type()) {
    case layers::BufferDescriptor::TYCbCrDescriptor:
      return gfx::GetYUVRangedColorSpace(mDescriptor.get_YCbCrDescriptor());
      return mDescriptor.get_YCbCrDescriptor().yUVColorSpace();
    default:
      return gfx::YUVRangedColorSpace::Default;
      return gfx::YUVColorSpace::Default;
  }
}

@@ -42,7 +42,7 @@ class RenderBufferTextureHost final : public RenderTextureHostSWGL {

  gfx::ColorDepth GetColorDepth() const override;

  gfx::YUVRangedColorSpace GetYUVColorSpace() const override;
  gfx::YUVColorSpace GetYUVColorSpace() const override;

  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
@@ -97,10 +97,9 @@ void RenderCompositorD3D11SWGL::HandleExternalImage(
    if (host->GetFormat() == SurfaceFormat::NV12 ||
        host->GetFormat() == SurfaceFormat::P010 ||
        host->GetFormat() == SurfaceFormat::P016) {
      const auto yuv = FromYUVRangedColorSpace(host->GetYUVColorSpace());
      texturedEffect =
          new EffectNV12(layer, yuv.space, yuv.range, host->GetColorDepth(),
                         aFrameSurface.mFilter);
          new EffectNV12(layer, host->GetYUVColorSpace(), host->GetColorRange(),
                         host->GetColorDepth(), aFrameSurface.mFilter);
    } else {
      MOZ_ASSERT(host->GetFormat() == SurfaceFormat::B8G8R8X8 ||
                 host->GetFormat() == SurfaceFormat::B8G8R8A8);
@@ -123,10 +122,9 @@ void RenderCompositorD3D11SWGL::HandleExternalImage(
        GetDevice(), SurfaceFormat::A8, host->GetD3D11Texture2D(2));
    u->SetNextSibling(v);

    const auto yuv = FromYUVRangedColorSpace(host->GetYUVColorSpace());
    texturedEffect =
        new EffectYCbCr(layer, yuv.space, yuv.range, host->GetColorDepth(),
                        aFrameSurface.mFilter);
        new EffectYCbCr(layer, host->GetYUVColorSpace(), host->GetColorRange(),
                        host->GetColorDepth(), aFrameSurface.mFilter);
    size = host->GetSize(0);
    host->LockInternal();
  }
@@ -57,8 +57,8 @@ class RenderDXGITextureHost final : public RenderTextureHostSWGL {
  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
  void UnmapPlanes() override;
  gfx::YUVRangedColorSpace GetYUVColorSpace() const override {
    return ToYUVRangedColorSpace(mYUVColorSpace, GetColorRange());
  gfx::YUVColorSpace GetYUVColorSpace() const override {
    return mYUVColorSpace;
  }

  bool EnsureD3D11Texture2D(ID3D11Device* aDevice);
@@ -146,8 +146,8 @@ class RenderDXGIYCbCrTextureHost final : public RenderTextureHostSWGL {
  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
  void UnmapPlanes() override;
  gfx::YUVRangedColorSpace GetYUVColorSpace() const override {
    return ToYUVRangedColorSpace(mYUVColorSpace, GetColorRange());
  gfx::YUVColorSpace GetYUVColorSpace() const override {
    return mYUVColorSpace;
  }

  bool EnsureD3D11Texture2D(ID3D11Device* aDevice);
@@ -213,12 +213,12 @@ gfx::ColorDepth RenderExternalTextureHost::GetColorDepth() const {
  }
}

gfx::YUVRangedColorSpace RenderExternalTextureHost::GetYUVColorSpace() const {
gfx::YUVColorSpace RenderExternalTextureHost::GetYUVColorSpace() const {
  switch (mDescriptor.type()) {
    case layers::BufferDescriptor::TYCbCrDescriptor:
      return gfx::GetYUVRangedColorSpace(mDescriptor.get_YCbCrDescriptor());
      return mDescriptor.get_YCbCrDescriptor().yUVColorSpace();
    default:
      return gfx::YUVRangedColorSpace::Default;
      return gfx::YUVColorSpace::Default;
  }
}

@@ -39,7 +39,7 @@ class RenderExternalTextureHost final : public RenderTextureHostSWGL {

  gfx::ColorDepth GetColorDepth() const override;

  gfx::YUVRangedColorSpace GetYUVColorSpace() const override;
  gfx::YUVColorSpace GetYUVColorSpace() const override;

  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
@@ -145,10 +145,8 @@ gfx::ColorDepth RenderMacIOSurfaceTextureHost::GetColorDepth() const {
  return gfx::ColorDepth::COLOR_8;
}

gfx::YUVRangedColorSpace RenderMacIOSurfaceTextureHost::GetYUVColorSpace()
    const {
  return ToYUVRangedColorSpace(mSurface->GetYUVColorSpace(),
                               mSurface->GetColorRange());
gfx::YUVColorSpace RenderMacIOSurfaceTextureHost::GetYUVColorSpace() const {
  return mSurface->GetYUVColorSpace();
}

bool RenderMacIOSurfaceTextureHost::MapPlane(RenderCompositor* aCompositor,
@@ -42,7 +42,7 @@ class RenderMacIOSurfaceTextureHost final : public RenderTextureHostSWGL {
  size_t GetPlaneCount() const override;
  gfx::SurfaceFormat GetFormat() const override;
  gfx::ColorDepth GetColorDepth() const override;
  gfx::YUVRangedColorSpace GetYUVColorSpace() const override;
  gfx::YUVColorSpace GetYUVColorSpace() const override;
  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
  void UnmapPlanes() override;
@@ -167,7 +167,7 @@ bool RenderTextureHostSWGL::LockSWGLCompositeSurface(
    case gfx::SurfaceFormat::YUV422: {
      aInfo->yuv_planes = mPlanes.size();
      auto colorSpace = GetYUVColorSpace();
      aInfo->color_space = ToWrYuvRangedColorSpace(colorSpace);
      aInfo->color_space = ToWrYuvColorSpace(colorSpace);
      auto colorDepth = GetColorDepth();
      aInfo->color_depth = ToWrColorDepth(colorDepth);
      break;
@@ -33,8 +33,8 @@ class RenderTextureHostSWGL : public RenderTextureHost {
    return gfx::ColorDepth::COLOR_8;
  }

  virtual gfx::YUVRangedColorSpace GetYUVColorSpace() const {
    return gfx::YUVRangedColorSpace::Default;
  virtual gfx::YUVColorSpace GetYUVColorSpace() const {
    return gfx::YUVColorSpace::Default;
  }

  struct PlaneInfo {
@@ -111,11 +111,11 @@ gfx::ColorDepth RenderTextureHostWrapper::GetColorDepth() const {
  return gfx::ColorDepth::COLOR_8;
}

gfx::YUVRangedColorSpace RenderTextureHostWrapper::GetYUVColorSpace() const {
gfx::YUVColorSpace RenderTextureHostWrapper::GetYUVColorSpace() const {
  if (RenderTextureHostSWGL* swglHost = EnsureRenderTextureHostSWGL()) {
    return swglHost->GetYUVColorSpace();
  }
  return gfx::YUVRangedColorSpace::Default;
  return gfx::YUVColorSpace::Default;
}

bool RenderTextureHostWrapper::MapPlane(RenderCompositor* aCompositor,
@@ -41,7 +41,7 @@ class RenderTextureHostWrapper final : public RenderTextureHostSWGL {
  size_t GetPlaneCount() const override;
  gfx::SurfaceFormat GetFormat() const override;
  gfx::ColorDepth GetColorDepth() const override;
  gfx::YUVRangedColorSpace GetYUVColorSpace() const override;
  gfx::YUVColorSpace GetYUVColorSpace() const override;
  bool MapPlane(RenderCompositor* aCompositor, uint8_t aChannelIndex,
                PlaneInfo& aPlaneInfo) override;
  void UnmapPlanes() override;
@@ -787,31 +787,6 @@ static inline wr::WrYuvColorSpace ToWrYuvColorSpace(
  return wr::WrYuvColorSpace::Rec601;
}

// TODO: Use YUVRangedColorSpace instead of assuming ColorRange::LIMITED.
static inline wr::YuvRangedColorSpace ToWrYuvRangedColorSpace(
    gfx::YUVRangedColorSpace aFrom) {
  switch (aFrom) {
    case gfx::YUVRangedColorSpace::BT601_Narrow:
      return wr::YuvRangedColorSpace::Rec601Narrow;
    case gfx::YUVRangedColorSpace::BT601_Full:
      return wr::YuvRangedColorSpace::Rec601Full;
    case gfx::YUVRangedColorSpace::BT709_Narrow:
      return wr::YuvRangedColorSpace::Rec709Narrow;
    case gfx::YUVRangedColorSpace::BT709_Full:
      return wr::YuvRangedColorSpace::Rec709Full;
    case gfx::YUVRangedColorSpace::BT2020_Narrow:
      return wr::YuvRangedColorSpace::Rec2020Narrow;
    case gfx::YUVRangedColorSpace::BT2020_Full:
      return wr::YuvRangedColorSpace::Rec2020Full;
    case gfx::YUVRangedColorSpace::GbrIdentity:
      break;
    default:
      MOZ_ASSERT_UNREACHABLE("Tried to convert invalid YUVColorSpace.");
      break;
  }
  return wr::YuvRangedColorSpace::GbrIdentity;
}

static inline wr::WrColorDepth ToWrColorDepth(gfx::ColorDepth aColorDepth) {
  switch (aColorDepth) {
    case gfx::ColorDepth::COLOR_8:
@@ -2260,10 +2260,7 @@ fn translate_expression(state: &mut State, e: &syntax::Expr) -> Expr {
      match (lhs.ty.kind, rhs.ty.kind) {
        (TypeKind::Mat2, TypeKind::Vec2) |
        (TypeKind::Mat3, TypeKind::Vec3) |
        (TypeKind::Mat3, TypeKind::Mat3) |
        (TypeKind::Mat3, TypeKind::Mat43) |
        (TypeKind::Mat4, TypeKind::Vec4) => rhs.ty.clone(),
        (TypeKind::Mat43, TypeKind::Vec4) => Type::new(TypeKind::Vec3),
        (TypeKind::Mat2, TypeKind::Float) |
        (TypeKind::Mat3, TypeKind::Float) |
        (TypeKind::Mat4, TypeKind::Float) => lhs.ty.clone(),
@@ -2554,7 +2551,7 @@ fn translate_expression(state: &mut State, e: &syntax::Expr) -> Expr {
        .fields
        .iter()
        .find(|x| &x.name == i)
        .expect(&format!("missing field `{}` in `{}`", i, sym.name));
        .expect("missing field");
      Expr {
        kind: ExprKind::Dot(e, i.clone()),
        ty: field.ty.clone(),
@@ -3046,13 +3043,6 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
        Type::new(Vec4),
        vec![Type::new(Vec4)],
    );
    declare_function(
        state,
        "vec4",
        Some("make_vec4"),
        Type::new(Vec4),
        vec![Type::new(IVec4)],
    );

    declare_function(
        state,
@@ -3238,35 +3228,6 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            Type::new(Float),
        ],
    );
    declare_function(
        state,
        "mat3x4",
        Some("make_mat3x4"),
        Type::new(Mat34),
        vec![
            Type::new(Float),
            Type::new(Float),
            Type::new(Float),
            Type::new(Float),

            Type::new(Float),
            Type::new(Float),
            Type::new(Float),
            Type::new(Float),

            Type::new(Float),
            Type::new(Float),
            Type::new(Float),
            Type::new(Float),
        ],
    );
    declare_function(
        state,
        "transpose",
        None,
        Type::new(Mat43),
        vec![Type::new(Mat34)],
    );
    declare_function(
        state,
        "mat4",
@@ -4166,7 +4127,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            None,
            Type::new(Void),
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int)],
                 Type::new(Int), Type::new(Int)],
        );
        declare_function(
            state,
@@ -4175,7 +4136,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            Type::new(Void),
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int)],
                 Type::new(Int), Type::new(Int)],
        );
        declare_function(
            state,
@@ -4185,7 +4146,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int)],
                 Type::new(Int), Type::new(Int)],
        );
        declare_function(
            state,
@@ -4193,8 +4154,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            None,
            Type::new(Void),
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int),
                 Type::new(Float)],
                 Type::new(Int), Type::new(Int), Type::new(Float)],
        );
        declare_function(
            state,
@@ -4203,8 +4163,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            Type::new(Void),
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int),
                 Type::new(Float)],
                 Type::new(Int), Type::new(Int), Type::new(Float)],
        );
        declare_function(
            state,
@@ -4214,8 +4173,7 @@ pub fn ast_to_hir(state: &mut State, tu: &syntax::TranslationUnit) -> Translatio
            vec![Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(*s), Type::new(Vec2), Type::new(Vec4),
                 Type::new(Vec3), Type::new(Mat3), Type::new(Int),
                 Type::new(Float)],
                 Type::new(Int), Type::new(Int), Type::new(Float)],
        );
    }

@@ -501,123 +501,94 @@ struct YUVMatrix {
  // These constants are loaded off the "this" pointer via relative addressing
  // modes and should be about as quick to load as directly addressed SIMD
  // constant memory.
  V8<int16_t> rbCoeffs;
  V8<int16_t> gCoeffs;
  V8<uint16_t> yScale;
  V8<int16_t> yBias;
  V8<int16_t> uvBias;
  V8<int16_t> brMask;

  V8<int16_t> br_uvCoeffs;  // biased by 6 bits [b_from_u, r_from_v, repeats]
  V8<int16_t> gg_uvCoeffs;  // biased by 6 bits [g_from_u, g_from_v, repeats]
  V8<uint16_t> yCoeffs;     // biased by 7 bits
  V8<int16_t> yBias;        // 0 or 16
  V8<int16_t> uvBias;       // 128
  V8<int16_t> br_yMask;

  // E.g. rec709-narrow:
  // [ 1.16,     0,  1.79, -0.97 ]
  // [ 1.16, -0.21, -0.53,  0.30 ]
  // [ 1.16,  2.11,     0, -1.13 ]
  // =
  // [ yScale,        0, r_from_v ]   ([Y ]              )
  // [ yScale, g_from_u, g_from_v ] x ([cb] - ycbcr_bias )
  // [ yScale, b_from_u,        0 ]   ([cr]              )
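  // Illustrative check (not part of the patch): for rec709-narrow, with
  // ycbcr_bias = (16, 128, 128)/255, the narrow-range white point
  // YCbCr = (235, 128, 128)/255 debiases to (219/255, 0, 0), and
  // 1.16 * 219/255 is approximately 1.0, so it maps to RGB (1, 1, 1) as expected.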
  static YUVMatrix From(const vec3_scalar& ycbcr_bias,
                        const mat3_scalar& rgb_from_debiased_ycbcr) {
    assert(ycbcr_bias.z == ycbcr_bias.y);

    const auto rgb_from_y = rgb_from_debiased_ycbcr[0].y;
    assert(rgb_from_debiased_ycbcr[0].x == rgb_from_debiased_ycbcr[0].z);

    int16_t br_from_y_mask = -1;
    if (rgb_from_debiased_ycbcr[0].x == 0.0) {
      // gbr-identity matrix?
      assert(rgb_from_debiased_ycbcr[0].x == 0);
      assert(rgb_from_debiased_ycbcr[0].y == 1);
      assert(rgb_from_debiased_ycbcr[0].z == 0);

      assert(rgb_from_debiased_ycbcr[1].x == 0);
      assert(rgb_from_debiased_ycbcr[1].y == 0);
      assert(rgb_from_debiased_ycbcr[1].z == 1);

      assert(rgb_from_debiased_ycbcr[2].x == 1);
      assert(rgb_from_debiased_ycbcr[2].y == 0);
      assert(rgb_from_debiased_ycbcr[2].z == 0);

      assert(ycbcr_bias.x == 0);
      assert(ycbcr_bias.y == 0);
      assert(ycbcr_bias.z == 0);

      br_from_y_mask = 0;
    } else {
      assert(rgb_from_debiased_ycbcr[0].x == rgb_from_y);
    }

    assert(rgb_from_debiased_ycbcr[1].x == 0.0);
    const auto g_from_u = rgb_from_debiased_ycbcr[1].y;
    const auto b_from_u = rgb_from_debiased_ycbcr[1].z;

    const auto r_from_v = rgb_from_debiased_ycbcr[2].x;
    const auto g_from_v = rgb_from_debiased_ycbcr[2].y;
    assert(rgb_from_debiased_ycbcr[2].z == 0.0);

    return YUVMatrix({ycbcr_bias.x, ycbcr_bias.y}, rgb_from_y, br_from_y_mask,
                     r_from_v, g_from_u, g_from_v, b_from_u);
  }
  // Set the coefficients to cancel out and pass through YUV as GBR. All biases
  // are set to zero and the BR-mask is set to remove the contribution of Y to
  // the BR channels. Scales are set such that the shift by 6 in convert is
  // balanced.
  YUVMatrix()
      : rbCoeffs(1 << 6),
        gCoeffs(0),
        yScale(1 << (6 + 1)),
        yBias(0),
        uvBias(0),
        brMask(0) {}

  // Convert matrix coefficients to fixed-point representation.
  YUVMatrix(vec2_scalar yuv_bias, double yCoeff, int16_t br_yMask_, double rv,
            double gu, double gv, double bu)
      : br_uvCoeffs(zip(I16(int16_t(bu * (1 << 6) + 0.5)),
                        I16(int16_t(rv * (1 << 6) + 0.5)))),
        gg_uvCoeffs(zip(I16(-int16_t(-gu * (1 << 6) +
                                     0.5)),  // These are negative coeffs, so
                                             // round them away from zero
                        I16(-int16_t(-gv * (1 << 6) + 0.5)))),
        yCoeffs(uint16_t(yCoeff * (1 << (6 + 1)) + 0.5)),
        // (E.g. 1.16 => 74.5<<1)
        yBias(int16_t(yCoeff * yuv_bias.x * 255 * (1<<6) + 0.5)),
        uvBias(int16_t(yuv_bias.y * 255 + 0.5)),
        br_yMask(br_yMask_) {
    assert(yuv_bias.x >= 0);
    assert(yuv_bias.y >= 0);
    assert(yCoeff > 0);
    assert(br_yMask_ == 0 || br_yMask_ == -1);
    assert(bu > 0);
    assert(rv > 0);
    assert(gu <= 0);
    assert(gv <= 0);
  }
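  // Illustrative values (not part of the patch): for rec709-narrow
  // (yCoeff ~1.164, rv ~1.793, gu ~-0.213, gv ~-0.533, bu ~2.112, and
  // yuv_bias = (16/255, 128/255)), this produces approximately
  // yCoeffs = 149 (i.e. 74.5 << 1), yBias = 1192, uvBias = 128,
  // br_uvCoeffs = [135, 115, ...] and gg_uvCoeffs = [-14, -34, ...].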
  YUVMatrix(double rv, double gu, double gv, double bu)
      : rbCoeffs(
            zip(I16(int16_t(bu * 64.0 + 0.5)), I16(int16_t(rv * 64.0 + 0.5)))),
        gCoeffs(zip(I16(-int16_t(gu * -64.0 + 0.5)),
                    I16(-int16_t(gv * -64.0 + 0.5)))),
        yScale(2 * 74 + 1),
        yBias(int16_t(-16 * 74.5) + (1 << 5)),
        uvBias(-128),
        brMask(-1) {}

  ALWAYS_INLINE PackedRGBA8 convert(V8<int16_t> yy, V8<int16_t> uv) const {
    // We gave ourselves an extra bit (7 instead of 6) of bias to give us some
    // extra precision for the more-sensitive y scaling.
    // Note that we have to use an unsigned multiply with a 2x scale to
    // represent a fractional scale and to avoid shifting with the sign bit.
    // Bias Y values by -16 and multiply by 74.5. Add 2^5 offset to round to
    // nearest 2^6. Note that we have to use an unsigned multiply with a 2x
    // scale to represent a fractional scale and to avoid shifting with the sign
    // bit.
    yy = bit_cast<V8<int16_t>>((bit_cast<V8<uint16_t>>(yy) * yScale) >> 1) +
         yBias;

    // Note: if you subtract the bias before multiplication, we see more
    // underflows. This could be fixed by an unsigned subsat.
    yy = bit_cast<V8<int16_t>>((bit_cast<V8<uint16_t>>(yy) * yCoeffs) >> 1);
    yy -= yBias;
    // Bias U/V values by -128.
    uv += uvBias;

    // Compute [B] = [yCoeff*Y + bu*U + 0*V]
    //         [R]   [yCoeff*Y + 0*U + rv*V]
    uv -= uvBias;
    auto br = br_uvCoeffs * uv;
    br = addsat(yy & br_yMask, br);
    // Compute (R, B) = (74.5*Y + rv*V, 74.5*Y + bu*U)
    auto br = rbCoeffs * uv;
    br = addsat(yy & brMask, br);
    br >>= 6;

    // Compute G = yCoeff*Y + gu*U + gv*V
    // First calc [gu*U, gv*V, ...]:
    auto gg = gg_uvCoeffs * uv;
    // Then cross the streams to get `gu*U + gv*V`:
    gg = addsat(gg, bit_cast<V8<int16_t>>(bit_cast<V4<uint32_t>>(gg) >> 16));
    // Add the other parts:
    gg = addsat(yy, gg);  // This is the part that needs the most headroom
                          // usually. In particular, ycbcr(255,255,255) hugely
                          // saturates.
    // Compute G = 74.5*Y + -gu*U + -gv*V
    auto gg = gCoeffs * uv;
    gg = addsat(
        yy,
        addsat(gg, bit_cast<V8<int16_t>>(bit_cast<V4<uint32_t>>(gg) >> 16)));
    gg >>= 6;

    // Interleave B/R and G values. Force alpha (high-gg half) to opaque.
    // Interleave B/R and G values. Force alpha to opaque.
    return packYUV(gg, br);
  }
};
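// Illustrative trace (not part of the patch), following the yCoeffs-based path
// with the rec709-narrow values noted above: for Y = 235 and U = V = 128
// (narrow-range white), yy = ((235 * 149) >> 1) - 1192 = 16315, which is about
// 255 << 6, and the debiased uv terms are 0, so br and gg both come out near
// 255 after the >> 6, i.e. white.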
enum YUVColorSpace { REC_601 = 0, REC_709, REC_2020, IDENTITY };

static const YUVMatrix yuvMatrix[IDENTITY + 1] = {
    // clang-format off
    // From Rec601:
    // [R]   [1.1643835616438356,  0.0,                 1.5960267857142858   ]   [Y -  16]
    // [G] = [1.1643835616438358, -0.3917622900949137, -0.8129676472377708   ] x [U - 128]
    // [B]   [1.1643835616438356,  2.017232142857143,   8.862867620416422e-17]   [V - 128]
    {1.5960267857142858, -0.3917622900949137, -0.8129676472377708, 2.017232142857143},

    // From Rec709:
    // [R]   [1.1643835616438356,  0.0,                  1.7927410714285714]   [Y -  16]
    // [G] = [1.1643835616438358, -0.21324861427372963, -0.532909328559444 ] x [U - 128]
    // [B]   [1.1643835616438356,  2.1124017857142854,   0.0               ]   [V - 128]
    {1.7927410714285714, -0.21324861427372963, -0.532909328559444, 2.1124017857142854},

    // From Rec2020:
    // [R]   [1.16438356164384,  0.0,                1.678674107142860 ]   [Y -  16]
    // [G] = [1.16438356164384, -0.187326104219343, -0.650424318505057 ] x [U - 128]
    // [B]   [1.16438356164384,  2.14177232142857,   0.0               ]   [V - 128]
    {1.678674107142860, -0.187326104219343, -0.650424318505057, 2.14177232142857},

    // Identity
    // [R]   [V]
    // [G] = [Y]
    // [B]   [U]
    {},
    // clang-format on
};

// Helper function for textureLinearRowR8 that samples horizontal taps and
// combines them based on Y fraction with next row.
template <typename S>
@@ -992,7 +963,7 @@ static void linear_row_yuv(uint32_t* dest, int span, sampler2DRect samplerY,
}

static void linear_convert_yuv(Texture& ytex, Texture& utex, Texture& vtex,
                               const YUVMatrix& rgbFromYcbcr, int colorDepth,
                               YUVColorSpace colorSpace, int colorDepth,
                               const IntRect& srcReq, Texture& dsttex,
                               const IntRect& dstReq, bool invertY,
                               const IntRect& clipRect) {
@@ -1040,136 +1011,13 @@ static void linear_convert_yuv(Texture& ytex, Texture& utex, Texture& vtex,
  for (int rows = dstBounds.height(); rows > 0; rows--) {
    linear_row_yuv((uint32_t*)dest, span, &sampler[0], srcUV, srcDUV.x,
                   &sampler[1], &sampler[2], chromaUV, chromaDUV.x, colorDepth,
                   rgbFromYcbcr);
                   yuvMatrix[colorSpace]);
    dest += destStride;
    srcUV.y += srcDUV.y;
    chromaUV.y += chromaDUV.y;
  }
}

// -
// This section must match gfx/2d/Types.h

enum class YUVRangedColorSpace : uint8_t {
  BT601_Narrow = 0,
  BT601_Full,
  BT709_Narrow,
  BT709_Full,
  BT2020_Narrow,
  BT2020_Full,
  GbrIdentity,
};

// -
// This section must match yuv.glsl

vec4_scalar get_ycbcr_zeros_ones(const YUVRangedColorSpace color_space,
                                 const GLuint color_depth) {
  // For SWGL's 8bpc-only pipeline, our extra care here probably doesn't matter.
  // However, technically e.g. 10-bit achromatic zero for cb and cr is
  // (128 << 2) / ((1 << 10) - 1) = 512 / 1023, which != 128 / 255, and affects
  // our matrix values subtly. Maybe not enough to matter? But it's the most
  // correct thing to do.
  // Unlike the glsl version, our texture samples are u8([0,255]) not
  // u16([0,1023]) though.
  switch (color_space) {
    case YUVRangedColorSpace::BT601_Narrow:
    case YUVRangedColorSpace::BT709_Narrow:
    case YUVRangedColorSpace::BT2020_Narrow: {
      auto extra_bit_count = color_depth - 8;
      vec4_scalar zo = {
          float(16 << extra_bit_count),
          float(128 << extra_bit_count),
          float(235 << extra_bit_count),
          float(240 << extra_bit_count),
      };
      float all_bits = (1 << color_depth) - 1;
      zo /= all_bits;
      return zo;
    }

    case YUVRangedColorSpace::BT601_Full:
    case YUVRangedColorSpace::BT709_Full:
    case YUVRangedColorSpace::BT2020_Full: {
      const auto narrow =
          get_ycbcr_zeros_ones(YUVRangedColorSpace::BT601_Narrow, color_depth);
      return {0.0, narrow.y, 1.0, 1.0};
    }

    case YUVRangedColorSpace::GbrIdentity:
      break;
  }
  return {0.0, 0.0, 1.0, 1.0};
}

constexpr mat3_scalar RgbFromYuv_Rec601 = {
    {1.00000, 1.00000, 1.00000},
    {0.00000, -0.17207, 0.88600},
    {0.70100, -0.35707, 0.00000},
};
constexpr mat3_scalar RgbFromYuv_Rec709 = {
    {1.00000, 1.00000, 1.00000},
    {0.00000, -0.09366, 0.92780},
    {0.78740, -0.23406, 0.00000},
};
constexpr mat3_scalar RgbFromYuv_Rec2020 = {
    {1.00000, 1.00000, 1.00000},
    {0.00000, -0.08228, 0.94070},
    {0.73730, -0.28568, 0.00000},
};
constexpr mat3_scalar RgbFromYuv_GbrIdentity = {
    {0, 1, 0},
    {0, 0, 1},
    {1, 0, 0},
};

inline mat3_scalar get_rgb_from_yuv(const YUVRangedColorSpace color_space) {
  switch (color_space) {
    case YUVRangedColorSpace::BT601_Narrow:
    case YUVRangedColorSpace::BT601_Full:
      return RgbFromYuv_Rec601;
    case YUVRangedColorSpace::BT709_Narrow:
    case YUVRangedColorSpace::BT709_Full:
      return RgbFromYuv_Rec709;
    case YUVRangedColorSpace::BT2020_Narrow:
    case YUVRangedColorSpace::BT2020_Full:
      return RgbFromYuv_Rec2020;
    case YUVRangedColorSpace::GbrIdentity:
      break;
  }
  return RgbFromYuv_GbrIdentity;
}

struct YcbcrInfo final {
  vec3_scalar ycbcr_bias;
  mat3_scalar rgb_from_debiased_ycbcr;
};

inline YcbcrInfo get_ycbcr_info(const YUVRangedColorSpace color_space,
                                GLuint color_depth) {
  // SWGL always does 8bpc math, so don't scale the matrix for 10bpc!
  color_depth = 8;

  const auto zeros_ones = get_ycbcr_zeros_ones(color_space, color_depth);
  const auto zeros = vec2_scalar{zeros_ones.x, zeros_ones.y};
  const auto ones = vec2_scalar{zeros_ones.z, zeros_ones.w};
  const auto scale = 1.0f / (ones - zeros);

  const auto rgb_from_yuv = get_rgb_from_yuv(color_space);
  const mat3_scalar yuv_from_debiased_ycbcr = {
      {scale.x, 0, 0},
      {0, scale.y, 0},
      {0, 0, scale.y},
  };

  YcbcrInfo ret;
  ret.ycbcr_bias = {zeros.x, zeros.y, zeros.y};
  ret.rgb_from_debiased_ycbcr = rgb_from_yuv * yuv_from_debiased_ycbcr;
  return ret;
}
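// Illustrative example (not part of the patch): for BT709_Narrow at 8 bpc,
// get_ycbcr_zeros_ones() returns {16, 128, 235, 240}/255, so the scales are
// 255/219 (about 1.164) for Y and 255/112 (about 2.277) for Cb/Cr; multiplying
// RgbFromYuv_Rec709 by that diagonal reproduces the familiar
// 1.164 / 1.793 / -0.213 / -0.533 / 2.112 coefficients used above.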
// -

extern "C" {

// Extension for compositing a YUV surface represented by separate YUV planes
@@ -1177,7 +1025,7 @@ extern "C" {
// transform from YUV to BGRA after sampling.
void CompositeYUV(LockedTexture* lockedDst, LockedTexture* lockedY,
                  LockedTexture* lockedU, LockedTexture* lockedV,
                  YUVRangedColorSpace colorSpace, GLuint colorDepth, GLint srcX,
                  YUVColorSpace colorSpace, GLuint colorDepth, GLint srcX,
                  GLint srcY, GLsizei srcWidth, GLsizei srcHeight, GLint dstX,
                  GLint dstY, GLsizei dstWidth, GLsizei dstHeight,
                  GLboolean flip, GLint clipX, GLint clipY, GLsizei clipWidth,
@@ -1185,14 +1033,10 @@ void CompositeYUV(LockedTexture* lockedDst, LockedTexture* lockedY,
  if (!lockedDst || !lockedY || !lockedU || !lockedV) {
    return;
  }
  if (colorSpace > YUVRangedColorSpace::GbrIdentity) {
  if (colorSpace > IDENTITY) {
    assert(false);
    return;
  }
  const auto ycbcrInfo = get_ycbcr_info(colorSpace, colorDepth);
  const auto rgbFromYcbcr =
      YUVMatrix::From(ycbcrInfo.ycbcr_bias, ycbcrInfo.rgb_from_debiased_ycbcr);

  Texture& ytex = *lockedY;
  Texture& utex = *lockedU;
  Texture& vtex = *lockedV;
@@ -1218,7 +1062,7 @@ void CompositeYUV(LockedTexture* lockedDst, LockedTexture* lockedY,
  // For now, always use a linear filter path that would be required for
  // scaling. Further fast-paths for non-scaled video might be desirable in the
  // future.
  linear_convert_yuv(ytex, utex, vtex, rgbFromYcbcr, colorDepth, srcReq, dsttex,
  linear_convert_yuv(ytex, utex, vtex, colorSpace, colorDepth, srcReq, dsttex,
                     dstReq, flip, clipRect);
}

@@ -343,7 +343,6 @@ bvec2 make_bvec2(const X& x, const Y& y) {
  return bvec2(x, y);
}

struct vec3_scalar;
struct vec4_scalar;

struct vec2_scalar {
@@ -372,7 +371,6 @@ struct vec2_scalar {
  vec2_scalar sel(XYZW c1, XYZW c2) {
    return vec2_scalar(select(c1), select(c2));
  }
  vec3_scalar sel(XYZW c1, XYZW c2, XYZW c3);
  vec4_scalar sel(XYZW c1, XYZW c2, XYZW c3, XYZW c4);

  friend bool operator==(const vec2_scalar& l, const vec2_scalar& r) {
@@ -1075,9 +1073,6 @@ struct ivec4_scalar {
  friend ivec4_scalar operator&(int32_t a, ivec4_scalar b) {
    return ivec4_scalar{a & b.x, a & b.y, a & b.z, a & b.w};
  }
  friend ivec4_scalar operator<<(ivec4_scalar a, int32_t b) {
    return ivec4_scalar{a.x << b, a.y << b, a.z << b, a.w << b};
  }

  int32_t& operator[](int index) {
    switch (index) {
@@ -1529,9 +1524,6 @@ struct vec3 {
  friend vec3 operator/(vec3 a, Float b) {
    return vec3(a.x / b, a.y / b, a.z / b);
  }
  friend vec3 operator/(vec3 a, vec3 b) {
    return vec3(a.x / b.x, a.y / b.y, a.z / b.z);
  }

  friend I32 operator==(const vec3& l, const vec3& r) {
    return l.x == r.x && l.y == r.y && l.z == r.z;
@@ -1776,9 +1768,6 @@ struct vec4_scalar {
  }
};

vec3_scalar vec2_scalar::sel(XYZW c1, XYZW c2, XYZW c3) {
  return {select(c1), select(c2), select(c3)};
}
vec4_scalar vec2_scalar::sel(XYZW c1, XYZW c2, XYZW c3, XYZW c4) {
  return vec4_scalar{select(c1), select(c2), select(c3), select(c4)};
}
@@ -2052,10 +2041,6 @@ vec4 make_vec4(const X& x, const Y& y, const Z& z, const W& w) {
  return vec4(x, y, z, w);
}

vec4_scalar make_vec4(const ivec4_scalar& v) {
  return vec4_scalar{float(v.x), float(v.y), float(v.z), float(v.w)};
}

ALWAYS_INLINE vec3::vec3(vec4 v) : x(v.x), y(v.y), z(v.z) {}

SI ivec4 roundfast(vec4 v, Float scale) {
@@ -2385,17 +2370,6 @@ struct mat3_scalar {
    u.z = m[0].z * v.x + m[1].z * v.y + m[2].z * v.z;
    return u;
  }

  friend auto operator*(mat3_scalar a, mat3_scalar b) {
    mat3_scalar r;
    for (int c = 0; c < 3; c++) {
      const auto& v = b[c];
      r[c].x = a[0].x * v.x + a[1].x * v.y + a[2].x * v.z;
      r[c].y = a[0].y * v.x + a[1].y * v.y + a[2].y * v.z;
      r[c].z = a[0].z * v.x + a[1].z * v.y + a[2].z * v.z;
    }
    return r;
  }
};

struct mat3 {
@@ -2468,87 +2442,6 @@ mat3 make_mat3(const X& x, const Y& y, const Z& z) {
  return mat3(x, y, z);
}

struct mat3x4_scalar {
  vec4_scalar data[3];

  mat3x4_scalar() = default;
  constexpr mat3x4_scalar(vec4_scalar a, vec4_scalar b, vec4_scalar c) {
    data[0] = a;
    data[1] = b;
    data[2] = c;
  }

  auto& operator[](int index) { return data[index]; }
  constexpr auto operator[](int index) const { return data[index]; }

  friend auto operator*(mat3x4_scalar m, vec3_scalar v) {
    vec4_scalar u;
    u.x = m[0].x * v.x + m[1].x * v.y + m[2].x * v.z;
    u.y = m[0].y * v.x + m[1].y * v.y + m[2].y * v.z;
    u.z = m[0].z * v.x + m[1].z * v.y + m[2].z * v.z;
    u.w = m[0].w * v.x + m[1].w * v.y + m[2].w * v.z;
    return u;
  }

  friend auto operator*(mat3x4_scalar m, vec3 v) {
    vec4 u;
    u.x = m[0].x * v.x + m[1].x * v.y + m[2].x * v.z;
    u.y = m[0].y * v.x + m[1].y * v.y + m[2].y * v.z;
    u.z = m[0].z * v.x + m[1].z * v.y + m[2].z * v.z;
    u.w = m[0].w * v.x + m[1].w * v.y + m[2].w * v.z;
    return u;
  }
};

constexpr mat3x4_scalar make_mat3x4(float m0, float m1, float m2, float m3,
                                    float m4, float m5, float m6, float m7,
                                    float m8, float m9, float m10, float m11) {
  return mat3x4_scalar{
      {m0, m1, m2, m3},
      {m4, m5, m6, m7},
      {m8, m9, m10, m11},
  };
}

struct mat4x3_scalar {
  vec3_scalar data[4];

  mat4x3_scalar() = default;
  constexpr mat4x3_scalar(vec3_scalar a, vec3_scalar b, vec3_scalar c,
                          vec3_scalar d) {
    data[0] = a;
    data[1] = b;
    data[2] = c;
    data[3] = d;
  }

  auto& operator[](int index) { return data[index]; }
  constexpr auto operator[](int index) const { return data[index]; }

  friend auto operator*(mat4x3_scalar m, vec4_scalar v) {
    vec3_scalar u;
    u.x = m[0].x * v.x + m[1].x * v.y + m[2].x * v.z + m[3].x * v.w;
    u.y = m[0].y * v.x + m[1].y * v.y + m[2].y * v.z + m[3].y * v.w;
    u.z = m[0].z * v.x + m[1].z * v.y + m[2].z * v.z + m[3].z * v.w;
|
||||
return u;
|
||||
}
|
||||
|
||||
friend auto operator*(mat4x3_scalar m, vec4 v) {
|
||||
vec3 u;
|
||||
u.x = m[0].x * v.x + m[1].x * v.y + m[2].x * v.z + m[3].x * v.w;
|
||||
u.y = m[0].y * v.x + m[1].y * v.y + m[2].y * v.z + m[3].y * v.w;
|
||||
u.z = m[0].z * v.x + m[1].z * v.y + m[2].z * v.z + m[3].z * v.w;
|
||||
return u;
|
||||
}
|
||||
};
|
||||
|
||||
constexpr mat4x3_scalar transpose(const mat3x4_scalar m) {
|
||||
return {{m[0].x, m[1].x, m[2].x},
|
||||
{m[0].y, m[1].y, m[2].y},
|
||||
{m[0].z, m[1].z, m[2].z},
|
||||
{m[0].w, m[1].w, m[2].w}};
|
||||
}
|
||||
|
||||
struct mat4_scalar {
|
||||
vec4_scalar data[4];
|
||||
|
||||
|
@ -981,27 +981,27 @@ static int blendGaussianBlur(S sampler, vec2 uv, const vec4_scalar& uv_rect,
|
||||
swgl_commitGaussianBlur(R8, s, p, uv_rect, hori, radius, coeffs)
|
||||
|
||||
// Convert and pack planar YUV samples to RGB output using a color space
|
||||
static ALWAYS_INLINE PackedRGBA8 convertYUV(const YUVMatrix& rgb_from_ycbcr,
|
||||
U16 y, U16 u, U16 v) {
|
||||
static ALWAYS_INLINE PackedRGBA8 convertYUV(int colorSpace, U16 y, U16 u,
|
||||
U16 v) {
|
||||
auto yy = V8<int16_t>(zip(y, y));
|
||||
auto uv = V8<int16_t>(zip(u, v));
|
||||
return rgb_from_ycbcr.convert(yy, uv);
|
||||
return yuvMatrix[colorSpace].convert(yy, uv);
|
||||
}
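The convertYUV() helper above, in both the removed and restored forms, packs the lanes before the matrix multiply: luma is duplicated with zip(y, y) and the two chroma planes are interleaved with zip(u, v), so each output pixel is fed a (Y, Y) pair and a (Cb, Cr) pair. A rough Rust illustration of that interleaving, using a hypothetical scalar zip helper (SWGL's real zip operates on SIMD vectors):

// Rough illustration only; this is the scalar equivalent of the lane order
// the SIMD zip produces before YUVMatrix::convert consumes it.
fn zip(a: &[u16], b: &[u16]) -> Vec<u16> {
    a.iter().zip(b).flat_map(|(&x, &y)| [x, y]).collect()
}

fn main() {
    let y = [200u16, 180, 160, 140];
    let u = [90u16, 100, 110, 120];
    let v = [130u16, 140, 150, 160];
    println!("{:?}", zip(&y, &y)); // [200, 200, 180, 180, 160, 160, 140, 140]
    println!("{:?}", zip(&u, &v)); // [90, 130, 100, 140, 110, 150, 120, 160]
}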
|
||||
|
||||
// Helper functions to sample from planar YUV textures before converting to RGB
|
||||
template <typename S0>
|
||||
static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0,
|
||||
const YUVMatrix& rgb_from_ycbcr,
|
||||
int colorSpace,
|
||||
UNUSED int rescaleFactor) {
|
||||
switch (sampler0->format) {
|
||||
case TextureFormat::RGBA8: {
|
||||
auto planar = textureLinearPlanarRGBA8(sampler0, uv0);
|
||||
return convertYUV(rgb_from_ycbcr, highHalf(planar.rg), lowHalf(planar.rg),
|
||||
return convertYUV(colorSpace, highHalf(planar.rg), lowHalf(planar.rg),
|
||||
lowHalf(planar.ba));
|
||||
}
|
||||
case TextureFormat::YUV422: {
|
||||
auto planar = textureLinearPlanarYUV422(sampler0, uv0);
|
||||
return convertYUV(rgb_from_ycbcr, planar.y, planar.u, planar.v);
|
||||
return convertYUV(colorSpace, planar.y, planar.u, planar.v);
|
||||
}
|
||||
default:
|
||||
assert(false);
|
||||
@ -1011,21 +1011,18 @@ static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0,
|
||||
|
||||
template <bool BLEND, typename S0, typename P, typename C = NoColor>
|
||||
static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
const vec4_scalar& uv_rect0, const vec3_scalar& ycbcr_bias,
|
||||
const mat3_scalar& rgb_from_debiased_ycbcr,
|
||||
const vec4_scalar& uv_rect0, int colorSpace,
|
||||
int rescaleFactor, C color = C()) {
|
||||
if (!swgl_isTextureLinear(sampler0)) {
|
||||
return 0;
|
||||
}
|
||||
LINEAR_QUANTIZE_UV(sampler0, uv0, uv_step0, uv_rect0, min_uv0, max_uv0);
|
||||
const auto rgb_from_ycbcr =
|
||||
YUVMatrix::From(ycbcr_bias, rgb_from_debiased_ycbcr);
|
||||
auto c = packColor(buf, color);
|
||||
auto* end = buf + span;
|
||||
for (; buf < end; buf += swgl_StepSize, uv0 += uv_step0) {
|
||||
commit_blend_span<BLEND>(
|
||||
buf, applyColor(sampleYUV(sampler0, ivec2(clamp(uv0, min_uv0, max_uv0)),
|
||||
rgb_from_ycbcr, rescaleFactor),
|
||||
colorSpace, rescaleFactor),
|
||||
c));
|
||||
}
|
||||
return span;
|
||||
@ -1033,23 +1030,20 @@ static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
|
||||
template <typename S0, typename S1>
|
||||
static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0, S1 sampler1,
|
||||
ivec2 uv1,
|
||||
const YUVMatrix& rgb_from_ycbcr,
|
||||
ivec2 uv1, int colorSpace,
|
||||
UNUSED int rescaleFactor) {
|
||||
switch (sampler1->format) {
|
||||
case TextureFormat::RG8: {
|
||||
assert(sampler0->format == TextureFormat::R8);
|
||||
auto y = textureLinearUnpackedR8(sampler0, uv0);
|
||||
auto planar = textureLinearPlanarRG8(sampler1, uv1);
|
||||
return convertYUV(rgb_from_ycbcr, y, lowHalf(planar.rg),
|
||||
highHalf(planar.rg));
|
||||
return convertYUV(colorSpace, y, lowHalf(planar.rg), highHalf(planar.rg));
|
||||
}
|
||||
case TextureFormat::RGBA8: {
|
||||
assert(sampler0->format == TextureFormat::R8);
|
||||
auto y = textureLinearUnpackedR8(sampler0, uv0);
|
||||
auto planar = textureLinearPlanarRGBA8(sampler1, uv1);
|
||||
return convertYUV(rgb_from_ycbcr, y, lowHalf(planar.ba),
|
||||
highHalf(planar.rg));
|
||||
return convertYUV(colorSpace, y, lowHalf(planar.ba), highHalf(planar.rg));
|
||||
}
|
||||
default:
|
||||
assert(false);
|
||||
@ -1061,23 +1055,20 @@ template <bool BLEND, typename S0, typename S1, typename P,
|
||||
typename C = NoColor>
|
||||
static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
const vec4_scalar& uv_rect0, S1 sampler1, vec2 uv1,
|
||||
const vec4_scalar& uv_rect1, const vec3_scalar& ycbcr_bias,
|
||||
const mat3_scalar& rgb_from_debiased_ycbcr,
|
||||
const vec4_scalar& uv_rect1, int colorSpace,
|
||||
int rescaleFactor, C color = C()) {
|
||||
if (!swgl_isTextureLinear(sampler0) || !swgl_isTextureLinear(sampler1)) {
|
||||
return 0;
|
||||
}
|
||||
LINEAR_QUANTIZE_UV(sampler0, uv0, uv_step0, uv_rect0, min_uv0, max_uv0);
|
||||
LINEAR_QUANTIZE_UV(sampler1, uv1, uv_step1, uv_rect1, min_uv1, max_uv1);
|
||||
const auto rgb_from_ycbcr =
|
||||
YUVMatrix::From(ycbcr_bias, rgb_from_debiased_ycbcr);
|
||||
auto c = packColor(buf, color);
|
||||
auto* end = buf + span;
|
||||
for (; buf < end; buf += swgl_StepSize, uv0 += uv_step0, uv1 += uv_step1) {
|
||||
commit_blend_span<BLEND>(
|
||||
buf, applyColor(sampleYUV(sampler0, ivec2(clamp(uv0, min_uv0, max_uv0)),
|
||||
sampler1, ivec2(clamp(uv1, min_uv1, max_uv1)),
|
||||
rgb_from_ycbcr, rescaleFactor),
|
||||
colorSpace, rescaleFactor),
|
||||
c));
|
||||
}
|
||||
return span;
|
||||
@ -1086,8 +1077,7 @@ static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
template <typename S0, typename S1, typename S2>
|
||||
static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0, S1 sampler1,
|
||||
ivec2 uv1, S2 sampler2, ivec2 uv2,
|
||||
const YUVMatrix& rgb_from_ycbcr,
|
||||
int rescaleFactor) {
|
||||
int colorSpace, int rescaleFactor) {
|
||||
assert(sampler0->format == sampler1->format &&
|
||||
sampler0->format == sampler2->format);
|
||||
switch (sampler0->format) {
|
||||
@ -1095,7 +1085,7 @@ static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0, S1 sampler1,
|
||||
auto y = textureLinearUnpackedR8(sampler0, uv0);
|
||||
auto u = textureLinearUnpackedR8(sampler1, uv1);
|
||||
auto v = textureLinearUnpackedR8(sampler2, uv2);
|
||||
return convertYUV(rgb_from_ycbcr, y, u, v);
|
||||
return convertYUV(colorSpace, y, u, v);
|
||||
}
|
||||
case TextureFormat::R16: {
|
||||
// The rescaling factor represents how many bits to add to renormalize the
|
||||
@ -1110,7 +1100,7 @@ static ALWAYS_INLINE PackedRGBA8 sampleYUV(S0 sampler0, ivec2 uv0, S1 sampler1,
|
||||
auto y = textureLinearUnpackedR16(sampler0, uv0) >> rescaleBits;
|
||||
auto u = textureLinearUnpackedR16(sampler1, uv1) >> rescaleBits;
|
||||
auto v = textureLinearUnpackedR16(sampler2, uv2) >> rescaleBits;
|
||||
return convertYUV(rgb_from_ycbcr, U16(y), U16(u), U16(v));
|
||||
return convertYUV(colorSpace, U16(y), U16(u), U16(v));
|
||||
}
|
||||
default:
|
||||
assert(false);
|
||||
@ -1128,18 +1118,15 @@ static void blendYUVFallback(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
vec2_scalar uv_step1, vec2_scalar min_uv1,
|
||||
vec2_scalar max_uv1, S2 sampler2, vec2 uv2,
|
||||
vec2_scalar uv_step2, vec2_scalar min_uv2,
|
||||
vec2_scalar max_uv2, const vec3_scalar& ycbcr_bias,
|
||||
const mat3_scalar& rgb_from_debiased_ycbcr,
|
||||
vec2_scalar max_uv2, int colorSpace,
|
||||
int rescaleFactor, C color) {
|
||||
const auto rgb_from_ycbcr =
|
||||
YUVMatrix::From(ycbcr_bias, rgb_from_debiased_ycbcr);
|
||||
for (auto* end = buf + span; buf < end; buf += swgl_StepSize, uv0 += uv_step0,
|
||||
uv1 += uv_step1, uv2 += uv_step2) {
|
||||
commit_blend_span<BLEND>(
|
||||
buf, applyColor(sampleYUV(sampler0, ivec2(clamp(uv0, min_uv0, max_uv0)),
|
||||
sampler1, ivec2(clamp(uv1, min_uv1, max_uv1)),
|
||||
sampler2, ivec2(clamp(uv2, min_uv2, max_uv2)),
|
||||
rgb_from_ycbcr, rescaleFactor),
|
||||
colorSpace, rescaleFactor),
|
||||
color));
|
||||
}
|
||||
}
|
||||
@ -1149,8 +1136,7 @@ template <bool BLEND, typename S0, typename S1, typename S2, typename P,
|
||||
static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
const vec4_scalar& uv_rect0, S1 sampler1, vec2 uv1,
|
||||
const vec4_scalar& uv_rect1, S2 sampler2, vec2 uv2,
|
||||
const vec4_scalar& uv_rect2, const vec3_scalar& ycbcr_bias,
|
||||
const mat3_scalar& rgb_from_debiased_ycbcr,
|
||||
const vec4_scalar& uv_rect2, int colorSpace,
|
||||
int rescaleFactor, C color = C()) {
|
||||
if (!swgl_isTextureLinear(sampler0) || !swgl_isTextureLinear(sampler1) ||
|
||||
!swgl_isTextureLinear(sampler2)) {
|
||||
@ -1162,8 +1148,8 @@ static int blendYUV(P* buf, int span, S0 sampler0, vec2 uv0,
|
||||
auto c = packColor(buf, color);
|
||||
blendYUVFallback<BLEND>(buf, span, sampler0, uv0, uv_step0, min_uv0, max_uv0,
|
||||
sampler1, uv1, uv_step1, min_uv1, max_uv1, sampler2,
|
||||
uv2, uv_step2, min_uv2, max_uv2, ycbcr_bias,
|
||||
rgb_from_debiased_ycbcr, rescaleFactor, c);
|
||||
uv2, uv_step2, min_uv2, max_uv2, colorSpace,
|
||||
rescaleFactor, c);
|
||||
return span;
|
||||
}
|
||||
|
||||
@ -1180,8 +1166,7 @@ static int blendYUV(uint32_t* buf, int span, sampler2DRect sampler0, vec2 uv0,
|
||||
const vec4_scalar& uv_rect0, sampler2DRect sampler1,
|
||||
vec2 uv1, const vec4_scalar& uv_rect1,
|
||||
sampler2DRect sampler2, vec2 uv2,
|
||||
const vec4_scalar& uv_rect2, const vec3_scalar& ycbcr_bias,
|
||||
const mat3_scalar& rgb_from_debiased_ycbcr,
|
||||
const vec4_scalar& uv_rect2, int colorSpace,
|
||||
int rescaleFactor, NoColor noColor = NoColor()) {
|
||||
if (!swgl_isTextureLinear(sampler0) || !swgl_isTextureLinear(sampler1) ||
|
||||
!swgl_isTextureLinear(sampler2)) {
|
||||
@ -1207,11 +1192,10 @@ static int blendYUV(uint32_t* buf, int span, sampler2DRect sampler0, vec2 uv0,
|
||||
(min_uv1.x - uv1.x.x) / uv_step1.x))),
|
||||
(end - buf) / swgl_StepSize);
|
||||
if (outside > 0) {
|
||||
blendYUVFallback<BLEND>(buf, outside * swgl_StepSize, sampler0, uv0,
|
||||
uv_step0, min_uv0, max_uv0, sampler1, uv1,
|
||||
uv_step1, min_uv1, max_uv1, sampler2, uv2,
|
||||
uv_step2, min_uv2, max_uv2, ycbcr_bias,
|
||||
rgb_from_debiased_ycbcr, rescaleFactor, noColor);
|
||||
blendYUVFallback<BLEND>(
|
||||
buf, outside * swgl_StepSize, sampler0, uv0, uv_step0, min_uv0,
|
||||
max_uv0, sampler1, uv1, uv_step1, min_uv1, max_uv1, sampler2, uv2,
|
||||
uv_step2, min_uv2, max_uv2, colorSpace, rescaleFactor, noColor);
|
||||
buf += outside * swgl_StepSize;
|
||||
uv0.x += outside * uv_step0.x;
|
||||
uv1.x += outside * uv_step1.x;
|
||||
@ -1229,12 +1213,10 @@ static int blendYUV(uint32_t* buf, int span, sampler2DRect sampler0, vec2 uv0,
|
||||
int colorDepth =
|
||||
(sampler0->format == TextureFormat::R16 ? 16 : 8) - rescaleFactor;
|
||||
// Finally, call the inner loop of CompositeYUV.
|
||||
const auto rgb_from_ycbcr =
|
||||
YUVMatrix::From(ycbcr_bias, rgb_from_debiased_ycbcr);
|
||||
linear_row_yuv<BLEND>(
|
||||
buf, inside * swgl_StepSize, sampler0, force_scalar(uv0),
|
||||
uv_step0.x / swgl_StepSize, sampler1, sampler2, force_scalar(uv1),
|
||||
uv_step1.x / swgl_StepSize, colorDepth, rgb_from_ycbcr);
|
||||
uv_step1.x / swgl_StepSize, colorDepth, yuvMatrix[colorSpace]);
|
||||
// Now that we're done, advance past the processed inside portion.
|
||||
buf += inside * swgl_StepSize;
|
||||
uv0.x += inside * uv_step0.x;
|
||||
@ -1247,8 +1229,8 @@ static int blendYUV(uint32_t* buf, int span, sampler2DRect sampler0, vec2 uv0,
|
||||
// left of the span.
|
||||
blendYUVFallback<BLEND>(buf, end - buf, sampler0, uv0, uv_step0, min_uv0,
|
||||
max_uv0, sampler1, uv1, uv_step1, min_uv1, max_uv1,
|
||||
sampler2, uv2, uv_step2, min_uv2, max_uv2, ycbcr_bias,
|
||||
rgb_from_debiased_ycbcr, rescaleFactor, noColor);
|
||||
sampler2, uv2, uv_step2, min_uv2, max_uv2, colorSpace,
|
||||
rescaleFactor, noColor);
|
||||
return span;
|
||||
}
|
||||
|
||||
|
@ -296,7 +296,7 @@ extern "C" {
|
||||
locked_y: *mut LockedTexture,
|
||||
locked_u: *mut LockedTexture,
|
||||
locked_v: *mut LockedTexture,
|
||||
color_space: YuvRangedColorSpace,
|
||||
color_space: YUVColorSpace,
|
||||
color_depth: GLuint,
|
||||
src_x: GLint,
|
||||
src_y: GLint,
|
||||
@ -2295,15 +2295,12 @@ pub struct LockedResource(*mut LockedTexture);
|
||||
unsafe impl Send for LockedResource {}
|
||||
unsafe impl Sync for LockedResource {}
|
||||
|
||||
#[repr(u8)]
|
||||
pub enum YuvRangedColorSpace {
|
||||
Rec601Narrow = 0,
|
||||
Rec601Full,
|
||||
Rec709Narrow,
|
||||
Rec709Full,
|
||||
Rec2020Narrow,
|
||||
Rec2020Full,
|
||||
GbrIdentity,
|
||||
#[repr(C)]
|
||||
pub enum YUVColorSpace {
|
||||
Rec601 = 0,
|
||||
Rec709,
|
||||
Rec2020,
|
||||
Identity,
|
||||
}
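Note that this FFI enum has to stay in lock-step with the YUV_COLOR_SPACE_* defines in yuv.glsl further down in this patch, and with the C++ YUVRangedColorSpace bounds check in CompositeYUV above; the discriminants are the contract between the three languages. A tiny Rust sketch of that invariant, with the shader-side value copied in as a hypothetical mirror constant:

// Sketch of the cross-language invariant; the numeric value mirrors the
// #define list in yuv.glsl shown later in this diff.
#[repr(u8)]
#[derive(Clone, Copy, Debug, PartialEq)]
enum YuvRangedColorSpace {
    Rec601Narrow = 0,
    Rec601Full,
    Rec709Narrow,
    Rec709Full,
    Rec2020Narrow,
    Rec2020Full,
    GbrIdentity,
}

fn main() {
    const YUV_COLOR_SPACE_REC709_FULL: u8 = 3; // mirrors the shader #define
    assert_eq!(YuvRangedColorSpace::Rec709Full as u8, YUV_COLOR_SPACE_REC709_FULL);
}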
|
||||
|
||||
impl LockedResource {
|
||||
@ -2358,7 +2355,7 @@ impl LockedResource {
|
||||
locked_y: &LockedResource,
|
||||
locked_u: &LockedResource,
|
||||
locked_v: &LockedResource,
|
||||
color_space: YuvRangedColorSpace,
|
||||
color_space: YUVColorSpace,
|
||||
color_depth: GLuint,
|
||||
src_x: GLint,
|
||||
src_y: GLint,
|
||||
|
@ -15,23 +15,26 @@ flat varying vec4 vUvBounds_U;
|
||||
varying vec2 vUv_V;
|
||||
flat varying vec4 vUvBounds_V;
|
||||
|
||||
YUV_PRECISION flat varying vec3 vYcbcrBias;
|
||||
YUV_PRECISION flat varying mat3 vRgbFromDebiasedYcbcr;
|
||||
flat varying mat3 vYuvColorMatrix;
|
||||
flat varying vec4 vYuvOffsetVector_Coefficient;
|
||||
flat varying int vFormat;
|
||||
|
||||
#ifdef SWGL_DRAW_SPAN
|
||||
flat varying int vYuvColorSpace;
|
||||
flat varying int vRescaleFactor;
|
||||
#endif
|
||||
|
||||
#ifdef WR_VERTEX_SHADER
|
||||
|
||||
struct YuvPrimitive {
|
||||
float coefficient;
|
||||
int color_space;
|
||||
int yuv_format;
|
||||
};
|
||||
|
||||
YuvPrimitive fetch_yuv_primitive(int address) {
|
||||
vec4 data = fetch_from_gpu_cache_1(address);
|
||||
// From YuvImageData.write_prim_gpu_blocks:
|
||||
int channel_bit_depth = int(data.x);
|
||||
int color_space = int(data.y);
|
||||
int yuv_format = int(data.z);
|
||||
return YuvPrimitive(channel_bit_depth, color_space, yuv_format);
|
||||
return YuvPrimitive(data.x, int(data.y), int(data.z));
|
||||
}
|
||||
|
||||
void brush_vs(
|
||||
@ -49,26 +52,19 @@ void brush_vs(
|
||||
vec2 f = (vi.local_pos - local_rect.p0) / rect_size(local_rect);
|
||||
|
||||
YuvPrimitive prim = fetch_yuv_primitive(prim_address);
|
||||
vYuvOffsetVector_Coefficient.w = prim.coefficient;
|
||||
|
||||
vYuvColorMatrix = get_yuv_color_matrix(prim.color_space);
|
||||
vYuvOffsetVector_Coefficient.xyz = get_yuv_offset_vector(prim.color_space);
|
||||
vFormat = prim.yuv_format;
|
||||
|
||||
#ifdef SWGL_DRAW_SPAN
|
||||
// swgl_commitTextureLinearYUV needs to know the color space specifier and
|
||||
// also needs to know how many bits of scaling are required to normalize
|
||||
// HDR textures.
|
||||
vRescaleFactor = 0;
|
||||
if (prim.channel_bit_depth > 8) {
|
||||
vRescaleFactor = 16 - prim.channel_bit_depth;
|
||||
}
|
||||
// Since SWGL rescales filtered YUV values to 8bpc before yuv->rgb
|
||||
// conversion, don't embed a 10bpc channel multiplier into the yuv matrix.
|
||||
prim.channel_bit_depth = 8;
|
||||
vYuvColorSpace = prim.color_space;
|
||||
vRescaleFactor = int(log2(prim.coefficient));
|
||||
#endif
|
||||
|
||||
YuvColorMatrixInfo mat_info = get_rgb_from_ycbcr_info(prim);
|
||||
vYcbcrBias = mat_info.ycbcr_bias;
|
||||
vRgbFromDebiasedYcbcr = mat_info.rgb_from_debiased_ycbrc;
|
||||
|
||||
vFormat = prim.yuv_format;
|
||||
|
||||
// The additional test for 99 works around a gen6 shader compiler bug: 1708937
|
||||
if (vFormat == YUV_FORMAT_PLANAR || vFormat == 99) {
|
||||
ImageSource res_y = fetch_image_source(prim_user_data.x);
|
||||
@ -94,8 +90,9 @@ void brush_vs(
|
||||
Fragment brush_fs() {
|
||||
vec4 color = sample_yuv(
|
||||
vFormat,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vYuvColorMatrix,
|
||||
vYuvOffsetVector_Coefficient.xyz,
|
||||
vYuvOffsetVector_Coefficient.w,
|
||||
vUv_Y,
|
||||
vUv_U,
|
||||
vUv_V,
|
||||
@ -108,9 +105,6 @@ Fragment brush_fs() {
|
||||
color *= antialias_brush();
|
||||
#endif
|
||||
|
||||
//color.r = float(100+vFormat) / 255.0;
|
||||
//color.g = vYcbcrBias.x;
|
||||
//color.b = vYcbcrBias.y;
|
||||
return Fragment(color);
|
||||
}
|
||||
|
||||
@ -120,20 +114,14 @@ void swgl_drawSpanRGBA8() {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUv_Y, vUvBounds_Y,
|
||||
sColor1, vUv_U, vUvBounds_U,
|
||||
sColor2, vUv_V, vUvBounds_V,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
} else if (vFormat == YUV_FORMAT_NV12) {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUv_Y, vUvBounds_Y,
|
||||
sColor1, vUv_U, vUvBounds_U,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
} else if (vFormat == YUV_FORMAT_INTERLEAVED) {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUv_Y, vUvBounds_Y,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
@ -16,11 +16,12 @@
|
||||
#endif
|
||||
|
||||
#ifdef WR_FEATURE_YUV
|
||||
YUV_PRECISION flat varying vec3 vYcbcrBias;
|
||||
YUV_PRECISION flat varying mat3 vRgbFromDebiasedYcbcr;
|
||||
flat varying mat3 vYuvColorMatrix;
|
||||
flat varying vec3 vYuvOffsetVector;
|
||||
flat varying float vYuvCoefficient;
|
||||
flat varying int vYuvFormat;
|
||||
|
||||
#ifdef SWGL_DRAW_SPAN
|
||||
flat varying int vYuvColorSpace;
|
||||
flat varying int vRescaleFactor;
|
||||
#endif
|
||||
varying vec2 vUV_y;
|
||||
@ -58,16 +59,6 @@ PER_INSTANCE attribute vec4 aUvRect2;
|
||||
PER_INSTANCE attribute vec4 aUvRect0;
|
||||
#endif
|
||||
|
||||
#ifdef WR_FEATURE_YUV
|
||||
YuvPrimitive fetch_yuv_primitive() {
|
||||
// From ExternalSurfaceDependency::Yuv:
|
||||
int color_space = int(aParams.y);
|
||||
int yuv_format = int(aParams.z);
|
||||
int channel_bit_depth = int(aParams.w);
|
||||
return YuvPrimitive(channel_bit_depth, color_space, yuv_format);
|
||||
}
|
||||
#endif
|
||||
|
||||
void main(void) {
|
||||
// Get world position
|
||||
vec2 world_pos = mix(aDeviceRect.xy, aDeviceRect.zw, aPosition.xy);
|
||||
@ -79,27 +70,23 @@ void main(void) {
|
||||
vec2 uv = (clipped_world_pos - aDeviceRect.xy) / (aDeviceRect.zw - aDeviceRect.xy);
|
||||
|
||||
#ifdef WR_FEATURE_YUV
|
||||
YuvPrimitive prim = fetch_yuv_primitive();
|
||||
int yuv_color_space = int(aParams.y);
|
||||
int yuv_format = int(aParams.z);
|
||||
float yuv_coefficient = aParams.w;
|
||||
|
||||
vYuvColorMatrix = get_yuv_color_matrix(yuv_color_space);
|
||||
vYuvOffsetVector = get_yuv_offset_vector(yuv_color_space);
|
||||
vYuvCoefficient = yuv_coefficient;
|
||||
vYuvFormat = yuv_format;
|
||||
|
||||
#ifdef SWGL_DRAW_SPAN
|
||||
// swgl_commitTextureLinearYUV needs to know the color space specifier and
|
||||
// also needs to know how many bits of scaling are required to normalize
|
||||
// HDR textures.
|
||||
vRescaleFactor = 0;
|
||||
if (prim.channel_bit_depth > 8) {
|
||||
vRescaleFactor = 16 - prim.channel_bit_depth;
|
||||
}
|
||||
// Since SWGL rescales filtered YUV values to 8bpc before yuv->rgb
|
||||
// conversion, don't embed a 10bpc channel multiplier into the yuv matrix.
|
||||
prim.channel_bit_depth = 8;
|
||||
vYuvColorSpace = yuv_color_space;
|
||||
vRescaleFactor = int(log2(yuv_coefficient));
|
||||
#endif
|
||||
|
||||
YuvColorMatrixInfo mat_info = get_rgb_from_ycbcr_info(prim);
|
||||
vYcbcrBias = mat_info.ycbcr_bias;
|
||||
vRgbFromDebiasedYcbcr = mat_info.rgb_from_debiased_ycbrc;
|
||||
|
||||
vYuvFormat = prim.yuv_format;
|
||||
|
||||
write_uv_rect(
|
||||
aUvRect0.xy,
|
||||
aUvRect0.zw,
|
||||
@ -166,8 +153,9 @@ void main(void) {
|
||||
#ifdef WR_FEATURE_YUV
|
||||
vec4 color = sample_yuv(
|
||||
vYuvFormat,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vYuvColorMatrix,
|
||||
vYuvOffsetVector,
|
||||
vYuvCoefficient,
|
||||
vUV_y,
|
||||
vUV_u,
|
||||
vUV_v,
|
||||
@ -200,20 +188,14 @@ void swgl_drawSpanRGBA8() {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUV_y, vUVBounds_y,
|
||||
sColor1, vUV_u, vUVBounds_u,
|
||||
sColor2, vUV_v, vUVBounds_v,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
} else if (vYuvFormat == YUV_FORMAT_NV12) {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUV_y, vUVBounds_y,
|
||||
sColor1, vUV_u, vUVBounds_u,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
} else if (vYuvFormat == YUV_FORMAT_INTERLEAVED) {
|
||||
swgl_commitTextureLinearYUV(sColor0, vUV_y, vUVBounds_y,
|
||||
vYcbcrBias,
|
||||
vRgbFromDebiasedYcbcr,
|
||||
vRescaleFactor);
|
||||
vYuvColorSpace, vRescaleFactor);
|
||||
}
|
||||
#else
|
||||
#ifdef WR_FEATURE_FAST_PATH
|
||||
|
@ -8,9 +8,6 @@
|
||||
#define YUV_FORMAT_PLANAR 1
|
||||
#define YUV_FORMAT_INTERLEAVED 2
|
||||
|
||||
//#define YUV_PRECISION mediump
|
||||
#define YUV_PRECISION
|
||||
|
||||
#ifdef WR_VERTEX_SHADER
|
||||
|
||||
#ifdef WR_FEATURE_TEXTURE_RECT
|
||||
@ -19,140 +16,81 @@
|
||||
#define TEX_SIZE_YUV(sampler) vec2(TEX_SIZE(sampler).xy)
|
||||
#endif
|
||||
|
||||
// `YuvRangedColorSpace`
|
||||
#define YUV_COLOR_SPACE_REC601_NARROW 0
|
||||
#define YUV_COLOR_SPACE_REC601_FULL 1
|
||||
#define YUV_COLOR_SPACE_REC709_NARROW 2
|
||||
#define YUV_COLOR_SPACE_REC709_FULL 3
|
||||
#define YUV_COLOR_SPACE_REC2020_NARROW 4
|
||||
#define YUV_COLOR_SPACE_REC2020_FULL 5
|
||||
#define YUV_COLOR_SPACE_GBR_IDENTITY 6
|
||||
#define YUV_COLOR_SPACE_REC601 0
|
||||
#define YUV_COLOR_SPACE_REC709 1
|
||||
#define YUV_COLOR_SPACE_REC2020 2
|
||||
#define YUV_COLOR_SPACE_IDENTITY 3
|
||||
|
||||
// The constants added to the Y, U and V components are applied in the fragment shader.
|
||||
|
||||
// `rgbFromYuv` from https://jdashg.github.io/misc/colors/from-coeffs.html
|
||||
// From Rec601:
|
||||
// [R] [1.1643835616438356, 0.0, 1.5960267857142858 ] [Y - 16]
|
||||
// [G] = [1.1643835616438358, -0.3917622900949137, -0.8129676472377708 ] x [U - 128]
|
||||
// [B] [1.1643835616438356, 2.017232142857143, 8.862867620416422e-17] [V - 128]
|
||||
//
|
||||
// For the range [0,1] instead of [0,255].
|
||||
//
|
||||
// The matrix is stored in column-major.
|
||||
const mat3 RgbFromYuv_Rec601 = mat3(
|
||||
1.00000, 1.00000, 1.00000,
|
||||
0.00000,-0.17207, 0.88600,
|
||||
0.70100,-0.35707, 0.00000
|
||||
const mat3 YuvColorMatrixRec601 = mat3(
|
||||
1.16438, 1.16438, 1.16438,
|
||||
0.0, -0.39176, 2.01723,
|
||||
1.59603, -0.81297, 0.0
|
||||
);
|
||||
const mat3 RgbFromYuv_Rec709 = mat3(
|
||||
1.00000, 1.00000, 1.00000,
|
||||
0.00000,-0.09366, 0.92780,
|
||||
0.78740,-0.23406, 0.00000
|
||||
|
||||
// From Rec709:
|
||||
// [R] [1.1643835616438356, 0.0, 1.7927410714285714] [Y - 16]
|
||||
// [G] = [1.1643835616438358, -0.21324861427372963, -0.532909328559444 ] x [U - 128]
|
||||
// [B] [1.1643835616438356, 2.1124017857142854, 0.0 ] [V - 128]
|
||||
//
|
||||
// For the range [0,1] instead of [0,255]:
|
||||
//
|
||||
// The matrix is stored in column-major.
|
||||
const mat3 YuvColorMatrixRec709 = mat3(
|
||||
1.16438, 1.16438, 1.16438,
|
||||
0.0 , -0.21325, 2.11240,
|
||||
1.79274, -0.53291, 0.0
|
||||
);
|
||||
const mat3 RgbFromYuv_Rec2020 = mat3(
|
||||
1.00000, 1.00000, 1.00000,
|
||||
0.00000,-0.08228, 0.94070,
|
||||
0.73730,-0.28568, 0.00000
|
||||
|
||||
// From Rec2020:
|
||||
// [R] [1.16438356164384, 0.0, 1.678674107142860 ] [Y - 16]
|
||||
// [G] = [1.16438356164384, -0.187326104219343, -0.650424318505057 ] x [U - 128]
|
||||
// [B] [1.16438356164384, 2.14177232142857, 0.0 ] [V - 128]
|
||||
//
|
||||
// For the range [0,1] instead of [0,255]:
|
||||
//
|
||||
// The matrix is stored in column-major.
|
||||
const mat3 YuvColorMatrixRec2020 = mat3(
|
||||
1.16438356164384 , 1.164383561643840, 1.16438356164384,
|
||||
0.0 , -0.187326104219343, 2.14177232142857,
|
||||
1.67867410714286 , -0.650424318505057, 0.0
|
||||
);
|
||||
|
||||
// The matrix is stored in column-major.
|
||||
// Identity is stored as GBR
|
||||
const mat3 RgbFromYuv_GbrIdentity = mat3(
|
||||
const mat3 IdentityColorMatrix = mat3(
|
||||
0.0 , 1.0, 0.0,
|
||||
0.0 , 0.0, 1.0,
|
||||
1.0 , 0.0, 0.0
|
||||
);
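The two Rec.601 constants above differ only in where the narrow-range expansion lives: the restored YuvColorMatrixRec601 bakes the 8-bit limited-range rescale into its coefficients, while the removed RgbFromYuv_Rec601 keeps unit-range coefficients and applies the rescale separately through the zero/one points. A quick numeric check of that relationship, as a Rust sketch with values copied from the constants above:

// Sketch: the restored coefficients are the removed ones times the 8-bit
// narrow-range expansion (255/219 on luma, 255/112 on the chroma excursion).
fn main() {
    let luma_expand = 255.0_f32 / 219.0; // = 1.16438
    let chroma_expand = 255.0_f32 / 112.0; // = 2.27679
    println!("{:.5}", 1.00000 * luma_expand); //  1.16438
    println!("{:.5}", 0.70100 * chroma_expand); //  1.59603
    println!("{:.5}", -0.35707 * chroma_expand); // -0.81297
    println!("{:.5}", -0.17207 * chroma_expand); // ~ -0.39176
    println!("{:.5}", 0.88600 * chroma_expand); //  2.01723
}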
|
||||
|
||||
// -
|
||||
|
||||
struct YuvPrimitive {
|
||||
int channel_bit_depth;
|
||||
int color_space;
|
||||
int yuv_format;
|
||||
};
|
||||
|
||||
struct YuvColorSamplingInfo {
|
||||
mat3 rgb_from_yuv;
|
||||
vec4 packed_zero_one_vals;
|
||||
};
|
||||
|
||||
struct YuvColorMatrixInfo {
|
||||
vec3 ycbcr_bias;
|
||||
mat3 rgb_from_debiased_ycbrc;
|
||||
};
|
||||
|
||||
// -
|
||||
|
||||
vec4 yuv_channel_zero_one_identity(int bit_depth) {
|
||||
int channel_depth = 8;
|
||||
if (bit_depth > 8) {
|
||||
// For >8bpc, we get the low bits, not the high bits:
|
||||
// 10bpc(1.0): 0b0000_0011_1111_1111
|
||||
channel_depth = 16;
|
||||
}
|
||||
|
||||
float all_ones_normalized = float((1 << bit_depth) - 1) / float((1 << channel_depth) - 1);
|
||||
return vec4(0.0, 0.0, all_ones_normalized, all_ones_normalized);
|
||||
}
|
||||
|
||||
vec4 yuv_channel_zero_one_narrow_range(int bit_depth) {
|
||||
// Note: 512/1023 != 128/255
|
||||
ivec4 zero_one_ints = ivec4(16, 128, 235, 240) << (bit_depth - 8);
|
||||
|
||||
int channel_depth = 8;
|
||||
if (bit_depth > 8) {
|
||||
// For >8bpc, we get the low bits, not the high bits:
|
||||
// 10bpc(1.0): 0b0000_0011_1111_1111
|
||||
channel_depth = 16;
|
||||
}
|
||||
|
||||
return vec4(zero_one_ints) / float((1 << channel_depth) - 1);
|
||||
}
|
||||
|
||||
vec4 yuv_channel_zero_one_full_range(int bit_depth) {
|
||||
vec4 narrow = yuv_channel_zero_one_narrow_range(bit_depth);
|
||||
vec4 identity = yuv_channel_zero_one_identity(bit_depth);
|
||||
|
||||
return vec4(0.0, narrow.y, identity.z, identity.w);
|
||||
}
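The zero/one helpers above drive that same bias/scale computation for arbitrary bit depths; note that for >8bpc planes the sample sits in the low bits of a 16-bit channel, so the normalizing denominator becomes 2^16 - 1 rather than 2^bit_depth - 1. A small Rust sketch mirroring the narrow-range case, with illustrative names:

// Sketch mirroring the removed yuv_channel_zero_one_narrow_range(): shift the
// 8-bit anchor codes up to the stored bit depth, then normalize by the
// texture channel's maximum value.
fn zero_one_narrow(bit_depth: u32) -> [f32; 4] {
    let codes = [16u32, 128, 235, 240].map(|v| v << (bit_depth - 8));
    let channel_depth = if bit_depth > 8 { 16 } else { 8 };
    let max = ((1u32 << channel_depth) - 1) as f32;
    codes.map(|v| v as f32 / max)
}

fn main() {
    println!("{:?}", zero_one_narrow(8));  // [16, 128, 235, 240] / 255
    println!("{:?}", zero_one_narrow(10)); // [64, 512, 940, 960] / 65535
}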
|
||||
|
||||
YuvColorSamplingInfo get_yuv_color_info(YuvPrimitive prim) {
|
||||
if (prim.color_space == YUV_COLOR_SPACE_REC601_NARROW) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec601,
|
||||
yuv_channel_zero_one_narrow_range(prim.channel_bit_depth));
|
||||
} else if (prim.color_space == YUV_COLOR_SPACE_REC601_FULL) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec601,
|
||||
yuv_channel_zero_one_full_range(prim.channel_bit_depth));
|
||||
|
||||
} else if (prim.color_space == YUV_COLOR_SPACE_REC709_NARROW) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec709,
|
||||
yuv_channel_zero_one_narrow_range(prim.channel_bit_depth));
|
||||
} else if (prim.color_space == YUV_COLOR_SPACE_REC709_FULL) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec709,
|
||||
yuv_channel_zero_one_full_range(prim.channel_bit_depth));
|
||||
|
||||
} else if (prim.color_space == YUV_COLOR_SPACE_REC2020_NARROW) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec2020,
|
||||
yuv_channel_zero_one_narrow_range(prim.channel_bit_depth));
|
||||
} else if (prim.color_space == YUV_COLOR_SPACE_REC2020_FULL) {
|
||||
return YuvColorSamplingInfo(RgbFromYuv_Rec2020,
|
||||
yuv_channel_zero_one_full_range(prim.channel_bit_depth));
|
||||
|
||||
mat3 get_yuv_color_matrix(int color_space) {
|
||||
if (color_space == YUV_COLOR_SPACE_REC601) {
|
||||
return YuvColorMatrixRec601;
|
||||
} else if (color_space == YUV_COLOR_SPACE_REC709) {
|
||||
return YuvColorMatrixRec709;
|
||||
} else if (color_space == YUV_COLOR_SPACE_IDENTITY) {
|
||||
return IdentityColorMatrix;
|
||||
} else {
|
||||
// Identity
|
||||
return YuvColorSamplingInfo(RgbFromYuv_GbrIdentity,
|
||||
yuv_channel_zero_one_identity(prim.channel_bit_depth));
|
||||
return YuvColorMatrixRec2020;
|
||||
}
|
||||
}
|
||||
|
||||
YuvColorMatrixInfo get_rgb_from_ycbcr_info(YuvPrimitive prim) {
|
||||
YuvColorSamplingInfo info = get_yuv_color_info(prim);
|
||||
|
||||
vec2 zero = info.packed_zero_one_vals.xy;
|
||||
vec2 one = info.packed_zero_one_vals.zw;
|
||||
// Such that yuv_value = (ycbcr_sample - zero) / (one - zero)
|
||||
vec2 scale = 1.0 / (one - zero);
|
||||
|
||||
YuvColorMatrixInfo mat_info;
|
||||
mat_info.ycbcr_bias = zero.xyy;
|
||||
mat3 yuv_from_debiased_ycbcr = mat3(scale.x, 0.0, 0.0,
|
||||
0.0, scale.y, 0.0,
|
||||
0.0, 0.0, scale.y);
|
||||
mat_info.rgb_from_debiased_ycbrc = info.rgb_from_yuv * yuv_from_debiased_ycbcr;
|
||||
return mat_info;
|
||||
vec3 get_yuv_offset_vector(int color_space) {
|
||||
if (color_space == YUV_COLOR_SPACE_IDENTITY) {
|
||||
return vec3(0.0, 0.0, 0.0);
|
||||
} else {
|
||||
return vec3(0.06275, 0.50196, 0.50196);
|
||||
}
|
||||
}
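For comparison, the restored get_yuv_offset_vector above hard-codes the narrow-range bias points (applied after the coefficient rescale), so limited range is assumed for every non-identity space; the constants are just 16/255 and 128/255. A one-line check in Rust:

fn main() {
    // The restored fixed offsets are the 8-bit bias codes normalized to [0, 1].
    println!("{:.5} {:.5}", 16.0_f32 / 255.0, 128.0_f32 / 255.0); // 0.06275 0.50196
}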
|
||||
|
||||
void write_uv_rect(
|
||||
@ -178,8 +116,9 @@ void write_uv_rect(
|
||||
|
||||
vec4 sample_yuv(
|
||||
int format,
|
||||
YUV_PRECISION vec3 ycbcr_bias,
|
||||
YUV_PRECISION mat3 rgb_from_debiased_ycbrc,
|
||||
mat3 yuv_color_matrix,
|
||||
vec3 yuv_offset_vector,
|
||||
float coefficient,
|
||||
vec2 in_uv_y,
|
||||
vec2 in_uv_u,
|
||||
vec2 in_uv_v,
|
||||
@ -187,7 +126,7 @@ vec4 sample_yuv(
|
||||
vec4 uv_bounds_u,
|
||||
vec4 uv_bounds_v
|
||||
) {
|
||||
YUV_PRECISION vec3 ycbcr_sample;
|
||||
vec3 yuv_value;
|
||||
|
||||
switch (format) {
|
||||
case YUV_FORMAT_PLANAR:
|
||||
@ -196,9 +135,9 @@ vec4 sample_yuv(
|
||||
vec2 uv_y = clamp(in_uv_y, uv_bounds_y.xy, uv_bounds_y.zw);
|
||||
vec2 uv_u = clamp(in_uv_u, uv_bounds_u.xy, uv_bounds_u.zw);
|
||||
vec2 uv_v = clamp(in_uv_v, uv_bounds_v.xy, uv_bounds_v.zw);
|
||||
ycbcr_sample.x = TEX_SAMPLE(sColor0, uv_y).r;
|
||||
ycbcr_sample.y = TEX_SAMPLE(sColor1, uv_u).r;
|
||||
ycbcr_sample.z = TEX_SAMPLE(sColor2, uv_v).r;
|
||||
yuv_value.x = TEX_SAMPLE(sColor0, uv_y).r;
|
||||
yuv_value.y = TEX_SAMPLE(sColor1, uv_u).r;
|
||||
yuv_value.z = TEX_SAMPLE(sColor2, uv_v).r;
|
||||
}
|
||||
break;
|
||||
|
||||
@ -206,8 +145,8 @@ vec4 sample_yuv(
|
||||
{
|
||||
vec2 uv_y = clamp(in_uv_y, uv_bounds_y.xy, uv_bounds_y.zw);
|
||||
vec2 uv_uv = clamp(in_uv_u, uv_bounds_u.xy, uv_bounds_u.zw);
|
||||
ycbcr_sample.x = TEX_SAMPLE(sColor0, uv_y).r;
|
||||
ycbcr_sample.yz = TEX_SAMPLE(sColor1, uv_uv).rg;
|
||||
yuv_value.x = TEX_SAMPLE(sColor0, uv_y).r;
|
||||
yuv_value.yz = TEX_SAMPLE(sColor1, uv_uv).rg;
|
||||
}
|
||||
break;
|
||||
|
||||
@ -217,24 +156,25 @@ vec4 sample_yuv(
|
||||
// the existing green, blue and red color channels."
|
||||
// https://www.khronos.org/registry/OpenGL/extensions/APPLE/APPLE_rgb_422.txt
|
||||
vec2 uv_y = clamp(in_uv_y, uv_bounds_y.xy, uv_bounds_y.zw);
|
||||
ycbcr_sample = TEX_SAMPLE(sColor0, uv_y).gbr;
|
||||
yuv_value = TEX_SAMPLE(sColor0, uv_y).gbr;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
ycbcr_sample = vec3(0.0);
|
||||
yuv_value = vec3(0.0);
|
||||
break;
|
||||
}
|
||||
//if (true) return vec4(ycbcr_sample, 1.0);
|
||||
|
||||
// See the YuvColorMatrix definition for an explanation of where the constants come from.
|
||||
YUV_PRECISION vec3 rgb = rgb_from_debiased_ycbrc * (ycbcr_sample - ycbcr_bias);
|
||||
|
||||
vec3 yuv = yuv_value * coefficient - yuv_offset_vector;
|
||||
vec3 rgb = yuv_color_matrix * yuv;
|
||||
#if defined(WR_FEATURE_ALPHA_PASS) && defined(SWGL_CLIP_MASK)
|
||||
// Avoid out-of-range RGB values that can mess with blending. These occur due to invalid
|
||||
// YUV values outside the mappable space that nevertheless can be generated.
|
||||
rgb = clamp(rgb, 0.0, 1.0);
|
||||
#endif
|
||||
return vec4(rgb, 1.0);
|
||||
vec4 color = vec4(rgb, 1.0);
|
||||
|
||||
return color;
|
||||
}
|
||||
#endif
|
||||
|
@ -2,7 +2,7 @@
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use api::{ColorF, YuvRangedColorSpace, YuvFormat, ImageRendering, ExternalImageId, ImageBufferKind};
|
||||
use api::{ColorF, YuvColorSpace, YuvFormat, ImageRendering, ExternalImageId, ImageBufferKind};
|
||||
use api::units::*;
|
||||
use api::ColorDepth;
|
||||
use crate::image_source::resolve_image;
|
||||
@ -155,9 +155,9 @@ pub fn tile_kind(surface: &CompositeTileSurface, is_opaque: bool) -> TileKind {
|
||||
pub enum ExternalSurfaceDependency {
|
||||
Yuv {
|
||||
image_dependencies: [ImageDependency; 3],
|
||||
color_space: YuvRangedColorSpace,
|
||||
color_space: YuvColorSpace,
|
||||
format: YuvFormat,
|
||||
channel_bit_depth: u32,
|
||||
rescale: f32,
|
||||
},
|
||||
Rgb {
|
||||
image_dependency: ImageDependency,
|
||||
@ -221,9 +221,9 @@ pub enum ResolvedExternalSurfaceColorData {
|
||||
// YUV specific information
|
||||
image_dependencies: [ImageDependency; 3],
|
||||
planes: [ExternalPlaneDescriptor; 3],
|
||||
color_space: YuvRangedColorSpace,
|
||||
color_space: YuvColorSpace,
|
||||
format: YuvFormat,
|
||||
channel_bit_depth: u32,
|
||||
rescale: f32,
|
||||
},
|
||||
Rgb {
|
||||
image_dependency: ImageDependency,
|
||||
@ -844,7 +844,7 @@ impl CompositeState {
|
||||
});
|
||||
|
||||
match external_surface.dependency {
|
||||
ExternalSurfaceDependency::Yuv{ color_space, format, channel_bit_depth, .. } => {
|
||||
ExternalSurfaceDependency::Yuv{ color_space, format, rescale, .. } => {
|
||||
|
||||
let image_buffer_kind = planes[0].texture.image_buffer_kind();
|
||||
|
||||
@ -854,8 +854,8 @@ impl CompositeState {
|
||||
planes,
|
||||
color_space,
|
||||
format,
|
||||
channel_bit_depth,
|
||||
},
|
||||
rescale,
|
||||
},
|
||||
image_buffer_kind,
|
||||
update_params,
|
||||
});
|
||||
@ -1134,7 +1134,7 @@ pub struct SWGLCompositeSurfaceInfo {
|
||||
/// Textures for planes of the surface, or 0 if not applicable.
|
||||
pub textures: [u32; 3],
|
||||
/// Color space of surface if using a YUV format.
|
||||
pub color_space: YuvRangedColorSpace,
|
||||
pub color_space: YuvColorSpace,
|
||||
/// Color depth of surface if using a YUV format.
|
||||
pub color_depth: ColorDepth,
|
||||
/// The actual source surface size before transformation.
|
||||
|
@ -11,7 +11,7 @@ use std::sync::atomic::{AtomicBool, AtomicI8, AtomicIsize, AtomicPtr, AtomicU32,
|
||||
use std::sync::{Arc, Condvar, Mutex, MutexGuard};
|
||||
use std::thread;
|
||||
use crate::{
|
||||
api::units::*, api::ColorDepth, api::ExternalImageId, api::ImageRendering, api::YuvRangedColorSpace, Compositor,
|
||||
api::units::*, api::ColorDepth, api::ExternalImageId, api::ImageRendering, api::YuvColorSpace, Compositor,
|
||||
CompositorCapabilities, CompositorSurfaceTransform, NativeSurfaceId, NativeSurfaceInfo, NativeTileId,
|
||||
profiler, MappableCompositor, SWGLCompositeSurfaceInfo,
|
||||
};
|
||||
@ -178,7 +178,7 @@ enum SwCompositeSource {
|
||||
swgl::LockedResource,
|
||||
swgl::LockedResource,
|
||||
swgl::LockedResource,
|
||||
YuvRangedColorSpace,
|
||||
YuvColorSpace,
|
||||
ColorDepth,
|
||||
),
|
||||
}
|
||||
@ -242,13 +242,10 @@ impl SwCompositeJob {
|
||||
}
|
||||
SwCompositeSource::YUV(ref y, ref u, ref v, color_space, color_depth) => {
|
||||
let swgl_color_space = match color_space {
|
||||
YuvRangedColorSpace::Rec601Narrow => swgl::YuvRangedColorSpace::Rec601Narrow,
|
||||
YuvRangedColorSpace::Rec601Full => swgl::YuvRangedColorSpace::Rec601Full,
|
||||
YuvRangedColorSpace::Rec709Narrow => swgl::YuvRangedColorSpace::Rec709Narrow,
|
||||
YuvRangedColorSpace::Rec709Full => swgl::YuvRangedColorSpace::Rec709Full,
|
||||
YuvRangedColorSpace::Rec2020Narrow => swgl::YuvRangedColorSpace::Rec2020Narrow,
|
||||
YuvRangedColorSpace::Rec2020Full => swgl::YuvRangedColorSpace::Rec2020Full,
|
||||
YuvRangedColorSpace::GbrIdentity => swgl::YuvRangedColorSpace::GbrIdentity,
|
||||
YuvColorSpace::Rec601 => swgl::YUVColorSpace::Rec601,
|
||||
YuvColorSpace::Rec709 => swgl::YUVColorSpace::Rec709,
|
||||
YuvColorSpace::Rec2020 => swgl::YUVColorSpace::Rec2020,
|
||||
YuvColorSpace::Identity => swgl::YUVColorSpace::Identity,
|
||||
};
|
||||
self.locked_dst.composite_yuv(
|
||||
y,
|
||||
@ -1007,7 +1004,7 @@ impl SwCompositor {
|
||||
let mut info = SWGLCompositeSurfaceInfo {
|
||||
yuv_planes: 0,
|
||||
textures: [0; 3],
|
||||
color_space: YuvRangedColorSpace::GbrIdentity,
|
||||
color_space: YuvColorSpace::Identity,
|
||||
color_depth: ColorDepth::Color8,
|
||||
size: DeviceIntSize::zero(),
|
||||
};
|
||||
|
@ -2,7 +2,7 @@
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
use api::{AlphaType, PremultipliedColorF, YuvFormat, YuvRangedColorSpace};
|
||||
use api::{AlphaType, PremultipliedColorF, YuvFormat, YuvColorSpace};
|
||||
use api::units::*;
|
||||
use crate::composite::CompositeFeatures;
|
||||
use crate::segment::EdgeAaSegmentMask;
|
||||
@ -254,7 +254,7 @@ pub struct CompositeInstance {
|
||||
color_space_or_uv_type: f32, // YuvColorSpace for YUV;
|
||||
// UV coordinate space for RGB
|
||||
yuv_format: f32, // YuvFormat
|
||||
yuv_channel_bit_depth: f32,
|
||||
yuv_rescale: f32,
|
||||
|
||||
// UV rectangles (pixel space) for color / yuv texture planes
|
||||
uv_rects: [TexelRect; 3],
|
||||
@ -275,7 +275,7 @@ impl CompositeInstance {
|
||||
z_id: z_id.0 as f32,
|
||||
color_space_or_uv_type: pack_as_float(UV_TYPE_NORMALIZED),
|
||||
yuv_format: 0.0,
|
||||
yuv_channel_bit_depth: 0.0,
|
||||
yuv_rescale: 0.0,
|
||||
uv_rects: [uv, uv, uv],
|
||||
}
|
||||
}
|
||||
@ -294,7 +294,7 @@ impl CompositeInstance {
|
||||
z_id: z_id.0 as f32,
|
||||
color_space_or_uv_type: pack_as_float(UV_TYPE_UNNORMALIZED),
|
||||
yuv_format: 0.0,
|
||||
yuv_channel_bit_depth: 0.0,
|
||||
yuv_rescale: 0.0,
|
||||
uv_rects: [uv_rect, uv_rect, uv_rect],
|
||||
}
|
||||
}
|
||||
@ -303,9 +303,9 @@ impl CompositeInstance {
|
||||
rect: DeviceRect,
|
||||
clip_rect: DeviceRect,
|
||||
z_id: ZBufferId,
|
||||
yuv_color_space: YuvRangedColorSpace,
|
||||
yuv_color_space: YuvColorSpace,
|
||||
yuv_format: YuvFormat,
|
||||
yuv_channel_bit_depth: u32,
|
||||
yuv_rescale: f32,
|
||||
uv_rects: [TexelRect; 3],
|
||||
) -> Self {
|
||||
CompositeInstance {
|
||||
@ -315,7 +315,7 @@ impl CompositeInstance {
|
||||
z_id: z_id.0 as f32,
|
||||
color_space_or_uv_type: pack_as_float(yuv_color_space as u32),
|
||||
yuv_format: pack_as_float(yuv_format as u32),
|
||||
yuv_channel_bit_depth: pack_as_float(yuv_channel_bit_depth),
|
||||
yuv_rescale,
|
||||
uv_rects,
|
||||
}
|
||||
}
|
||||
|
@ -97,7 +97,7 @@
|
||||
use api::{MixBlendMode, PremultipliedColorF, FilterPrimitiveKind};
|
||||
use api::{PropertyBinding, PropertyBindingId, FilterPrimitive};
|
||||
use api::{DebugFlags, ImageKey, ColorF, ColorU, PrimitiveFlags};
|
||||
use api::{ImageRendering, ColorDepth, YuvRangedColorSpace, YuvFormat, AlphaType};
|
||||
use api::{ImageRendering, ColorDepth, YuvColorSpace, YuvFormat, AlphaType};
|
||||
use api::units::*;
|
||||
use crate::batch::BatchFilter;
|
||||
use crate::box_shadow::BLUR_SAMPLE_SCALE;
|
||||
@ -2983,7 +2983,7 @@ impl TileCacheInstance {
|
||||
gpu_cache: &mut GpuCache,
|
||||
image_rendering: ImageRendering,
|
||||
color_depth: ColorDepth,
|
||||
color_space: YuvRangedColorSpace,
|
||||
color_space: YuvColorSpace,
|
||||
format: YuvFormat,
|
||||
) -> bool {
|
||||
for &key in api_keys {
|
||||
@ -3011,7 +3011,7 @@ impl TileCacheInstance {
|
||||
image_dependencies: *image_dependencies,
|
||||
color_space,
|
||||
format,
|
||||
channel_bit_depth: color_depth.bit_depth(),
|
||||
rescale: color_depth.rescaling_factor(),
|
||||
},
|
||||
api_keys,
|
||||
resource_cache,
|
||||
@ -3592,7 +3592,7 @@ impl TileCacheInstance {
|
||||
gpu_cache,
|
||||
prim_data.kind.image_rendering,
|
||||
prim_data.kind.color_depth,
|
||||
prim_data.kind.color_space.with_range(prim_data.kind.color_range),
|
||||
prim_data.kind.color_space,
|
||||
prim_data.kind.format,
|
||||
);
|
||||
}
|
||||
|
@ -603,10 +603,9 @@ impl YuvImageData {
|
||||
}
|
||||
|
||||
pub fn write_prim_gpu_blocks(&self, request: &mut GpuDataRequest) {
|
||||
let ranged_color_space = self.color_space.with_range(self.color_range);
|
||||
request.push([
|
||||
pack_as_float(self.color_depth.bit_depth()),
|
||||
pack_as_float(ranged_color_space as u32),
|
||||
self.color_depth.rescaling_factor(),
|
||||
pack_as_float(self.color_space as u32),
|
||||
pack_as_float(self.format as u32),
|
||||
0.0
|
||||
]);
|
||||
|
@ -2968,7 +2968,7 @@ impl Renderer {
|
||||
|
||||
let ( textures, instance ) = match surface.color_data {
|
||||
ResolvedExternalSurfaceColorData::Yuv{
|
||||
ref planes, color_space, format, channel_bit_depth, .. } => {
|
||||
ref planes, color_space, format, rescale, .. } => {
|
||||
|
||||
// Bind an appropriate YUV shader for the texture format kind
|
||||
self.shaders
|
||||
@ -3009,7 +3009,7 @@ impl Renderer {
|
||||
ZBufferId(0),
|
||||
color_space,
|
||||
format,
|
||||
channel_bit_depth,
|
||||
rescale,
|
||||
uv_rects,
|
||||
);
|
||||
|
||||
@ -3142,7 +3142,7 @@ impl Renderer {
|
||||
let surface = &external_surfaces[external_surface_index.0];
|
||||
|
||||
match surface.color_data {
|
||||
ResolvedExternalSurfaceColorData::Yuv{ ref planes, color_space, format, channel_bit_depth, .. } => {
|
||||
ResolvedExternalSurfaceColorData::Yuv{ ref planes, color_space, format, rescale, .. } => {
|
||||
let textures = BatchTextures::composite_yuv(
|
||||
planes[0].texture,
|
||||
planes[1].texture,
|
||||
@ -3167,7 +3167,7 @@ impl Renderer {
|
||||
tile.z_id,
|
||||
color_space,
|
||||
format,
|
||||
channel_bit_depth,
|
||||
rescale,
|
||||
uv_rects,
|
||||
),
|
||||
textures,
|
||||
|
@ -1344,7 +1344,7 @@ pub enum YuvColorSpace {
|
||||
Rec601 = 0,
|
||||
Rec709 = 1,
|
||||
Rec2020 = 2,
|
||||
Identity = 3, // aka GBR as per ISO/IEC 23091-2:2019
|
||||
Identity = 3, // aka RGB as per ISO/IEC 23091-2:2019
|
||||
}
|
||||
|
||||
#[repr(u8)]
|
||||
@ -1354,44 +1354,6 @@ pub enum ColorRange {
|
||||
Full = 1,
|
||||
}
|
||||
|
||||
#[repr(u8)]
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize, PeekPoke)]
|
||||
pub enum YuvRangedColorSpace {
|
||||
Rec601Narrow = 0,
|
||||
Rec601Full = 1,
|
||||
Rec709Narrow = 2,
|
||||
Rec709Full = 3,
|
||||
Rec2020Narrow = 4,
|
||||
Rec2020Full = 5,
|
||||
GbrIdentity = 6,
|
||||
}
|
||||
|
||||
impl YuvColorSpace {
|
||||
pub fn with_range(self, range: ColorRange) -> YuvRangedColorSpace {
|
||||
match self {
|
||||
YuvColorSpace::Identity => YuvRangedColorSpace::GbrIdentity,
|
||||
YuvColorSpace::Rec601 => {
|
||||
match range {
|
||||
ColorRange::Limited => YuvRangedColorSpace::Rec601Narrow,
|
||||
ColorRange::Full => YuvRangedColorSpace::Rec601Full,
|
||||
}
|
||||
}
|
||||
YuvColorSpace::Rec709 => {
|
||||
match range {
|
||||
ColorRange::Limited => YuvRangedColorSpace::Rec709Narrow,
|
||||
ColorRange::Full => YuvRangedColorSpace::Rec709Full,
|
||||
}
|
||||
}
|
||||
YuvColorSpace::Rec2020 => {
|
||||
match range {
|
||||
ColorRange::Limited => YuvRangedColorSpace::Rec2020Narrow,
|
||||
ColorRange::Full => YuvRangedColorSpace::Rec2020Full,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
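The with_range helper removed above was the glue that let callers keep passing a YuvColorSpace plus a ColorRange while the shaders consumed the ranged enum. A trivial usage sketch, assuming the YuvColorSpace, ColorRange, and YuvRangedColorSpace definitions from this hunk are in scope:

// Sketch only; relies on the enum and impl declarations shown in this hunk.
fn example() {
    assert_eq!(
        YuvColorSpace::Rec709.with_range(ColorRange::Full),
        YuvRangedColorSpace::Rec709Full
    );
    // Identity ignores the requested range entirely.
    assert_eq!(
        YuvColorSpace::Identity.with_range(ColorRange::Limited),
        YuvRangedColorSpace::GbrIdentity
    );
}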
|
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize, PeekPoke)]
|
||||
pub enum YuvData {
|
||||
NV12(ImageKey, ImageKey), // (Y channel, CbCr interleaved channel)
|
||||
@ -1767,7 +1729,6 @@ impl_default_for_enums! {
|
||||
ImageRendering => Auto,
|
||||
AlphaType => Alpha,
|
||||
YuvColorSpace => Rec601,
|
||||
YuvRangedColorSpace => Rec601Narrow,
|
||||
ColorRange => Limited,
|
||||
YuvData => NV12(ImageKey::default(), ImageKey::default()),
|
||||
YuvFormat => NV12,
|
||||
|
@ -451,5 +451,4 @@ include display-list/reftest.list
|
||||
|
||||
# Media
|
||||
include ../../dom/media/test/reftest/reftest.list
|
||||
include ../../dom/media/test/reftest/color_quads/reftest.list
|
||||
include ../../dom/media/webvtt/test/reftest/reftest.list
|
||||
|
@ -603,7 +603,7 @@ function update_pixel_difference_text() {
|
||||
}
|
||||
// Disable this for now, because per bug 1633504, the numbers may be
|
||||
// inaccurate and dependent on the browser's configuration.
|
||||
ID("pixel-differences").textContent = differenceText;
|
||||
// ID("pixel-differences").textContent = differenceText;
|
||||
}
|
||||
|
||||
function get_pixel_differences() {