Bug 1437572 - Update webrender to 4af31b8aa79d5a1f3c0242dea6be4876c71075c5. r=jrmuizel

MozReview-Commit-ID: FdccoNdrEjZ

--HG--
extra : rebase_source : c838d90bb46407194fede80e755901d98d9e9a67
Kartikaya Gupta 2018-02-16 11:02:20 -05:00
parent 561ed54bf6
commit 3e868500aa
59 changed files with 2292 additions and 1958 deletions


@ -11,14 +11,12 @@ extern crate webrender;
mod boilerplate;
use boilerplate::{Example, HandyDandyRectBuilder};
use rayon::Configuration as ThreadPoolConfig;
use rayon::ThreadPool;
use rayon::{Configuration as ThreadPoolConfig, ThreadPool};
use std::collections::HashMap;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::sync::mpsc::{channel, Receiver, Sender};
use webrender::api::{self,
DisplayListBuilder, DocumentId, PipelineId, RenderApi, ResourceUpdates};
use std::sync::mpsc::{Receiver, Sender, channel};
use webrender::api::{self, DisplayListBuilder, DocumentId, PipelineId, RenderApi, ResourceUpdates};
// This example shows how to implement a very basic BlobImageRenderer that can only render
// a checkerboard pattern.


@ -215,7 +215,9 @@ pub fn main_wrapper<E: Example>(
glutin::VirtualKeyCode::O => renderer.toggle_debug_flags(webrender::DebugFlags::RENDER_TARGET_DBG),
glutin::VirtualKeyCode::I => renderer.toggle_debug_flags(webrender::DebugFlags::TEXTURE_CACHE_DBG),
glutin::VirtualKeyCode::S => renderer.toggle_debug_flags(webrender::DebugFlags::COMPACT_PROFILER),
glutin::VirtualKeyCode::Q => renderer.toggle_debug_flags(webrender::DebugFlags::GPU_TIME_QUERIES | webrender::DebugFlags::GPU_SAMPLE_QUERIES),
glutin::VirtualKeyCode::Q => renderer.toggle_debug_flags(
webrender::DebugFlags::GPU_TIME_QUERIES | webrender::DebugFlags::GPU_SAMPLE_QUERIES
),
glutin::VirtualKeyCode::Key1 => txn.set_window_parameters(
framebuffer_size,
DeviceUintRect::new(DeviceUintPoint::zero(), framebuffer_size),


@ -11,9 +11,9 @@ extern crate webrender;
mod boilerplate;
use boilerplate::{Example, HandyDandyRectBuilder};
use euclid::TypedScale;
use gleam::gl;
use webrender::api::*;
use euclid::TypedScale;
// This example demonstrates using the frame output feature to copy
// the output of a WR framebuffer to a custom texture.
@ -138,7 +138,7 @@ impl Example for App {
_pipeline_id: PipelineId,
_document_id: DocumentId,
) {
if self.output_document.is_none(){
if self.output_document.is_none() {
let device_pixel_ratio = framebuffer_size.width as f32 /
builder.content_size().width;
self.init_output_document(api, DeviceUintSize::new(200, 200), device_pixel_ratio);


@ -8,12 +8,12 @@ extern crate gleam;
extern crate glutin;
extern crate webrender;
use std::fs::File;
use std::io::Read;
use webrender::api::*;
use app_units::Au;
use gleam::gl;
use glutin::GlContext;
use std::fs::File;
use std::io::Read;
use webrender::api::*;
struct Notifier {
events_proxy: glutin::EventsLoopProxy,
@ -189,10 +189,16 @@ impl Window {
Vec::new(),
);
let info = LayoutPrimitiveInfo::new(LayoutRect::new(LayoutPoint::new(100.0, 100.0), LayoutSize::new(100.0, 200.0)));
let info = LayoutPrimitiveInfo::new(LayoutRect::new(
LayoutPoint::new(100.0, 100.0),
LayoutSize::new(100.0, 200.0)
));
builder.push_rect(&info, ColorF::new(0.0, 1.0, 0.0, 1.0));
let text_bounds = LayoutRect::new(LayoutPoint::new(100.0, 50.0), LayoutSize::new(700.0, 200.0));
let text_bounds = LayoutRect::new(
LayoutPoint::new(100.0, 50.0),
LayoutSize::new(700.0, 200.0)
);
let glyphs = vec![
GlyphInstance {
index: 48,


@ -10,158 +10,67 @@ extern crate webrender;
mod boilerplate;
use boilerplate::Example;
use glutin::TouchPhase;
use std::collections::HashMap;
use gleam::gl;
use webrender::api::*;
#[derive(Debug)]
enum Gesture {
None,
Pan,
Zoom,
fn init_gl_texture(
id: gl::GLuint,
internal: gl::GLenum,
external: gl::GLenum,
bytes: &[u8],
gl: &gl::Gl,
) {
gl.bind_texture(gl::TEXTURE_2D, id);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MAG_FILTER, gl::LINEAR as gl::GLint);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_MIN_FILTER, gl::LINEAR as gl::GLint);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as gl::GLint);
gl.tex_parameter_i(gl::TEXTURE_2D, gl::TEXTURE_WRAP_T, gl::CLAMP_TO_EDGE as gl::GLint);
gl.tex_image_2d(
gl::TEXTURE_2D,
0,
internal as gl::GLint,
100,
100,
0,
external,
gl::UNSIGNED_BYTE,
Some(bytes),
);
gl.bind_texture(gl::TEXTURE_2D, 0);
}
#[derive(Debug)]
struct Touch {
id: u64,
start_x: f32,
start_y: f32,
current_x: f32,
current_y: f32,
struct YuvImageProvider {
texture_ids: Vec<gl::GLuint>,
}
fn dist(x0: f32, y0: f32, x1: f32, y1: f32) -> f32 {
let dx = x0 - x1;
let dy = y0 - y1;
((dx * dx) + (dy * dy)).sqrt()
}
impl YuvImageProvider {
fn new(gl: &gl::Gl) -> Self {
let texture_ids = gl.gen_textures(4);
impl Touch {
fn distance_from_start(&self) -> f32 {
dist(self.start_x, self.start_y, self.current_x, self.current_y)
}
init_gl_texture(texture_ids[0], gl::RED, gl::RED, &[127; 100 * 100], gl);
init_gl_texture(texture_ids[1], gl::RG8, gl::RG, &[0; 100 * 100 * 2], gl);
init_gl_texture(texture_ids[2], gl::RED, gl::RED, &[127; 100 * 100], gl);
init_gl_texture(texture_ids[3], gl::RED, gl::RED, &[127; 100 * 100], gl);
fn initial_distance_from_other(&self, other: &Touch) -> f32 {
dist(self.start_x, self.start_y, other.start_x, other.start_y)
}
fn current_distance_from_other(&self, other: &Touch) -> f32 {
dist(
self.current_x,
self.current_y,
other.current_x,
other.current_y,
)
}
}
struct TouchState {
active_touches: HashMap<u64, Touch>,
current_gesture: Gesture,
start_zoom: f32,
current_zoom: f32,
start_pan: DeviceIntPoint,
current_pan: DeviceIntPoint,
}
enum TouchResult {
None,
Pan(DeviceIntPoint),
Zoom(f32),
}
impl TouchState {
fn new() -> TouchState {
TouchState {
active_touches: HashMap::new(),
current_gesture: Gesture::None,
start_zoom: 1.0,
current_zoom: 1.0,
start_pan: DeviceIntPoint::zero(),
current_pan: DeviceIntPoint::zero(),
YuvImageProvider {
texture_ids
}
}
}
fn handle_event(&mut self, touch: glutin::Touch) -> TouchResult {
match touch.phase {
TouchPhase::Started => {
debug_assert!(!self.active_touches.contains_key(&touch.id));
self.active_touches.insert(
touch.id,
Touch {
id: touch.id,
start_x: touch.location.0 as f32,
start_y: touch.location.1 as f32,
current_x: touch.location.0 as f32,
current_y: touch.location.1 as f32,
},
);
self.current_gesture = Gesture::None;
}
TouchPhase::Moved => {
match self.active_touches.get_mut(&touch.id) {
Some(active_touch) => {
active_touch.current_x = touch.location.0 as f32;
active_touch.current_y = touch.location.1 as f32;
}
None => panic!("move touch event with unknown touch id!"),
}
match self.current_gesture {
Gesture::None => {
let mut over_threshold_count = 0;
let active_touch_count = self.active_touches.len();
for (_, touch) in &self.active_touches {
if touch.distance_from_start() > 8.0 {
over_threshold_count += 1;
}
}
if active_touch_count == over_threshold_count {
if active_touch_count == 1 {
self.start_pan = self.current_pan;
self.current_gesture = Gesture::Pan;
} else if active_touch_count == 2 {
self.start_zoom = self.current_zoom;
self.current_gesture = Gesture::Zoom;
}
}
}
Gesture::Pan => {
let keys: Vec<u64> = self.active_touches.keys().cloned().collect();
debug_assert!(keys.len() == 1);
let active_touch = &self.active_touches[&keys[0]];
let x = active_touch.current_x - active_touch.start_x;
let y = active_touch.current_y - active_touch.start_y;
self.current_pan.x = self.start_pan.x + x.round() as i32;
self.current_pan.y = self.start_pan.y + y.round() as i32;
return TouchResult::Pan(self.current_pan);
}
Gesture::Zoom => {
let keys: Vec<u64> = self.active_touches.keys().cloned().collect();
debug_assert!(keys.len() == 2);
let touch0 = &self.active_touches[&keys[0]];
let touch1 = &self.active_touches[&keys[1]];
let initial_distance = touch0.initial_distance_from_other(touch1);
let current_distance = touch0.current_distance_from_other(touch1);
self.current_zoom = self.start_zoom * current_distance / initial_distance;
return TouchResult::Zoom(self.current_zoom);
}
}
}
TouchPhase::Ended | TouchPhase::Cancelled => {
self.active_touches.remove(&touch.id).unwrap();
self.current_gesture = Gesture::None;
}
impl webrender::ExternalImageHandler for YuvImageProvider {
fn lock(&mut self, key: ExternalImageId, _channel_index: u8) -> webrender::ExternalImage {
let id = self.texture_ids[key.0 as usize];
webrender::ExternalImage {
uv: TexelRect::new(0.0, 0.0, 1.0, 1.0),
source: webrender::ExternalImageSource::NativeTexture(id),
}
TouchResult::None
}
fn unlock(&mut self, _key: ExternalImageId, _channel_index: u8) {
}
}
struct App {
touch_state: TouchState,
}
impl Example for App {
@ -193,25 +102,49 @@ impl Example for App {
resources.add_image(
yuv_chanel1,
ImageDescriptor::new(100, 100, ImageFormat::R8, true),
ImageData::new(vec![127; 100 * 100]),
ImageData::External(ExternalImageData {
id: ExternalImageId(0),
channel_index: 0,
image_type: ExternalImageType::TextureHandle(
TextureTarget::Default,
),
}),
None,
);
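// Each plane is now registered as an external image: instead of uploading CPU
// bytes, WR asks the ExternalImageHandler (YuvImageProvider here) to lock()
// the matching ExternalImageId during rendering and hands back a native GL texture.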
resources.add_image(
yuv_chanel2,
ImageDescriptor::new(100, 100, ImageFormat::RG8, true),
ImageData::new(vec![0; 100 * 100 * 2]),
ImageData::External(ExternalImageData {
id: ExternalImageId(1),
channel_index: 0,
image_type: ExternalImageType::TextureHandle(
TextureTarget::Default,
),
}),
None,
);
resources.add_image(
yuv_chanel2_1,
ImageDescriptor::new(100, 100, ImageFormat::R8, true),
ImageData::new(vec![127; 100 * 100]),
ImageData::External(ExternalImageData {
id: ExternalImageId(2),
channel_index: 0,
image_type: ExternalImageType::TextureHandle(
TextureTarget::Default,
),
}),
None,
);
resources.add_image(
yuv_chanel3,
ImageDescriptor::new(100, 100, ImageFormat::R8, true),
ImageData::new(vec![127; 100 * 100]),
ImageData::External(ExternalImageData {
id: ExternalImageId(3),
channel_index: 0,
image_type: ExternalImageType::TextureHandle(
TextureTarget::Default,
),
}),
None,
);
@ -240,33 +173,26 @@ impl Example for App {
builder.pop_stacking_context();
}
fn on_event(&mut self, event: glutin::WindowEvent, api: &RenderApi, document_id: DocumentId) -> bool {
let mut txn = Transaction::new();
match event {
glutin::WindowEvent::Touch(touch) => match self.touch_state.handle_event(touch) {
TouchResult::Pan(pan) => {
txn.set_pan(pan);
}
TouchResult::Zoom(zoom) => {
txn.set_pinch_zoom(ZoomFactor::new(zoom));
}
TouchResult::None => {}
},
_ => (),
}
if !txn.is_empty() {
txn.generate_frame();
api.send_transaction(document_id, txn);
}
fn on_event(
&mut self,
_event: glutin::WindowEvent,
_api: &RenderApi,
_document_id: DocumentId,
) -> bool {
false
}
fn get_image_handlers(
&mut self,
gl: &gl::Gl,
) -> (Option<Box<webrender::ExternalImageHandler>>,
Option<Box<webrender::OutputImageHandler>>) {
(Some(Box::new(YuvImageProvider::new(gl))), None)
}
}
fn main() {
let mut app = App {
touch_state: TouchState::new(),
};
boilerplate::main_wrapper(&mut app, None);
}


@ -5,10 +5,10 @@
#ifdef WR_VERTEX_SHADER
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
);
@ -24,21 +24,21 @@ struct BrushInstance {
int z;
int segment_index;
int edge_mask;
ivec2 user_data;
ivec3 user_data;
};
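// The instance is packed into two ivec4 vertex attributes (aData0 / aData1):
// the picture and clip task addresses share the 16-bit halves of aData0.x,
// the clip chain rect index and scroll node id share aData0.z, z moves to
// aData0.w, and aData1.yzw carries the three per-brush user data ints.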
BrushInstance load_brush() {
BrushInstance bi;
bi.picture_address = aData0.x;
bi.picture_address = aData0.x & 0xffff;
bi.clip_address = aData0.x >> 16;
bi.prim_address = aData0.y;
bi.clip_chain_rect_index = aData0.z / 65536;
bi.scroll_node_id = aData0.z % 65536;
bi.clip_address = aData0.w;
bi.z = aData1.x;
bi.segment_index = aData1.y & 0xffff;
bi.edge_mask = aData1.y >> 16;
bi.user_data = aData1.zw;
bi.clip_chain_rect_index = aData0.z >> 16;
bi.scroll_node_id = aData0.z & 0xffff;
bi.z = aData0.w;
bi.segment_index = aData1.x & 0xffff;
bi.edge_mask = aData1.x >> 16;
bi.user_data = aData1.yzw;
return bi;
}
@ -79,19 +79,20 @@ void main(void) {
vec4[2] segment_data = fetch_from_resource_cache_2(segment_address);
RectWithSize local_segment_rect = RectWithSize(segment_data[0].xy, segment_data[0].zw);
vec2 device_pos, local_pos;
VertexInfo vi;
// Fetch the dynamic picture that we are drawing on.
PictureTask pic_task = fetch_picture_task(brush.picture_address);
ClipArea clip_area = fetch_clip_area(brush.clip_address);
if (pic_task.pic_kind_and_raster_mode > 0.0) {
local_pos = local_segment_rect.p0 + aPosition.xy * local_segment_rect.size;
vec2 local_pos = local_segment_rect.p0 + aPosition.xy * local_segment_rect.size;
// Right now - pictures only support local positions. In the future, this
// will be expanded to support transform picture types (the common kind).
device_pos = pic_task.common_data.task_rect.p0 +
uDevicePixelRatio * (local_pos - pic_task.content_origin);
vec2 device_pos = uDevicePixelRatio * local_pos;
vec2 final_pos = device_pos +
pic_task.common_data.task_rect.p0 -
uDevicePixelRatio * pic_task.content_origin;
#ifdef WR_FEATURE_ALPHA_PASS
write_clip(
@ -100,10 +101,16 @@ void main(void) {
);
#endif
vi = VertexInfo(
local_pos,
device_pos,
1.0,
device_pos
);
// Write the final position transformed by the orthographic device-pixel projection.
gl_Position = uTransform * vec4(device_pos, 0.0, 1.0);
gl_Position = uTransform * vec4(final_pos, 0.0, 1.0);
} else {
VertexInfo vi;
ClipScrollNode scroll_node = fetch_clip_scroll_node(brush.scroll_node_id);
// Write the normal vertex information out.
@ -139,8 +146,6 @@ void main(void) {
);
}
local_pos = vi.local_pos;
// For brush instances in the alpha pass, always write
// out clip information.
// TODO(gw): It's possible that we might want alpha
@ -157,8 +162,8 @@ void main(void) {
// Run the specific brush VS code to write interpolators.
brush_vs(
vi,
brush.prim_address + VECS_PER_BRUSH_PRIM,
local_pos,
brush_prim.local_rect,
brush.user_data,
pic_task


@ -0,0 +1,151 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#define VECS_PER_SPECIFIC_BRUSH 5
#include shared,prim_shared,brush
varying vec3 vUv;
varying float vW;
flat varying float vAmount;
flat varying int vOp;
flat varying mat4 vColorMat;
flat varying vec4 vColorOffset;
#ifdef WR_VERTEX_SHADER
void brush_vs(
VertexInfo vi,
int prim_address,
RectWithSize local_rect,
ivec3 user_data,
PictureTask pic_task
) {
PictureTask src_task = fetch_picture_task(user_data.x);
vec2 texture_size = vec2(textureSize(sColor0, 0).xy);
vec2 uv = (vi.snapped_device_pos +
src_task.common_data.task_rect.p0 -
src_task.content_origin) * vi.w;
vW = vi.w;
vUv = vec3(uv / texture_size, src_task.common_data.texture_layer_index);
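// The task-space UV is pre-multiplied by w here and divided by the
// interpolated vW in the fragment shader; this cancels the hardware's
// perspective-correct interpolation so the UV varies linearly in screen space.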
vOp = user_data.y;
float lumR = 0.2126;
float lumG = 0.7152;
float lumB = 0.0722;
float oneMinusLumR = 1.0 - lumR;
float oneMinusLumG = 1.0 - lumG;
float oneMinusLumB = 1.0 - lumB;
vec4 amount = fetch_from_resource_cache_1(prim_address);
vAmount = amount.x;
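// lumR/lumG/lumB are the Rec. 709 luminance coefficients; the matrices below
// are the grayscale, hue-rotate, saturate and sepia matrices from the CSS/SVG
// filter effects specification.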
switch (vOp) {
case 2: {
// Grayscale
vColorMat = mat4(vec4(lumR + oneMinusLumR * amount.y, lumR - lumR * amount.y, lumR - lumR * amount.y, 0.0),
vec4(lumG - lumG * amount.y, lumG + oneMinusLumG * amount.y, lumG - lumG * amount.y, 0.0),
vec4(lumB - lumB * amount.y, lumB - lumB * amount.y, lumB + oneMinusLumB * amount.y, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 3: {
// HueRotate
float c = cos(amount.x);
float s = sin(amount.x);
vColorMat = mat4(vec4(lumR + oneMinusLumR * c - lumR * s, lumR - lumR * c + 0.143 * s, lumR - lumR * c - oneMinusLumR * s, 0.0),
vec4(lumG - lumG * c - lumG * s, lumG + oneMinusLumG * c + 0.140 * s, lumG - lumG * c + lumG * s, 0.0),
vec4(lumB - lumB * c + oneMinusLumB * s, lumB - lumB * c - 0.283 * s, lumB + oneMinusLumB * c + lumB * s, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 5: {
// Saturate
vColorMat = mat4(vec4(amount.y * lumR + amount.x, amount.y * lumR, amount.y * lumR, 0.0),
vec4(amount.y * lumG, amount.y * lumG + amount.x, amount.y * lumG, 0.0),
vec4(amount.y * lumB, amount.y * lumB, amount.y * lumB + amount.x, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 6: {
// Sepia
vColorMat = mat4(vec4(0.393 + 0.607 * amount.y, 0.349 - 0.349 * amount.y, 0.272 - 0.272 * amount.y, 0.0),
vec4(0.769 - 0.769 * amount.y, 0.686 + 0.314 * amount.y, 0.534 - 0.534 * amount.y, 0.0),
vec4(0.189 - 0.189 * amount.y, 0.168 - 0.168 * amount.y, 0.131 + 0.869 * amount.y, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 10: {
// Color Matrix
vec4 data[4] = fetch_from_resource_cache_4(prim_address + 1);
vColorMat = mat4(amount, data[0], data[1], data[2]);
vColorOffset = data[3];
break;
}
default: break;
}
}
#endif
#ifdef WR_FRAGMENT_SHADER
vec4 Contrast(vec4 Cs, float amount) {
return vec4(Cs.rgb * amount - 0.5 * amount + 0.5, Cs.a);
}
vec4 Invert(vec4 Cs, float amount) {
return vec4(mix(Cs.rgb, vec3(1.0) - Cs.rgb, amount), Cs.a);
}
vec4 Brightness(vec4 Cs, float amount) {
// Apply the brightness factor.
// Resulting color needs to be clamped to output range
// since we are pre-multiplying alpha in the shader.
return vec4(clamp(Cs.rgb * amount, vec3(0.0), vec3(1.0)), Cs.a);
}
vec4 Opacity(vec4 Cs, float amount) {
return vec4(Cs.rgb, Cs.a * amount);
}
vec4 brush_fs() {
vec2 uv = vUv.xy / vW;
vec4 Cs = texture(sColor0, vec3(uv, vUv.z));
// Un-premultiply the input.
Cs.rgb /= Cs.a;
vec4 color;
switch (vOp) {
case 0:
color = Cs;
break;
case 1:
color = Contrast(Cs, vAmount);
break;
case 4:
color = Invert(Cs, vAmount);
break;
case 7:
color = Brightness(Cs, vAmount);
break;
case 8:
color = Opacity(Cs, vAmount);
break;
default:
color = vColorMat * Cs + vColorOffset;
}
// Pre-multiply the alpha into the output value.
color.rgb *= color.a;
return color;
}
#endif


@ -0,0 +1,63 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#define VECS_PER_SPECIFIC_BRUSH 0
#include shared,prim_shared,brush
#ifdef WR_FEATURE_ALPHA_PASS
varying vec2 vLocalPos;
#endif
varying vec3 vUv;
flat varying vec4 vUvBounds;
#ifdef WR_VERTEX_SHADER
void brush_vs(
VertexInfo vi,
int prim_address,
RectWithSize local_rect,
ivec3 user_data,
PictureTask pic_task
) {
// If this is in WR_FEATURE_TEXTURE_RECT mode, the rect and size use
// non-normalized texture coordinates.
#ifdef WR_FEATURE_TEXTURE_RECT
vec2 texture_size = vec2(1, 1);
#else
vec2 texture_size = vec2(textureSize(sColor0, 0));
#endif
ImageResource res = fetch_image_resource(user_data.x);
vec2 uv0 = res.uv_rect.p0;
vec2 uv1 = res.uv_rect.p1;
vUv.z = res.layer;
vec2 f = (vi.local_pos - local_rect.p0) / local_rect.size;
vUv.xy = mix(uv0, uv1, f);
vUv.xy /= texture_size;
vUvBounds = vec4(uv0 + vec2(0.5), uv1 - vec2(0.5)) / texture_size.xyxy;
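// The clamp bounds are inset by half a texel so that bilinear filtering in
// the fragment shader never samples outside this image's rect in the atlas.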
#ifdef WR_FEATURE_ALPHA_PASS
vLocalPos = vi.local_pos;
#endif
}
#endif
#ifdef WR_FRAGMENT_SHADER
vec4 brush_fs() {
vec2 uv = clamp(vUv.xy, vUvBounds.xy, vUvBounds.zw);
vec4 color = TEX_SAMPLE(sColor0, vec3(uv, vUv.z));
#ifdef WR_FEATURE_ALPHA_PASS
color *= init_transform_fs(vLocalPos);
#endif
return color;
}
#endif


@ -32,13 +32,13 @@ Line fetch_line(int address) {
}
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
) {
vLocalPos = local_pos;
vLocalPos = vi.local_pos;
// Note: `line` name is reserved in HLSL
Line line_prim = fetch_line(prim_address);
@ -65,6 +65,9 @@ void brush_vs(
pos = local_rect.p0.yx;
size = local_rect.size.yx;
break;
default:
vAxisSelect = 0.0;
pos = size = vec2(0.0);
}
vLocalOrigin = pos;
@ -108,6 +111,8 @@ void brush_vs(
size.y);
break;
}
default:
vParams = vec4(0.0);
}
}
#endif
@ -223,6 +228,7 @@ vec4 brush_fs() {
break;
}
default: break;
}
return vColor * alpha;


@ -24,10 +24,10 @@ BrushMaskCornerPrimitive fetch_primitive(int address) {
}
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
) {
// Load the specific primitive.
@ -38,7 +38,7 @@ void brush_vs(
vClipCenter_Radius = vec4(local_rect.p0 + prim.radius, prim.radius);
vLocalRect = vec4(local_rect.p0, local_rect.p0 + local_rect.size);
vLocalPos = local_pos;
vLocalPos = vi.local_pos;
}
#endif


@ -38,10 +38,10 @@ RoundedRectPrimitive fetch_rounded_rect_primitive(int address) {
}
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
) {
// Load the specific primitive.
@ -62,7 +62,7 @@ void brush_vs(
vClipCenter_Radius_BL = vec4(clip_rect.xw + vec2(prim.radius_bl.x, -prim.radius_bl.y), prim.radius_bl);
vLocalRect = clip_rect;
vLocalPos = local_pos;
vLocalPos = vi.local_pos;
}
#endif


@ -2,40 +2,41 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include shared,prim_shared
#define VECS_PER_SPECIFIC_BRUSH 5
varying vec3 vUv0;
varying vec3 vUv1;
#include shared,prim_shared,brush
varying vec3 vUv;
varying float vW;
varying vec3 vSrcUv;
varying vec3 vBackdropUv;
flat varying int vOp;
#ifdef WR_VERTEX_SHADER
void main(void) {
CompositeInstance ci = fetch_composite_instance();
PictureTask dest_task = fetch_picture_task(ci.render_task_index);
RenderTaskCommonData backdrop_task = fetch_render_task_common_data(ci.backdrop_task_index);
PictureTask src_task = fetch_picture_task(ci.src_task_index);
vec2 dest_origin = dest_task.common_data.task_rect.p0 -
dest_task.content_origin +
src_task.content_origin;
vec2 local_pos = mix(dest_origin,
dest_origin + src_task.common_data.task_rect.size,
aPosition.xy);
void brush_vs(
VertexInfo vi,
int prim_address,
RectWithSize local_rect,
ivec3 user_data,
PictureTask pic_task
) {
vec2 texture_size = vec2(textureSize(sCacheRGBA8, 0));
vOp = user_data.x;
vW = vi.w;
vec2 st0 = backdrop_task.task_rect.p0 / texture_size;
vec2 st1 = (backdrop_task.task_rect.p0 + backdrop_task.task_rect.size) / texture_size;
vUv0 = vec3(mix(st0, st1, aPosition.xy), backdrop_task.texture_layer_index);
PictureTask src_task = fetch_picture_task(user_data.z);
vec2 src_uv = (vi.snapped_device_pos +
src_task.common_data.task_rect.p0 -
src_task.content_origin) * vi.w;
vSrcUv = vec3(src_uv / texture_size, src_task.common_data.texture_layer_index);
st0 = src_task.common_data.task_rect.p0 / texture_size;
st1 = (src_task.common_data.task_rect.p0 + src_task.common_data.task_rect.size) / texture_size;
vUv1 = vec3(mix(st0, st1, aPosition.xy), src_task.common_data.texture_layer_index);
vOp = ci.user_data0;
gl_Position = uTransform * vec4(local_pos, ci.z, 1.0);
RenderTaskCommonData backdrop_task = fetch_render_task_common_data(user_data.y);
vec2 backdrop_uv = (vi.snapped_device_pos +
backdrop_task.task_rect.p0 -
src_task.content_origin) * vi.w;
vBackdropUv = vec3(backdrop_uv / texture_size, backdrop_task.texture_layer_index);
}
#endif
@ -203,17 +204,17 @@ const int MixBlendMode_Saturation = 13;
const int MixBlendMode_Color = 14;
const int MixBlendMode_Luminosity = 15;
void main(void) {
vec4 Cb = texture(sCacheRGBA8, vUv0);
vec4 Cs = texture(sCacheRGBA8, vUv1);
vec4 brush_fs() {
vec2 uv = vUv.xy / vW;
vec4 Cb = texture(sCacheRGBA8, vBackdropUv);
vec4 Cs = texture(sCacheRGBA8, vSrcUv);
if (Cb.a == 0.0) {
oFragColor = Cs;
return;
return Cs;
}
if (Cs.a == 0.0) {
oFragColor = vec4(0.0, 0.0, 0.0, 0.0);
return;
return vec4(0.0);
}
// The mix-blend-mode functions assume no premultiplied alpha
@ -276,6 +277,7 @@ void main(void) {
case MixBlendMode_Luminosity:
result.rgb = Luminosity(Cb.rgb, Cs.rgb);
break;
default: break;
}
result.rgb = (1.0 - Cb.a) * Cs.rgb + Cb.a * result.rgb;
@ -283,6 +285,6 @@ void main(void) {
result.rgb *= result.a;
oFragColor = result;
return result;
}
#endif


@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#define VECS_PER_SPECIFIC_BRUSH 1
#define VECS_PER_SPECIFIC_BRUSH 5
#include shared,prim_shared,brush
@ -36,10 +36,10 @@ Picture fetch_picture(int address) {
}
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
) {
vImageKind = user_data.y;
@ -80,31 +80,34 @@ void brush_vs(
switch (vImageKind) {
case BRUSH_PICTURE_SIMPLE: {
vec2 f = (local_pos - local_rect.p0) / local_rect.size;
vec2 f = (vi.local_pos - local_rect.p0) / local_rect.size;
vUv.xy = mix(uv0, uv1, f);
vUv.xy /= texture_size;
break;
}
case BRUSH_PICTURE_NINEPATCH: {
vec2 local_src_size = src_size / uDevicePixelRatio;
vUv.xy = (local_pos - local_rect.p0) / local_src_size;
vUv.xy = (vi.local_pos - local_rect.p0) / local_src_size;
vParams.xy = vec2(0.5);
vParams.zw = (local_rect.size / local_src_size - 0.5);
break;
}
case BRUSH_PICTURE_MIRROR: {
vec2 local_src_size = src_size / uDevicePixelRatio;
vUv.xy = (local_pos - local_rect.p0) / local_src_size;
vUv.xy = (vi.local_pos - local_rect.p0) / local_src_size;
vParams.xy = 0.5 * local_rect.size / local_src_size;
break;
}
default:
vUv.xy = vec2(0.0);
vParams = vec4(0.0);
}
vUvBounds = vec4(uv0 + vec2(0.5), uv1 - vec2(0.5)) / texture_size.xyxy;
vUvBounds_NoClamp = vec4(uv0, uv1) / texture_size.xyxy;
#ifdef WR_FEATURE_ALPHA_PASS
vLocalPos = local_pos;
vLocalPos = vi.local_pos;
#endif
}
#endif
@ -138,6 +141,8 @@ vec4 brush_fs() {
uv = clamp(uv, vUvBounds.xy, vUvBounds.zw);
break;
}
default:
uv = vec2(0.0);
}
#if defined WR_FEATURE_COLOR_TARGET


@ -24,17 +24,17 @@ SolidBrush fetch_solid_primitive(int address) {
}
void brush_vs(
VertexInfo vi,
int prim_address,
vec2 local_pos,
RectWithSize local_rect,
ivec2 user_data,
ivec3 user_data,
PictureTask pic_task
) {
SolidBrush prim = fetch_solid_primitive(prim_address);
vColor = prim.color;
#ifdef WR_FEATURE_ALPHA_PASS
vLocalPos = local_pos;
vLocalPos = vi.local_pos;
#endif
}
#endif


@ -44,6 +44,8 @@ void main(void) {
case DIR_VERTICAL:
vOffsetScale = vec2(0.0, 1.0 / texture_size.y);
break;
default:
vOffsetScale = vec2(0.0);
}
vUvRect = vec4(src_rect.p0 + vec2(0.5),


@ -91,6 +91,8 @@ void main(void) {
case CORNER_BOTTOM_LEFT:
sign_modifier = vec2(-1.0, 1.0);
break;
default:
sign_modifier = vec2(0.0);
};
switch (corner.clip_mode) {
@ -111,6 +113,10 @@ void main(void) {
vAlphaMask = vec2(1.0, 1.0);
break;
}
default:
vPoint_Tangent0 = vPoint_Tangent1 = vec4(1.0);
vDotParams = vec3(0.0);
vAlphaMask = vec2(0.0);
}
}


@ -288,7 +288,7 @@ struct ClipArea {
ClipArea fetch_clip_area(int index) {
ClipArea area;
if (index == 0x7FFFFFFF) { //special sentinel task index
if (index == 0x7FFF) { //special sentinel task index
area.common_data = RenderTaskCommonData(
RectWithSize(vec2(0.0), vec2(0.0)),
0.0
@ -368,6 +368,7 @@ Glyph fetch_glyph(int specific_prim_address,
case SUBPX_DIR_VERTICAL:
glyph.y = floor(glyph.y + 0.125);
break;
default: break;
}
return Glyph(glyph);
@ -554,6 +555,8 @@ vec2 compute_snap_offset(vec2 local_pos,
struct VertexInfo {
vec2 local_pos;
vec2 screen_pos;
float w;
vec2 snapped_device_pos;
};
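// w is the homogeneous w of the transformed vertex and snapped_device_pos is
// the device-space position after the snap offset has been applied; brush
// shaders such as brush_blend and brush_mix_blend use them to compute
// screen-space UVs into their source/backdrop render tasks.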
VertexInfo write_vertex(RectWithSize instance_rect,
@ -579,13 +582,20 @@ VertexInfo write_vertex(RectWithSize instance_rect,
vec2 device_pos = world_pos.xy / world_pos.w * uDevicePixelRatio;
// Apply offsets for the render task to get correct screen location.
vec2 final_pos = device_pos + snap_offset -
vec2 snapped_device_pos = device_pos + snap_offset;
vec2 final_pos = snapped_device_pos -
task.content_origin +
task.common_data.task_rect.p0;
gl_Position = uTransform * vec4(final_pos, z, 1.0);
VertexInfo vi = VertexInfo(clamped_local_pos, device_pos);
VertexInfo vi = VertexInfo(
clamped_local_pos,
device_pos,
world_pos.w,
snapped_device_pos
);
return vi;
}
@ -666,7 +676,13 @@ VertexInfo write_transform_vertex(RectWithSize local_segment_rect,
clip_edge_mask
);
VertexInfo vi = VertexInfo(local_pos, device_pos);
VertexInfo vi = VertexInfo(
local_pos,
device_pos,
world_pos.w,
device_pos
);
return vi;
}


@ -1,161 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include shared,prim_shared
varying vec3 vUv;
flat varying vec4 vUvBounds;
flat varying float vAmount;
flat varying int vOp;
flat varying mat4 vColorMat;
flat varying vec4 vColorOffset;
#ifdef WR_VERTEX_SHADER
void main(void) {
CompositeInstance ci = fetch_composite_instance();
PictureTask dest_task = fetch_picture_task(ci.render_task_index);
PictureTask src_task = fetch_picture_task(ci.src_task_index);
vec2 dest_origin = dest_task.common_data.task_rect.p0 -
dest_task.content_origin +
src_task.content_origin;
vec2 local_pos = mix(dest_origin,
dest_origin + src_task.common_data.task_rect.size,
aPosition.xy);
vec2 texture_size = vec2(textureSize(sCacheRGBA8, 0));
vec2 st0 = src_task.common_data.task_rect.p0;
vec2 st1 = src_task.common_data.task_rect.p0 + src_task.common_data.task_rect.size;
vec2 uv = src_task.common_data.task_rect.p0 + aPosition.xy * src_task.common_data.task_rect.size;
vUv = vec3(uv / texture_size, src_task.common_data.texture_layer_index);
vUvBounds = vec4(st0 + 0.5, st1 - 0.5) / texture_size.xyxy;
vOp = ci.user_data0;
vAmount = float(ci.user_data1) / 65535.0;
float lumR = 0.2126;
float lumG = 0.7152;
float lumB = 0.0722;
float oneMinusLumR = 1.0 - lumR;
float oneMinusLumG = 1.0 - lumG;
float oneMinusLumB = 1.0 - lumB;
float oneMinusAmount = 1.0 - vAmount;
switch (vOp) {
case 2: {
// Grayscale
vColorMat = mat4(vec4(lumR + oneMinusLumR * oneMinusAmount, lumR - lumR * oneMinusAmount, lumR - lumR * oneMinusAmount, 0.0),
vec4(lumG - lumG * oneMinusAmount, lumG + oneMinusLumG * oneMinusAmount, lumG - lumG * oneMinusAmount, 0.0),
vec4(lumB - lumB * oneMinusAmount, lumB - lumB * oneMinusAmount, lumB + oneMinusLumB * oneMinusAmount, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 3: {
// HueRotate
float c = cos(vAmount * 0.01745329251);
float s = sin(vAmount * 0.01745329251);
vColorMat = mat4(vec4(lumR + oneMinusLumR * c - lumR * s, lumR - lumR * c + 0.143 * s, lumR - lumR * c - oneMinusLumR * s, 0.0),
vec4(lumG - lumG * c - lumG * s, lumG + oneMinusLumG * c + 0.140 * s, lumG - lumG * c + lumG * s, 0.0),
vec4(lumB - lumB * c + oneMinusLumB * s, lumB - lumB * c - 0.283 * s, lumB + oneMinusLumB * c + lumB * s, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 5: {
// Saturate
vColorMat = mat4(vec4(oneMinusAmount * lumR + vAmount, oneMinusAmount * lumR, oneMinusAmount * lumR, 0.0),
vec4(oneMinusAmount * lumG, oneMinusAmount * lumG + vAmount, oneMinusAmount * lumG, 0.0),
vec4(oneMinusAmount * lumB, oneMinusAmount * lumB, oneMinusAmount * lumB + vAmount, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 6: {
// Sepia
vColorMat = mat4(vec4(0.393 + 0.607 * oneMinusAmount, 0.349 - 0.349 * oneMinusAmount, 0.272 - 0.272 * oneMinusAmount, 0.0),
vec4(0.769 - 0.769 * oneMinusAmount, 0.686 + 0.314 * oneMinusAmount, 0.534 - 0.534 * oneMinusAmount, 0.0),
vec4(0.189 - 0.189 * oneMinusAmount, 0.168 - 0.168 * oneMinusAmount, 0.131 + 0.869 * oneMinusAmount, 0.0),
vec4(0.0, 0.0, 0.0, 1.0));
vColorOffset = vec4(0.0);
break;
}
case 10: {
// Color Matrix
const int dataOffset = 2; // Offset in GPU cache where matrix starts
vec4 data[8] = fetch_from_resource_cache_8(ci.user_data2);
vColorMat = mat4(data[dataOffset], data[dataOffset + 1],
data[dataOffset + 2], data[dataOffset + 3]);
vColorOffset = data[dataOffset + 4];
break;
}
}
gl_Position = uTransform * vec4(local_pos, ci.z, 1.0);
}
#endif
#ifdef WR_FRAGMENT_SHADER
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
vec4 Contrast(vec4 Cs, float amount) {
return vec4(Cs.rgb * amount - 0.5 * amount + 0.5, Cs.a);
}
vec4 Invert(vec4 Cs, float amount) {
return vec4(mix(Cs.rgb, vec3(1.0) - Cs.rgb, amount), Cs.a);
}
vec4 Brightness(vec4 Cs, float amount) {
// Apply the brightness factor.
// Resulting color needs to be clamped to output range
// since we are pre-multiplying alpha in the shader.
return vec4(clamp(Cs.rgb * amount, vec3(0.0), vec3(1.0)), Cs.a);
}
vec4 Opacity(vec4 Cs, float amount) {
return vec4(Cs.rgb, Cs.a * amount);
}
void main(void) {
vec2 uv = clamp(vUv.xy, vUvBounds.xy, vUvBounds.zw);
vec4 Cs = textureLod(sCacheRGBA8, vec3(uv, vUv.z), 0.0);
if (Cs.a == 0.0) {
discard;
}
// Un-premultiply the input.
Cs.rgb /= Cs.a;
switch (vOp) {
case 0:
oFragColor = Cs;
break;
case 1:
oFragColor = Contrast(Cs, vAmount);
break;
case 4:
oFragColor = Invert(Cs, vAmount);
break;
case 7:
oFragColor = Brightness(Cs, vAmount);
break;
case 8:
oFragColor = Opacity(Cs, vAmount);
break;
default:
oFragColor = vColorMat * Cs + vColorOffset;
}
// Pre-multiply the alpha into the output value.
oFragColor.rgb *= oFragColor.a;
}
#endif


@ -106,6 +106,7 @@ void write_color(vec4 color0, vec4 color1, int style, vec2 delta, int instance_k
case SIDE_SECOND:
color1.a = 0.0;
break;
default: break;
}
vColor00 = vec4(clamp(color0.rgb * modulate.x, vec3(0.0), vec3(color0.a)), color0.a);
@ -136,6 +137,8 @@ int select_style(int color_select, vec2 fstyle) {
return style.x;
case SIDE_SECOND:
return style.y;
default:
return 0;
}
}
@ -155,7 +158,7 @@ void main(void) {
vec2 color_delta;
vec4 edge_mask;
// TODO(gw): Now that all border styles are supported, the switch
// TODO(gw): Now that all border styles are supported, the
// statement below can be tidied up quite a bit.
switch (sub_part) {
@ -265,6 +268,14 @@ void main(void) {
edge_mask = vec4(1.0, 0.0, 0.0, 1.0);
break;
}
default:
p0 = p1 = vec2(0.0);
color0 = color1 = vec4(1.0);
vClipCenter = vClipSign = vec2(0.0);
style = 0;
edge_distances = edge_mask = vec4(0.0);
color_delta = vec2(0.0);
vIsBorderRadiusLessThanBorderWidth = 0.0;
}
switch (style) {


@ -145,7 +145,7 @@ void main(void) {
BorderCorners corners = get_border_corners(border, prim.local_rect);
vec4 color = border.colors[sub_part];
// TODO(gw): Now that all border styles are supported, the switch
// TODO(gw): Now that all border styles are supported, the
// statement below can be tidied up quite a bit.
float style;
@ -215,6 +215,11 @@ void main(void) {
edge_mask = vec4(0.0, 1.0, 0.0, 1.0);
break;
}
default:
segment_rect.p0 = segment_rect.size = vec2(0.0);
style = 0.0;
color_flip = false;
edge_mask = vec4(0.0);
}
write_alpha_select(style);


@ -55,7 +55,13 @@ VertexInfo write_text_vertex(vec2 clamped_local_pos,
gl_Position = uTransform * vec4(final_pos, z, 1.0);
VertexInfo vi = VertexInfo(clamped_local_pos, device_pos);
VertexInfo vi = VertexInfo(
clamped_local_pos,
device_pos,
world_pos.w,
final_pos
);
return vi;
}
@ -147,6 +153,9 @@ void main(void) {
vMaskSwizzle = vec2(-1.0, 1.0);
vColor = vec4(text.color.a) * text.bg_color;
break;
default:
vMaskSwizzle = vec2(0.0);
vColor = vec4(1.0);
}
vec2 texture_size = vec2(textureSize(sColor0, 0));


@ -24,7 +24,7 @@
#define TEX_SAMPLE(sampler, tex_coord) texture(sampler, tex_coord.xy)
#else
// In normal case, we use textureLod(). We haven't used the lod yet. So, we always pass 0.0 now.
#define TEX_SAMPLE(sampler, tex_coord) textureLod(sampler, tex_coord, 0.0)
#define TEX_SAMPLE(sampler, tex_coord) texture(sampler, tex_coord)
#endif
//======================================================================================


@ -32,7 +32,7 @@ use util::{MatrixHelpers, TransformedRectKind};
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(i32::MAX as u32);
const OPAQUE_TASK_ADDRESS: RenderTaskAddress = RenderTaskAddress(0x7fff);
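// The sentinel now has to fit in 16 bits, since the clip task address is
// packed into the upper half of aData0.x and unpacked in brush.glsl
// (bi.clip_address = aData0.x >> 16, compared against 0x7FFF in fetch_clip_area).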
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
@ -73,20 +73,21 @@ pub enum BrushBatchKind {
Picture(BrushImageSourceKind),
Solid,
Line,
Image(ImageBufferKind),
Blend,
MixBlend {
task_id: RenderTaskId,
source_id: RenderTaskId,
backdrop_id: RenderTaskId,
},
}
#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug)]
#[cfg_attr(feature = "capture", derive(Serialize))]
#[cfg_attr(feature = "replay", derive(Deserialize))]
pub enum BatchKind {
Composite {
task_id: RenderTaskId,
source_id: RenderTaskId,
backdrop_id: RenderTaskId,
},
HardwareComposite,
SplitComposite,
Blend,
Transformable(TransformedRectKind, TransformBatchKind),
Brush(BrushBatchKind),
}
@ -176,12 +177,6 @@ impl AlphaBatchList {
let mut selected_batch_index = None;
match (key.kind, key.blend_mode) {
(BatchKind::Composite { .. }, _) => {
// Composites always get added to their own batch.
// This is because the result of a composite can affect
// the input to the next composite. Perhaps we can
// optimize this in the future.
}
(BatchKind::Transformable(_, TransformBatchKind::TextRun(_)), BlendMode::SubpixelWithBgColor) |
(BatchKind::Transformable(_, TransformBatchKind::TextRun(_)), BlendMode::SubpixelVariableTextColor) => {
'outer_text: for (batch_index, batch) in self.batches.iter().enumerate().rev().take(10) {
@ -606,24 +601,6 @@ impl AlphaBatchBuilder {
}
}
fn get_buffer_kind(texture: SourceTexture) -> ImageBufferKind {
match texture {
SourceTexture::External(ext_image) => {
match ext_image.image_type {
ExternalImageType::TextureHandle(target) => {
target.into()
}
ExternalImageType::Buffer => {
// The ExternalImageType::Buffer should be handled by resource_cache.
// It should go through the non-external case.
panic!("Unexpected non-texture handle type");
}
}
}
_ => ImageBufferKind::Texture2DArray,
}
}
// Adds a primitive to a batch.
// It can recursively call itself in some situations, for
// example if it encounters a picture where the items
@ -690,29 +667,43 @@ impl AlphaBatchBuilder {
z,
);
let blend_mode = ctx.prim_store.get_blend_mode(prim_metadata, transform_kind);
let specified_blend_mode = ctx.prim_store.get_blend_mode(prim_metadata);
let non_segmented_blend_mode = if !prim_metadata.opacity.is_opaque ||
prim_metadata.clip_task_id.is_some() ||
transform_kind == TransformedRectKind::Complex {
specified_blend_mode
} else {
BlendMode::None
};
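// A non-segmented primitive can go to the opaque pass (BlendMode::None) only
// when it is fully opaque, has no clip mask and an axis-aligned transform;
// otherwise it uses the blend mode it asked for. Segmented brushes choose
// between the alpha and opaque batches in add_brush_to_batch instead.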
match prim_metadata.prim_kind {
PrimitiveKind::Brush => {
let brush = &ctx.prim_store.cpu_brushes[prim_metadata.cpu_prim_index.0];
let batch_key = brush.get_batch_key(blend_mode);
self.add_brush_to_batch(
brush,
prim_metadata,
batch_key,
clip_chain_rect_index,
clip_task_address,
&task_relative_bounding_rect,
prim_cache_address,
scroll_id,
task_address,
transform_kind,
z,
render_tasks,
0,
0,
);
if let Some((batch_kind, textures, user_data)) = brush.get_batch_params(
ctx.resource_cache,
gpu_cache,
deferred_resolves,
) {
self.add_brush_to_batch(
brush,
prim_metadata,
batch_kind,
specified_blend_mode,
non_segmented_blend_mode,
textures,
clip_chain_rect_index,
clip_task_address,
&task_relative_bounding_rect,
prim_cache_address,
scroll_id,
task_address,
transform_kind,
z,
render_tasks,
user_data,
);
}
}
PrimitiveKind::Border => {
let border_cpu =
@ -722,12 +713,12 @@ impl AlphaBatchBuilder {
transform_kind,
TransformBatchKind::BorderCorner,
);
let corner_key = BatchKey::new(corner_kind, blend_mode, no_textures);
let corner_key = BatchKey::new(corner_kind, non_segmented_blend_mode, no_textures);
let edge_kind = BatchKind::Transformable(
transform_kind,
TransformBatchKind::BorderEdge,
);
let edge_key = BatchKey::new(edge_kind, blend_mode, no_textures);
let edge_key = BatchKey::new(edge_kind, non_segmented_blend_mode, no_textures);
// Work around borrow ck on borrowing batch_list twice.
{
@ -794,10 +785,10 @@ impl AlphaBatchBuilder {
return;
}
let batch_kind = TransformBatchKind::Image(Self::get_buffer_kind(cache_item.texture_id));
let batch_kind = TransformBatchKind::Image(get_buffer_kind(cache_item.texture_id));
let key = BatchKey::new(
BatchKind::Transformable(transform_kind, batch_kind),
blend_mode,
non_segmented_blend_mode,
BatchTextures {
colors: [
cache_item.texture_id,
@ -915,20 +906,17 @@ impl AlphaBatchBuilder {
}
PictureKind::BoxShadow { image_kind, .. } => {
let textures = BatchTextures::color(cache_item.texture_id);
let kind = BatchKind::Brush(
BrushBatchKind::Picture(
BrushImageSourceKind::from_render_target_kind(picture.target_kind())),
);
let alpha_batch_key = BatchKey::new(
kind,
blend_mode,
textures,
let kind = BrushBatchKind::Picture(
BrushImageSourceKind::from_render_target_kind(picture.target_kind()),
);
self.add_brush_to_batch(
&picture.brush,
prim_metadata,
alpha_batch_key,
kind,
specified_blend_mode,
non_segmented_blend_mode,
textures,
clip_chain_rect_index,
clip_task_address,
&task_relative_bounding_rect,
@ -938,8 +926,7 @@ impl AlphaBatchBuilder {
transform_kind,
z,
render_tasks,
cache_item.uv_rect_handle.as_int(gpu_cache),
image_kind as i32,
[cache_item.uv_rect_handle.as_int(gpu_cache), image_kind as i32, 0],
);
}
}
@ -954,7 +941,7 @@ impl AlphaBatchBuilder {
BrushBatchKind::Picture(
BrushImageSourceKind::from_render_target_kind(picture.target_kind())),
);
let key = BatchKey::new(kind, blend_mode, textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, textures);
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
let instance = BrushInstance {
@ -966,8 +953,11 @@ impl AlphaBatchBuilder {
z,
segment_index: 0,
edge_flags: EdgeAaSegmentMask::empty(),
user_data0: cache_task_address.0 as i32,
user_data1: BrushImageKind::Simple as i32,
user_data: [
cache_task_address.0 as i32,
BrushImageKind::Simple as i32,
0,
],
};
batch.push(PrimitiveInstance::from(instance));
}
@ -1038,7 +1028,7 @@ impl AlphaBatchBuilder {
let kind = BatchKind::Brush(
BrushBatchKind::Picture(BrushImageSourceKind::ColorAlphaMask),
);
let key = BatchKey::new(kind, blend_mode, textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, textures);
let instance = BrushInstance {
picture_address: task_address,
@ -1049,8 +1039,11 @@ impl AlphaBatchBuilder {
z,
segment_index: 0,
edge_flags: EdgeAaSegmentMask::empty(),
user_data0: cache_task_address.0 as i32,
user_data1: BrushImageKind::Simple as i32,
user_data: [
cache_task_address.0 as i32,
BrushImageKind::Simple as i32,
0,
],
};
{
@ -1095,40 +1088,42 @@ impl AlphaBatchBuilder {
}
_ => {
let key = BatchKey::new(
BatchKind::Blend,
BatchKind::Brush(BrushBatchKind::Blend),
BlendMode::PremultipliedAlpha,
BatchTextures::no_texture(),
BatchTextures::render_target_cache(),
);
let src_task_address = render_tasks.get_task_address(source_id);
let (filter_mode, amount) = match filter {
FilterOp::Blur(..) => (0, 0.0),
FilterOp::Contrast(amount) => (1, amount),
FilterOp::Grayscale(amount) => (2, amount),
FilterOp::HueRotate(angle) => (3, angle),
FilterOp::Invert(amount) => (4, amount),
FilterOp::Saturate(amount) => (5, amount),
FilterOp::Sepia(amount) => (6, amount),
FilterOp::Brightness(amount) => (7, amount),
FilterOp::Opacity(_, amount) => (8, amount),
FilterOp::DropShadow(..) => unreachable!(),
FilterOp::ColorMatrix(_) => (10, 0.0),
let filter_mode = match filter {
FilterOp::Blur(..) => 0,
FilterOp::Contrast(..) => 1,
FilterOp::Grayscale(..) => 2,
FilterOp::HueRotate(..) => 3,
FilterOp::Invert(..) => 4,
FilterOp::Saturate(..) => 5,
FilterOp::Sepia(..) => 6,
FilterOp::Brightness(..) => 7,
FilterOp::Opacity(..) => 8,
FilterOp::DropShadow(..) => 9,
FilterOp::ColorMatrix(..) => 10,
};
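// The filter amount is no longer packed into the instance data; brush_blend.glsl
// reads it from the GPU cache at the primitive address instead.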
let amount = (amount * 65535.0).round() as i32;
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
let instance = CompositePrimitiveInstance::new(
task_address,
src_task_address,
RenderTaskAddress(0),
filter_mode,
amount,
let instance = BrushInstance {
picture_address: task_address,
prim_address: prim_cache_address,
clip_chain_rect_index,
scroll_id,
clip_task_address,
z,
prim_cache_address.as_int(),
0,
);
segment_index: 0,
edge_flags: EdgeAaSegmentMask::empty(),
user_data: [
cache_task_address.0 as i32,
filter_mode,
0,
],
};
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
batch.push(PrimitiveInstance::from(instance));
}
}
@ -1137,11 +1132,13 @@ impl AlphaBatchBuilder {
let backdrop_id = secondary_render_task_id.expect("no backdrop!?");
let key = BatchKey::new(
BatchKind::Composite {
task_id,
source_id,
backdrop_id,
},
BatchKind::Brush(
BrushBatchKind::MixBlend {
task_id,
source_id,
backdrop_id,
},
),
BlendMode::PremultipliedAlpha,
BatchTextures::no_texture(),
);
@ -1149,16 +1146,21 @@ impl AlphaBatchBuilder {
let backdrop_task_address = render_tasks.get_task_address(backdrop_id);
let source_task_address = render_tasks.get_task_address(source_id);
let instance = CompositePrimitiveInstance::new(
task_address,
source_task_address,
backdrop_task_address,
mode as u32 as i32,
0,
let instance = BrushInstance {
picture_address: task_address,
prim_address: prim_cache_address,
clip_chain_rect_index,
scroll_id,
clip_task_address,
z,
0,
0,
);
segment_index: 0,
edge_flags: EdgeAaSegmentMask::empty(),
user_data: [
mode as u32 as i32,
backdrop_task_address.0 as i32,
source_task_address.0 as i32,
],
};
batch.push(PrimitiveInstance::from(instance));
}
@ -1209,7 +1211,7 @@ impl AlphaBatchBuilder {
transform_kind,
TransformBatchKind::AlignedGradient,
);
let key = BatchKey::new(kind, blend_mode, no_textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, no_textures);
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
for part_index in 0 .. (gradient_cpu.stops_count - 1) {
batch.push(base_instance.build(part_index as i32, 0, 0));
@ -1220,7 +1222,7 @@ impl AlphaBatchBuilder {
transform_kind,
TransformBatchKind::AngleGradient,
);
let key = BatchKey::new(kind, blend_mode, no_textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, no_textures);
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
batch.push(base_instance.build(0, 0, 0));
}
@ -1229,7 +1231,7 @@ impl AlphaBatchBuilder {
transform_kind,
TransformBatchKind::RadialGradient,
);
let key = BatchKey::new(kind, blend_mode, no_textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, no_textures);
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
batch.push(base_instance.build(0, 0, 0));
}
@ -1267,11 +1269,11 @@ impl AlphaBatchBuilder {
}
// All yuv textures should be the same type.
let buffer_kind = Self::get_buffer_kind(textures.colors[0]);
let buffer_kind = get_buffer_kind(textures.colors[0]);
assert!(
textures.colors[1 .. image_yuv_cpu.format.get_plane_num()]
.iter()
.all(|&tid| buffer_kind == Self::get_buffer_kind(tid))
.all(|&tid| buffer_kind == get_buffer_kind(tid))
);
let kind = BatchKind::Transformable(
@ -1282,7 +1284,7 @@ impl AlphaBatchBuilder {
image_yuv_cpu.color_space,
),
);
let key = BatchKey::new(kind, blend_mode, textures);
let key = BatchKey::new(kind, non_segmented_blend_mode, textures);
let batch = self.batch_list.get_suitable_batch(key, &task_relative_bounding_rect);
batch.push(base_instance.build(
@ -1298,7 +1300,10 @@ impl AlphaBatchBuilder {
&mut self,
brush: &BrushPrimitive,
prim_metadata: &PrimitiveMetadata,
batch_key: BatchKey,
batch_kind: BrushBatchKind,
alpha_blend_mode: BlendMode,
non_segmented_blend_mode: BlendMode,
textures: BatchTextures,
clip_chain_rect_index: ClipChainRectIndex,
clip_task_address: RenderTaskAddress,
task_relative_bounding_rect: &DeviceIntRect,
@ -1308,8 +1313,7 @@ impl AlphaBatchBuilder {
transform_kind: TransformedRectKind,
z: i32,
render_tasks: &RenderTaskTree,
user_data0: i32,
user_data1: i32,
user_data: [i32; 3],
) {
let base_instance = BrushInstance {
picture_address: task_address,
@ -1320,15 +1324,15 @@ impl AlphaBatchBuilder {
z,
segment_index: 0,
edge_flags: EdgeAaSegmentMask::all(),
user_data0,
user_data1,
user_data,
};
match brush.segment_desc {
Some(ref segment_desc) => {
let alpha_batch_key = BatchKey {
blend_mode: BlendMode::PremultipliedAlpha,
..batch_key
blend_mode: alpha_blend_mode,
kind: BatchKind::Brush(batch_kind),
textures,
};
let alpha_batch = self.batch_list.alpha_batch_list.get_suitable_batch(
@ -1338,7 +1342,8 @@ impl AlphaBatchBuilder {
let opaque_batch_key = BatchKey {
blend_mode: BlendMode::None,
..batch_key
kind: BatchKind::Brush(batch_kind),
textures,
};
let opaque_batch = self.batch_list.opaque_batch_list.get_suitable_batch(
@ -1371,6 +1376,11 @@ impl AlphaBatchBuilder {
}
}
None => {
let batch_key = BatchKey {
blend_mode: non_segmented_blend_mode,
kind: BatchKind::Brush(batch_kind),
textures,
};
let batch = self.batch_list.get_suitable_batch(batch_key, task_relative_bounding_rect);
batch.push(PrimitiveInstance::from(base_instance));
}
@ -1379,31 +1389,56 @@ impl AlphaBatchBuilder {
}
impl BrushPrimitive {
fn get_batch_key(&self, blend_mode: BlendMode) -> BatchKey {
fn get_batch_params(
&self,
resource_cache: &ResourceCache,
gpu_cache: &mut GpuCache,
deferred_resolves: &mut Vec<DeferredResolve>,
) -> Option<(BrushBatchKind, BatchTextures, [i32; 3])> {
match self.kind {
BrushKind::Line { .. } => {
BatchKey::new(
BatchKind::Brush(BrushBatchKind::Line),
blend_mode,
Some((
BrushBatchKind::Line,
BatchTextures::no_texture(),
)
[0; 3],
))
}
BrushKind::Image { request, .. } => {
let cache_item = resolve_image(
request,
resource_cache,
gpu_cache,
deferred_resolves,
);
if cache_item.texture_id == SourceTexture::Invalid {
None
} else {
let textures = BatchTextures::color(cache_item.texture_id);
Some((
BrushBatchKind::Image(get_buffer_kind(cache_item.texture_id)),
textures,
[cache_item.uv_rect_handle.as_int(gpu_cache), 0, 0],
))
}
}
BrushKind::Picture => {
panic!("bug: get_batch_key is handled at higher level for pictures");
}
BrushKind::Solid { .. } => {
BatchKey::new(
BatchKind::Brush(BrushBatchKind::Solid),
blend_mode,
Some((
BrushBatchKind::Solid,
BatchTextures::no_texture(),
)
[0; 3],
))
}
BrushKind::Clear => {
BatchKey::new(
BatchKind::Brush(BrushBatchKind::Solid),
BlendMode::PremultipliedDestOut,
Some((
BrushBatchKind::Solid,
BatchTextures::no_texture(),
)
[0; 3],
))
}
BrushKind::Mask { .. } => {
unreachable!("bug: mask brushes not expected in normal alpha pass");
@ -1416,42 +1451,53 @@ trait AlphaBatchHelpers {
fn get_blend_mode(
&self,
metadata: &PrimitiveMetadata,
transform_kind: TransformedRectKind,
) -> BlendMode;
}
impl AlphaBatchHelpers for PrimitiveStore {
fn get_blend_mode(
&self,
metadata: &PrimitiveMetadata,
transform_kind: TransformedRectKind,
) -> BlendMode {
let needs_blending = !metadata.opacity.is_opaque || metadata.clip_task_id.is_some() ||
transform_kind == TransformedRectKind::Complex;
fn get_blend_mode(&self, metadata: &PrimitiveMetadata) -> BlendMode {
match metadata.prim_kind {
// Can only resolve the TextRun's blend mode once glyphs are fetched.
PrimitiveKind::TextRun => BlendMode::PremultipliedAlpha,
PrimitiveKind::TextRun => {
BlendMode::PremultipliedAlpha
}
PrimitiveKind::Border |
PrimitiveKind::YuvImage |
PrimitiveKind::AlignedGradient |
PrimitiveKind::AngleGradient |
PrimitiveKind::RadialGradient |
PrimitiveKind::Brush |
PrimitiveKind::Picture => if needs_blending {
PrimitiveKind::Picture => {
BlendMode::PremultipliedAlpha
} else {
BlendMode::None
},
PrimitiveKind::Image => if needs_blending {
}
PrimitiveKind::Brush => {
let brush = &self.cpu_brushes[metadata.cpu_prim_index.0];
match brush.kind {
BrushKind::Clear => {
BlendMode::PremultipliedDestOut
}
BrushKind::Image { alpha_type, .. } => {
match alpha_type {
AlphaType::PremultipliedAlpha => BlendMode::PremultipliedAlpha,
AlphaType::Alpha => BlendMode::Alpha,
}
}
BrushKind::Solid { .. } |
BrushKind::Mask { .. } |
BrushKind::Line { .. } |
BrushKind::Picture => {
BlendMode::PremultipliedAlpha
}
}
}
PrimitiveKind::Image => {
let image_cpu = &self.cpu_images[metadata.cpu_prim_index.0];
match image_cpu.alpha_type {
AlphaType::PremultipliedAlpha => BlendMode::PremultipliedAlpha,
AlphaType::Alpha => BlendMode::Alpha,
}
} else {
BlendMode::None
},
}
}
}
}
@ -1641,3 +1687,21 @@ impl ClipBatcher {
}
}
}
fn get_buffer_kind(texture: SourceTexture) -> ImageBufferKind {
match texture {
SourceTexture::External(ext_image) => {
match ext_image.image_type {
ExternalImageType::TextureHandle(target) => {
target.into()
}
ExternalImageType::Buffer => {
// The ExternalImageType::Buffer should be handled by resource_cache.
// It should go through the non-external case.
panic!("Unexpected non-texture handle type");
}
}
}
_ => ImageBufferKind::Texture2DArray,
}
}


@ -3,8 +3,8 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{ClipChainId, ClipId, DeviceIntRect, DevicePixelScale, ExternalScrollId, LayerPoint};
use api::{LayerRect, LayerToWorldTransform, LayerVector2D, LayoutTransform, PipelineId};
use api::{PropertyBinding, ScrollClamping, ScrollEventPhase, ScrollLocation, ScrollNodeIdType};
use api::{LayerRect, LayerToWorldTransform, LayerVector2D, PipelineId, ScrollClamping};
use api::{ScrollEventPhase, ScrollLocation, ScrollNodeIdType};
use api::{ScrollNodeState, WorldPoint};
use clip::ClipStore;
use clip_scroll_node::{ClipScrollNode, NodeType, ScrollFrameInfo, StickyFrameInfo};
@ -466,40 +466,6 @@ impl ClipScrollTree {
}
}
pub fn generate_new_clip_id(&mut self, pipeline_id: PipelineId) -> ClipId {
let new_id = ClipId::DynamicallyAddedNode(self.current_new_node_item, pipeline_id);
self.current_new_node_item += 1;
new_id
}
pub fn add_reference_frame(
&mut self,
rect: &LayerRect,
source_transform: Option<PropertyBinding<LayoutTransform>>,
source_perspective: Option<LayoutTransform>,
origin_in_parent_reference_frame: LayerVector2D,
pipeline_id: PipelineId,
parent_id: Option<ClipId>,
root_for_pipeline: bool,
) -> ClipId {
let reference_frame_id = if root_for_pipeline {
ClipId::root_reference_frame(pipeline_id)
} else {
self.generate_new_clip_id(pipeline_id)
};
let node = ClipScrollNode::new_reference_frame(
parent_id,
rect,
source_transform,
source_perspective,
origin_in_parent_reference_frame,
pipeline_id,
);
self.add_node(node, reference_frame_id);
reference_frame_id
}
pub fn add_sticky_frame(
&mut self,
id: ClipId,

File diff suppressed because it is too large


@ -70,6 +70,7 @@ pub enum DepthFunction {
pub enum TextureFilter {
Nearest,
Linear,
Trilinear,
}
#[derive(Debug)]
@ -924,7 +925,9 @@ impl Device {
}
pub fn create_texture(
&mut self, target: TextureTarget, format: ImageFormat,
&mut self,
target: TextureTarget,
format: ImageFormat,
) -> Texture {
Texture {
id: self.gl.gen_textures(1)[0],
@ -941,15 +944,21 @@ impl Device {
}
fn set_texture_parameters(&mut self, target: gl::GLuint, filter: TextureFilter) {
let filter = match filter {
let mag_filter = match filter {
TextureFilter::Nearest => gl::NEAREST,
TextureFilter::Linear | TextureFilter::Trilinear => gl::LINEAR,
};
let min_filter = match filter {
TextureFilter::Nearest => gl::NEAREST,
TextureFilter::Linear => gl::LINEAR,
TextureFilter::Trilinear => gl::LINEAR_MIPMAP_LINEAR,
};
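// There is no mipmapped magnification filter in GL, so Trilinear only affects
// minification (LINEAR_MIPMAP_LINEAR); the mipmap chain itself is generated
// after upload via generate_mipmap() below.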
self.gl
.tex_parameter_i(target, gl::TEXTURE_MAG_FILTER, filter as gl::GLint);
.tex_parameter_i(target, gl::TEXTURE_MAG_FILTER, mag_filter as gl::GLint);
self.gl
.tex_parameter_i(target, gl::TEXTURE_MIN_FILTER, filter as gl::GLint);
.tex_parameter_i(target, gl::TEXTURE_MIN_FILTER, min_filter as gl::GLint);
self.gl
.tex_parameter_i(target, gl::TEXTURE_WRAP_S, gl::CLAMP_TO_EDGE as gl::GLint);
@ -2229,6 +2238,11 @@ impl<'a> UploadTarget<'a> {
_ => panic!("BUG: Unexpected texture target!"),
}
// If using tri-linear filtering, build the mip-map chain for this texture.
if self.texture.filter == TextureFilter::Trilinear {
self.gl.generate_mipmap(self.texture.target);
}
// Reset row length to 0, otherwise the stride would apply to all texture uploads.
if chunk.stride.is_some() {
self.gl.pixel_store_i(gl::UNPACK_ROW_LENGTH, 0 as _);


@ -5,11 +5,11 @@
use api::{BuiltDisplayListIter, ClipAndScrollInfo, ClipId, ColorF, ComplexClipRegion};
use api::{DevicePixelScale, DeviceUintRect, DeviceUintSize, DisplayItemRef, DocumentLayer, Epoch};
use api::{ExternalScrollId, FilterOp, ImageDisplayItem, ItemRange, LayerPoint, LayerPrimitiveInfo};
use api::{LayerRect, LayerSize, LayerVector2D, LayoutSize, LocalClip, PipelineId, ScrollClamping};
use api::{ScrollEventPhase, ScrollLocation, ScrollNodeIdType, ScrollNodeState, ScrollPolicy};
use api::{ScrollSensitivity, SpecificDisplayItem, StackingContext, TileOffset, TransformStyle};
use api::WorldPoint;
use api::{ExternalScrollId, FilterOp, IframeDisplayItem, ImageDisplayItem, ItemRange, LayerPoint};
use api::{LayerPrimitiveInfo, LayerRect, LayerSize, LayerVector2D, LayoutSize, PipelineId};
use api::{ScrollClamping, ScrollEventPhase, ScrollFrameDisplayItem, ScrollLocation};
use api::{ScrollNodeIdType, ScrollNodeState, ScrollPolicy, ScrollSensitivity, SpecificDisplayItem};
use api::{StackingContext, TileOffset, TransformStyle, WorldPoint};
use clip::ClipRegion;
use clip_scroll_node::StickyFrameInfo;
use clip_scroll_tree::{ClipScrollTree, ScrollStates};
@ -100,10 +100,9 @@ impl<'a> FlattenContext<'a> {
traversal: &mut BuiltDisplayListIter<'a>,
pipeline_id: PipelineId,
frame_size: &LayoutSize,
root_reference_frame_id: ClipId,
root_scroll_frame_id: ClipId,
) {
let clip_id = ClipId::root_scroll_node(pipeline_id);
let root_reference_frame_id = ClipId::root_reference_frame(pipeline_id);
let root_scroll_frame_id = ClipId::root_scroll_node(pipeline_id);
self.builder.push_stacking_context(
pipeline_id,
@ -111,7 +110,7 @@ impl<'a> FlattenContext<'a> {
TransformStyle::Flat,
true,
true,
ClipAndScrollInfo::simple(clip_id),
ClipAndScrollInfo::simple(root_scroll_frame_id),
self.output_pipelines,
);
@ -203,32 +202,46 @@ impl<'a> FlattenContext<'a> {
fn flatten_scroll_frame(
&mut self,
item: &DisplayItemRef,
info: &ScrollFrameDisplayItem,
pipeline_id: PipelineId,
parent_id: &ClipId,
new_scroll_frame_id: &ClipId,
external_id: Option<ExternalScrollId>,
frame_rect: &LayerRect,
content_rect: &LayerRect,
clip_region: ClipRegion,
scroll_sensitivity: ScrollSensitivity,
clip_and_scroll: &ClipAndScrollInfo,
reference_frame_relative_offset: &LayerVector2D,
) {
let clip_id = self.clip_scroll_tree.generate_new_clip_id(pipeline_id);
let complex_clips = self.get_complex_clips(pipeline_id, item.complex_clip().0);
let clip_region = ClipRegion::create_for_clip_node(
*item.local_clip().clip_rect(),
complex_clips,
info.image_mask,
&reference_frame_relative_offset,
);
// Just use clip rectangle as the frame rect for this scroll frame.
// This is useful when calculating scroll extents for the
// ClipScrollNode::scroll(..) API as well as for properly setting sticky
// positioning offsets.
let frame_rect = item.local_clip()
.clip_rect()
.translate(&reference_frame_relative_offset);
let content_rect = item.rect().translate(&reference_frame_relative_offset);
debug_assert!(info.clip_id != info.scroll_frame_id);
self.builder.add_clip_node(
clip_id,
*parent_id,
info.clip_id,
clip_and_scroll.scroll_node_id,
pipeline_id,
clip_region,
self.clip_scroll_tree,
);
self.builder.add_scroll_frame(
*new_scroll_frame_id,
clip_id,
external_id,
info.scroll_frame_id,
info.clip_id,
info.external_id,
pipeline_id,
&frame_rect,
&content_rect.size,
scroll_sensitivity,
info.scroll_sensitivity,
self.clip_scroll_tree,
);
}
@ -259,10 +272,7 @@ impl<'a> FlattenContext<'a> {
.expect("No display list?!")
.display_list;
CompositeOps::new(
stacking_context.filter_ops_for_compositing(
display_list,
filters,
),
stacking_context.filter_ops_for_compositing(display_list, filters),
stacking_context.mix_blend_mode_for_compositing(),
)
};
@ -274,29 +284,32 @@ impl<'a> FlattenContext<'a> {
));
}
// If we have a transformation, we establish a new reference frame. This means
// that fixed position stacking contexts are positioned relative to us.
let is_reference_frame =
stacking_context.transform.is_some() || stacking_context.perspective.is_some();
let origin = reference_frame_relative_offset + bounds.origin.to_vector();
reference_frame_relative_offset = if is_reference_frame {
reference_frame_relative_offset += bounds.origin.to_vector();
// If we have a transformation or a perspective, we should have been assigned a new
// reference frame id. This means this stacking context establishes a new reference frame.
// Descendant fixed position content will be positioned relative to us.
if let Some(reference_frame_id) = stacking_context.reference_frame_id {
debug_assert!(
stacking_context.transform.is_some() ||
stacking_context.perspective.is_some()
);
let reference_frame_bounds = LayerRect::new(LayerPoint::zero(), bounds.size);
let mut clip_id = self.apply_scroll_frame_id_replacement(context_scroll_node_id);
clip_id = self.builder.push_reference_frame(
Some(clip_id),
let mut parent_id = self.apply_scroll_frame_id_replacement(context_scroll_node_id);
self.builder.push_reference_frame(
reference_frame_id,
Some(parent_id),
pipeline_id,
&reference_frame_bounds,
stacking_context.transform,
stacking_context.perspective,
origin,
false,
reference_frame_relative_offset,
self.clip_scroll_tree,
);
self.replacements.push((context_scroll_node_id, clip_id));
LayerVector2D::zero()
} else {
origin
};
self.replacements.push((context_scroll_node_id, reference_frame_id));
reference_frame_relative_offset = LayerVector2D::zero();
}
let sc_scroll_node_id = self.apply_scroll_frame_id_replacement(context_scroll_node_id);
@ -320,7 +333,7 @@ impl<'a> FlattenContext<'a> {
self.replacements.pop();
}
if is_reference_frame {
if stacking_context.reference_frame_id.is_some() {
self.replacements.pop();
self.builder.pop_reference_frame();
}
@ -330,65 +343,57 @@ impl<'a> FlattenContext<'a> {
fn flatten_iframe(
&mut self,
pipeline_id: PipelineId,
parent_id: ClipId,
bounds: &LayerRect,
local_clip: &LocalClip,
reference_frame_relative_offset: LayerVector2D,
item: &DisplayItemRef,
info: &IframeDisplayItem,
parent_pipeline_id: PipelineId,
clip_and_scroll: &ClipAndScrollInfo,
reference_frame_relative_offset: &LayerVector2D,
) {
let pipeline = match self.scene.pipelines.get(&pipeline_id) {
let iframe_pipeline_id = info.pipeline_id;
let pipeline = match self.scene.pipelines.get(&iframe_pipeline_id) {
Some(pipeline) => pipeline,
None => return,
};
let clip_region = ClipRegion::create_for_clip_node_with_local_clip(
local_clip,
&reference_frame_relative_offset
);
let parent_pipeline_id = parent_id.pipeline_id();
let clip_id = self.clip_scroll_tree
.generate_new_clip_id(parent_pipeline_id);
self.builder.add_clip_node(
clip_id,
parent_id,
info.clip_id,
clip_and_scroll.scroll_node_id,
parent_pipeline_id,
clip_region,
ClipRegion::create_for_clip_node_with_local_clip(
&item.local_clip(),
&reference_frame_relative_offset
),
self.clip_scroll_tree,
);
self.pipeline_epochs.push((pipeline_id, pipeline.epoch));
self.pipeline_epochs.push((iframe_pipeline_id, pipeline.epoch));
let bounds = item.rect();
let iframe_rect = LayerRect::new(LayerPoint::zero(), bounds.size);
let origin = reference_frame_relative_offset + bounds.origin.to_vector();
let iframe_reference_frame_id = self.builder.push_reference_frame(
Some(clip_id),
pipeline_id,
let origin = *reference_frame_relative_offset + bounds.origin.to_vector();
self.builder.push_reference_frame(
ClipId::root_reference_frame(iframe_pipeline_id),
Some(info.clip_id),
iframe_pipeline_id,
&iframe_rect,
None,
None,
origin,
true,
self.clip_scroll_tree,
);
self.builder.add_scroll_frame(
ClipId::root_scroll_node(pipeline_id),
iframe_reference_frame_id,
Some(ExternalScrollId(0, pipeline_id)),
pipeline_id,
ClipId::root_scroll_node(iframe_pipeline_id),
ClipId::root_reference_frame(iframe_pipeline_id),
Some(ExternalScrollId(0, iframe_pipeline_id)),
iframe_pipeline_id,
&iframe_rect,
&pipeline.content_size,
ScrollSensitivity::ScriptAndInputEvents,
self.clip_scroll_tree,
);
self.flatten_root(
&mut pipeline.display_list.iter(),
pipeline_id,
&iframe_rect.size,
iframe_reference_frame_id,
ClipId::root_scroll_node(pipeline_id),
);
self.flatten_root(&mut pipeline.display_list.iter(), iframe_pipeline_id, &iframe_rect.size);
self.builder.pop_reference_frame();
}
@ -562,11 +567,11 @@ impl<'a> FlattenContext<'a> {
}
SpecificDisplayItem::Iframe(ref info) => {
self.flatten_iframe(
info.pipeline_id,
clip_and_scroll.scroll_node_id,
&item.rect(),
&item.local_clip(),
reference_frame_relative_offset,
&item,
info,
pipeline_id,
&clip_and_scroll,
&reference_frame_relative_offset
);
}
SpecificDisplayItem::Clip(ref info) => {
@ -589,30 +594,12 @@ impl<'a> FlattenContext<'a> {
self.clip_scroll_tree.add_clip_chain_descriptor(info.id, info.parent, items);
},
SpecificDisplayItem::ScrollFrame(ref info) => {
let complex_clips = self.get_complex_clips(pipeline_id, item.complex_clip().0);
let clip_region = ClipRegion::create_for_clip_node(
*item.local_clip().clip_rect(),
complex_clips,
info.image_mask,
&reference_frame_relative_offset,
);
// Just use clip rectangle as the frame rect for this scroll frame.
// This is useful when calculating scroll extents for the
// ClipScrollNode::scroll(..) API as well as for properly setting sticky
// positioning offsets.
let frame_rect = item.local_clip()
.clip_rect()
.translate(&reference_frame_relative_offset);
let content_rect = item.rect().translate(&reference_frame_relative_offset);
self.flatten_scroll_frame(
&item,
info,
pipeline_id,
&clip_and_scroll.scroll_node_id,
&info.id,
info.external_id,
&frame_rect,
&content_rect,
clip_region,
info.scroll_sensitivity,
&clip_and_scroll,
&reference_frame_relative_offset
);
}
SpecificDisplayItem::StickyFrame(ref info) => {
@ -1084,14 +1071,10 @@ impl FrameContext {
roller.clip_scroll_tree,
);
let reference_frame_id = roller.clip_scroll_tree.root_reference_frame_id;
let scroll_frame_id = roller.clip_scroll_tree.topmost_scrolling_node_id;
roller.flatten_root(
&mut root_pipeline.display_list.iter(),
root_pipeline_id,
&root_pipeline.viewport_size,
reference_frame_id,
scroll_frame_id,
);
debug_assert!(roller.builder.picture_stack.is_empty());


@ -632,26 +632,25 @@ impl FrameBuilder {
pub fn push_reference_frame(
&mut self,
reference_frame_id: ClipId,
parent_id: Option<ClipId>,
pipeline_id: PipelineId,
rect: &LayerRect,
source_transform: Option<PropertyBinding<LayoutTransform>>,
source_perspective: Option<LayoutTransform>,
origin_in_parent_reference_frame: LayerVector2D,
root_for_pipeline: bool,
clip_scroll_tree: &mut ClipScrollTree,
) -> ClipId {
let new_id = clip_scroll_tree.add_reference_frame(
) {
let node = ClipScrollNode::new_reference_frame(
parent_id,
rect,
source_transform,
source_perspective,
origin_in_parent_reference_frame,
pipeline_id,
parent_id,
root_for_pipeline,
);
self.reference_frame_stack.push(new_id);
new_id
clip_scroll_tree.add_node(node, reference_frame_id);
self.reference_frame_stack.push(reference_frame_id);
}
pub fn current_reference_frame_id(&self) -> ClipId {
@ -686,13 +685,13 @@ impl FrameBuilder {
) -> ClipId {
let viewport_rect = LayerRect::new(LayerPoint::zero(), *viewport_size);
self.push_reference_frame(
ClipId::root_reference_frame(pipeline_id),
None,
pipeline_id,
&viewport_rect,
None,
None,
LayerVector2D::zero(),
true,
clip_scroll_tree,
);
@ -1512,39 +1511,64 @@ impl FrameBuilder {
tile_spacing = LayerSize::zero();
}
let prim_cpu = ImagePrimitiveCpu {
tile_spacing,
alpha_type,
stretch_size,
current_epoch: Epoch::invalid(),
source: ImageSource::Default,
key: ImageCacheKey {
request: ImageRequest {
key: image_key,
rendering: image_rendering,
tile: tile_offset,
},
texel_rect: sub_rect.map(|texel_rect| {
DeviceIntRect::new(
DeviceIntPoint::new(
texel_rect.uv0.x as i32,
texel_rect.uv0.y as i32,
),
DeviceIntSize::new(
(texel_rect.uv1.x - texel_rect.uv0.x) as i32,
(texel_rect.uv1.y - texel_rect.uv0.y) as i32,
),
)
}),
},
let request = ImageRequest {
key: image_key,
rendering: image_rendering,
tile: tile_offset,
};
self.add_primitive(
clip_and_scroll,
info,
Vec::new(),
PrimitiveContainer::Image(prim_cpu),
);
// See if conditions are met to run through the new
// image brush shader, which supports segments.
if tile_spacing == LayerSize::zero() &&
stretch_size == info.rect.size &&
sub_rect.is_none() &&
tile_offset.is_none() {
let prim = BrushPrimitive::new(
BrushKind::Image {
request,
current_epoch: Epoch::invalid(),
alpha_type,
},
None,
);
self.add_primitive(
clip_and_scroll,
info,
Vec::new(),
PrimitiveContainer::Brush(prim),
);
} else {
let prim_cpu = ImagePrimitiveCpu {
tile_spacing,
alpha_type,
stretch_size,
current_epoch: Epoch::invalid(),
source: ImageSource::Default,
key: ImageCacheKey {
request,
texel_rect: sub_rect.map(|texel_rect| {
DeviceIntRect::new(
DeviceIntPoint::new(
texel_rect.uv0.x as i32,
texel_rect.uv0.y as i32,
),
DeviceIntSize::new(
(texel_rect.uv1.x - texel_rect.uv0.x) as i32,
(texel_rect.uv1.y - texel_rect.uv0.y) as i32,
),
)
}),
},
};
self.add_primitive(
clip_and_scroll,
info,
Vec::new(),
PrimitiveContainer::Image(prim_cpu),
);
}
}
pub fn add_yuv_image(


@ -164,22 +164,21 @@ pub struct BrushInstance {
pub z: i32,
pub segment_index: i32,
pub edge_flags: EdgeAaSegmentMask,
pub user_data0: i32,
pub user_data1: i32,
pub user_data: [i32; 3],
}
impl From<BrushInstance> for PrimitiveInstance {
fn from(instance: BrushInstance) -> Self {
PrimitiveInstance {
data: [
instance.picture_address.0 as i32,
instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16,
instance.prim_address.as_int(),
((instance.clip_chain_rect_index.0 as i32) << 16) | instance.scroll_id.0 as i32,
instance.clip_task_address.0 as i32,
instance.z,
instance.segment_index | ((instance.edge_flags.bits() as i32) << 16),
instance.user_data0,
instance.user_data1,
instance.user_data[0],
instance.user_data[1],
instance.user_data[2],
]
}
}
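The instance-word packing above can be exercised in isolation. A small sketch with hypothetical helper names, assuming both addresses fit in 16 bits (which is what sharing one i32 word relies on, and what frees a slot for the third user_data entry):

// Sketch: two 16-bit values packed into one i32 instance word, mirroring
// `picture_address | clip_task_address << 16` and
// `segment_index | edge_flags << 16` above.
fn pack_u16_pair(lo: u16, hi: u16) -> i32 {
    (lo as i32) | ((hi as i32) << 16)
}

fn unpack_u16_pair(word: i32) -> (u16, u16) {
    ((word & 0xffff) as u16, ((word >> 16) & 0xffff) as u16)
}

fn main() {
    let word = pack_u16_pair(0x1234, 0x00ab);
    assert_eq!(word, 0x00ab_1234);
    assert_eq!(unpack_u16_pair(word), (0x1234, 0x00ab));
}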


@ -586,25 +586,59 @@ impl PicturePrimitive {
}
pub fn write_gpu_blocks(&self, request: &mut GpuDataRequest) {
// TODO(gw): It's unfortunate that we pay a fixed cost
// of 5 GPU blocks / picture, just due to the size
// of the color matrix. There aren't typically very
// many pictures in a scene, but we should consider
// making this more efficient for the common case.
match self.kind {
PictureKind::TextShadow { .. } => {
request.push([0.0; 4])
for _ in 0 .. 5 {
request.push([0.0; 4]);
}
}
PictureKind::Image { composite_mode, .. } => {
match composite_mode {
Some(PictureCompositeMode::Filter(FilterOp::ColorMatrix(m))) => {
// When we start pushing Image pictures through the brush path
// this may need to change as the number of GPU blocks written will
// need to be determinate.
for i in 0..5 {
request.push([m[i], m[i+5], m[i+10], m[i+15]]);
}
},
_ => request.push([0.0; 4]),
}
Some(PictureCompositeMode::Filter(filter)) => {
let amount = match filter {
FilterOp::Contrast(amount) => amount,
FilterOp::Grayscale(amount) => amount,
FilterOp::HueRotate(angle) => 0.01745329251 * angle,
FilterOp::Invert(amount) => amount,
FilterOp::Saturate(amount) => amount,
FilterOp::Sepia(amount) => amount,
FilterOp::Brightness(amount) => amount,
FilterOp::Opacity(_, amount) => amount,
// Go through different paths
FilterOp::Blur(..) |
FilterOp::DropShadow(..) |
FilterOp::ColorMatrix(_) => 0.0,
};
request.push([amount, 1.0 - amount, 0.0, 0.0]);
for _ in 0 .. 4 {
request.push([0.0; 4]);
}
}
_ => {
for _ in 0 .. 5 {
request.push([0.0; 4]);
}
}
}
}
PictureKind::BoxShadow { color, .. } => {
request.push(color.premultiplied());
for _ in 0 .. 4 {
request.push([0.0; 4]);
}
}
}
}
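The fixed five-block cost noted in the TODO above applies to the new amount-based filter path as well. A sketch, using a plain Vec<[f32; 4]> as a stand-in for GpuDataRequest and only two filter variants, of how such a filter is encoded (including the degrees-to-radians conversion behind the 0.01745329251 constant):

// Sketch of the five-GPU-block encoding for amount-based filters: the first
// block carries [amount, 1 - amount, 0, 0] and the remaining four are zeroed.
#[derive(Clone, Copy)]
enum FilterOp {
    Grayscale(f32),
    HueRotate(f32), // degrees
}

fn write_filter_blocks(filter: FilterOp, blocks: &mut Vec<[f32; 4]>) {
    let amount = match filter {
        FilterOp::Grayscale(amount) => amount,
        // HueRotate is specified in degrees but consumed in radians.
        FilterOp::HueRotate(angle) => angle.to_radians(),
    };
    blocks.push([amount, 1.0 - amount, 0.0, 0.0]);
    for _ in 0 .. 4 {
        blocks.push([0.0; 4]);
    }
}

fn main() {
    let mut blocks = Vec::new();
    write_filter_blocks(FilterOp::Grayscale(0.25), &mut blocks);
    assert_eq!(blocks.len(), 5);
    assert_eq!(blocks[0], [0.25, 0.75, 0.0, 0.0]);

    write_filter_blocks(FilterOp::HueRotate(90.0), &mut blocks);
    assert_eq!(blocks.len(), 10);
    assert!((blocks[5][0] - std::f32::consts::FRAC_PI_2).abs() < 1e-6);
}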


@ -190,14 +190,23 @@ pub enum BrushKind {
orientation: LineOrientation,
},
Picture,
Image {
request: ImageRequest,
current_epoch: Epoch,
alpha_type: AlphaType,
},
}
impl BrushKind {
fn supports_segments(&self) -> bool {
match *self {
BrushKind::Solid { .. } |
BrushKind::Picture => true,
_ => false,
BrushKind::Picture |
BrushKind::Image { .. } => true,
BrushKind::Mask { .. } |
BrushKind::Clear |
BrushKind::Line { .. } => false,
}
}
}
@ -274,7 +283,8 @@ impl BrushPrimitive {
fn write_gpu_blocks(&self, request: &mut GpuDataRequest) {
// has to match VECS_PER_SPECIFIC_BRUSH
match self.kind {
BrushKind::Picture => {
BrushKind::Picture |
BrushKind::Image { .. } => {
}
BrushKind::Solid { color } => {
request.push(color.premultiplied());
@ -1007,6 +1017,7 @@ impl PrimitiveStore {
BrushKind::Solid { ref color } => PrimitiveOpacity::from_alpha(color.a),
BrushKind::Mask { .. } => PrimitiveOpacity::translucent(),
BrushKind::Line { .. } => PrimitiveOpacity::translucent(),
BrushKind::Image { .. } => PrimitiveOpacity::translucent(),
BrushKind::Picture => {
// TODO(gw): This is not currently used. In the future
// we should detect opaque pictures.
@ -1298,7 +1309,36 @@ impl PrimitiveStore {
);
}
}
PrimitiveKind::Brush |
PrimitiveKind::Brush => {
let brush = &mut self.cpu_brushes[metadata.cpu_prim_index.0];
match brush.kind {
BrushKind::Image { request, ref mut current_epoch, .. } => {
let image_properties = frame_state
.resource_cache
.get_image_properties(request.key);
if let Some(image_properties) = image_properties {
// See if this image has been updated since we last hit this code path.
// If so, we need to update the opacity.
if image_properties.epoch != *current_epoch {
*current_epoch = image_properties.epoch;
metadata.opacity.is_opaque = image_properties.descriptor.is_opaque;
}
}
frame_state.resource_cache.request_image(
request,
frame_state.gpu_cache,
);
}
BrushKind::Mask { .. } |
BrushKind::Solid { .. } |
BrushKind::Clear |
BrushKind::Line { .. } |
BrushKind::Picture { .. } => {}
}
}
PrimitiveKind::AlignedGradient |
PrimitiveKind::AngleGradient |
PrimitiveKind::RadialGradient => {}


@ -83,6 +83,18 @@ const GPU_CACHE_RESIZE_TEST: bool = false;
/// Number of GPU blocks per UV rectangle provided for an image.
pub const BLOCKS_PER_UV_RECT: usize = 2;
const GPU_TAG_BRUSH_MIXBLEND: GpuProfileTag = GpuProfileTag {
label: "B_MixBlend",
color: debug_colors::MAGENTA,
};
const GPU_TAG_BRUSH_BLEND: GpuProfileTag = GpuProfileTag {
label: "B_Blend",
color: debug_colors::LIGHTBLUE,
};
const GPU_TAG_BRUSH_IMAGE: GpuProfileTag = GpuProfileTag {
label: "B_Image",
color: debug_colors::SPRINGGREEN,
};
const GPU_TAG_BRUSH_SOLID: GpuProfileTag = GpuProfileTag {
label: "B_Solid",
color: debug_colors::RED,
@ -123,10 +135,6 @@ const GPU_TAG_PRIM_YUV_IMAGE: GpuProfileTag = GpuProfileTag {
label: "YuvImage",
color: debug_colors::DARKGREEN,
};
const GPU_TAG_PRIM_BLEND: GpuProfileTag = GpuProfileTag {
label: "Blend",
color: debug_colors::LIGHTBLUE,
};
const GPU_TAG_PRIM_HW_COMPOSITE: GpuProfileTag = GpuProfileTag {
label: "HwComposite",
color: debug_colors::DODGERBLUE,
@ -135,10 +143,6 @@ const GPU_TAG_PRIM_SPLIT_COMPOSITE: GpuProfileTag = GpuProfileTag {
label: "SplitComposite",
color: debug_colors::DARKBLUE,
};
const GPU_TAG_PRIM_COMPOSITE: GpuProfileTag = GpuProfileTag {
label: "Composite",
color: debug_colors::MAGENTA,
};
const GPU_TAG_PRIM_TEXT_RUN: GpuProfileTag = GpuProfileTag {
label: "TextRun",
color: debug_colors::BLUE,
@ -223,15 +227,16 @@ impl BatchKind {
#[cfg(feature = "debugger")]
fn debug_name(&self) -> &'static str {
match *self {
BatchKind::Composite { .. } => "Composite",
BatchKind::HardwareComposite => "HardwareComposite",
BatchKind::SplitComposite => "SplitComposite",
BatchKind::Blend => "Blend",
BatchKind::Brush(kind) => {
match kind {
BrushBatchKind::Picture(..) => "Brush (Picture)",
BrushBatchKind::Solid => "Brush (Solid)",
BrushBatchKind::Line => "Brush (Line)",
BrushBatchKind::Image(..) => "Brush (Image)",
BrushBatchKind::Blend => "Brush (Blend)",
BrushBatchKind::MixBlend { .. } => "Brush (Composite)",
}
}
BatchKind::Transformable(_, batch_kind) => batch_kind.debug_name(),
@ -240,15 +245,16 @@ impl BatchKind {
fn gpu_sampler_tag(&self) -> GpuProfileTag {
match *self {
BatchKind::Composite { .. } => GPU_TAG_PRIM_COMPOSITE,
BatchKind::HardwareComposite => GPU_TAG_PRIM_HW_COMPOSITE,
BatchKind::SplitComposite => GPU_TAG_PRIM_SPLIT_COMPOSITE,
BatchKind::Blend => GPU_TAG_PRIM_BLEND,
BatchKind::Brush(kind) => {
match kind {
BrushBatchKind::Picture(..) => GPU_TAG_BRUSH_PICTURE,
BrushBatchKind::Solid => GPU_TAG_BRUSH_SOLID,
BrushBatchKind::Line => GPU_TAG_BRUSH_LINE,
BrushBatchKind::Image(..) => GPU_TAG_BRUSH_IMAGE,
BrushBatchKind::Blend => GPU_TAG_BRUSH_BLEND,
BrushBatchKind::MixBlend { .. } => GPU_TAG_BRUSH_MIXBLEND,
}
}
BatchKind::Transformable(_, batch_kind) => batch_kind.gpu_sampler_tag(),
@ -1596,6 +1602,9 @@ pub struct Renderer {
brush_picture_a8: BrushShader,
brush_solid: BrushShader,
brush_line: BrushShader,
brush_image: Vec<Option<BrushShader>>,
brush_blend: BrushShader,
brush_mix_blend: BrushShader,
/// These are "cache clip shaders". These shaders are used to
/// draw clip instances into the cached clip mask. The results
@ -1621,10 +1630,8 @@ pub struct Renderer {
ps_angle_gradient: PrimitiveShader,
ps_radial_gradient: PrimitiveShader,
ps_blend: LazilyCompiledShader,
ps_hw_composite: LazilyCompiledShader,
ps_split_composite: LazilyCompiledShader,
ps_composite: LazilyCompiledShader,
max_texture_size: u32,
@ -1819,6 +1826,20 @@ impl Renderer {
options.precache_shaders)
};
let brush_blend = try!{
BrushShader::new("brush_blend",
&mut device,
&[],
options.precache_shaders)
};
let brush_mix_blend = try!{
BrushShader::new("brush_mix_blend",
&mut device,
&[],
options.precache_shaders)
};
let brush_picture_a8 = try!{
BrushShader::new("brush_picture",
&mut device,
@ -1896,10 +1917,12 @@ impl Renderer {
// All image configuration.
let mut image_features = Vec::new();
let mut ps_image: Vec<Option<PrimitiveShader>> = Vec::new();
let mut ps_image = Vec::new();
let mut brush_image = Vec::new();
// PrimitiveShader is not clonable. Use push() to initialize the vec.
for _ in 0 .. IMAGE_BUFFER_KINDS.len() {
ps_image.push(None);
brush_image.push(None);
}
for buffer_kind in 0 .. IMAGE_BUFFER_KINDS.len() {
if IMAGE_BUFFER_KINDS[buffer_kind].has_platform_support(&gl_type) {
@ -1914,6 +1937,14 @@ impl Renderer {
options.precache_shaders)
};
ps_image[buffer_kind] = Some(shader);
let shader = try!{
BrushShader::new("brush_image",
&mut device,
&image_features,
options.precache_shaders)
};
brush_image[buffer_kind] = Some(shader);
}
image_features.clear();
}
@ -2011,22 +2042,6 @@ impl Renderer {
options.precache_shaders)
};
let ps_blend = try!{
LazilyCompiledShader::new(ShaderKind::Primitive,
"ps_blend",
&[],
&mut device,
options.precache_shaders)
};
let ps_composite = try!{
LazilyCompiledShader::new(ShaderKind::Primitive,
"ps_composite",
&[],
&mut device,
options.precache_shaders)
};
let ps_hw_composite = try!{
LazilyCompiledShader::new(ShaderKind::Primitive,
"ps_hardware_composite",
@ -2272,6 +2287,9 @@ impl Renderer {
brush_picture_a8,
brush_solid,
brush_line,
brush_image,
brush_blend,
brush_mix_blend,
cs_clip_rectangle,
cs_clip_border,
cs_clip_image,
@ -2284,10 +2302,8 @@ impl Renderer {
ps_gradient,
ps_angle_gradient,
ps_radial_gradient,
ps_blend,
ps_hw_composite,
ps_split_composite,
ps_composite,
debug: debug_renderer,
debug_flags,
backend_profile_counters: BackendProfileCounters::new(),
@ -3165,9 +3181,6 @@ impl Renderer {
scissor_rect: Option<DeviceIntRect>,
) {
match key.kind {
BatchKind::Composite { .. } => {
self.ps_composite.bind(&mut self.device, projection, 0, &mut self.renderer_errors);
}
BatchKind::HardwareComposite => {
self.ps_hw_composite
.bind(&mut self.device, projection, 0, &mut self.renderer_errors);
@ -3180,9 +3193,6 @@ impl Renderer {
&mut self.renderer_errors,
);
}
BatchKind::Blend => {
self.ps_blend.bind(&mut self.device, projection, 0, &mut self.renderer_errors);
}
BatchKind::Brush(brush_kind) => {
match brush_kind {
BrushBatchKind::Solid => {
@ -3194,6 +3204,18 @@ impl Renderer {
&mut self.renderer_errors,
);
}
BrushBatchKind::Image(image_buffer_kind) => {
self.brush_image[image_buffer_kind as usize]
.as_mut()
.expect("Unsupported image shader kind")
.bind(
&mut self.device,
key.blend_mode,
projection,
0,
&mut self.renderer_errors,
);
}
BrushBatchKind::Picture(target_kind) => {
let shader = match target_kind {
BrushImageSourceKind::Alpha => &mut self.brush_picture_a8,
@ -3217,6 +3239,24 @@ impl Renderer {
&mut self.renderer_errors,
);
}
BrushBatchKind::Blend => {
self.brush_blend.bind(
&mut self.device,
key.blend_mode,
projection,
0,
&mut self.renderer_errors,
);
}
BrushBatchKind::MixBlend { .. } => {
self.brush_mix_blend.bind(
&mut self.device,
key.blend_mode,
projection,
0,
&mut self.renderer_errors,
);
}
}
}
BatchKind::Transformable(transform_kind, batch_kind) => match batch_kind {
@ -3298,7 +3338,7 @@ impl Renderer {
};
// Handle special case readback for composites.
if let BatchKind::Composite { task_id, source_id, backdrop_id } = key.kind {
if let BatchKind::Brush(BrushBatchKind::MixBlend { task_id, source_id, backdrop_id }) = key.kind {
if scissor_rect.is_some() {
self.device.disable_scissor();
}
@ -4708,11 +4748,18 @@ impl Renderer {
self.brush_picture_a8.deinit(&mut self.device);
self.brush_solid.deinit(&mut self.device);
self.brush_line.deinit(&mut self.device);
self.brush_blend.deinit(&mut self.device);
self.brush_mix_blend.deinit(&mut self.device);
self.cs_clip_rectangle.deinit(&mut self.device);
self.cs_clip_image.deinit(&mut self.device);
self.cs_clip_border.deinit(&mut self.device);
self.ps_text_run.deinit(&mut self.device);
self.ps_text_run_dual_source.deinit(&mut self.device);
for shader in self.brush_image {
if let Some(shader) = shader {
shader.deinit(&mut self.device);
}
}
for shader in self.ps_image {
if let Some(shader) = shader {
shader.deinit(&mut self.device);
@ -4731,10 +4778,8 @@ impl Renderer {
self.ps_gradient.deinit(&mut self.device);
self.ps_angle_gradient.deinit(&mut self.device);
self.ps_radial_gradient.deinit(&mut self.device);
self.ps_blend.deinit(&mut self.device);
self.ps_hw_composite.deinit(&mut self.device);
self.ps_split_composite.deinit(&mut self.device);
self.ps_composite.deinit(&mut self.device);
#[cfg(feature = "capture")]
self.device.delete_fbo(self.read_fbo);
#[cfg(feature = "replay")]


@ -857,11 +857,6 @@ impl ResourceCache {
}
};
let filter = match request.rendering {
ImageRendering::Pixelated => TextureFilter::Nearest,
ImageRendering::Auto | ImageRendering::CrispEdges => TextureFilter::Linear,
};
let descriptor = if let Some(tile) = request.tile {
let tile_size = image_template.tiling.unwrap();
let image_descriptor = &image_template.descriptor;
@ -897,6 +892,31 @@ impl ResourceCache {
image_template.descriptor.clone()
};
let filter = match request.rendering {
ImageRendering::Pixelated => {
TextureFilter::Nearest
}
ImageRendering::Auto | ImageRendering::CrispEdges => {
// If the texture uses linear filtering, enable mipmaps and
// trilinear filtering, for better image quality. We only
// support this for now on textures that are not placed
// into the shared cache. This accounts for any image
// that is > 512 in either dimension, so it should cover
// the most important use cases. We may want to support
// mip-maps on shared cache items in the future.
if descriptor.width > 512 &&
descriptor.height > 512 &&
!self.texture_cache.is_allowed_in_shared_cache(
TextureFilter::Linear,
&descriptor,
) {
TextureFilter::Trilinear
} else {
TextureFilter::Linear
}
}
};
let entry = self.cached_images.get_mut(&request).as_mut().unwrap();
self.texture_cache.update(
&mut entry.texture_cache_handle,
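For linear-filtered images the selection logic above reduces to a small predicate, since being larger than 512 pixels in either dimension already rules out the shared cache. A sketch under that assumption, with hypothetical names and simplified types:

// Sketch of the new filter-selection policy: Pixelated images stay Nearest,
// and linearly filtered images large enough to need a standalone texture
// (> 512 in both dimensions) get mipmapped Trilinear sampling.
#[derive(Clone, Copy, PartialEq, Debug)]
enum ImageRendering {
    Auto,
    CrispEdges,
    Pixelated,
}

#[derive(Clone, Copy, PartialEq, Debug)]
enum TextureFilter {
    Nearest,
    Linear,
    Trilinear,
}

fn choose_filter(rendering: ImageRendering, width: u32, height: u32) -> TextureFilter {
    match rendering {
        ImageRendering::Pixelated => TextureFilter::Nearest,
        ImageRendering::Auto | ImageRendering::CrispEdges => {
            if width > 512 && height > 512 {
                TextureFilter::Trilinear
            } else {
                TextureFilter::Linear
            }
        }
    }
}

fn main() {
    assert_eq!(choose_filter(ImageRendering::Auto, 1024, 1024), TextureFilter::Trilinear);
    assert_eq!(choose_filter(ImageRendering::CrispEdges, 1024, 256), TextureFilter::Linear);
    assert_eq!(choose_filter(ImageRendering::Pixelated, 1024, 1024), TextureFilter::Nearest);
}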


@ -384,7 +384,9 @@ impl TextureCache {
(ImageFormat::BGRA8, TextureFilter::Nearest) => &mut self.array_rgba8_nearest,
(ImageFormat::RGBAF32, _) |
(ImageFormat::RG8, _) |
(ImageFormat::R8, TextureFilter::Nearest) => unreachable!(),
(ImageFormat::R8, TextureFilter::Nearest) |
(ImageFormat::R8, TextureFilter::Trilinear) |
(ImageFormat::BGRA8, TextureFilter::Trilinear) => unreachable!(),
};
&mut texture_array.regions[region_index as usize]
@ -605,6 +607,8 @@ impl TextureCache {
(ImageFormat::BGRA8, TextureFilter::Nearest) => &mut self.array_rgba8_nearest,
(ImageFormat::RGBAF32, _) |
(ImageFormat::R8, TextureFilter::Nearest) |
(ImageFormat::R8, TextureFilter::Trilinear) |
(ImageFormat::BGRA8, TextureFilter::Trilinear) |
(ImageFormat::RG8, _) => unreachable!(),
};
@ -645,6 +649,34 @@ impl TextureCache {
)
}
// Returns true if the given image descriptor *may* be
// placed in the shared texture cache.
pub fn is_allowed_in_shared_cache(
&self,
filter: TextureFilter,
descriptor: &ImageDescriptor,
) -> bool {
let mut allowed_in_shared_cache = true;
// TODO(gw): For now, anything that requests nearest filtering and isn't BGRA8
// just fails to allocate in a texture page, and gets a standalone
// texture. This is probably rare enough that it can be fixed up later.
if filter == TextureFilter::Nearest &&
descriptor.format != ImageFormat::BGRA8 {
allowed_in_shared_cache = false;
}
// Anything larger than 512 goes in a standalone texture.
// TODO(gw): If we find pages that suffer from batch breaks in this
// case, add support for storing these in a standalone
// texture array.
if descriptor.width > 512 || descriptor.height > 512 {
allowed_in_shared_cache = false;
}
allowed_in_shared_cache
}
// Allocate storage for a given image. This attempts to allocate
// from the shared cache, but falls back to standalone texture
// if the image is too large, or the cache is full.
@ -658,27 +690,15 @@ impl TextureCache {
assert!(descriptor.width > 0 && descriptor.height > 0);
// Work out if this image qualifies to go in the shared (batching) cache.
let mut allowed_in_shared_cache = true;
let allowed_in_shared_cache = self.is_allowed_in_shared_cache(
filter,
&descriptor,
);
let mut allocated_in_shared_cache = true;
let mut new_cache_entry = None;
let size = DeviceUintSize::new(descriptor.width, descriptor.height);
let frame_id = self.frame_id;
// TODO(gw): For now, anything that requests nearest filtering and isn't BGRA8
// just fails to allocate in a texture page, and gets a standalone
// texture. This is probably rare enough that it can be fixed up later.
if filter == TextureFilter::Nearest && descriptor.format != ImageFormat::BGRA8 {
allowed_in_shared_cache = false;
}
// Anything larger than 512 goes in a standalone texture.
// TODO(gw): If we find pages that suffer from batch breaks in this
// case, add support for storing these in a standalone
// texture array.
if descriptor.width > 512 || descriptor.height > 512 {
allowed_in_shared_cache = false;
}
// If it's allowed in the cache, see if there is a spot for it.
if allowed_in_shared_cache {
new_cache_entry = self.allocate_from_shared_cache(


@ -590,15 +590,15 @@ impl RenderTarget for AlphaRenderTarget {
z: 0,
segment_index: 0,
edge_flags: EdgeAaSegmentMask::empty(),
user_data0: 0,
user_data1: 0,
user_data: [0; 3],
};
let brush = &ctx.prim_store.cpu_brushes[sub_metadata.cpu_prim_index.0];
let batch = match brush.kind {
BrushKind::Solid { .. } |
BrushKind::Clear |
BrushKind::Picture |
BrushKind::Line { .. } => {
BrushKind::Line { .. } |
BrushKind::Image { .. } => {
unreachable!("bug: unexpected brush here");
}
BrushKind::Mask { ref kind, .. } => {


@ -1,3 +1,7 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
extern crate angle;
extern crate webrender;
@ -62,14 +66,6 @@ const SHADERS: &[Shader] = &[
name: "ps_radial_gradient",
features: PRIM_FEATURES,
},
Shader {
name: "ps_blend",
features: PRIM_FEATURES,
},
Shader {
name: "ps_composite",
features: PRIM_FEATURES,
},
Shader {
name: "ps_hardware_composite",
features: PRIM_FEATURES,
@ -103,6 +99,14 @@ const SHADERS: &[Shader] = &[
name: "brush_picture",
features: &["COLOR_TARGET", "ALPHA_TARGET"],
},
Shader {
name: "brush_blend",
features: &[],
},
Shader {
name: "brush_composite",
features: &[],
},
Shader {
name: "brush_line",
features: &[],
@ -141,6 +145,11 @@ fn validate_shaders() {
}
fn validate(validator: &ShaderValidator, name: &str, source: String) {
// Check for each `switch` to have a `default`, see
// https://github.com/servo/webrender/wiki/Driver-issues#lack-of-default-case-in-a-switch
assert_eq!(source.matches("switch").count(), source.matches("default:").count(),
"Shader '{}' doesn't have all `switch` covered with `default` cases", name);
// Run Angle validator
match validator.compile_and_translate(&[&source]) {
Ok(_) => {
println!("Shader translated succesfully: {}", name);


@ -2,4 +2,4 @@
<div style="background: blue; width: 100%; height: 100%; position: absolute; overflow: hidden;">
<div style="background: red; height: 40px; position: absolute; width: 40px; box-shadow: 10px 10px 10px black;"></div>
</div>
</body>
</body>


@ -26,4 +26,4 @@
<div>
<iframe src="data:,XXX"></iframe>
</div>
</div>


@ -22,4 +22,4 @@
</style>
<div></div>
<div></div>


@ -8,4 +8,4 @@
background-image: url(http://www.google.com/favicon.ico);
}
</style>
<div></div>
<div></div>


@ -1,2 +1,2 @@
<h1>foobar</h1>
<div style="height: 100px; width: 100px; box-shadow: 0px 0px 50px rgba(255,0,0,0.2);"></div>
<div style="height: 100px; width: 100px; box-shadow: 0px 0px 50px rgba(255,0,0,0.2);"></div>


@ -7,4 +7,4 @@
</style>
<div style="opacity: 0.999; color: white">FOOBAR</div>
<div style="opacity: 1.0; color: white">FOOBAR</div>
<div style="opacity: 1.0; color: white">FOOBAR</div>


@ -1 +1 @@
<div style="opacity: 0.8">FOOBAR</div>
<div style="opacity: 0.8">FOOBAR</div>


@ -1,7 +1,7 @@
<style>
.green {
background: green;
background: green;
position: absolute;
width: 100%;
height: 100px;
@ -21,4 +21,3 @@
<div class="green">
<div class="red"></div>
</div>


@ -21,4 +21,4 @@
<div class="green">
<div class="red"></div>
</div>
</div>


@ -10,4 +10,4 @@
height: 200px;
}
</style>
<iframe src="bug_203b.html"></iframe>
<iframe src="bug_203b.html"></iframe>


@ -11,4 +11,4 @@
<body>
<div></div>
<h1>content</h1>
</body>
</body>


@ -2,11 +2,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use {BuiltDisplayList, BuiltDisplayListDescriptor, ClipId, ColorF, DeviceIntPoint, DeviceUintRect};
use {DeviceUintSize, ExternalScrollId, FontInstanceKey, FontInstanceOptions};
use {FontInstancePlatformOptions, FontKey, FontVariation, GlyphDimensions, GlyphKey, ImageData};
use {ImageDescriptor, ImageKey, ItemTag, LayoutPoint, LayoutSize, LayoutTransform, LayoutVector2D};
use {NativeFontHandle, WorldPoint};
use app_units::Au;
use channel::{self, MsgSender, Payload, PayloadSender, PayloadSenderHelperMethods};
use std::cell::Cell;
@ -14,6 +9,11 @@ use std::fmt;
use std::marker::PhantomData;
use std::path::PathBuf;
use std::u32;
use {BuiltDisplayList, BuiltDisplayListDescriptor, ClipId, ColorF, DeviceIntPoint, DeviceUintRect};
use {DeviceUintSize, ExternalScrollId, FontInstanceKey, FontInstanceOptions};
use {FontInstancePlatformOptions, FontKey, FontVariation, GlyphDimensions, GlyphKey, ImageData};
use {ImageDescriptor, ImageKey, ItemTag, LayoutPoint, LayoutSize, LayoutTransform, LayoutVector2D};
use {NativeFontHandle, WorldPoint};
pub type TileSize = u16;
/// Documents are rendered in the ascending order of their associated layer values.


@ -2,12 +2,10 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use ipc_channel::ipc::{self, IpcBytesReceiver, IpcBytesSender, IpcReceiver, IpcSender};
use serde::{Deserialize, Serialize};
use std::io::{Error, ErrorKind};
use std::io;
use std::error;
use ipc_channel::ipc::{self, IpcSender, IpcReceiver, IpcBytesSender, IpcBytesReceiver};
use std::{error, io};
///
/// Handles the channel implementation when IPC is enabled.


@ -2,10 +2,9 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use serde::{Deserialize, Serialize};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use std::io;
use std::io::{Error, ErrorKind};
use serde::{Deserializer, Serializer};
use std::sync::mpsc;
///
@ -34,7 +33,6 @@ pub struct MsgReceiver<T> {
impl<T> MsgReceiver<T> {
pub fn recv(&self) -> Result<T, Error> {
use std::io;
use std::error::Error;
self.rx.recv().map_err(|e| io::Error::new(ErrorKind::Other, e.description()))
}


@ -2,14 +2,13 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use {ColorF, FontInstanceKey, ImageKey, LayerPixel, LayoutPixel, LayoutPoint, LayoutRect,
LayoutSize, LayoutTransform};
use {GlyphOptions, LayoutVector2D, PipelineId, PropertyBinding};
use euclid::{SideOffsets2D, TypedRect};
use std::ops::Not;
#[cfg(any(feature = "serialize", feature = "deserialize"))]
use GlyphInstance;
use euclid::{SideOffsets2D, TypedRect};
use std::ops::Not;
use {ColorF, FontInstanceKey, GlyphOptions, ImageKey, LayerPixel, LayoutPixel, LayoutPoint};
use {LayoutRect, LayoutSize, LayoutTransform, LayoutVector2D, PipelineId, PropertyBinding};
// NOTE: some of these structs have an "IMPLICIT" comment.
// This indicates that the BuiltDisplayList will have serialized
@ -213,7 +212,8 @@ pub enum ScrollSensitivity {
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct ScrollFrameDisplayItem {
pub id: ClipId,
pub clip_id: ClipId,
pub scroll_frame_id: ClipId,
pub external_id: Option<ExternalScrollId>,
pub image_mask: Option<ImageMask>,
pub scroll_sensitivity: ScrollSensitivity,
@ -455,6 +455,7 @@ pub struct StackingContext {
pub transform_style: TransformStyle,
pub perspective: Option<LayoutTransform>,
pub mix_blend_mode: MixBlendMode,
pub reference_frame_id: Option<ClipId>,
} // IMPLICIT: filters: Vec<FilterOp>
#[repr(u32)]
@ -509,6 +510,7 @@ pub enum FilterOp {
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]
pub struct IframeDisplayItem {
pub clip_id: ClipId,
pub pipeline_id: PipelineId,
}
@ -753,29 +755,30 @@ pub struct ClipChainId(pub u64, pub PipelineId);
pub enum ClipId {
Clip(u64, PipelineId),
ClipChain(ClipChainId),
DynamicallyAddedNode(u64, PipelineId),
}
const ROOT_REFERENCE_FRAME_CLIP_ID: u64 = 0;
const ROOT_SCROLL_NODE_CLIP_ID: u64 = 1;
impl ClipId {
pub fn root_scroll_node(pipeline_id: PipelineId) -> ClipId {
ClipId::Clip(0, pipeline_id)
ClipId::Clip(ROOT_SCROLL_NODE_CLIP_ID, pipeline_id)
}
pub fn root_reference_frame(pipeline_id: PipelineId) -> ClipId {
ClipId::DynamicallyAddedNode(0, pipeline_id)
ClipId::Clip(ROOT_REFERENCE_FRAME_CLIP_ID, pipeline_id)
}
pub fn pipeline_id(&self) -> PipelineId {
match *self {
ClipId::Clip(_, pipeline_id) |
ClipId::ClipChain(ClipChainId(_, pipeline_id)) |
ClipId::DynamicallyAddedNode(_, pipeline_id) => pipeline_id,
ClipId::ClipChain(ClipChainId(_, pipeline_id)) => pipeline_id,
}
}
pub fn is_root_scroll_node(&self) -> bool {
match *self {
ClipId::Clip(0, _) => true,
ClipId::Clip(1, _) => true,
_ => false,
}
}
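A self-contained sketch of the new numbering, with PipelineId simplified and the remaining ClipId variants omitted, showing that both root nodes now live in the ordinary Clip namespace:

// Sketch of the new root-node numbering: the root reference frame and the root
// scroll node are plain Clip ids (0 and 1), so the separate
// DynamicallyAddedNode variant is no longer needed.
#[derive(Clone, Copy, Debug, PartialEq)]
struct PipelineId(u32, u32); // simplified stand-in for the real type

#[derive(Clone, Copy, Debug, PartialEq)]
enum ClipId {
    Clip(u64, PipelineId),
}

const ROOT_REFERENCE_FRAME_CLIP_ID: u64 = 0;
const ROOT_SCROLL_NODE_CLIP_ID: u64 = 1;

impl ClipId {
    fn root_reference_frame(pipeline_id: PipelineId) -> ClipId {
        ClipId::Clip(ROOT_REFERENCE_FRAME_CLIP_ID, pipeline_id)
    }
    fn root_scroll_node(pipeline_id: PipelineId) -> ClipId {
        ClipId::Clip(ROOT_SCROLL_NODE_CLIP_ID, pipeline_id)
    }
    fn is_root_scroll_node(&self) -> bool {
        match *self {
            ClipId::Clip(ROOT_SCROLL_NODE_CLIP_ID, _) => true,
            _ => false,
        }
    }
}

fn main() {
    let pipeline = PipelineId(0, 1);
    assert!(ClipId::root_scroll_node(pipeline).is_root_scroll_node());
    assert!(!ClipId::root_reference_frame(pipeline).is_root_scroll_node());
}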


@ -2,6 +2,17 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use bincode;
use euclid::SideOffsets2D;
#[cfg(feature = "deserialize")]
use serde::de::Deserializer;
#[cfg(feature = "serialize")]
use serde::ser::{Serializer, SerializeSeq};
use serde::{Deserialize, Serialize};
use std::io::{Read, Write};
use std::marker::PhantomData;
use std::{io, mem, ptr, slice};
use time::precise_time_ns;
use {AlphaType, BorderDetails, BorderDisplayItem, BorderRadius, BorderWidths, BoxShadowClipMode};
use {BoxShadowDisplayItem, ClipAndScrollInfo, ClipChainId, ClipChainItem, ClipDisplayItem, ClipId};
use {ColorF, ComplexClipRegion, DisplayItem, ExtendMode, ExternalScrollId, FilterOp};
@ -13,24 +24,14 @@ use {PropertyBinding, PushStackingContextDisplayItem, RadialGradient, RadialGrad
use {RectangleDisplayItem, ScrollFrameDisplayItem, ScrollPolicy, ScrollSensitivity, Shadow};
use {SpecificDisplayItem, StackingContext, StickyFrameDisplayItem, StickyOffsetBounds};
use {TextDisplayItem, TransformStyle, YuvColorSpace, YuvData, YuvImageDisplayItem};
use bincode;
use euclid::SideOffsets2D;
use serde::{Deserialize, Serialize};
use std::io::{Read, Write};
use std::{io, mem, ptr};
use std::marker::PhantomData;
use std::slice;
use time::precise_time_ns;
#[cfg(feature = "deserialize")]
use serde::de::Deserializer;
#[cfg(feature = "serialize")]
use serde::ser::{Serializer, SerializeSeq};
// We don't want to push a long text-run. If a text-run is too long, split it into several parts.
// This needs to be set to (renderer::MAX_VERTEX_TEXTURE_WIDTH - VECS_PER_PRIM_HEADER - VECS_PER_TEXT_RUN) * 2
pub const MAX_TEXT_RUN_LENGTH: usize = 2038;
// We start at 2, because the root reference frame is always 0 and the root scroll node is always 1.
const FIRST_CLIP_ID: u64 = 2;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub struct ItemRange<T> {
@ -386,7 +387,8 @@ impl<'de, 'a, T: Deserialize<'de>> AuxIter<'a, T> {
let size: usize = if data.len() == 0 {
0 // Accept empty ItemRanges pointing anywhere
} else {
bincode::deserialize_from(&mut UnsafeReader::new(&mut data), bincode::Infinite).expect("MEH: malicious input?")
bincode::deserialize_from(&mut UnsafeReader::new(&mut data), bincode::Infinite)
.expect("MEH: malicious input?")
};
AuxIter {
@ -644,7 +646,7 @@ impl<'a> Write for SizeCounter {
fn serialize_fast<T: Serialize>(vec: &mut Vec<u8>, e: &T) {
// manually counting the size is faster than vec.reserve(bincode::serialized_size(&e) as usize) for some reason
let mut size = SizeCounter(0);
bincode::serialize_into(&mut size,e , bincode::Infinite).unwrap();
bincode::serialize_into(&mut size, e, bincode::Infinite).unwrap();
vec.reserve(size.0);
let old_len = vec.len();
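The size-counting trick above relies on a writer that discards bytes and only tallies them, so the serialized length can be measured before reserving space. A minimal std-only sketch of such a counter (independent of the bincode-specific code here):

use std::io::{self, Write};

// A Write sink that throws the bytes away and just counts them. Running a
// serializer against this first gives the exact length to reserve in the
// destination Vec, avoiding repeated reallocation during the real pass.
struct SizeCounter(usize);

impl Write for SizeCounter {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0 += buf.len();
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() {
    let mut counter = SizeCounter(0);
    counter.write_all(b"hello, ").unwrap();
    counter.write_all(b"world").unwrap();
    assert_eq!(counter.0, 12);
}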
@ -809,9 +811,6 @@ impl DisplayListBuilder {
) -> Self {
let start_time = precise_time_ns();
// We start at 1 here, because the root scroll id is always 0.
const FIRST_CLIP_ID: u64 = 1;
DisplayListBuilder {
data: Vec::with_capacity(capacity),
pipeline_id,
@ -1312,6 +1311,12 @@ impl DisplayListBuilder {
mix_blend_mode: MixBlendMode,
filters: Vec<FilterOp>,
) {
let reference_frame_id = if transform.is_some() || perspective.is_some() {
Some(self.generate_clip_id())
} else {
None
};
let item = SpecificDisplayItem::PushStackingContext(PushStackingContextDisplayItem {
stacking_context: StackingContext {
scroll_policy,
@ -1319,6 +1324,7 @@ impl DisplayListBuilder {
transform_style,
perspective,
mix_blend_mode,
reference_frame_id,
},
});
@ -1386,19 +1392,24 @@ impl DisplayListBuilder {
I: IntoIterator<Item = ComplexClipRegion>,
I::IntoIter: ExactSizeIterator + Clone,
{
let id = self.generate_clip_id();
let clip_id = self.generate_clip_id();
let scroll_frame_id = self.generate_clip_id();
let item = SpecificDisplayItem::ScrollFrame(ScrollFrameDisplayItem {
id,
clip_id,
scroll_frame_id,
external_id,
image_mask,
scroll_sensitivity,
});
let info = LayoutPrimitiveInfo::with_clip_rect(content_rect, clip_rect);
let scrollinfo = ClipAndScrollInfo::simple(parent);
self.push_item_with_clip_scroll_info(item, &info, scrollinfo);
self.push_item_with_clip_scroll_info(
item,
&LayoutPrimitiveInfo::with_clip_rect(content_rect, clip_rect),
ClipAndScrollInfo::simple(parent),
);
self.push_iter(complex_clips);
id
scroll_frame_id
}
pub fn define_clip_chain<I>(
@ -1411,7 +1422,7 @@ impl DisplayListBuilder {
I::IntoIter: ExactSizeIterator + Clone,
{
let id = self.generate_clip_chain_id();
self.push_new_empty_item(SpecificDisplayItem::ClipChain(ClipChainItem { id, parent}));
self.push_new_empty_item(SpecificDisplayItem::ClipChain(ClipChainItem { id, parent }));
self.push_iter(clips);
id
}
@ -1502,7 +1513,8 @@ impl DisplayListBuilder {
pub fn push_iframe(&mut self, info: &LayoutPrimitiveInfo, pipeline_id: PipelineId) {
let item = SpecificDisplayItem::Iframe(IframeDisplayItem {
pipeline_id: pipeline_id,
clip_id: self.generate_clip_id(),
pipeline_id,
});
self.push_item(item, info);
}


@ -2,7 +2,6 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use {ColorU, IdNamespace, LayoutPoint};
#[cfg(target_os = "macos")]
use core_foundation::string::CFString;
#[cfg(target_os = "macos")]
@ -16,6 +15,7 @@ use serde::ser::{Serialize, Serializer};
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use {ColorU, IdNamespace, LayoutPoint};
#[cfg(not(any(target_os = "macos", target_os = "windows")))]

View File

@ -2,11 +2,9 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use {DevicePoint, DeviceUintRect};
use {TileOffset, TileSize};
use IdNamespace;
use font::{FontInstanceKey, FontKey, FontTemplate};
use std::sync::Arc;
use {DevicePoint, DeviceUintRect, IdNamespace, TileOffset, TileSize};
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]


@ -12,6 +12,12 @@ extern crate bitflags;
extern crate byteorder;
#[cfg(feature = "nightly")]
extern crate core;
#[cfg(target_os = "macos")]
extern crate core_foundation;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "windows")]
extern crate dwrote;
extern crate euclid;
#[cfg(feature = "ipc")]
extern crate ipc_channel;
@ -20,23 +26,15 @@ extern crate serde;
extern crate serde_derive;
extern crate time;
#[cfg(target_os = "macos")]
extern crate core_foundation;
#[cfg(target_os = "macos")]
extern crate core_graphics;
#[cfg(target_os = "windows")]
extern crate dwrote;
mod units;
mod api;
mod color;
pub mod channel;
mod color;
mod display_item;
mod display_list;
mod font;
mod image;
mod units;
pub use api::*;
pub use color::*;


@ -1 +1 @@
342bc314db94aa439b2001249c5f24ccfcbccc22
4af31b8aa79d5a1f3c0242dea6be4876c71075c5


@ -1025,6 +1025,63 @@ impl YamlFrameReader {
);
}
fn handle_yuv_image(
&mut self,
dl: &mut DisplayListBuilder,
wrench: &mut Wrench,
item: &Yaml,
info: &mut LayoutPrimitiveInfo,
) {
// TODO(gw): Support other YUV color spaces.
let color_space = YuvColorSpace::Rec709;
let yuv_data = match item["format"].as_str().expect("no format supplied") {
"planar" => {
let y_path = rsrc_path(&item["src-y"], &self.aux_dir);
let (y_key, _) = self.add_or_get_image(&y_path, None, wrench);
let u_path = rsrc_path(&item["src-u"], &self.aux_dir);
let (u_key, _) = self.add_or_get_image(&u_path, None, wrench);
let v_path = rsrc_path(&item["src-v"], &self.aux_dir);
let (v_key, _) = self.add_or_get_image(&v_path, None, wrench);
YuvData::PlanarYCbCr(y_key, u_key, v_key)
}
"nv12" => {
let y_path = rsrc_path(&item["src-y"], &self.aux_dir);
let (y_key, _) = self.add_or_get_image(&y_path, None, wrench);
let uv_path = rsrc_path(&item["src-uv"], &self.aux_dir);
let (uv_key, _) = self.add_or_get_image(&uv_path, None, wrench);
YuvData::NV12(y_key, uv_key)
}
"interleaved" => {
let yuv_path = rsrc_path(&item["src"], &self.aux_dir);
let (yuv_key, _) = self.add_or_get_image(&yuv_path, None, wrench);
YuvData::InterleavedYCbCr(yuv_key)
}
_ => {
panic!("unexpected yuv format");
}
};
let bounds = item["bounds"].as_vec_f32().unwrap();
info.rect = LayoutRect::new(
LayoutPoint::new(bounds[0], bounds[1]),
LayoutSize::new(bounds[2], bounds[3]),
);
dl.push_yuv_image(
&info,
yuv_data,
color_space,
ImageRendering::Auto,
);
}
fn handle_image(
&mut self,
dl: &mut DisplayListBuilder,
@ -1263,6 +1320,7 @@ impl YamlFrameReader {
"clear-rect" => self.handle_clear_rect(dl, item, &mut info),
"line" => self.handle_line(dl, item, &mut info),
"image" => self.handle_image(dl, wrench, item, &mut info),
"yuv-image" => self.handle_yuv_image(dl, wrench, item, &mut info),
"text" | "glyphs" => self.handle_text(dl, wrench, item, &mut info),
"scroll-frame" => self.handle_scroll_frame(dl, wrench, item),
"sticky-frame" => self.handle_sticky_frame(dl, wrench, item),


@ -1032,7 +1032,7 @@ impl YamlFrameWriter {
}
ScrollFrame(item) => {
str_node(&mut v, "type", "scroll-frame");
usize_node(&mut v, "id", clip_id_mapper.add_id(item.id));
usize_node(&mut v, "id", clip_id_mapper.add_id(item.scroll_frame_id));
size_node(&mut v, "content-size", &base.rect().size);
rect_node(&mut v, "bounds", &base.local_clip().clip_rect());