Bug 1461342 - Update webrender to commit 672f480af48b0ad69c1b2781151278d99816763a. r=jrmuizel

MozReview-Commit-ID: 2mwEtwE2flc

--HG--
extra : rebase_source : d12a363446b07d2c4525bdd9eec42167097ccbe9
Kartikaya Gupta 2018-05-18 08:28:15 -04:00
parent e5e709834d
commit f7cfb71dcb
25 changed files with 268 additions and 195 deletions

View File

@ -51,7 +51,7 @@ fn render_blob(
// Allocate storage for the result. Right now the resource cache expects the
// tiles to have no stride or offset.
let mut texels = Vec::with_capacity((descriptor.width * descriptor.height * 4) as usize);
let mut texels = Vec::with_capacity((descriptor.size.width * descriptor.size.height * 4) as usize);
// Generate a per-tile pattern to see it in the demo. For a real use case it would not
// make sense for the rendered content to depend on its tile.
@ -60,8 +60,8 @@ fn render_blob(
None => true,
};
for y in 0 .. descriptor.height {
for x in 0 .. descriptor.width {
for y in 0 .. descriptor.size.height {
for x in 0 .. descriptor.size.width {
// Apply the tile's offset. This is important: all drawing commands should be
// translated by this offset to give correct results with tiled blob images.
let x2 = x + descriptor.offset.x as u32;
@ -97,8 +97,7 @@ fn render_blob(
Ok(api::RasterizedBlobImage {
data: texels,
width: descriptor.width,
height: descriptor.height,
size: descriptor.size,
})
}
@ -138,12 +137,12 @@ impl CheckerboardRenderer {
}
impl api::BlobImageRenderer for CheckerboardRenderer {
fn add(&mut self, key: api::ImageKey, cmds: api::BlobImageData, _: Option<api::TileSize>) {
fn add(&mut self, key: api::ImageKey, cmds: Arc<api::BlobImageData>, _: Option<api::TileSize>) {
self.image_cmds
.insert(key, Arc::new(deserialize_blob(&cmds[..]).unwrap()));
}
fn update(&mut self, key: api::ImageKey, cmds: api::BlobImageData, _dirty_rect: Option<api::DeviceUintRect>) {
fn update(&mut self, key: api::ImageKey, cmds: Arc<api::BlobImageData>, _dirty_rect: Option<api::DeviceUintRect>) {
// Here, updating is just replacing the current version of the commands with
// the new one (no incremental updates).
self.image_cmds

View File

@ -74,7 +74,7 @@ impl Example for App {
);
// red rect under the iframe: if this is visible, things have gone wrong
builder.push_rect(&info, ColorF::new(1.0, 0.0, 0.0, 1.0));
builder.push_iframe(&info, sub_pipeline_id);
builder.push_iframe(&info, sub_pipeline_id, false);
builder.pop_stacking_context();
}
}

View File

@ -24,6 +24,14 @@ void brush_vs(
#define BRUSH_FLAG_SEGMENT_REPEAT_X 4
#define BRUSH_FLAG_SEGMENT_REPEAT_Y 8
//Note: these have to match `gpu_types` constants
#define INT_BITS (31)
#define CLIP_CHAIN_RECT_BITS (22)
#define SEGMENT_BITS (INT_BITS - CLIP_CHAIN_RECT_BITS)
#define EDGE_FLAG_BITS (4)
#define BRUSH_FLAG_BITS (4)
#define CLIP_SCROLL_INDEX_BITS (INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS)
struct BrushInstance {
int picture_address;
int prim_address;
@ -43,12 +51,12 @@ BrushInstance load_brush() {
bi.picture_address = aData0.x & 0xffff;
bi.clip_address = aData0.x >> 16;
bi.prim_address = aData0.y;
bi.clip_chain_rect_index = aData0.z >> 16;
bi.scroll_node_id = aData0.z & 0xffff;
bi.clip_chain_rect_index = aData0.z & ((1 << CLIP_CHAIN_RECT_BITS) - 1);
bi.segment_index = aData0.z >> CLIP_CHAIN_RECT_BITS;
bi.z = aData0.w;
bi.segment_index = aData1.x & 0xffff;
bi.edge_mask = (aData1.x >> 16) & 0xff;
bi.flags = (aData1.x >> 24);
bi.scroll_node_id = aData1.x & ((1 << CLIP_SCROLL_INDEX_BITS) - 1);
bi.edge_mask = (aData1.x >> CLIP_SCROLL_INDEX_BITS) & 0xf;
bi.flags = (aData1.x >> (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)) & 0xf;
bi.user_data = aData1.yzw;
return bi;
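The new layout packs clip_chain_rect_index and segment_index into aData0.z (22 + 9 bits), and scroll_node_id, the edge mask and the brush flags into aData1.x (23 + 4 + 4 bits). A minimal Rust mirror of this unpacking, for illustration only (field names follow the GLSL struct; the real constants live in gpu_types.rs and must stay in sync with the defines above):

fn unpack_brush_instance(data0_z: i32, data1_x: i32) -> (i32, i32, i32, i32, i32) {
    // aData0.z: clip_chain_rect_index in the low 22 bits, segment_index above them.
    let clip_chain_rect_index = data0_z & ((1 << 22) - 1);
    let segment_index = data0_z >> 22;
    // aData1.x: scroll_node_id in the low 23 bits, then 4 edge-mask bits, then 4 flag bits.
    let scroll_node_id = data1_x & ((1 << 23) - 1);
    let edge_mask = (data1_x >> 23) & 0xf;
    let flags = (data1_x >> 27) & 0xf;
    (clip_chain_rect_index, segment_index, scroll_node_id, edge_mask, flags)
}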

View File

@ -119,10 +119,10 @@ PrimitiveInstance fetch_prim_instance() {
pi.prim_address = aData0.x;
pi.specific_prim_address = pi.prim_address + VECS_PER_PRIM_HEADER;
pi.render_task_index = aData0.y;
pi.clip_task_index = aData0.z;
pi.clip_chain_rect_index = aData0.w / 65536;
pi.scroll_node_id = aData0.w % 65536;
pi.render_task_index = aData0.y % 0x10000;
pi.clip_task_index = aData0.y / 0x10000;
pi.clip_chain_rect_index = aData0.z;
pi.scroll_node_id = aData0.w;
pi.z = aData1.x;
pi.user_data0 = aData1.y;
pi.user_data1 = aData1.z;
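render_task_index and clip_task_index now share aData0.y, 16 bits each, which is what frees aData0.z and aData0.w for the full-width clip_chain_rect_index and scroll_node_id. A small illustrative sketch of that packing contract (not WebRender code); the shader decodes with `% 0x10000` and `/ 0x10000`, which is equivalent to mask-and-shift for non-negative values:

fn pack_task_indices(render_task_index: u16, clip_task_index: u16) -> u32 {
    // CPU side: render task index in the low half, clip task index in the high half.
    (render_task_index as u32) | ((clip_task_index as u32) << 16)
}

fn unpack_task_indices(y: u32) -> (u32, u32) {
    // GPU side: matches the shader's `aData0.y % 0x10000` and `aData0.y / 0x10000`.
    (y & 0xffff, y >> 16)
}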

View File

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{AlphaType, ClipMode, DeviceIntRect, DeviceIntSize};
use api::{DeviceUintRect, DeviceUintPoint, DeviceUintSize, ExternalImageType, FilterOp, ImageRendering, LayoutRect};
use api::{DeviceUintRect, DeviceUintPoint, ExternalImageType, FilterOp, ImageRendering, LayoutRect};
use api::{DeviceIntPoint, SubpixelDirection, YuvColorSpace, YuvFormat};
use api::{LayoutToWorldTransform, WorldPixel};
use border::{BorderCornerInstance, BorderCornerSide, BorderEdgeKind};
@ -1755,10 +1755,7 @@ pub fn resolve_image(
uv_rect_handle: cache_handle,
uv_rect: DeviceUintRect::new(
DeviceUintPoint::zero(),
DeviceUintSize::new(
image_properties.descriptor.width,
image_properties.descriptor.height,
)
image_properties.descriptor.size,
),
texture_layer: 0,
};

View File

@ -216,9 +216,11 @@ impl ClipScrollNode {
NodeType::ScrollFrame(ref mut scrolling) => {
let scroll_sensitivity = scrolling.scroll_sensitivity;
let scrollable_size = scrolling.scrollable_size;
let viewport_rect = scrolling.viewport_rect;
*scrolling = *old_scrolling_state;
scrolling.scroll_sensitivity = scroll_sensitivity;
scrolling.scrollable_size = scrollable_size;
scrolling.viewport_rect = viewport_rect;
}
_ if old_scrolling_state.offset != LayoutVector2D::zero() => {
warn!("Tried to scroll a non-scroll node.")

View File

@ -1605,8 +1605,8 @@ impl Device {
let desc = self.gl_describe_format(img_desc.format);
self.gl.read_pixels(
0, 0,
img_desc.width as i32,
img_desc.height as i32,
img_desc.size.width as i32,
img_desc.size.height as i32,
desc.external,
desc.pixel_type,
)

View File

@ -556,8 +556,7 @@ impl<'a> DisplayListFlattener<'a> {
let pipeline = match self.scene.pipelines.get(&iframe_pipeline_id) {
Some(pipeline) => pipeline,
None => {
//TODO: assert/debug_assert?
error!("Unknown pipeline used for iframe {:?}", info);
debug_assert!(info.ignore_missing_pipeline);
return
},
};

View File

@ -7,6 +7,7 @@
use api::{GlyphKey, ImageData, ImageDescriptor, ImageFormat};
use device::TextureFilter;
use euclid::size2;
use gpu_types::UvRectKind;
use rayon::prelude::*;
use std::sync::{Arc, MutexGuard};
@ -172,8 +173,7 @@ impl GlyphRasterizer {
texture_cache.update(
&mut texture_cache_handle,
ImageDescriptor {
width: glyph.width,
height: glyph.height,
size: size2(glyph.width, glyph.height),
stride: None,
format: ImageFormat::BGRA8,
is_opaque: false,

View File

@ -4,11 +4,22 @@
use api::{DevicePoint, LayoutToWorldTransform, WorldToLayoutTransform};
use gpu_cache::{GpuCacheAddress, GpuDataRequest};
use prim_store::EdgeAaSegmentMask;
use prim_store::{VECS_PER_SEGMENT, EdgeAaSegmentMask};
use render_task::RenderTaskAddress;
use renderer::MAX_VERTEX_TEXTURE_WIDTH;
// Contains types that must exactly match the same structures declared in GLSL.
const INT_BITS: usize = 31; //TODO: convert to unsigned
const CLIP_CHAIN_RECT_BITS: usize = 22;
const SEGMENT_BITS: usize = INT_BITS - CLIP_CHAIN_RECT_BITS;
// The guard ensures (at compile time) that the designated number of bits covers
// the maximum supported segment count for the texture width.
const _SEGMENT_GUARD: usize = (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH;
const EDGE_FLAG_BITS: usize = 4;
const BRUSH_FLAG_BITS: usize = 4;
const CLIP_SCROLL_INDEX_BITS: usize = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS;
#[derive(Copy, Clone, Debug)]
#[repr(C)]
pub struct ZBufferId(i32);
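The new _SEGMENT_GUARD constant works by unsigned underflow: if the segment index ever gets too few bits to address every segment that fits in a row of the vertex texture, the subtraction goes negative, constant evaluation fails, and the crate no longer builds. A compact, self-contained sketch of the trick (VECS_PER_SEGMENT = 2 comes from prim_store.rs in this patch; MAX_VERTEX_TEXTURE_WIDTH = 1024 is an assumed value here):

const SEGMENT_BITS: usize = 9;
const VECS_PER_SEGMENT: usize = 2;
const MAX_VERTEX_TEXTURE_WIDTH: usize = 1024;
// 2^9 segments * 2 vecs = 1024 >= 1024, so this evaluates to 0 today. Shrinking
// SEGMENT_BITS below 9 would make the subtraction underflow at compile time.
const _SEGMENT_GUARD: usize =
    (1 << SEGMENT_BITS) * VECS_PER_SEGMENT - MAX_VERTEX_TEXTURE_WIDTH;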
@ -18,7 +29,7 @@ pub struct ZBufferIdGenerator {
}
impl ZBufferIdGenerator {
pub fn new() -> ZBufferIdGenerator {
pub fn new() -> Self {
ZBufferIdGenerator {
next: 0
}
@ -133,9 +144,9 @@ impl SimplePrimitiveInstance {
PrimitiveInstance {
data: [
self.specific_prim_address.as_int(),
self.task_address.0 as i32,
self.clip_task_address.0 as i32,
((self.clip_chain_rect_index.0 as i32) << 16) | self.scroll_id.0 as i32,
self.task_address.0 as i32 | (self.clip_task_address.0 as i32) << 16,
self.clip_chain_rect_index.0 as i32,
self.scroll_id.0 as i32,
self.z.0,
data0,
data1,
@ -236,15 +247,18 @@ pub struct BrushInstance {
impl From<BrushInstance> for PrimitiveInstance {
fn from(instance: BrushInstance) -> Self {
debug_assert_eq!(0, instance.clip_chain_rect_index.0 >> CLIP_CHAIN_RECT_BITS);
debug_assert_eq!(0, instance.scroll_id.0 >> CLIP_SCROLL_INDEX_BITS);
debug_assert_eq!(0, instance.segment_index >> SEGMENT_BITS);
PrimitiveInstance {
data: [
instance.picture_address.0 as i32 | (instance.clip_task_address.0 as i32) << 16,
instance.prim_address.as_int(),
((instance.clip_chain_rect_index.0 as i32) << 16) | instance.scroll_id.0 as i32,
instance.clip_chain_rect_index.0 as i32 | (instance.segment_index << CLIP_CHAIN_RECT_BITS),
instance.z.0,
instance.segment_index |
((instance.edge_flags.bits() as i32) << 16) |
((instance.brush_flags.bits() as i32) << 24),
instance.scroll_id.0 as i32 |
((instance.edge_flags.bits() as i32) << CLIP_SCROLL_INDEX_BITS) |
((instance.brush_flags.bits() as i32) << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS)),
instance.user_data[0],
instance.user_data[1],
instance.user_data[2],
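For reference, a minimal self-contained sketch of the CPU-side packing above (names are assumptions; the real code wraps these fields in newtype indices and bitflags such as the EdgeAaSegmentMask imported above):

const INT_BITS: i32 = 31;
const CLIP_CHAIN_RECT_BITS: i32 = 22;
const SEGMENT_BITS: i32 = INT_BITS - CLIP_CHAIN_RECT_BITS;
const EDGE_FLAG_BITS: i32 = 4;
const BRUSH_FLAG_BITS: i32 = 4;
const CLIP_SCROLL_INDEX_BITS: i32 = INT_BITS - EDGE_FLAG_BITS - BRUSH_FLAG_BITS;

fn pack_brush_words(
    clip_chain_rect_index: i32,
    segment_index: i32,
    scroll_id: i32,
    edge_flags: i32,
    brush_flags: i32,
) -> (i32, i32) {
    // Mirror the debug asserts above for the index fields: each one has to fit
    // its bit budget before being OR-ed into the shared instance words.
    debug_assert_eq!(0, clip_chain_rect_index >> CLIP_CHAIN_RECT_BITS);
    debug_assert_eq!(0, segment_index >> SEGMENT_BITS);
    debug_assert_eq!(0, scroll_id >> CLIP_SCROLL_INDEX_BITS);
    let data0_z = clip_chain_rect_index | (segment_index << CLIP_CHAIN_RECT_BITS);
    let data1_x = scroll_id
        | (edge_flags << CLIP_SCROLL_INDEX_BITS)
        | (brush_flags << (CLIP_SCROLL_INDEX_BITS + EDGE_FLAG_BITS));
    (data0_z, data1_x)
}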

View File

@ -3,7 +3,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{AlphaType, BorderRadius, BoxShadowClipMode, BuiltDisplayList, ClipMode, ColorF, ComplexClipRegion};
use api::{DeviceIntRect, DeviceIntSize, DeviceUintSize, DevicePixelScale, Epoch, ExtendMode, FontRenderMode};
use api::{DeviceIntRect, DeviceIntSize, DevicePixelScale, Epoch, ExtendMode, FontRenderMode};
use api::{FilterOp, GlyphInstance, GlyphKey, GradientStop, ImageKey, ImageRendering, ItemRange, ItemTag, TileOffset};
use api::{GlyphRasterSpace, LayoutPoint, LayoutRect, LayoutSize, LayoutToWorldTransform, LayoutVector2D};
use api::{PipelineId, PremultipliedColorF, PropertyBinding, Shadow, YuvColorSpace, YuvFormat, DeviceIntSideOffsets};
@ -36,6 +36,7 @@ use util::{pack_as_float, recycle_vec};
const MIN_BRUSH_SPLIT_AREA: f32 = 256.0 * 256.0;
pub const VECS_PER_SEGMENT: usize = 2;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct ScrollNodeAndClipChain {
@ -543,25 +544,6 @@ pub enum ImageSource {
},
}
#[derive(Debug)]
pub struct ImagePrimitiveCpu {
pub tile_spacing: LayoutSize,
pub alpha_type: AlphaType,
pub stretch_size: LayoutSize,
pub current_epoch: Epoch,
pub source: ImageSource,
pub key: ImageCacheKey,
}
impl ToGpuBlocks for ImagePrimitiveCpu {
fn write_gpu_blocks(&self, mut request: GpuDataRequest) {
request.push([
self.stretch_size.width, self.stretch_size.height,
self.tile_spacing.width, self.tile_spacing.height,
]);
}
}
#[derive(Debug)]
pub struct BorderPrimitiveCpu {
pub corner_instances: [BorderCornerInstance; 4],
@ -1158,7 +1140,6 @@ pub struct PrimitiveStore {
/// CPU side information only.
pub cpu_brushes: Vec<BrushPrimitive>,
pub cpu_text_runs: Vec<TextRunPrimitiveCpu>,
pub cpu_images: Vec<ImagePrimitiveCpu>,
pub cpu_metadata: Vec<PrimitiveMetadata>,
pub cpu_borders: Vec<BorderPrimitiveCpu>,
@ -1172,7 +1153,6 @@ impl PrimitiveStore {
cpu_metadata: Vec::new(),
cpu_brushes: Vec::new(),
cpu_text_runs: Vec::new(),
cpu_images: Vec::new(),
cpu_borders: Vec::new(),
pictures: Vec::new(),
@ -1185,7 +1165,6 @@ impl PrimitiveStore {
cpu_metadata: recycle_vec(self.cpu_metadata),
cpu_brushes: recycle_vec(self.cpu_brushes),
cpu_text_runs: recycle_vec(self.cpu_text_runs),
cpu_images: recycle_vec(self.cpu_images),
cpu_borders: recycle_vec(self.cpu_borders),
pictures: recycle_vec(self.pictures),
@ -1493,10 +1472,7 @@ impl PrimitiveStore {
if *tile_spacing != LayoutSize::zero() && !is_tiled {
*source = ImageSource::Cache {
// Size in device-pixels we need to allocate in render task cache.
size: DeviceIntSize::new(
image_properties.descriptor.width as i32,
image_properties.descriptor.height as i32
),
size: image_properties.descriptor.size.to_i32(),
handle: None,
};
}
@ -1594,10 +1570,7 @@ impl PrimitiveStore {
if let Some(tile_size) = image_properties.tiling {
let device_image_size = DeviceUintSize::new(
image_properties.descriptor.width,
image_properties.descriptor.height,
);
let device_image_size = image_properties.descriptor.size;
// Tighten the clip rect because decomposing the repeated image can
// produce primitives that are partially covering the original image
@ -2764,6 +2737,7 @@ impl<'a> GpuDataRequest<'a> {
local_rect: LayoutRect,
extra_data: [f32; 4],
) {
let _ = VECS_PER_SEGMENT;
self.push(local_rect);
self.push(extra_data);
}

View File

@ -1898,7 +1898,7 @@ impl Renderer {
let desc = ImageDescriptor::new(1024, 768, ImageFormat::BGRA8, true, false);
let data = self.device.read_pixels(&desc);
let screenshot = debug_server::Screenshot::new(desc.width, desc.height, data);
let screenshot = debug_server::Screenshot::new(desc.size.width, desc.size.height, data);
serde_json::to_string(&screenshot).unwrap()
}
@ -4411,7 +4411,7 @@ impl Renderer {
let (layer_count, filter) = (1, TextureFilter::Linear);
let plain_tex = PlainTexture {
data: e.key().clone(),
size: (descriptor.width, descriptor.height, layer_count),
size: (descriptor.size.width, descriptor.size.height, layer_count),
format: descriptor.format,
filter,
render_target: None,

View File

@ -2,7 +2,7 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use api::{AddFont, BlobImageData, BlobImageResources, ResourceUpdate, ResourceUpdates};
use api::{AddFont, BlobImageResources, ResourceUpdate, ResourceUpdates};
use api::{BlobImageDescriptor, BlobImageError, BlobImageRenderer, BlobImageRequest};
use api::{ClearCache, ColorF, DevicePoint, DeviceUintPoint, DeviceUintRect, DeviceUintSize};
use api::{Epoch, FontInstanceKey, FontKey, FontTemplate};
@ -19,6 +19,7 @@ use capture::PlainExternalImage;
#[cfg(any(feature = "replay", feature = "png"))]
use capture::CaptureConfig;
use device::TextureFilter;
use euclid::size2;
use glyph_cache::GlyphCache;
#[cfg(not(feature = "pathfinder"))]
use glyph_cache::GlyphCacheEntry;
@ -34,7 +35,6 @@ use std::collections::hash_map::Entry::{self, Occupied, Vacant};
use std::cmp;
use std::fmt::Debug;
use std::hash::Hash;
use std::mem;
#[cfg(any(feature = "capture", feature = "replay"))]
use std::path::PathBuf;
use std::sync::{Arc, RwLock};
@ -149,24 +149,23 @@ pub struct ResourceClassCache<K: Hash + Eq, V, U: Default> {
pub fn intersect_for_tile(
dirty: DeviceUintRect,
width: u32,
height: u32,
clipped_tile_size: DeviceUintSize,
tile_size: TileSize,
tile_offset: TileOffset,
) -> Option<DeviceUintRect> {
dirty.intersection(&DeviceUintRect::new(
DeviceUintPoint::new(
tile_offset.x as u32 * tile_size as u32,
tile_offset.y as u32 * tile_size as u32
),
DeviceUintSize::new(width, height),
)).map(|mut r| {
// we can't translate by a negative size so do it manually
r.origin.x -= tile_offset.x as u32 * tile_size as u32;
r.origin.y -= tile_offset.y as u32 * tile_size as u32;
r
})
dirty.intersection(&DeviceUintRect::new(
DeviceUintPoint::new(
tile_offset.x as u32 * tile_size as u32,
tile_offset.y as u32 * tile_size as u32
),
clipped_tile_size,
)).map(|mut r| {
// we can't translate by a negative size so do it manually
r.origin.x -= tile_offset.x as u32 * tile_size as u32;
r.origin.y -= tile_offset.y as u32 * tile_size as u32;
r
})
}
@ -334,7 +333,7 @@ impl ResourceCache {
}
fn should_tile(limit: u32, descriptor: &ImageDescriptor, data: &ImageData) -> bool {
let size_check = descriptor.width > limit || descriptor.height > limit;
let size_check = descriptor.size.width > limit || descriptor.size.height > limit;
match *data {
ImageData::Raw(_) | ImageData::Blob(_) => size_check,
ImageData::External(info) => {
@ -495,7 +494,7 @@ impl ResourceCache {
&mut self,
image_key: ImageKey,
descriptor: ImageDescriptor,
mut data: ImageData,
data: ImageData,
mut tiling: Option<TileSize>,
) {
if tiling.is_none() && Self::should_tile(self.max_texture_size(), &descriptor, &data) {
@ -504,10 +503,10 @@ impl ResourceCache {
tiling = Some(DEFAULT_TILE_SIZE);
}
if let ImageData::Blob(ref mut blob) = data {
if let ImageData::Blob(ref blob) = data {
self.blob_image_renderer.as_mut().unwrap().add(
image_key,
mem::replace(blob, BlobImageData::new()),
Arc::clone(&blob),
tiling,
);
}
@ -517,8 +516,10 @@ impl ResourceCache {
data,
epoch: Epoch(0),
tiling,
dirty_rect: Some(DeviceUintRect::new(DeviceUintPoint::zero(),
DeviceUintSize::new(descriptor.width, descriptor.height))),
dirty_rect: Some(DeviceUintRect::new(
DeviceUintPoint::zero(),
descriptor.size,
)),
};
self.resources.image_templates.insert(image_key, resource);
@ -546,7 +547,7 @@ impl ResourceCache {
self.blob_image_renderer
.as_mut()
.unwrap()
.update(image_key, mem::replace(blob, BlobImageData::new()), dirty_rect);
.update(image_key, Arc::clone(&blob), dirty_rect);
}
*image = ImageResource {
@ -601,12 +602,12 @@ impl ResourceCache {
}
let side_size =
template.tiling.map_or(cmp::max(template.descriptor.width, template.descriptor.height),
template.tiling.map_or(cmp::max(template.descriptor.size.width, template.descriptor.size.height),
|tile_size| tile_size as u32);
if side_size > self.texture_cache.max_texture_size() {
// The image or tiling size is too big for hardware texture size.
warn!("Dropping image, image:(w:{},h:{}, tile:{}) is too big for hardware!",
template.descriptor.width, template.descriptor.height, template.tiling.unwrap_or(0));
template.descriptor.size.width, template.descriptor.size.height, template.tiling.unwrap_or(0));
self.cached_images.insert(request, Err(ImageCacheError::OverLimitSize));
return;
}
@ -644,10 +645,10 @@ impl ResourceCache {
// - The blob hasn't already been requested this frame.
if self.pending_image_requests.insert(request) && template.data.is_blob() {
if let Some(ref mut renderer) = self.blob_image_renderer {
let (offset, w, h) = match template.tiling {
let (offset, size) = match template.tiling {
Some(tile_size) => {
let tile_offset = request.tile.unwrap();
let (w, h) = compute_tile_size(
let actual_size = compute_tile_size(
&template.descriptor,
tile_size,
tile_offset,
@ -658,27 +659,22 @@ impl ResourceCache {
);
if let Some(dirty) = template.dirty_rect {
if intersect_for_tile(dirty, w, h, tile_size, tile_offset).is_none() {
if intersect_for_tile(dirty, actual_size, tile_size, tile_offset).is_none() {
// don't bother requesting unchanged tiles
return
}
}
(offset, w, h)
(offset, actual_size)
}
None => (
DevicePoint::zero(),
template.descriptor.width,
template.descriptor.height,
),
None => (DevicePoint::zero(), template.descriptor.size),
};
renderer.request(
&self.resources,
request.into(),
&BlobImageDescriptor {
width: w,
height: h,
size,
offset,
format: template.descriptor.format,
},
@ -970,11 +966,10 @@ impl ResourceCache {
let tile_size = image_template.tiling.unwrap();
let image_descriptor = &image_template.descriptor;
let (actual_width, actual_height) =
compute_tile_size(image_descriptor, tile_size, tile);
let clipped_tile_size = compute_tile_size(image_descriptor, tile_size, tile);
if let Some(dirty) = dirty_rect {
dirty_rect = intersect_for_tile(dirty, actual_width, actual_height, tile_size, tile);
dirty_rect = intersect_for_tile(dirty, clipped_tile_size, tile_size, tile);
if dirty_rect.is_none() {
continue
}
@ -997,8 +992,7 @@ impl ResourceCache {
};
ImageDescriptor {
width: actual_width,
height: actual_height,
size: clipped_tile_size,
stride,
offset,
..*image_descriptor
@ -1020,8 +1014,8 @@ impl ResourceCache {
// the most important use cases. We may want to support
// mip-maps on shared cache items in the future.
if descriptor.allow_mipmaps &&
descriptor.width > 512 &&
descriptor.height > 512 &&
descriptor.size.width > 512 &&
descriptor.size.height > 512 &&
!self.texture_cache.is_allowed_in_shared_cache(
TextureFilter::Linear,
&descriptor,
@ -1104,24 +1098,24 @@ pub fn compute_tile_size(
descriptor: &ImageDescriptor,
base_size: TileSize,
tile: TileOffset,
) -> (u32, u32) {
) -> DeviceUintSize {
let base_size = base_size as u32;
// Most tiles are going to have base_size as width and height,
// except for tiles around the edges that are shrunk to fit the image data
// (See decompose_tiled_image in frame.rs).
let actual_width = if (tile.x as u32) < descriptor.width / base_size {
let actual_width = if (tile.x as u32) < descriptor.size.width / base_size {
base_size
} else {
descriptor.width % base_size
descriptor.size.width % base_size
};
let actual_height = if (tile.y as u32) < descriptor.height / base_size {
let actual_height = if (tile.y as u32) < descriptor.size.height / base_size {
base_size
} else {
descriptor.height % base_size
descriptor.size.height % base_size
};
(actual_width, actual_height)
size2(actual_width, actual_height)
}
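compute_tile_size now returns a DeviceUintSize instead of a (u32, u32) pair. A small worked example of the edge-tile math (pure arithmetic, mirroring the function above), using the 1510x111256 blob with 31-px tiles exercised by test_very_large_blob later in this patch:

fn edge_tile_extent(image_extent: u32, tile_coord: u32, base_size: u32) -> u32 {
    // Interior tiles keep the full base size; the last row/column shrinks to
    // whatever is left of the image.
    if tile_coord < image_extent / base_size {
        base_size
    } else {
        image_extent % base_size
    }
}

#[test]
fn edge_tiles_shrink_to_fit() {
    assert_eq!(edge_tile_extent(1510, 0, 31), 31);      // interior column
    assert_eq!(edge_tile_extent(1510, 48, 31), 22);     // last column: 1510 % 31
    assert_eq!(edge_tile_extent(111256, 3588, 31), 28); // last row: 111256 % 31
}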
#[cfg(any(feature = "capture", feature = "replay"))]
@ -1247,7 +1241,7 @@ impl ResourceCache {
#[cfg(feature = "png")]
CaptureConfig::save_png(
root.join(format!("images/{}.png", image_id)),
(desc.width, desc.height),
(desc.size.width, desc.size.height),
ReadPixelsFormat::Standard(desc.format),
&arc,
);
@ -1272,8 +1266,7 @@ impl ResourceCache {
&self.resources,
request,
&BlobImageDescriptor {
width: desc.width,
height: desc.height,
size: desc.size,
offset: DevicePoint::zero(),
format: desc.format,
},
@ -1281,14 +1274,14 @@ impl ResourceCache {
);
let result = renderer.resolve(request)
.expect("Blob resolve failed");
assert_eq!((result.width, result.height), (desc.width, desc.height));
assert_eq!(result.size, desc.size);
assert_eq!(result.data.len(), desc.compute_total_size() as usize);
num_blobs += 1;
#[cfg(feature = "png")]
CaptureConfig::save_png(
root.join(format!("blobs/{}.png", num_blobs)),
(desc.width, desc.height),
(desc.size.width, desc.size.height),
ReadPixelsFormat::Standard(desc.format),
&result.data,
);

View File

@ -411,9 +411,7 @@ impl TextureCache {
Some(ref handle) => {
match self.entries.get_opt(handle) {
Some(entry) => {
entry.size.width != descriptor.width ||
entry.size.height != descriptor.height ||
entry.format != descriptor.format
entry.size != descriptor.size || entry.format != descriptor.format
}
None => {
// Was previously allocated but has been evicted.
@ -650,8 +648,7 @@ impl TextureCache {
// AND
// - We have freed an item that will definitely allow us to
// fit the currently requested allocation.
let needed_slab_size =
SlabSize::new(required_alloc.width, required_alloc.height);
let needed_slab_size = SlabSize::new(required_alloc.size);
let mut found_matching_slab = false;
let mut freed_complete_page = false;
let mut evicted_items = 0;
@ -754,8 +751,7 @@ impl TextureCache {
// Do the allocation. This can fail and return None
// if there are no free slots or regions available.
texture_array.alloc(
descriptor.width,
descriptor.height,
descriptor.size,
user_data,
self.frame_id,
uv_rect_kind,
@ -783,8 +779,8 @@ impl TextureCache {
// TODO(gw): If we find pages that suffer from batch breaks in this
// case, add support for storing these in a standalone
// texture array.
if descriptor.width > TEXTURE_REGION_DIMENSIONS ||
descriptor.height > TEXTURE_REGION_DIMENSIONS {
if descriptor.size.width > TEXTURE_REGION_DIMENSIONS ||
descriptor.size.height > TEXTURE_REGION_DIMENSIONS {
allowed_in_shared_cache = false;
}
@ -802,7 +798,7 @@ impl TextureCache {
user_data: [f32; 3],
uv_rect_kind: UvRectKind,
) {
assert!(descriptor.width > 0 && descriptor.height > 0);
assert!(descriptor.size.width > 0 && descriptor.size.height > 0);
// Work out if this image qualifies to go in the shared (batching) cache.
let allowed_in_shared_cache = self.is_allowed_in_shared_cache(
@ -811,7 +807,6 @@ impl TextureCache {
);
let mut allocated_in_shared_cache = true;
let mut new_cache_entry = None;
let size = DeviceUintSize::new(descriptor.width, descriptor.height);
let frame_id = self.frame_id;
// If it's allowed in the cache, see if there is a spot for it.
@ -848,8 +843,8 @@ impl TextureCache {
let update_op = TextureUpdate {
id: texture_id,
op: TextureUpdateOp::Create {
width: descriptor.width,
height: descriptor.height,
width: descriptor.size.width,
height: descriptor.size.height,
format: descriptor.format,
filter,
render_target: Some(RenderTargetInfo { has_depth: false }),
@ -860,7 +855,7 @@ impl TextureCache {
new_cache_entry = Some(CacheEntry::new_standalone(
texture_id,
size,
descriptor.size,
descriptor.format,
filter,
user_data,
@ -928,9 +923,9 @@ struct SlabSize {
}
impl SlabSize {
fn new(width: u32, height: u32) -> SlabSize {
let x_size = quantize_dimension(width);
let y_size = quantize_dimension(height);
fn new(size: DeviceUintSize) -> SlabSize {
let x_size = quantize_dimension(size.width);
let y_size = quantize_dimension(size.height);
assert!(x_size > 0 && x_size <= TEXTURE_REGION_DIMENSIONS);
assert!(y_size > 0 && y_size <= TEXTURE_REGION_DIMENSIONS);
@ -1109,8 +1104,7 @@ impl TextureArray {
// Allocate space in this texture array.
fn alloc(
&mut self,
width: u32,
height: u32,
size: DeviceUintSize,
user_data: [f32; 3],
frame_id: FrameId,
uv_rect_kind: UvRectKind,
@ -1142,7 +1136,7 @@ impl TextureArray {
// Quantize the size of the allocation to select a region to
// allocate from.
let slab_size = SlabSize::new(width, height);
let slab_size = SlabSize::new(size);
// TODO(gw): For simplicity, the initial implementation just
// has a single vec<> of regions. We could easily
@ -1189,7 +1183,7 @@ impl TextureArray {
entry_kind.map(|kind| {
CacheEntry {
size: DeviceUintSize::new(width, height),
size,
user_data,
last_access: frame_id,
kind,
@ -1232,8 +1226,8 @@ impl TextureUpdate {
},
ImageData::Raw(bytes) => {
let finish = descriptor.offset +
descriptor.width * descriptor.format.bytes_per_pixel() +
(descriptor.height - 1) * descriptor.compute_stride();
descriptor.size.width * descriptor.format.bytes_per_pixel() +
(descriptor.size.height - 1) * descriptor.compute_stride();
assert!(bytes.len() >= finish as usize);
TextureUpdateSource::Bytes { data: bytes }

View File

@ -543,6 +543,7 @@ pub enum FilterOp {
pub struct IframeDisplayItem {
pub clip_id: ClipId,
pub pipeline_id: PipelineId,
pub ignore_missing_pipeline: bool,
}
#[derive(Clone, Copy, Debug, Deserialize, PartialEq, Serialize)]

View File

@ -1491,10 +1491,16 @@ impl DisplayListBuilder {
assert!(!self.clip_stack.is_empty());
}
pub fn push_iframe(&mut self, info: &LayoutPrimitiveInfo, pipeline_id: PipelineId) {
pub fn push_iframe(
&mut self,
info: &LayoutPrimitiveInfo,
pipeline_id: PipelineId,
ignore_missing_pipeline: bool
) {
let item = SpecificDisplayItem::Iframe(IframeDisplayItem {
clip_id: self.generate_clip_id(),
pipeline_id,
ignore_missing_pipeline,
});
self.push_item(item, info);
}
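push_iframe now takes an explicit ignore_missing_pipeline flag, which feeds the debug assertion added to the display-list flattener earlier in this patch: the wrench YAML reader defaults it to true, while the iframe example passes false so a missing sub-pipeline still trips the assertion. A hedged usage sketch:

use webrender::api::*;

fn push_example_iframe(builder: &mut DisplayListBuilder, sub_pipeline: PipelineId) {
    // Bounds are arbitrary for the sketch; the iframe fills this rect.
    let bounds = LayoutRect::new(LayoutPoint::zero(), LayoutSize::new(100.0, 100.0));
    let info = LayoutPrimitiveInfo::new(bounds);
    // Passing `true` means a missing sub-pipeline is silently skipped instead
    // of asserting in the flattener.
    builder.push_iframe(&info, sub_pipeline, true);
}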

View File

@ -6,7 +6,8 @@ extern crate serde_bytes;
use font::{FontInstanceKey, FontKey, FontTemplate};
use std::sync::Arc;
use {DevicePoint, DeviceUintRect, IdNamespace, TileOffset, TileSize};
use {DevicePoint, DeviceUintRect, DeviceUintSize, IdNamespace, TileOffset, TileSize};
use euclid::size2;
#[repr(C)]
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
@ -73,8 +74,7 @@ impl ImageFormat {
#[derive(Copy, Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct ImageDescriptor {
pub format: ImageFormat,
pub width: u32,
pub height: u32,
pub size: DeviceUintSize,
pub stride: Option<u32>,
pub offset: u32,
pub is_opaque: bool,
@ -90,8 +90,7 @@ impl ImageDescriptor {
allow_mipmaps: bool,
) -> Self {
ImageDescriptor {
width,
height,
size: size2(width, height),
format,
stride: None,
offset: 0,
@ -101,19 +100,18 @@ impl ImageDescriptor {
}
pub fn compute_stride(&self) -> u32 {
self.stride
.unwrap_or(self.width * self.format.bytes_per_pixel())
self.stride.unwrap_or(self.size.width * self.format.bytes_per_pixel())
}
pub fn compute_total_size(&self) -> u32 {
self.compute_stride() * self.height
self.compute_stride() * self.size.height
}
}
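Call sites that previously read descriptor.width / descriptor.height now go through the euclid-based size field, while ImageDescriptor::new keeps its separate width and height arguments (see above). A minimal usage sketch under that assumption:

use webrender::api::{ImageDescriptor, ImageFormat};

fn descriptor_sizes() {
    // (width, height, format, is_opaque, allow_mipmaps), as used elsewhere in this patch.
    let desc = ImageDescriptor::new(1024, 768, ImageFormat::BGRA8, true, false);
    assert_eq!((desc.size.width, desc.size.height), (1024, 768));
    // BGRA8 is 4 bytes per pixel, and no explicit stride was given.
    assert_eq!(desc.compute_stride(), 1024 * 4);
    assert_eq!(desc.compute_total_size(), 1024 * 4 * 768);
}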
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum ImageData {
Raw(#[serde(with = "serde_image_data_raw")] Arc<Vec<u8>>),
Blob(#[serde(with = "serde_bytes")] BlobImageData),
Blob(#[serde(with = "serde_image_data_raw")] Arc<BlobImageData>),
External(ExternalImageData),
}
@ -141,8 +139,8 @@ impl ImageData {
ImageData::Raw(bytes)
}
pub fn new_blob_image(commands: Vec<u8>) -> Self {
ImageData::Blob(commands)
pub fn new_blob_image(commands: BlobImageData) -> Self {
ImageData::Blob(Arc::new(commands))
}
#[inline]
@ -172,9 +170,9 @@ pub trait BlobImageResources {
}
pub trait BlobImageRenderer: Send {
fn add(&mut self, key: ImageKey, data: BlobImageData, tiling: Option<TileSize>);
fn add(&mut self, key: ImageKey, data: Arc<BlobImageData>, tiling: Option<TileSize>);
fn update(&mut self, key: ImageKey, data: BlobImageData, dirty_rect: Option<DeviceUintRect>);
fn update(&mut self, key: ImageKey, data: Arc<BlobImageData>, dirty_rect: Option<DeviceUintRect>);
fn delete(&mut self, key: ImageKey);
@ -202,15 +200,13 @@ pub type BlobImageResult = Result<RasterizedBlobImage, BlobImageError>;
#[repr(C)]
#[derive(Copy, Clone, Debug)]
pub struct BlobImageDescriptor {
pub width: u32,
pub height: u32,
pub size: DeviceUintSize,
pub offset: DevicePoint,
pub format: ImageFormat,
}
pub struct RasterizedBlobImage {
pub width: u32,
pub height: u32,
pub size: DeviceUintSize,
pub data: Vec<u8>,
}
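BlobImageRenderer implementations now receive the serialized display commands as Arc<BlobImageData>, so add and update can keep a reference without copying the byte buffer (the resource cache side of this patch switches from mem::replace to Arc::clone accordingly). A minimal sketch of a consumer, modelled on the CheckerboardRenderer in the examples, with names assumed:

use std::collections::HashMap;
use std::sync::Arc;
use webrender::api::{BlobImageData, ImageKey};

struct BlobCommandStore {
    image_cmds: HashMap<ImageKey, Arc<BlobImageData>>,
}

impl BlobCommandStore {
    // The resource cache calls this with `Arc::clone(&blob)`, so no byte copy happens.
    fn add(&mut self, key: ImageKey, cmds: Arc<BlobImageData>) {
        self.image_cmds.insert(key, cmds);
    }

    fn update(&mut self, key: ImageKey, cmds: Arc<BlobImageData>) {
        // Replace the stored commands wholesale (no incremental updates here).
        self.image_cmds.insert(key, cmds);
    }
}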

View File

@ -1 +1 @@
9d20df4e76e3b19c569fd89965f70a2c278ff0c8
672f480af48b0ad69c1b2781151278d99816763a

View File

@ -9,6 +9,7 @@ use std::sync::Arc;
use std::sync::Mutex;
use webrender::api::*;
use webrender::intersect_for_tile;
use euclid::size2;
// Serialize/deserialize the blob.
@ -41,7 +42,10 @@ fn render_blob(
) -> BlobImageResult {
// Allocate storage for the result. Right now the resource cache expects the
// tiles to have no stride or offset.
let mut texels = vec![0u8; (descriptor.width * descriptor.height * descriptor.format.bytes_per_pixel()) as usize];
let buf_size = descriptor.size.width *
descriptor.size.height *
descriptor.format.bytes_per_pixel();
let mut texels = vec![0u8; (buf_size) as usize];
// Generate a per-tile pattern to see it in the demo. For a real use case it would not
// make sense for the rendered content to depend on its tile.
@ -52,10 +56,10 @@ fn render_blob(
let mut dirty_rect = dirty_rect.unwrap_or(DeviceUintRect::new(
DeviceUintPoint::new(0, 0),
DeviceUintSize::new(descriptor.width, descriptor.height)));
DeviceUintSize::new(descriptor.size.width, descriptor.size.height)));
if let Some((tile_size, tile)) = tile {
dirty_rect = intersect_for_tile(dirty_rect, tile_size as u32, tile_size as u32,
dirty_rect = intersect_for_tile(dirty_rect, size2(tile_size as u32, tile_size as u32),
tile_size, tile)
.expect("empty rects should be culled by webrender");
}
@ -80,13 +84,13 @@ fn render_blob(
match descriptor.format {
ImageFormat::BGRA8 => {
let a = color.a * checker + tc;
texels[((y * descriptor.width + x) * 4 + 0) as usize] = premul(color.b * checker + tc, a);
texels[((y * descriptor.width + x) * 4 + 1) as usize] = premul(color.g * checker + tc, a);
texels[((y * descriptor.width + x) * 4 + 2) as usize] = premul(color.r * checker + tc, a);
texels[((y * descriptor.width + x) * 4 + 3) as usize] = a;
texels[((y * descriptor.size.width + x) * 4 + 0) as usize] = premul(color.b * checker + tc, a);
texels[((y * descriptor.size.width + x) * 4 + 1) as usize] = premul(color.g * checker + tc, a);
texels[((y * descriptor.size.width + x) * 4 + 2) as usize] = premul(color.r * checker + tc, a);
texels[((y * descriptor.size.width + x) * 4 + 3) as usize] = a;
}
ImageFormat::R8 => {
texels[(y * descriptor.width + x) as usize] = color.a * checker + tc;
texels[(y * descriptor.size.width + x) as usize] = color.a * checker + tc;
}
_ => {
return Err(BlobImageError::Other(
@ -99,8 +103,7 @@ fn render_blob(
Ok(RasterizedBlobImage {
data: texels,
width: descriptor.width,
height: descriptor.height,
size: descriptor.size,
})
}
@ -134,12 +137,12 @@ impl CheckerboardRenderer {
}
impl BlobImageRenderer for CheckerboardRenderer {
fn add(&mut self, key: ImageKey, cmds: BlobImageData, tile_size: Option<TileSize>) {
fn add(&mut self, key: ImageKey, cmds: Arc<BlobImageData>, tile_size: Option<TileSize>) {
self.image_cmds
.insert(key, (deserialize_blob(&cmds[..]).unwrap(), tile_size));
}
fn update(&mut self, key: ImageKey, cmds: BlobImageData, _dirty_rect: Option<DeviceUintRect>) {
fn update(&mut self, key: ImageKey, cmds: Arc<BlobImageData>, _dirty_rect: Option<DeviceUintRect>) {
// Here, updating is just replacing the current version of the commands with
// the new one (no incremental updates).
self.image_cmds.get_mut(&key).unwrap().0 = deserialize_blob(&cmds[..]).unwrap();

View File

@ -116,7 +116,7 @@ impl JsonFrameWriter {
match *update {
ResourceUpdate::AddImage(ref img) => {
let stride = img.descriptor.stride.unwrap_or(
img.descriptor.width * img.descriptor.format.bytes_per_pixel(),
img.descriptor.size.width * img.descriptor.format.bytes_per_pixel(),
);
let bytes = match img.data {
ImageData::Raw(ref v) => (**v).clone(),
@ -127,8 +127,8 @@ impl JsonFrameWriter {
self.images.insert(
img.key,
CachedImage {
width: img.descriptor.width,
height: img.descriptor.height,
width: img.descriptor.size.width,
height: img.descriptor.size.height,
stride,
format: img.descriptor.format,
bytes: Some(bytes),
@ -138,8 +138,8 @@ impl JsonFrameWriter {
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
assert_eq!(data.width, img.descriptor.width);
assert_eq!(data.height, img.descriptor.height);
assert_eq!(data.width, img.descriptor.size.width);
assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {

View File

@ -44,6 +44,7 @@ impl<'a> RawtestHarness<'a> {
self.test_blob_update_test();
self.test_blob_update_epoch_test();
self.test_tile_decomposition();
self.test_very_large_blob();
self.test_save_restore();
self.test_capture();
}
@ -63,6 +64,7 @@ impl<'a> RawtestHarness<'a> {
) {
let mut txn = Transaction::new();
let root_background_color = Some(ColorF::new(1.0, 1.0, 1.0, 1.0));
txn.use_scene_builder_thread();
if let Some(resources) = resources {
txn.update_resources(resources);
}
@ -121,6 +123,89 @@ impl<'a> RawtestHarness<'a> {
self.wrench.api.update_resources(resources);
}
fn test_very_large_blob(&mut self) {
println!("\tvery large blob...");
assert_eq!(self.wrench.device_pixel_ratio, 1.);
let window_size = self.window.get_inner_size();
let test_size = DeviceUintSize::new(800, 800);
let window_rect = DeviceUintRect::new(
DeviceUintPoint::new(0, window_size.height - test_size.height),
test_size,
);
// This exposes a crash in tile decomposition
let layout_size = LayoutSize::new(800., 800.);
let mut resources = ResourceUpdates::new();
let blob_img = self.wrench.api.generate_image_key();
resources.add_image(
blob_img,
ImageDescriptor::new(1510, 111256, ImageFormat::BGRA8, false, false),
ImageData::new_blob_image(blob::serialize_blob(ColorU::new(50, 50, 150, 255))),
Some(31),
);
let mut builder = DisplayListBuilder::new(self.wrench.root_pipeline_id, layout_size);
let info = LayoutPrimitiveInfo::new(rect(0., -9600.0, 1510.000031, 111256.));
let image_size = size(1510., 111256.);
let clip_id = builder.define_clip(rect(40., 41., 200., 201.), vec![], None);
builder.push_clip_id(clip_id);
// setup some malicious image size parameters
builder.push_image(
&info,
image_size * 2.,
image_size,
ImageRendering::Auto,
AlphaType::PremultipliedAlpha,
blob_img,
);
builder.pop_clip_id();
let mut epoch = Epoch(0);
self.submit_dl(&mut epoch, layout_size, builder, Some(resources));
let called = Arc::new(AtomicIsize::new(0));
let called_inner = Arc::clone(&called);
self.wrench.callbacks.lock().unwrap().request = Box::new(move |_| {
called_inner.fetch_add(1, Ordering::SeqCst);
});
let pixels = self.render_and_get_pixels(window_rect);
// make sure we didn't request too many blobs
assert_eq!(called.load(Ordering::SeqCst), 16);
// make sure things are in the right spot
assert!(
pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4] == 255 &&
pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 1] == 255 &&
pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 2] == 255 &&
pixels[(148 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 3] == 255
);
assert!(
pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4] == 50 &&
pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 1] == 50 &&
pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 2] == 150 &&
pixels[(132 + (window_rect.size.height as usize - 148) * window_rect.size.width as usize) * 4 + 3] == 255
);
// Leaving a tiled blob image in the resource cache
// confuses the `test_capture`. TODO: remove this
resources = ResourceUpdates::new();
resources.delete_image(blob_img);
self.wrench.api.update_resources(resources);
}
fn test_retained_blob_images_test(&mut self) {
println!("\tretained blob images test...");
let blob_img;

View File

@ -100,8 +100,8 @@ impl RonFrameWriter {
self.images.insert(
img.key,
CachedImage {
width: img.descriptor.width,
height: img.descriptor.height,
width: img.descriptor.size.width,
height: img.descriptor.size.height,
format: img.descriptor.format,
bytes: Some(bytes),
path: None,
@ -110,8 +110,8 @@ impl RonFrameWriter {
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
assert_eq!(data.width, img.descriptor.width);
assert_eq!(data.height, img.descriptor.height);
assert_eq!(data.width, img.descriptor.size.width);
assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {

View File

@ -144,7 +144,7 @@ impl WrenchThing for CapturedDocument {
pub struct Wrench {
window_size: DeviceUintSize,
device_pixel_ratio: f32,
pub device_pixel_ratio: f32,
page_zoom_factor: ZoomFactor,
pub renderer: webrender::Renderer,

View File

@ -516,7 +516,7 @@ impl YamlFrameReader {
wrench.api.update_resources(resources);
let val = (
image_key,
LayoutSize::new(descriptor.width as f32, descriptor.height as f32),
LayoutSize::new(descriptor.size.width as f32, descriptor.size.height as f32),
);
self.image_map.insert(key, val);
val
@ -1243,7 +1243,8 @@ impl YamlFrameReader {
) {
info.rect = item["bounds"].as_rect().expect("iframe must have bounds");
let pipeline_id = item["id"].as_pipeline_id().unwrap();
dl.push_iframe(&info, pipeline_id);
let ignore = item["ignore_missing_pipeline"].as_bool().unwrap_or(true);
dl.push_iframe(&info, pipeline_id, ignore);
}
pub fn get_complex_clip_for_item(&mut self, yaml: &Yaml) -> Option<ComplexClipRegion> {

View File

@ -514,7 +514,7 @@ impl YamlFrameWriter {
}
let stride = img.descriptor.stride.unwrap_or(
img.descriptor.width * img.descriptor.format.bytes_per_pixel(),
img.descriptor.size.width * img.descriptor.format.bytes_per_pixel(),
);
let bytes = match img.data {
ImageData::Raw(ref v) => (**v).clone(),
@ -525,8 +525,8 @@ impl YamlFrameWriter {
self.images.insert(
img.key,
CachedImage {
width: img.descriptor.width,
height: img.descriptor.height,
width: img.descriptor.size.width,
height: img.descriptor.size.height,
stride,
format: img.descriptor.format,
bytes: Some(bytes),
@ -537,8 +537,8 @@ impl YamlFrameWriter {
}
ResourceUpdate::UpdateImage(ref img) => {
if let Some(ref mut data) = self.images.get_mut(&img.key) {
assert_eq!(data.width, img.descriptor.width);
assert_eq!(data.height, img.descriptor.height);
assert_eq!(data.width, img.descriptor.size.width);
assert_eq!(data.height, img.descriptor.size.height);
assert_eq!(data.format, img.descriptor.format);
if let ImageData::Raw(ref bytes) = img.data {
@ -1013,6 +1013,7 @@ impl YamlFrameWriter {
Iframe(item) => {
str_node(&mut v, "type", "iframe");
u32_vec_node(&mut v, "id", &[item.pipeline_id.0, item.pipeline_id.1]);
bool_node(&mut v, "ignore_missing_pipeline", item.ignore_missing_pipeline);
}
PushStackingContext(item) => {
str_node(&mut v, "type", "stacking-context");