Bug 1343019 - Update webrender to cset 178a65d098afcda7de0298d80d423bc80c6426ba. r=jrmuizel

In addition to updating webrender and the third-party deps, this includes:
- Marking some more reftests as passing with the new version of webrender.
- Updating webrender bindings code to go with changes to push_stacking_context.
- Passing the window dimensions to the Renderer::new function.

MozReview-Commit-ID: 6dRHvAmIQ40

--HG--
rename : third_party/rust/servo-dwrote/.gitignore => third_party/rust/dwrote/.gitignore
rename : third_party/rust/servo-dwrote/README.md => third_party/rust/dwrote/README.md
rename : third_party/rust/servo-dwrote/src/bitmap_render_target.rs => third_party/rust/dwrote/src/bitmap_render_target.rs
rename : third_party/rust/servo-dwrote/src/com_helpers.rs => third_party/rust/dwrote/src/com_helpers.rs
rename : third_party/rust/servo-dwrote/src/comptr.rs => third_party/rust/dwrote/src/comptr.rs
rename : third_party/rust/servo-dwrote/src/font.rs => third_party/rust/dwrote/src/font.rs
rename : third_party/rust/servo-dwrote/src/font_collection.rs => third_party/rust/dwrote/src/font_collection.rs
rename : third_party/rust/servo-dwrote/src/font_face.rs => third_party/rust/dwrote/src/font_face.rs
rename : third_party/rust/servo-dwrote/src/font_family.rs => third_party/rust/dwrote/src/font_family.rs
rename : third_party/rust/servo-dwrote/src/font_file.rs => third_party/rust/dwrote/src/font_file.rs
rename : third_party/rust/servo-dwrote/src/font_file_loader_impl.rs => third_party/rust/dwrote/src/font_file_loader_impl.rs
rename : third_party/rust/servo-dwrote/src/gdi_interop.rs => third_party/rust/dwrote/src/gdi_interop.rs
rename : third_party/rust/servo-dwrote/src/glyph_run_analysis.rs => third_party/rust/dwrote/src/glyph_run_analysis.rs
rename : third_party/rust/servo-dwrote/src/helpers.rs => third_party/rust/dwrote/src/helpers.rs
rename : third_party/rust/servo-dwrote/src/rendering_params.rs => third_party/rust/dwrote/src/rendering_params.rs
rename : third_party/rust/servo-dwrote/src/test.rs => third_party/rust/dwrote/src/test.rs
rename : third_party/rust/servo-dwrote/src/types.rs => third_party/rust/dwrote/src/types.rs
This commit is contained in:
Kartikaya Gupta 2017-03-06 18:46:30 -05:00
parent d1c0646e94
commit cc2056e901
130 changed files with 4556 additions and 13132 deletions

View File

@ -79,4 +79,4 @@ to make sure that mozjs_sys also has its Cargo.lock file updated if needed, henc
the need to run the cargo update command in js/src as well. Hopefully this will
be resolved soon.
Latest Commit: 501e3d79c8a3019762bd8bd2d00eecf7811a84de
Latest Commit: 178a65d098afcda7de0298d80d423bc80c6426ba

View File

@ -1130,7 +1130,10 @@ CompositorBridgeChild::HandleFatalError(const char* aName, const char* aMsg) con
}
PWebRenderBridgeChild*
CompositorBridgeChild::AllocPWebRenderBridgeChild(const wr::PipelineId& aPipelineId, TextureFactoryIdentifier*, uint32_t *aIdNamespace)
CompositorBridgeChild::AllocPWebRenderBridgeChild(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize&,
TextureFactoryIdentifier*,
uint32_t *aIdNamespace)
{
WebRenderBridgeChild* child = new WebRenderBridgeChild(aPipelineId);
child->AddIPDLReference();

View File

@ -223,7 +223,9 @@ public:
void WillEndTransaction();
PWebRenderBridgeChild* AllocPWebRenderBridgeChild(const wr::PipelineId& aPipelineId, TextureFactoryIdentifier*,
PWebRenderBridgeChild* AllocPWebRenderBridgeChild(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize&,
TextureFactoryIdentifier*,
uint32_t*) override;
bool DeallocPWebRenderBridgeChild(PWebRenderBridgeChild* aActor) override;

View File

@ -1572,6 +1572,7 @@ CompositorBridgeParent::RecvAdoptChild(const uint64_t& child)
PWebRenderBridgeParent*
CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize& aSize,
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t* aIdNamespace)
{
@ -1588,7 +1589,8 @@ CompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipel
MOZ_ASSERT(mWidget);
RefPtr<widget::CompositorWidget> widget = mWidget;
RefPtr<wr::WebRenderAPI> api = wr::WebRenderAPI::Create(gfxPrefs::WebRenderProfilerEnabled(), this, Move(widget));
RefPtr<wr::WebRenderAPI> api = wr::WebRenderAPI::Create(
gfxPrefs::WebRenderProfilerEnabled(), this, Move(widget), aSize);
RefPtr<WebRenderCompositableHolder> holder = new WebRenderCompositableHolder();
MOZ_ASSERT(api); // TODO have a fallback
api->SetRootPipeline(aPipelineId);

View File

@ -452,6 +452,7 @@ public:
}
PWebRenderBridgeParent* AllocPWebRenderBridgeParent(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize& aSize,
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t* aIdNamespace) override;
bool DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor) override;

View File

@ -198,6 +198,7 @@ CrossProcessCompositorBridgeParent::DeallocPAPZParent(PAPZParent* aActor)
PWebRenderBridgeParent*
CrossProcessCompositorBridgeParent::AllocPWebRenderBridgeParent(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize& aSize,
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t *aIdNamespace)
{

View File

@ -152,6 +152,7 @@ public:
virtual void UpdatePaintTime(LayerTransactionParent* aLayerTree, const TimeDuration& aPaintTime) override;
PWebRenderBridgeParent* AllocPWebRenderBridgeParent(const wr::PipelineId& aPipelineId,
const LayoutDeviceIntSize& aSize,
TextureFactoryIdentifier* aTextureFactoryIdentifier,
uint32_t* aIdNamespace) override;
bool DeallocPWebRenderBridgeParent(PWebRenderBridgeParent* aActor) override;

View File

@ -30,6 +30,7 @@ using mozilla::ipc::SharedMemoryBasic::Handle from "mozilla/ipc/SharedMemoryBasi
using mozilla::CSSIntRegion from "Units.h";
using mozilla::LayoutDeviceIntPoint from "Units.h";
using mozilla::LayoutDeviceIntRegion from "Units.h";
using mozilla::LayoutDeviceIntSize from "Units.h";
using class mozilla::TimeStamp from "mozilla/TimeStamp.h";
using class mozilla::layers::FrameUniformityData from "mozilla/layers/FrameUniformityData.h";
using mozilla::layers::TextureFlags from "mozilla/layers/CompositorTypes.h";
@ -242,7 +243,7 @@ parent:
sync SyncWithCompositor();
// The pipelineId is the same as the layersId
sync PWebRenderBridge(PipelineId pipelineId)
sync PWebRenderBridge(PipelineId pipelineId, LayoutDeviceIntSize aSize)
returns (TextureFactoryIdentifier textureFactoryIdentifier, uint32_t idNamespace); //XXX: use the WrIdNamespace type
child:

View File

@ -218,14 +218,15 @@ WebRenderLayerManager::Initialize(PCompositorBridgeChild* aCBChild,
MOZ_ASSERT(mWrChild == nullptr);
MOZ_ASSERT(aTextureFactoryIdentifier);
LayoutDeviceIntSize size = mWidget->GetClientSize();
TextureFactoryIdentifier textureFactoryIdentifier;
uint32_t id_namespace;
PWebRenderBridgeChild* bridge = aCBChild->SendPWebRenderBridgeConstructor(aLayersId,
size,
&textureFactoryIdentifier,
&id_namespace);
MOZ_ASSERT(bridge);
mWrChild = static_cast<WebRenderBridgeChild*>(bridge);
LayoutDeviceIntSize size = mWidget->GetClientSize();
WrBridge()->SendCreate(size.ToUnknownSize());
WrBridge()->IdentifyTextureHost(textureFactoryIdentifier);
WrBridge()->SetNamespace(id_namespace);

View File

@ -1,16 +1,14 @@
[package]
name = "webrender"
version = "0.19.0"
version = "0.22.1"
authors = ["Glenn Watson <gw@intuitionlibrary.com>"]
license = "MPL-2.0"
repository = "https://github.com/servo/webrender"
build = "build.rs"
[features]
default = ["codegen", "freetype-lib"]
codegen = ["webrender_traits/codegen"]
default = ["freetype-lib"]
freetype-lib = ["freetype/servo-freetype-sys"]
serde_derive = ["webrender_traits/serde_derive"]
profiler = ["thread_profiler/thread_profiler"]
[dependencies]
@ -19,7 +17,7 @@ bincode = "1.0.0-alpha2"
bit-set = "0.4"
byteorder = "1.0"
euclid = "0.11"
fnv="1.0"
fnv = "1.0"
gleam = "0.2.30"
lazy_static = "0.2"
log = "0.3"
@ -27,7 +25,7 @@ num-traits = "0.1.32"
offscreen_gl_context = {version = "0.6", features = ["serde", "osmesa"]}
time = "0.1"
threadpool = "1.3.2"
webrender_traits = {path = "../webrender_traits", default-features = false}
webrender_traits = {path = "../webrender_traits"}
bitflags = "0.7"
gamma-lut = "0.1"
thread_profiler = "0.1.1"
@ -39,7 +37,7 @@ angle = {git = "https://github.com/servo/angle", branch = "servo"}
freetype = { version = "0.2", default-features = false }
[target.'cfg(target_os = "windows")'.dependencies]
servo-dwrote = "0.2"
dwrote = "0.3"
[target.'cfg(target_os = "macos")'.dependencies]
core-graphics = "0.7.0"

View File

@ -151,6 +151,22 @@ vec3 Luminosity(vec3 Cb, vec3 Cs) {
return SetLum(Cb, Lum(Cs));
}
const int MixBlendMode_Multiply = 1;
const int MixBlendMode_Screen = 2;
const int MixBlendMode_Overlay = 3;
const int MixBlendMode_Darken = 4;
const int MixBlendMode_Lighten = 5;
const int MixBlendMode_ColorDodge = 6;
const int MixBlendMode_ColorBurn = 7;
const int MixBlendMode_HardLight = 8;
const int MixBlendMode_SoftLight = 9;
const int MixBlendMode_Difference = 10;
const int MixBlendMode_Exclusion = 11;
const int MixBlendMode_Hue = 12;
const int MixBlendMode_Saturation = 13;
const int MixBlendMode_Color = 14;
const int MixBlendMode_Luminosity = 15;
void main(void) {
vec4 Cb = texture(sCache, vUv0);
vec4 Cs = texture(sCache, vUv1);
@ -159,60 +175,62 @@ void main(void) {
vec4 result = vec4(1.0, 1.0, 0.0, 1.0);
switch (vOp) {
case 1:
case MixBlendMode_Multiply:
result.rgb = Multiply(Cb.rgb, Cs.rgb);
break;
case 2:
case MixBlendMode_Screen:
result.rgb = Screen(Cb.rgb, Cs.rgb);
break;
case 3:
result.rgb = HardLight(Cs.rgb, Cb.rgb); // Overlay is inverse of Hardlight
case MixBlendMode_Overlay:
// Overlay is inverse of Hardlight
result.rgb = HardLight(Cs.rgb, Cb.rgb);
break;
case 4:
// mix-blend-mode: darken
case MixBlendMode_Darken:
result.rgb = min(Cs.rgb, Cb.rgb);
break;
case 5:
// mix-blend-mode: lighten
case MixBlendMode_Lighten:
result.rgb = max(Cs.rgb, Cb.rgb);
break;
case 6:
case MixBlendMode_ColorDodge:
result.r = ColorDodge(Cb.r, Cs.r);
result.g = ColorDodge(Cb.g, Cs.g);
result.b = ColorDodge(Cb.b, Cs.b);
break;
case 7:
case MixBlendMode_ColorBurn:
result.r = ColorBurn(Cb.r, Cs.r);
result.g = ColorBurn(Cb.g, Cs.g);
result.b = ColorBurn(Cb.b, Cs.b);
break;
case 8:
case MixBlendMode_HardLight:
result.rgb = HardLight(Cb.rgb, Cs.rgb);
break;
case 9:
case MixBlendMode_SoftLight:
result.r = SoftLight(Cb.r, Cs.r);
result.g = SoftLight(Cb.g, Cs.g);
result.b = SoftLight(Cb.b, Cs.b);
break;
case 10:
case MixBlendMode_Difference:
result.rgb = Difference(Cb.rgb, Cs.rgb);
break;
case 11:
case MixBlendMode_Exclusion:
result.rgb = Exclusion(Cb.rgb, Cs.rgb);
break;
case 12:
case MixBlendMode_Hue:
result.rgb = Hue(Cb.rgb, Cs.rgb);
break;
case 13:
case MixBlendMode_Saturation:
result.rgb = Saturation(Cb.rgb, Cs.rgb);
break;
case 14:
case MixBlendMode_Color:
result.rgb = Color(Cb.rgb, Cs.rgb);
break;
case 15:
case MixBlendMode_Luminosity:
result.rgb = Luminosity(Cb.rgb, Cs.rgb);
break;
}
result.rgb = (1.0 - Cb.a) * Cs.rgb + Cb.a * result.rgb;
result.a = Cs.a;
oFragColor = result;
}

View File

@ -19,8 +19,8 @@ void main(void) {
vec2 texture_size = vec2(textureSize(sCache, 0));
vec2 st0 = (backdrop_task.render_target_origin + vec2(0.0, backdrop_task.size.y)) / texture_size;
vec2 st1 = (backdrop_task.render_target_origin + vec2(backdrop_task.size.x, 0.0)) / texture_size;
vec2 st0 = backdrop_task.render_target_origin / texture_size;
vec2 st1 = (backdrop_task.render_target_origin + backdrop_task.size) / texture_size;
vUv0 = vec3(mix(st0, st1, aPosition.xy), backdrop_task.render_target_layer_index);
st0 = src_task.render_target_origin / texture_size;

View File

@ -5,10 +5,9 @@
use euclid::Point3D;
use geometry::ray_intersects_rect;
use spring::{DAMPING, STIFFNESS, Spring};
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform};
use webrender_traits::{LayerPixel, LayerPoint, LayerRect, LayerSize, LayerToScrollTransform};
use webrender_traits::{LayerToWorldTransform, PipelineId, ScrollEventPhase, ScrollLayerId};
use webrender_traits::{ScrollLayerRect, ScrollLocation, ScrollToWorldTransform, WorldPoint};
use webrender_traits::{WorldPoint4D};
use webrender_traits::{ScrollLayerRect, ScrollLocation, WorldPoint, WorldPoint4D};
#[cfg(target_os = "macos")]
const CAN_OVERSCROLL: bool = true;
@ -16,6 +15,15 @@ const CAN_OVERSCROLL: bool = true;
#[cfg(not(target_os = "macos"))]
const CAN_OVERSCROLL: bool = false;
#[derive(Clone)]
pub enum NodeType {
/// Transform for this layer, relative to parent reference frame. A reference
/// frame establishes a new coordinate space in the tree.
ReferenceFrame(LayerToScrollTransform),
/// Other nodes just do clipping, but no transformation.
ClipRect,
}
/// Contains scrolling and transform information stacking contexts.
#[derive(Clone)]
@ -26,44 +34,72 @@ pub struct ClipScrollNode {
/// Size of the content inside the scroll region (in logical pixels)
pub content_size: LayerSize,
/// Viewing rectangle
/// Viewing rectangle in the coordinate system of the parent reference frame.
pub local_viewport_rect: LayerRect,
/// Viewing rectangle clipped against parent layer(s)
/// Clip rect of this node - typically the same as viewport rect, except
/// in overscroll cases.
pub local_clip_rect: LayerRect,
/// Viewport rectangle clipped against parent layer(s) viewport rectangles.
/// This is in the coordinate system of the parent reference frame.
pub combined_local_viewport_rect: LayerRect,
/// World transform for the viewport rect itself.
/// World transform for the viewport rect itself. This is the parent
/// reference frame transformation plus the scrolling offsets provided by
/// the nodes in between the reference frame and this node.
pub world_viewport_transform: LayerToWorldTransform,
/// World transform for content within this layer
/// World transform for content transformed by this node.
pub world_content_transform: LayerToWorldTransform,
/// Transform for this layer, relative to parent scrollable layer.
pub local_transform: LayerToScrollTransform,
/// Pipeline that this layer belongs to
pub pipeline_id: PipelineId,
/// Child layers
pub children: Vec<ScrollLayerId>,
/// Whether or not this node is a reference frame.
pub node_type: NodeType,
}
impl ClipScrollNode {
pub fn new(local_viewport_rect: &LayerRect,
local_clip_rect: &LayerRect,
content_size: LayerSize,
local_transform: &LayerToScrollTransform,
pipeline_id: PipelineId)
-> ClipScrollNode {
ClipScrollNode {
scrolling: ScrollingState::new(),
content_size: content_size,
local_viewport_rect: *local_viewport_rect,
combined_local_viewport_rect: *local_viewport_rect,
local_clip_rect: *local_clip_rect,
combined_local_viewport_rect: *local_clip_rect,
world_viewport_transform: LayerToWorldTransform::identity(),
world_content_transform: LayerToWorldTransform::identity(),
local_transform: *local_transform,
children: Vec::new(),
pipeline_id: pipeline_id,
node_type: NodeType::ClipRect,
}
}
pub fn new_reference_frame(local_viewport_rect: &LayerRect,
local_clip_rect: &LayerRect,
content_size: LayerSize,
local_transform: &LayerToScrollTransform,
pipeline_id: PipelineId)
-> ClipScrollNode {
ClipScrollNode {
scrolling: ScrollingState::new(),
content_size: content_size,
local_viewport_rect: *local_viewport_rect,
local_clip_rect: *local_clip_rect,
combined_local_viewport_rect: *local_clip_rect,
world_viewport_transform: LayerToWorldTransform::identity(),
world_content_transform: LayerToWorldTransform::identity(),
children: Vec::new(),
pipeline_id: pipeline_id,
node_type: NodeType::ReferenceFrame(*local_transform),
}
}
@ -117,9 +153,16 @@ impl ClipScrollNode {
}
pub fn update_transform(&mut self,
parent_world_transform: &ScrollToWorldTransform,
parent_viewport_rect: &ScrollLayerRect) {
let inv_transform = match self.local_transform.inverse() {
parent_reference_frame_transform: &LayerToWorldTransform,
parent_combined_viewport_rect: &ScrollLayerRect,
parent_accumulated_scroll_offset: LayerPoint) {
let local_transform = match self.node_type {
NodeType::ReferenceFrame(transform) => transform,
NodeType::ClipRect => LayerToScrollTransform::identity(),
};
let inv_transform = match local_transform.inverse() {
Some(transform) => transform,
None => {
// If a transform function causes the current transformation matrix of an object
@ -129,18 +172,40 @@ impl ClipScrollNode {
}
};
let parent_viewport_rect_in_local_space = inv_transform.transform_rect(parent_viewport_rect)
.translate(&-self.scrolling.offset);
let local_viewport_rect = self.local_viewport_rect.translate(&-self.scrolling.offset);
let viewport_rect = parent_viewport_rect_in_local_space.intersection(&local_viewport_rect)
.unwrap_or(LayerRect::zero());
// We are trying to move the combined viewport rectangle of our parent nodes into the
// coordinate system of this node, so we must invert our transformation (only for
// reference frames) and then apply the scroll offset of all the parent layers.
let parent_combined_viewport_in_local_space =
inv_transform.pre_translated(-parent_accumulated_scroll_offset.x,
-parent_accumulated_scroll_offset.y,
0.0)
.transform_rect(parent_combined_viewport_rect);
self.combined_local_viewport_rect = viewport_rect;
self.world_viewport_transform = parent_world_transform.pre_mul(&self.local_transform);
self.world_content_transform = self.world_viewport_transform
.pre_translated(self.scrolling.offset.x,
self.scrolling.offset.y,
0.0);
// Now that we have the combined viewport rectangle of the parent nodes in local space,
// we do the intersection and get our combined viewport rect in the coordinate system
// starting from our origin.
self.combined_local_viewport_rect =
parent_combined_viewport_in_local_space.intersection(&self.local_clip_rect)
.unwrap_or(LayerRect::zero());
// The transformation for this viewport in world coordinates is the transformation for
// our parent reference frame, plus any accumulated scrolling offsets from nodes
// between our reference frame and this node. For reference frames, we also include
// whatever local transformation this reference frame provides. This can be combined
// with the local_viewport_rect to get its position in world space.
self.world_viewport_transform =
parent_reference_frame_transform
.pre_translated(parent_accumulated_scroll_offset.x,
parent_accumulated_scroll_offset.y,
0.0)
.pre_mul(&local_transform.with_destination::<LayerPixel>());
// The transformation for any content inside of us is the viewport transformation, plus
// whatever scrolling offset we supply as well.
self.world_content_transform =
self.world_viewport_transform.pre_translated(self.scrolling.offset.x,
self.scrolling.offset.y,
0.0);
}
pub fn scrollable_height(&self) -> f32 {

View File

@ -2,13 +2,13 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
use clip_scroll_node::{ClipScrollNode, NodeType, ScrollingState};
use fnv::FnvHasher;
use clip_scroll_node::{ClipScrollNode, ScrollingState};
use std::collections::{HashMap, HashSet};
use std::hash::BuildHasherDefault;
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform, PipelineId};
use webrender_traits::{ScrollEventPhase, ScrollLayerId, ScrollLayerInfo, ScrollLayerPixel};
use webrender_traits::{ScrollLayerRect, ScrollLayerState, ScrollLocation, ScrollToWorldTransform};
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform};
use webrender_traits::{LayerToWorldTransform, PipelineId, ScrollEventPhase, ScrollLayerId};
use webrender_traits::{ScrollLayerInfo, ScrollLayerRect, ScrollLayerState, ScrollLocation};
use webrender_traits::{ServoScrollRootId, WorldPoint, as_scroll_parent_rect};
pub type ScrollStates = HashMap<ScrollLayerId, ScrollingState, BuildHasherDefault<FnvHasher>>;
@ -70,18 +70,25 @@ impl ClipScrollTree {
pub fn establish_root(&mut self,
pipeline_id: PipelineId,
viewport_size: &LayerSize,
viewport_offset: LayerPoint,
clip_size: LayerSize,
content_size: &LayerSize) {
debug_assert!(self.nodes.is_empty());
let identity = LayerToScrollTransform::identity();
let transform = LayerToScrollTransform::create_translation(viewport_offset.x, viewport_offset.y, 0.0);
let viewport = LayerRect::new(LayerPoint::zero(), *viewport_size);
let clip = LayerRect::new(LayerPoint::new(-viewport_offset.x, -viewport_offset.y),
LayerSize::new(clip_size.width, clip_size.height));
let root_reference_frame_id = ScrollLayerId::root_reference_frame(pipeline_id);
self.root_reference_frame_id = root_reference_frame_id;
let reference_frame = ClipScrollNode::new(&viewport, viewport.size, &identity, pipeline_id);
let reference_frame = ClipScrollNode::new_reference_frame(&viewport,
&clip,
viewport.size,
&transform,
pipeline_id);
self.nodes.insert(self.root_reference_frame_id, reference_frame);
let scroll_node = ClipScrollNode::new(&viewport, *content_size, &identity, pipeline_id);
let scroll_node = ClipScrollNode::new(&viewport, &clip, *content_size, pipeline_id);
let topmost_scroll_layer_id = ScrollLayerId::root_scroll_layer(pipeline_id);
self.topmost_scroll_layer_id = topmost_scroll_layer_id;
self.add_node(scroll_node, topmost_scroll_layer_id, root_reference_frame_id);
@ -294,25 +301,43 @@ impl ClipScrollTree {
}
let root_reference_frame_id = self.root_reference_frame_id();
let root_viewport = self.nodes[&root_reference_frame_id].local_viewport_rect;
let root_viewport = self.nodes[&root_reference_frame_id].local_clip_rect;
self.update_node_transform(root_reference_frame_id,
&ScrollToWorldTransform::identity(),
&as_scroll_parent_rect(&root_viewport));
&LayerToWorldTransform::identity(),
&as_scroll_parent_rect(&root_viewport),
LayerPoint::zero());
}
fn update_node_transform(&mut self,
layer_id: ScrollLayerId,
parent_world_transform: &ScrollToWorldTransform,
parent_viewport_rect: &ScrollLayerRect) {
parent_reference_frame_transform: &LayerToWorldTransform,
parent_viewport_rect: &ScrollLayerRect,
parent_accumulated_scroll_offset: LayerPoint) {
// TODO(gw): This is an ugly borrow check workaround to clone these.
// Restructure this to avoid the clones!
let (node_transform_for_children, viewport_rect, node_children) = {
let (reference_frame_transform, viewport_rect, accumulated_scroll_offset, node_children) = {
match self.nodes.get_mut(&layer_id) {
Some(node) => {
node.update_transform(parent_world_transform, parent_viewport_rect);
node.update_transform(parent_reference_frame_transform,
parent_viewport_rect,
parent_accumulated_scroll_offset);
(node.world_content_transform.with_source::<ScrollLayerPixel>(),
// The transformation we are passing is the transformation of the parent
// reference frame and the offset is the accumulated offset of all the nodes
// between us and the parent reference frame. If we are a reference frame,
// we need to reset both these values.
let (transform, offset) = match node.node_type {
NodeType::ReferenceFrame(..) =>
(node.world_viewport_transform, LayerPoint::zero()),
NodeType::ClipRect => {
(*parent_reference_frame_transform,
parent_accumulated_scroll_offset + node.scrolling.offset)
}
};
(transform,
as_scroll_parent_rect(&node.combined_local_viewport_rect),
offset,
node.children.clone())
}
None => return,
@ -321,8 +346,9 @@ impl ClipScrollTree {
for child_layer_id in node_children {
self.update_node_transform(child_layer_id,
&node_transform_for_children,
&viewport_rect);
&reference_frame_transform,
&viewport_rect,
accumulated_scroll_offset);
}
}
@ -369,7 +395,7 @@ impl ClipScrollTree {
};
self.current_reference_frame_id += 1;
let node = ClipScrollNode::new(&rect, rect.size, &transform, pipeline_id);
let node = ClipScrollNode::new_reference_frame(&rect, &rect, rect.size, &transform, pipeline_id);
self.add_node(node, reference_frame_id, parent_id);
reference_frame_id
}

View File

@ -1886,6 +1886,30 @@ impl Device {
}
}
pub fn clear_target_rect(&self,
color: Option<[f32; 4]>,
depth: Option<f32>,
rect: DeviceIntRect) {
let mut clear_bits = 0;
if let Some(color) = color {
gl::clear_color(color[0], color[1], color[2], color[3]);
clear_bits |= gl::COLOR_BUFFER_BIT;
}
if let Some(depth) = depth {
gl::clear_depth(depth as f64);
clear_bits |= gl::DEPTH_BUFFER_BIT;
}
if clear_bits != 0 {
gl::enable(gl::SCISSOR_TEST);
gl::scissor(rect.origin.x, rect.origin.y, rect.size.width, rect.size.height);
gl::clear(clear_bits);
gl::disable(gl::SCISSOR_TEST);
}
}
pub fn enable_depth(&self) {
gl::enable(gl::DEPTH_TEST);
}

View File

@ -20,7 +20,7 @@ use webrender_traits::{AuxiliaryLists, ClipRegion, ColorF, DisplayItem, Epoch, F
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform, LayoutTransform, TileOffset};
use webrender_traits::{MixBlendMode, PipelineId, ScrollEventPhase, ScrollLayerId, ScrollLayerState};
use webrender_traits::{ScrollLocation, ScrollPolicy, ServoScrollRootId, SpecificDisplayItem};
use webrender_traits::{StackingContext, WorldPoint, ImageDisplayItem, DeviceUintSize};
use webrender_traits::{StackingContext, WorldPoint, ImageDisplayItem, DeviceUintRect, DeviceUintSize};
#[derive(Copy, Clone, PartialEq, PartialOrd, Debug)]
pub struct FrameId(pub u32);
@ -228,7 +228,12 @@ impl Frame {
self.clip_scroll_tree.discard_frame_state_for_pipeline(pipeline_id);
}
pub fn create(&mut self, scene: &Scene, resource_cache: &mut ResourceCache) {
pub fn create(&mut self,
scene: &Scene,
resource_cache: &mut ResourceCache,
window_size: DeviceUintSize,
inner_rect: DeviceUintRect,
device_pixel_ratio: f32) {
let root_pipeline_id = match scene.root_pipeline_id {
Some(root_pipeline_id) => root_pipeline_id,
None => return,
@ -245,6 +250,11 @@ impl Frame {
None => return,
};
if window_size.width == 0 || window_size.height == 0 {
println!("ERROR: Invalid window dimensions! Please call api.set_window_size()");
return;
}
let old_scrolling_states = self.reset();
self.pipeline_auxiliary_lists = scene.pipeline_auxiliary_lists.clone();
@ -258,8 +268,19 @@ impl Frame {
}
};
let inner_origin = inner_rect.origin.to_f32();
let viewport_offset = LayerPoint::new((inner_origin.x / device_pixel_ratio).round(),
(inner_origin.y / device_pixel_ratio).round());
let outer_size = window_size.to_f32();
let outer_size = LayerSize::new((outer_size.width / device_pixel_ratio).round(),
(outer_size.height / device_pixel_ratio).round());
let clip_size = LayerSize::new(outer_size.width + 2.0 * viewport_offset.x,
outer_size.height + 2.0 * viewport_offset.y);
self.clip_scroll_tree.establish_root(root_pipeline_id,
&root_pipeline.viewport_size,
viewport_offset,
clip_size,
&root_clip.main.size);
let background_color = root_pipeline.background_color.and_then(|color| {
@ -270,7 +291,7 @@ impl Frame {
}
});
let mut frame_builder = FrameBuilder::new(root_pipeline.viewport_size,
let mut frame_builder = FrameBuilder::new(window_size,
background_color,
self.frame_builder_config);
@ -288,21 +309,15 @@ impl Frame {
let viewport_rect = LayerRect::new(LayerPoint::zero(), root_pipeline.viewport_size);
let clip = ClipRegion::simple(&viewport_rect);
context.builder.push_clip_scroll_node(reference_frame_id,
&clip,
&LayerPoint::zero(),
&root_pipeline.viewport_size);
context.builder.push_clip_scroll_node(topmost_scroll_layer_id,
&clip,
&LayerPoint::zero(),
&root_clip.main.size);
context.builder.push_clip_scroll_node(reference_frame_id, &clip);
context.builder.push_clip_scroll_node(topmost_scroll_layer_id, &clip);
self.flatten_stacking_context(&mut traversal,
root_pipeline_id,
&mut context,
reference_frame_id,
topmost_scroll_layer_id,
LayerToScrollTransform::identity(),
LayerPoint::zero(),
0,
&root_stacking_context,
root_clip);
@ -321,7 +336,7 @@ impl Frame {
context: &mut FlattenContext,
current_reference_frame_id: ScrollLayerId,
parent_scroll_layer_id: ScrollLayerId,
layer_relative_transform: LayerToScrollTransform,
reference_frame_relative_offset: LayerPoint,
level: i32,
clip: &ClipRegion,
content_size: &LayerSize,
@ -332,23 +347,17 @@ impl Frame {
return;
}
let clip_rect = clip.main;
let node = ClipScrollNode::new(&clip_rect,
*content_size,
&layer_relative_transform,
pipeline_id);
let clip_rect = clip.main.translate(&reference_frame_relative_offset);
let node = ClipScrollNode::new(&clip_rect, &clip_rect, *content_size, pipeline_id);
self.clip_scroll_tree.add_node(node, new_scroll_layer_id, parent_scroll_layer_id);
context.builder.push_clip_scroll_node(new_scroll_layer_id,
clip,
&clip_rect.origin,
&content_size);
context.builder.push_clip_scroll_node(new_scroll_layer_id, clip);
self.flatten_items(traversal,
pipeline_id,
context,
current_reference_frame_id,
new_scroll_layer_id,
LayerToScrollTransform::identity(),
reference_frame_relative_offset,
level);
context.builder.pop_clip_scroll_node();
@ -360,7 +369,7 @@ impl Frame {
context: &mut FlattenContext,
current_reference_frame_id: ScrollLayerId,
current_scroll_layer_id: ScrollLayerId,
layer_relative_transform: LayerToScrollTransform,
mut reference_frame_relative_offset: LayerPoint,
level: i32,
stacking_context: &StackingContext,
clip_region: &ClipRegion) {
@ -384,17 +393,6 @@ impl Frame {
return;
}
let stacking_context_transform = context.scene
.properties
.resolve_layout_transform(&stacking_context.transform);
let mut transform =
layer_relative_transform.pre_translated(stacking_context.bounds.origin.x,
stacking_context.bounds.origin.y,
0.0)
.pre_mul(&stacking_context_transform)
.pre_mul(&stacking_context.perspective);
let mut reference_frame_id = current_reference_frame_id;
let mut scroll_layer_id = match stacking_context.scroll_policy {
ScrollPolicy::Fixed => current_reference_frame_id,
@ -403,32 +401,48 @@ impl Frame {
// If we have a transformation, we establish a new reference frame. This means
// that fixed position stacking contexts are positioned relative to us.
if stacking_context_transform != LayoutTransform::identity() ||
stacking_context.perspective != LayoutTransform::identity() {
if stacking_context.transform.is_some() || stacking_context.perspective.is_some() {
let transform = stacking_context.transform.as_ref();
let transform = context.scene.properties.resolve_layout_transform(transform);
let perspective =
stacking_context.perspective.unwrap_or_else(LayoutTransform::identity);
let transform =
LayerToScrollTransform::create_translation(reference_frame_relative_offset.x,
reference_frame_relative_offset.y,
0.0)
.pre_translated(stacking_context.bounds.origin.x,
stacking_context.bounds.origin.y,
0.0)
.pre_mul(&transform)
.pre_mul(&perspective);
scroll_layer_id = self.clip_scroll_tree.add_reference_frame(clip_region.main,
transform,
pipeline_id,
scroll_layer_id);
reference_frame_id = scroll_layer_id;
transform = LayerToScrollTransform::identity();
reference_frame_relative_offset = LayerPoint::zero();
} else {
reference_frame_relative_offset = LayerPoint::new(
reference_frame_relative_offset.x + stacking_context.bounds.origin.x,
reference_frame_relative_offset.y + stacking_context.bounds.origin.y);
}
if level == 0 {
if let Some(pipeline) = context.scene.pipeline_map.get(&pipeline_id) {
if let Some(bg_color) = pipeline.background_color {
// Adding a dummy layer for this rectangle in order to disable clipping.
let no_clip = ClipRegion::simple(&clip_region.main);
context.builder.push_stacking_context(clip_region.main,
transform,
context.builder.push_stacking_context(reference_frame_relative_offset,
clip_region.main,
pipeline_id,
scroll_layer_id,
false,
CompositeOps::empty());
//Note: we don't use the original clip region here,
// Note: we don't use the original clip region here,
// it's already processed by the node we just pushed.
context.builder.add_solid_rectangle(&clip_region.main,
&no_clip,
let background_rect = LayerRect::new(LayerPoint::zero(), clip_region.main.size);
context.builder.add_solid_rectangle(scroll_layer_id,
&clip_region.main,
&ClipRegion::simple(&background_rect),
&bg_color,
PrimitiveFlags::None);
@ -438,10 +452,10 @@ impl Frame {
}
// TODO(gw): Int with overflow etc
context.builder.push_stacking_context(clip_region.main,
transform,
context.builder.push_stacking_context(reference_frame_relative_offset,
clip_region.main,
pipeline_id,
scroll_layer_id,
level == 0,
composition_operations);
self.flatten_items(traversal,
@ -449,12 +463,13 @@ impl Frame {
context,
reference_frame_id,
scroll_layer_id,
transform,
reference_frame_relative_offset,
level);
if level == 0 && self.frame_builder_config.enable_scrollbars {
let scrollbar_rect = LayerRect::new(LayerPoint::zero(), LayerSize::new(10.0, 70.0));
context.builder.add_solid_rectangle(
scroll_layer_id,
&scrollbar_rect,
&ClipRegion::simple(&scrollbar_rect),
&DEFAULT_SCROLLBAR_COLOR,
@ -469,7 +484,7 @@ impl Frame {
bounds: &LayerRect,
context: &mut FlattenContext,
current_scroll_layer_id: ScrollLayerId,
layer_relative_transform: LayerToScrollTransform) {
reference_frame_relative_offset: LayerPoint) {
let pipeline = match context.scene.pipeline_map.get(&pipeline_id) {
Some(pipeline) => pipeline,
@ -493,9 +508,10 @@ impl Frame {
self.pipeline_epoch_map.insert(pipeline_id, pipeline.epoch);
let iframe_rect = LayerRect::new(LayerPoint::zero(), bounds.size);
let transform = layer_relative_transform.pre_translated(bounds.origin.x,
bounds.origin.y,
0.0);
let transform = LayerToScrollTransform::create_translation(
reference_frame_relative_offset.x + bounds.origin.x,
reference_frame_relative_offset.y + bounds.origin.y,
0.0);
let iframe_reference_frame_id =
self.clip_scroll_tree.add_reference_frame(iframe_rect,
transform,
@ -503,21 +519,15 @@ impl Frame {
current_scroll_layer_id);
let iframe_scroll_layer_id = ScrollLayerId::root_scroll_layer(pipeline_id);
let node = ClipScrollNode::new(&LayerRect::new(LayerPoint::zero(), iframe_rect.size),
&LayerRect::new(LayerPoint::zero(), iframe_rect.size),
iframe_clip.main.size,
&LayerToScrollTransform::identity(),
pipeline_id);
self.clip_scroll_tree.add_node(node.clone(),
iframe_scroll_layer_id,
iframe_reference_frame_id);
context.builder.push_clip_scroll_node(iframe_reference_frame_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_rect.size);
context.builder.push_clip_scroll_node(iframe_scroll_layer_id,
iframe_clip,
&LayerPoint::zero(),
&iframe_clip.main.size);
context.builder.push_clip_scroll_node(iframe_reference_frame_id, iframe_clip);
context.builder.push_clip_scroll_node(iframe_scroll_layer_id, iframe_clip);
let mut traversal = DisplayListTraversal::new_skipping_first(display_list);
@ -526,7 +536,7 @@ impl Frame {
context,
iframe_reference_frame_id,
iframe_scroll_layer_id,
LayerToScrollTransform::identity(),
LayerPoint::zero(),
0,
&iframe_stacking_context,
iframe_clip);
@ -541,13 +551,15 @@ impl Frame {
context: &mut FlattenContext,
current_reference_frame_id: ScrollLayerId,
current_scroll_layer_id: ScrollLayerId,
layer_relative_transform: LayerToScrollTransform,
reference_frame_relative_offset: LayerPoint,
level: i32) {
while let Some(item) = traversal.next() {
match item.item {
SpecificDisplayItem::WebGL(ref info) => {
context.builder.add_webgl_rectangle(item.rect,
&item.clip, info.context_id);
context.builder.add_webgl_rectangle(current_scroll_layer_id,
item.rect,
&item.clip,
info.context_id);
}
SpecificDisplayItem::Image(ref info) => {
let image = context.resource_cache.get_image_properties(info.image_key);
@ -555,9 +567,15 @@ impl Frame {
// The image resource is tiled. We have to generate an image primitive
// for each tile.
let image_size = DeviceUintSize::new(image.descriptor.width, image.descriptor.height);
self.decompose_tiled_image(context, &item, info, image_size, tile_size as u32);
self.decompose_tiled_image(current_scroll_layer_id,
context,
&item,
info,
image_size,
tile_size as u32);
} else {
context.builder.add_image(item.rect,
context.builder.add_image(current_scroll_layer_id,
item.rect,
&item.clip,
&info.stretch_size,
&info.tile_spacing,
@ -568,7 +586,8 @@ impl Frame {
}
}
SpecificDisplayItem::YuvImage(ref info) => {
context.builder.add_yuv_image(item.rect,
context.builder.add_yuv_image(current_scroll_layer_id,
item.rect,
&item.clip,
info.y_image_key,
info.u_image_key,
@ -576,7 +595,8 @@ impl Frame {
info.color_space);
}
SpecificDisplayItem::Text(ref text_info) => {
context.builder.add_text(item.rect,
context.builder.add_text(current_scroll_layer_id,
item.rect,
&item.clip,
text_info.font_key,
text_info.size,
@ -586,13 +606,15 @@ impl Frame {
text_info.glyph_options);
}
SpecificDisplayItem::Rectangle(ref info) => {
context.builder.add_solid_rectangle(&item.rect,
context.builder.add_solid_rectangle(current_scroll_layer_id,
&item.rect,
&item.clip,
&info.color,
PrimitiveFlags::None);
}
SpecificDisplayItem::Gradient(ref info) => {
context.builder.add_gradient(item.rect,
context.builder.add_gradient(current_scroll_layer_id,
item.rect,
&item.clip,
info.start_point,
info.end_point,
@ -600,7 +622,8 @@ impl Frame {
info.extend_mode);
}
SpecificDisplayItem::RadialGradient(ref info) => {
context.builder.add_radial_gradient(item.rect,
context.builder.add_radial_gradient(current_scroll_layer_id,
item.rect,
&item.clip,
info.start_center,
info.start_radius,
@ -610,7 +633,8 @@ impl Frame {
info.extend_mode);
}
SpecificDisplayItem::BoxShadow(ref box_shadow_info) => {
context.builder.add_box_shadow(&box_shadow_info.box_bounds,
context.builder.add_box_shadow(current_scroll_layer_id,
&box_shadow_info.box_bounds,
&item.clip,
&box_shadow_info.offset,
&box_shadow_info.color,
@ -620,7 +644,10 @@ impl Frame {
box_shadow_info.clip_mode);
}
SpecificDisplayItem::Border(ref info) => {
context.builder.add_border(item.rect, &item.clip, info);
context.builder.add_border(current_scroll_layer_id,
item.rect,
&item.clip,
info);
}
SpecificDisplayItem::PushStackingContext(ref info) => {
self.flatten_stacking_context(traversal,
@ -628,7 +655,7 @@ impl Frame {
context,
current_reference_frame_id,
current_scroll_layer_id,
layer_relative_transform,
reference_frame_relative_offset,
level + 1,
&info.stacking_context,
&item.clip);
@ -639,7 +666,7 @@ impl Frame {
context,
current_reference_frame_id,
current_scroll_layer_id,
layer_relative_transform,
reference_frame_relative_offset,
level,
&item.clip,
&info.content_size,
@ -650,7 +677,7 @@ impl Frame {
&item.rect,
context,
current_scroll_layer_id,
layer_relative_transform);
reference_frame_relative_offset);
}
SpecificDisplayItem::PopStackingContext |
SpecificDisplayItem::PopScrollLayer => return,
@ -659,6 +686,7 @@ impl Frame {
}
fn decompose_tiled_image(&mut self,
scroll_layer_id: ScrollLayerId,
context: &mut FlattenContext,
item: &DisplayItem,
info: &ImageDisplayItem,
@ -749,7 +777,10 @@ impl Frame {
for ty in 0..num_tiles_y {
for tx in 0..num_tiles_x {
self.add_tile_primitive(context, item, info,
self.add_tile_primitive(scroll_layer_id,
context,
item,
info,
TileOffset::new(tx, ty),
stretched_tile_size,
1.0, 1.0,
@ -757,7 +788,10 @@ impl Frame {
}
if leftover.width != 0 {
// Tiles on the right edge that are smaller than the tile size.
self.add_tile_primitive(context, item, info,
self.add_tile_primitive(scroll_layer_id,
context,
item,
info,
TileOffset::new(num_tiles_x, ty),
stretched_tile_size,
(leftover.width as f32) / tile_size_f32,
@ -769,27 +803,36 @@ impl Frame {
if leftover.height != 0 {
for tx in 0..num_tiles_x {
// Tiles on the bottom edge that are smaller than the tile size.
self.add_tile_primitive(context, item, info,
self.add_tile_primitive(scroll_layer_id,
context,
item,
info,
TileOffset::new(tx, num_tiles_y),
stretched_tile_size,
1.0,
(leftover.height as f32) / tile_size_f32,
repeat_x, repeat_y);
repeat_x,
repeat_y);
}
if leftover.width != 0 {
// Finally, the bottom-right tile with a "leftover" size.
self.add_tile_primitive(context, item, info,
self.add_tile_primitive(scroll_layer_id,
context,
item,
info,
TileOffset::new(num_tiles_x, num_tiles_y),
stretched_tile_size,
(leftover.width as f32) / tile_size_f32,
(leftover.height as f32) / tile_size_f32,
repeat_x, repeat_y);
repeat_x,
repeat_y);
}
}
}
fn add_tile_primitive(&mut self,
scroll_layer_id: ScrollLayerId,
context: &mut FlattenContext,
item: &DisplayItem,
info: &ImageDisplayItem,
@ -833,7 +876,8 @@ impl Frame {
// Fix up the primitive's rect if it overflows the original item rect.
if let Some(prim_rect) = prim_rect.intersection(&item.rect) {
context.builder.add_image(prim_rect,
context.builder.add_image(scroll_layer_id,
prim_rect,
&item.clip,
&stretched_size,
&info.tile_spacing,

View File

@ -24,14 +24,14 @@ use tiling::{AuxiliaryListsMap, ClipScrollGroup, ClipScrollGroupIndex, Composite
use tiling::{PackedLayer, PackedLayerIndex, PrimitiveFlags, PrimitiveRunCmd, RenderPass};
use tiling::{RenderTargetContext, RenderTaskCollection, ScrollbarPrimitive, ScrollLayer};
use tiling::{ScrollLayerIndex, StackingContext, StackingContextIndex};
use util::{self, pack_as_float, rect_from_points_f, subtract_rect, TransformedRect};
use util::{self, pack_as_float, rect_from_points_f, subtract_rect};
use util::{RectHelpers, TransformedRectKind};
use webrender_traits::{as_scroll_parent_rect, BorderDetails, BorderDisplayItem, BorderSide, BorderStyle};
use webrender_traits::{BoxShadowClipMode, ClipRegion, ColorF, device_length, DeviceIntPoint};
use webrender_traits::{BorderDetails, BorderDisplayItem, BorderSide, BorderStyle};
use webrender_traits::{BoxShadowClipMode, ClipRegion, ColorF, DeviceIntPoint};
use webrender_traits::{DeviceIntRect, DeviceIntSize, DeviceUintSize, ExtendMode, FontKey, TileOffset};
use webrender_traits::{FontRenderMode, GlyphOptions, ImageKey, ImageRendering, ItemRange};
use webrender_traits::{LayerPoint, LayerRect, LayerSize, LayerToScrollTransform, PipelineId};
use webrender_traits::{RepeatMode, ScrollLayerId, ScrollLayerPixel, WebGLContextId, YuvColorSpace};
use webrender_traits::{LayerPoint, LayerRect, LayerSize, PipelineId, RepeatMode, ScrollLayerId};
use webrender_traits::{WebGLContextId, YuvColorSpace};
#[derive(Debug, Clone)]
struct ImageBorderSegment {
@ -102,7 +102,7 @@ impl FrameBuilderConfig {
}
pub struct FrameBuilder {
screen_rect: LayerRect,
screen_size: DeviceUintSize,
background_color: Option<ColorF>,
prim_store: PrimitiveStore,
cmds: Vec<PrimitiveRunCmd>,
@ -118,14 +118,18 @@ pub struct FrameBuilder {
/// A stack of scroll nodes used during display list processing to properly
/// parent new scroll nodes.
clip_scroll_node_stack: Vec<ScrollLayerIndex>,
/// A stack of stacking contexts used for creating ClipScrollGroups as
/// primitives are added to the frame.
stacking_context_stack: Vec<StackingContextIndex>,
}
impl FrameBuilder {
pub fn new(viewport_size: LayerSize,
pub fn new(screen_size: DeviceUintSize,
background_color: Option<ColorF>,
config: FrameBuilderConfig) -> FrameBuilder {
FrameBuilder {
screen_rect: LayerRect::new(LayerPoint::zero(), viewport_size),
screen_size: screen_size,
background_color: background_color,
stacking_context_store: Vec::new(),
scroll_layer_store: Vec::new(),
@ -136,13 +140,24 @@ impl FrameBuilder {
scrollbar_prims: Vec::new(),
config: config,
clip_scroll_node_stack: Vec::new(),
stacking_context_stack: Vec::new(),
}
}
fn add_primitive(&mut self,
scroll_layer_id: ScrollLayerId,
rect: &LayerRect,
clip_region: &ClipRegion,
container: PrimitiveContainer) -> PrimitiveIndex {
container: PrimitiveContainer)
-> PrimitiveIndex {
let stacking_context_index = *self.stacking_context_stack.last().unwrap();
if !self.stacking_context_store[stacking_context_index.0]
.has_clip_scroll_group(scroll_layer_id) {
let group_index = self.create_clip_scroll_group(stacking_context_index,
scroll_layer_id);
let stacking_context = &mut self.stacking_context_store[stacking_context_index.0];
stacking_context.clip_scroll_groups.push(group_index);
}
let geometry = PrimitiveGeometry {
local_rect: *rect,
@ -163,7 +178,7 @@ impl FrameBuilder {
container);
match self.cmds.last_mut().unwrap() {
&mut PrimitiveRunCmd::PrimitiveRun(_run_prim_index, ref mut count) => {
&mut PrimitiveRunCmd::PrimitiveRun(_run_prim_index, ref mut count, _) => {
debug_assert!(_run_prim_index.0 + *count == prim_index.0);
*count += 1;
return prim_index;
@ -174,15 +189,14 @@ impl FrameBuilder {
&mut PrimitiveRunCmd::PopScrollLayer => {}
}
self.cmds.push(PrimitiveRunCmd::PrimitiveRun(prim_index, 1));
self.cmds.push(PrimitiveRunCmd::PrimitiveRun(prim_index, 1, scroll_layer_id));
prim_index
}
pub fn create_clip_scroll_group(&mut self,
stacking_context_index: StackingContextIndex,
scroll_layer_id: ScrollLayerId,
pipeline_id: PipelineId)
scroll_layer_id: ScrollLayerId)
-> ClipScrollGroupIndex {
let packed_layer_index = PackedLayerIndex(self.packed_layers.len());
self.packed_layers.push(PackedLayer::empty());
@ -191,40 +205,47 @@ impl FrameBuilder {
stacking_context_index: stacking_context_index,
scroll_layer_id: scroll_layer_id,
packed_layer_index: packed_layer_index,
pipeline_id: pipeline_id,
xf_rect: None,
});
ClipScrollGroupIndex(self.clip_scroll_group_store.len() - 1)
ClipScrollGroupIndex(self.clip_scroll_group_store.len() - 1, scroll_layer_id)
}
pub fn push_stacking_context(&mut self,
reference_frame_offset: LayerPoint,
rect: LayerRect,
transform: LayerToScrollTransform,
pipeline_id: PipelineId,
scroll_layer_id: ScrollLayerId,
is_page_root: bool,
composite_ops: CompositeOps) {
if let Some(parent_index) = self.stacking_context_stack.last() {
let parent_is_root = self.stacking_context_store[parent_index.0].is_page_root;
if composite_ops.mix_blend_mode.is_some() && !parent_is_root {
// the parent stacking context of a stacking context with mix-blend-mode
// must be drawn with a transparent background, unless the parent stacking context
// is the root of the page
self.stacking_context_store[parent_index.0].should_isolate = true;
}
}
let stacking_context_index = StackingContextIndex(self.stacking_context_store.len());
let group_index = self.create_clip_scroll_group(stacking_context_index,
scroll_layer_id,
pipeline_id);
self.stacking_context_store.push(StackingContext::new(pipeline_id,
transform,
reference_frame_offset,
rect,
composite_ops,
group_index));
is_page_root,
composite_ops));
self.cmds.push(PrimitiveRunCmd::PushStackingContext(stacking_context_index));
self.stacking_context_stack.push(stacking_context_index);
}
pub fn pop_stacking_context(&mut self) {
self.cmds.push(PrimitiveRunCmd::PopStackingContext);
self.stacking_context_stack.pop();
}
pub fn push_clip_scroll_node(&mut self,
scroll_layer_id: ScrollLayerId,
clip_region: &ClipRegion,
node_origin: &LayerPoint,
content_size: &LayerSize) {
clip_region: &ClipRegion) {
let scroll_layer_index = ScrollLayerIndex(self.scroll_layer_store.len());
let parent_index = *self.clip_scroll_node_stack.last().unwrap_or(&scroll_layer_index);
self.clip_scroll_node_stack.push(scroll_layer_index);
@ -246,29 +267,15 @@ impl FrameBuilder {
});
self.packed_layers.push(PackedLayer::empty());
self.cmds.push(PrimitiveRunCmd::PushScrollLayer(scroll_layer_index));
// We need to push a fake stacking context here, because primitives that are
// direct children of this stacking context, need to be adjusted by the scroll
// offset of this layer. Eventually we should be able to remove this.
let rect = LayerRect::new(LayerPoint::zero(),
LayerSize::new(content_size.width + node_origin.x,
content_size.height + node_origin.y));
self.push_stacking_context(rect,
LayerToScrollTransform::identity(),
scroll_layer_id.pipeline_id,
scroll_layer_id,
CompositeOps::empty());
}
pub fn pop_clip_scroll_node(&mut self) {
self.pop_stacking_context();
self.cmds.push(PrimitiveRunCmd::PopScrollLayer);
self.clip_scroll_node_stack.pop();
}
pub fn add_solid_rectangle(&mut self,
scroll_layer_id: ScrollLayerId,
rect: &LayerRect,
clip_region: &ClipRegion,
color: &ColorF,
@ -281,7 +288,8 @@ impl FrameBuilder {
color: *color,
};
let prim_index = self.add_primitive(rect,
let prim_index = self.add_primitive(scroll_layer_id,
rect,
clip_region,
PrimitiveContainer::Rectangle(prim));
@ -318,6 +326,7 @@ impl FrameBuilder {
}
pub fn add_border(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
border_item: &BorderDisplayItem) {
@ -411,7 +420,8 @@ impl FrameBuilder {
}
for segment in segments {
self.add_image(segment.geom_rect,
self.add_image(scroll_layer_id,
segment.geom_rect,
clip_region,
&segment.stretch_size,
&segment.tile_spacing,
@ -492,7 +502,8 @@ impl FrameBuilder {
];
for rect in &rects {
self.add_solid_rectangle(rect,
self.add_solid_rectangle(scroll_layer_id,
rect,
clip_region,
&top_color,
PrimitiveFlags::None);
@ -533,7 +544,8 @@ impl FrameBuilder {
],
};
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::Border(prim_cpu, prim_gpu));
}
@ -541,6 +553,7 @@ impl FrameBuilder {
}
pub fn add_gradient(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
start_point: LayerPoint,
@ -591,10 +604,11 @@ impl FrameBuilder {
PrimitiveContainer::AngleGradient(gradient_cpu, gradient_gpu)
};
self.add_primitive(&rect, clip_region, prim);
self.add_primitive(scroll_layer_id, &rect, clip_region, prim);
}
pub fn add_radial_gradient(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
start_center: LayerPoint,
@ -618,12 +632,14 @@ impl FrameBuilder {
padding: [0.0],
};
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::RadialGradient(radial_gradient_cpu, radial_gradient_gpu));
}
pub fn add_text(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
font_key: FontKey,
@ -685,13 +701,15 @@ impl FrameBuilder {
color: *color,
};
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::TextRun(prim_cpu, prim_gpu));
}
}
pub fn add_box_shadow(&mut self,
scroll_layer_id: ScrollLayerId,
box_bounds: &LayerRect,
clip_region: &ClipRegion,
box_offset: &LayerPoint,
@ -706,7 +724,8 @@ impl FrameBuilder {
// Fast path.
if blur_radius == 0.0 && spread_radius == 0.0 && clip_mode == BoxShadowClipMode::None {
self.add_solid_rectangle(&box_bounds,
self.add_solid_rectangle(scroll_layer_id,
&box_bounds,
clip_region,
color,
PrimitiveFlags::None);
@ -782,7 +801,8 @@ impl FrameBuilder {
match shadow_kind {
BoxShadowKind::Simple(rects) => {
for rect in &rects {
self.add_solid_rectangle(rect,
self.add_solid_rectangle(scroll_layer_id,
rect,
clip_region,
color,
PrimitiveFlags::None)
@ -794,6 +814,7 @@ impl FrameBuilder {
BoxShadowClipMode::Inset => 1.0,
};
let prim_gpu = BoxShadowPrimitiveGpu {
src_rect: *box_bounds,
bs_rect: bs_rect,
@ -804,7 +825,8 @@ impl FrameBuilder {
inverted: inverted,
};
self.add_primitive(&outer_rect,
self.add_primitive(scroll_layer_id,
&outer_rect,
clip_region,
PrimitiveContainer::BoxShadow(prim_gpu, rects));
}
@ -812,6 +834,7 @@ impl FrameBuilder {
}
pub fn add_webgl_rectangle(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
context_id: WebGLContextId) {
@ -827,12 +850,14 @@ impl FrameBuilder {
tile_spacing: LayerSize::zero(),
};
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::Image(prim_cpu, prim_gpu));
}
pub fn add_image(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
stretch_size: &LayerSize,
@ -856,12 +881,14 @@ impl FrameBuilder {
tile_spacing: *tile_spacing,
};
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::Image(prim_cpu, prim_gpu));
}
pub fn add_yuv_image(&mut self,
scroll_layer_id: ScrollLayerId,
rect: LayerRect,
clip_region: &ClipRegion,
y_image_key: ImageKey,
@ -880,7 +907,8 @@ impl FrameBuilder {
let prim_gpu = YuvImagePrimitiveGpu::new(rect.size, color_space);
self.add_primitive(&rect,
self.add_primitive(scroll_layer_id,
&rect,
clip_region,
PrimitiveContainer::YuvImage(prim_cpu, prim_gpu));
}
@ -958,6 +986,7 @@ impl FrameBuilder {
let mut sc_stack = Vec::new();
let mut current_task = RenderTask::new_alpha_batch(next_task_index,
DeviceIntPoint::zero(),
false,
RenderTaskLocation::Fixed);
next_task_index.0 += 1;
let mut alpha_task_stack = Vec::new();
@ -974,10 +1003,23 @@ impl FrameBuilder {
let stacking_context_rect = &stacking_context.bounding_rect;
let composite_count = stacking_context.composite_ops.count();
if composite_count == 0 && stacking_context.should_isolate {
let location = RenderTaskLocation::Dynamic(None, stacking_context_rect.size);
let new_task = RenderTask::new_alpha_batch(next_task_index,
stacking_context_rect.origin,
stacking_context.should_isolate,
location);
next_task_index.0 += 1;
let prev_task = mem::replace(&mut current_task, new_task);
alpha_task_stack.push(prev_task);
}
for _ in 0..composite_count {
let location = RenderTaskLocation::Dynamic(None, stacking_context_rect.size);
let new_task = RenderTask::new_alpha_batch(next_task_index,
stacking_context_rect.origin,
stacking_context.should_isolate,
location);
next_task_index.0 += 1;
let prev_task = mem::replace(&mut current_task, new_task);
@ -992,6 +1034,20 @@ impl FrameBuilder {
continue;
}
let composite_count = stacking_context.composite_ops.count();
if composite_count == 0 && stacking_context.should_isolate {
let mut prev_task = alpha_task_stack.pop().unwrap();
let item = AlphaRenderItem::HardwareComposite(stacking_context_index,
current_task.id,
HardwareCompositeOp::Alpha,
next_z);
next_z += 1;
prev_task.as_alpha_batch().alpha_items.push(item);
prev_task.children.push(current_task);
current_task = prev_task;
}
for filter in &stacking_context.composite_ops.filters {
let mut prev_task = alpha_task_stack.pop().unwrap();
let item = AlphaRenderItem::Blend(stacking_context_index,
@ -1004,39 +1060,24 @@ impl FrameBuilder {
current_task = prev_task;
}
if let Some(mix_blend_mode) = stacking_context.composite_ops.mix_blend_mode {
match HardwareCompositeOp::from_mix_blend_mode(mix_blend_mode) {
Some(op) => {
let mut prev_task = alpha_task_stack.pop().unwrap();
let item = AlphaRenderItem::HardwareComposite(stacking_context_index,
current_task.id,
op,
next_z);
next_z += 1;
prev_task.as_alpha_batch().alpha_items.push(item);
prev_task.children.push(current_task);
current_task = prev_task;
}
None => {
let readback_task =
RenderTask::new_readback(stacking_context_index,
stacking_context.bounding_rect);
let readback_task =
RenderTask::new_readback(stacking_context_index,
stacking_context.bounding_rect);
let mut prev_task = alpha_task_stack.pop().unwrap();
let item = AlphaRenderItem::Composite(stacking_context_index,
readback_task.id,
current_task.id,
mix_blend_mode,
next_z);
next_z += 1;
prev_task.as_alpha_batch().alpha_items.push(item);
prev_task.children.push(current_task);
prev_task.children.push(readback_task);
current_task = prev_task;
}
}
let mut prev_task = alpha_task_stack.pop().unwrap();
let item = AlphaRenderItem::Composite(stacking_context_index,
readback_task.id,
current_task.id,
mix_blend_mode,
next_z);
next_z += 1;
prev_task.as_alpha_batch().alpha_items.push(item);
prev_task.children.push(current_task);
prev_task.children.push(readback_task);
current_task = prev_task;
}
}
PrimitiveRunCmd::PrimitiveRun(first_prim_index, prim_count) => {
PrimitiveRunCmd::PrimitiveRun(first_prim_index, prim_count, scroll_layer_id) => {
let stacking_context_index = *sc_stack.last().unwrap();
let stacking_context = &self.stacking_context_store[stacking_context_index.0];
@ -1045,8 +1086,8 @@ impl FrameBuilder {
}
let stacking_context_index = *sc_stack.last().unwrap();
let group_index =
self.stacking_context_store[stacking_context_index.0].clip_scroll_group();
let group_index = self.stacking_context_store[stacking_context_index.0]
.clip_scroll_group(scroll_layer_id);
let clip_scroll_group = &self.clip_scroll_group_store[group_index.0];
for i in 0..prim_count {
@ -1104,10 +1145,8 @@ impl FrameBuilder {
let screen_rect = DeviceIntRect::new(
DeviceIntPoint::zero(),
DeviceIntSize::from_lengths(device_length(self.screen_rect.size.width as f32,
device_pixel_ratio),
device_length(self.screen_rect.size.height as f32,
device_pixel_ratio)));
DeviceIntSize::new(self.screen_size.width as i32,
self.screen_size.height as i32));
// Pick a size for the cache render targets to be. The main requirement is that it
// has to be at least as large as the framebuffer size. This ensures that it will
@ -1173,7 +1212,7 @@ impl FrameBuilder {
Frame {
device_pixel_ratio: device_pixel_ratio,
background_color: self.background_color,
viewport_size: self.screen_rect.size,
window_size: self.screen_size,
profile_counters: profile_counters,
passes: passes,
cache_size: cache_size,
@ -1248,8 +1287,8 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
self.handle_push_stacking_context(stacking_context_index),
&PrimitiveRunCmd::PushScrollLayer(scroll_layer_index) =>
self.handle_push_scroll_layer(scroll_layer_index),
&PrimitiveRunCmd::PrimitiveRun(prim_index, prim_count) =>
self.handle_primitive_run(prim_index, prim_count),
&PrimitiveRunCmd::PrimitiveRun(prim_index, prim_count, scroll_layer_id) =>
self.handle_primitive_run(prim_index, prim_count, scroll_layer_id),
&PrimitiveRunCmd::PopStackingContext => self.handle_pop_stacking_context(),
&PrimitiveRunCmd::PopScrollLayer => self.handle_pop_scroll_layer(),
}
@ -1264,45 +1303,41 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
let stacking_context = &mut self.frame_builder
.stacking_context_store[stacking_context_index.0];
let scroll_tree_layer = &self.clip_scroll_tree.nodes[&group.scroll_layer_id];
let node = &self.clip_scroll_tree.nodes[&group.scroll_layer_id];
let packed_layer = &mut self.frame_builder.packed_layers[group.packed_layer_index.0];
packed_layer.transform = scroll_tree_layer.world_content_transform
.with_source::<ScrollLayerPixel>()
.pre_mul(&stacking_context.local_transform);
packed_layer.inv_transform = packed_layer.transform.inverse().unwrap();
// The world content transform is relative to the containing reference frame,
// so we translate into the origin of the stacking context itself.
let transform = node.world_content_transform
.pre_translated(stacking_context.reference_frame_offset.x,
stacking_context.reference_frame_offset.y,
0.0);
packed_layer.set_transform(transform);
if !stacking_context.can_contribute_to_scene() {
return;
}
let inv_layer_transform = stacking_context.local_transform.inverse().unwrap();
let local_viewport_rect =
as_scroll_parent_rect(&scroll_tree_layer.combined_local_viewport_rect);
let viewport_rect = inv_layer_transform.transform_rect(&local_viewport_rect);
let layer_local_rect = stacking_context.local_rect.intersection(&viewport_rect);
// Here we want to find the intersection between the clipping region and the
// stacking context content, so we move the viewport rectangle into the coordinate
// system of the stacking context content.
let viewport_rect =
&node.combined_local_viewport_rect
.translate(&-stacking_context.reference_frame_offset)
.translate(&-node.scrolling.offset);
let intersected_rect = stacking_context.local_rect.intersection(viewport_rect);
group.xf_rect = None;
let layer_local_rect = match layer_local_rect {
Some(layer_local_rect) if !layer_local_rect.is_empty() => layer_local_rect,
_ => continue,
};
let layer_xf_rect = TransformedRect::new(&layer_local_rect,
&packed_layer.transform,
self.device_pixel_ratio);
if layer_xf_rect.bounding_rect.intersects(&self.screen_rect) {
packed_layer.screen_vertices = layer_xf_rect.vertices.clone();
packed_layer.local_clip_rect = layer_local_rect;
group.xf_rect = Some(layer_xf_rect);
}
group.xf_rect = packed_layer.set_rect(intersected_rect,
self.screen_rect,
self.device_pixel_ratio);
}
}
fn compute_stacking_context_visibility(&mut self) {
for context_index in 0..self.frame_builder.stacking_context_store.len() {
let is_visible = {
// We don't take into account visibility of children here, so we must
// do that later.
let stacking_context = &self.frame_builder.stacking_context_store[context_index];
stacking_context.clip_scroll_groups.iter().any(|group_index| {
self.frame_builder.clip_scroll_group_store[group_index.0].is_visible()
@ -1315,18 +1350,22 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
fn handle_pop_stacking_context(&mut self) {
let stacking_context_index = self.stacking_context_stack.pop().unwrap();
let bounding_rect = {
let (bounding_rect, is_visible) = {
let stacking_context =
&mut self.frame_builder.stacking_context_store[stacking_context_index.0];
stacking_context.bounding_rect = stacking_context.bounding_rect
.intersection(self.screen_rect)
.unwrap_or(DeviceIntRect::zero());
stacking_context.bounding_rect.clone()
(stacking_context.bounding_rect.clone(), stacking_context.is_visible)
};
if let Some(ref mut parent_index) = self.stacking_context_stack.last_mut() {
let parent = &mut self.frame_builder.stacking_context_store[parent_index.0];
parent.bounding_rect = parent.bounding_rect.union(&bounding_rect);
// The previous compute_stacking_context_visibility pass did not take into
// account visibility of children, so we do that now.
parent.is_visible = parent.is_visible || is_visible;
}
}
@ -1334,27 +1373,29 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
self.scroll_layer_stack.push(scroll_layer_index);
let scroll_layer = &mut self.frame_builder.scroll_layer_store[scroll_layer_index.0];
let node = &self.clip_scroll_tree.nodes[&scroll_layer.scroll_layer_id];
let packed_layer_index = scroll_layer.packed_layer_index;
let scroll_tree_layer = &self.clip_scroll_tree.nodes[&scroll_layer.scroll_layer_id];
let packed_layer = &mut self.frame_builder.packed_layers[packed_layer_index.0];
packed_layer.transform = scroll_tree_layer.world_viewport_transform;
packed_layer.inv_transform = packed_layer.transform.inverse().unwrap();
// The coordinates of the mask are relative to the origin of the node itself,
// so we need to account for that origin in the transformation we assign to
// the packed layer.
let transform = node.world_viewport_transform
.pre_translated(node.local_viewport_rect.origin.x,
node.local_viewport_rect.origin.y,
0.0);
packed_layer.set_transform(transform);
let local_rect = &scroll_tree_layer.combined_local_viewport_rect
.translate(&scroll_tree_layer.scrolling.offset);
if !local_rect.is_empty() {
let layer_xf_rect = TransformedRect::new(local_rect,
&packed_layer.transform,
// Meanwhile, the combined viewport rect is relative to the reference frame, so
// we move it into the local coordinate system of the node.
let local_viewport_rect =
node.combined_local_viewport_rect.translate(&-node.local_viewport_rect.origin);
scroll_layer.xf_rect = packed_layer.set_rect(Some(local_viewport_rect),
self.screen_rect,
self.device_pixel_ratio);
if layer_xf_rect.bounding_rect.intersects(&self.screen_rect) {
packed_layer.screen_vertices = layer_xf_rect.vertices.clone();
packed_layer.local_clip_rect = *local_rect;
scroll_layer.xf_rect = Some(layer_xf_rect);
}
}
let clip_info = match scroll_layer.clip_cache_info {
Some(ref mut clip_info) => clip_info,
None => return,
@ -1417,7 +1458,10 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
self.current_clip_stack.reverse();
}
fn handle_primitive_run(&mut self, prim_index: PrimitiveIndex, prim_count: usize) {
fn handle_primitive_run(&mut self,
prim_index: PrimitiveIndex,
prim_count: usize,
scroll_layer_id: ScrollLayerId) {
let stacking_context_index = *self.stacking_context_stack.last().unwrap();
let (packed_layer_index, pipeline_id) = {
let stacking_context =
@ -1427,7 +1471,7 @@ impl<'a> LayerRectCalculationAndCullingPass<'a> {
return;
}
let group_index = stacking_context.clip_scroll_group();
let group_index = stacking_context.clip_scroll_group(scroll_layer_id);
let clip_scroll_group = &self.frame_builder.clip_scroll_group_store[group_index.0];
(clip_scroll_group.packed_layer_index, stacking_context.pipeline_id)
};

View File

@ -20,7 +20,7 @@ use std::sync::Arc;
use tiling;
use renderer::BlendMode;
use webrender_traits::{Epoch, ColorF, PipelineId, DeviceIntSize};
use webrender_traits::{ImageFormat, NativeFontHandle, MixBlendMode};
use webrender_traits::{ImageFormat, NativeFontHandle};
use webrender_traits::{ExternalImageId, ScrollLayerId, WebGLCommand};
use webrender_traits::{ImageData};
use webrender_traits::{DeviceUintRect};
@ -492,26 +492,13 @@ pub enum LowLevelFilterOp {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub enum HardwareCompositeOp {
Multiply,
Max,
Min,
Alpha,
}
impl HardwareCompositeOp {
pub fn from_mix_blend_mode(mix_blend_mode: MixBlendMode) -> Option<HardwareCompositeOp> {
match mix_blend_mode {
MixBlendMode::Multiply => Some(HardwareCompositeOp::Multiply),
MixBlendMode::Lighten => Some(HardwareCompositeOp::Max),
MixBlendMode::Darken => Some(HardwareCompositeOp::Min),
_ => None,
}
}
pub fn to_blend_mode(&self) -> BlendMode {
match self {
&HardwareCompositeOp::Multiply => BlendMode::Multiply,
&HardwareCompositeOp::Max => BlendMode::Max,
&HardwareCompositeOp::Min => BlendMode::Min,
&HardwareCompositeOp::Alpha => BlendMode::Alpha,
}
}
}

View File

@ -147,6 +147,18 @@ impl FontContext {
self.cg_fonts.insert((*font_key).clone(), native_font_handle);
}
/// Delete the native font registered for `font_key`, along with every
/// cached CTFont derived from it.
///
/// No-op if the key was never registered.
pub fn delete_font(&mut self, font_key: &FontKey) {
    // Drop the CGFont for this key. `is_some()` avoids binding the removed
    // value (the original `if let Some(cg_font)` left `cg_font` unused,
    // which triggers an unused-variable warning).
    if self.cg_fonts.remove(font_key).is_some() {
        // Evict every cached CTFont keyed on this font (one entry per
        // requested size). Collect the keys first so we do not mutate
        // `ct_fonts` while iterating over it.
        let ct_font_keys = self.ct_fonts.keys()
                                        .filter(|k| k.0 == *font_key)
                                        .cloned()
                                        .collect::<Vec<_>>();
        for ct_font_key in ct_font_keys {
            self.ct_fonts.remove(&ct_font_key);
        }
    }
}
fn get_ct_font(&mut self,
font_key: FontKey,
size: Au) -> Option<CTFont> {

View File

@ -13,6 +13,7 @@ use freetype::freetype::{FT_Library, FT_Set_Char_Size};
use freetype::freetype::{FT_Face, FT_Long, FT_UInt, FT_F26Dot6};
use freetype::freetype::{FT_Init_FreeType, FT_Load_Glyph, FT_Render_Glyph};
use freetype::freetype::{FT_New_Memory_Face, FT_GlyphSlot, FT_LcdFilter};
use freetype::freetype::{FT_Done_Face};
use std::{mem, ptr, slice};
use std::collections::HashMap;
@ -88,6 +89,15 @@ impl FontContext {
panic!("TODO: Not supported on Linux");
}
// Remove the cached face for this key and release its FreeType resources.
// No-op if the key was never registered.
pub fn delete_font(&mut self, font_key: &FontKey) {
    if let Some(face) = self.faces.remove(font_key) {
        let result = unsafe {
            // SAFETY: the face was just removed from `self.faces`, so no
            // cached handle to this FT_Face remains after it is destroyed.
            // (Presumably created via FT_New_Memory_Face, imported above —
            // TODO confirm at the add_font site.)
            FT_Done_Face(face.face)
        };
        // A failure here would mean a corrupted/double-freed face handle,
        // which is a bug rather than a recoverable condition.
        assert!(result.succeeded());
    }
}
fn load_glyph(&self,
font_key: FontKey,
size: Au,

View File

@ -143,6 +143,10 @@ impl FontContext {
self.fonts.insert((*font_key).clone(), face);
}
// Remove the cached DirectWrite font face for this key, if present.
// Any cleanup happens when the removed value is dropped.
pub fn delete_font(&mut self, font_key: &FontKey) {
    self.fonts.remove(font_key);
}
// Assumes RGB format from dwrite, which is 3 bytes per pixel as dwrite
// doesn't output an alpha value via GlyphRunAnalysis::CreateAlphaTexture
#[allow(dead_code)]
@ -230,13 +234,8 @@ impl FontContext {
let length = pixels.len() / 3;
let mut rgba_pixels: Vec<u8> = vec![0; length * 4];
for i in 0..length {
// TODO(vlad): we likely need to do something smarter
// This is what skia does
let alpha = ((pixels[i*3+0] as u32 +
pixels[i*3+1] as u32 +
pixels[i*3+2] as u32)
/ 3) as u8;
// Only take the G channel, as its closest to D2D
let alpha = pixels[i*3 + 1] as u8;
rgba_pixels[i*4+0] = alpha;
rgba_pixels[i*4+1] = alpha;
rgba_pixels[i*4+2] = alpha;
@ -278,22 +277,24 @@ impl FontContext {
let mut pixels = analysis.create_alpha_texture(texture_type, bounds);
let lut_correction = match glyph_options {
Some(option) => {
if option.force_gdi_rendering {
&self.gdi_gamma_lut
} else {
&self.gamma_lut
}
},
None => &self.gamma_lut
};
if render_mode != FontRenderMode::Mono {
let lut_correction = match glyph_options {
Some(option) => {
if option.force_gdi_rendering {
&self.gdi_gamma_lut
} else {
&self.gamma_lut
}
},
None => &self.gamma_lut
};
lut_correction.preblend_rgb(&mut pixels, width, height,
ColorLut::new(key.color.r,
key.color.g,
key.color.b,
key.color.a));
lut_correction.preblend_rgb(&mut pixels, width, height,
ColorLut::new(key.color.r,
key.color.g,
key.color.b,
key.color.a));
}
let rgba_pixels = self.convert_to_rgba(&mut pixels, render_mode);

View File

@ -66,6 +66,7 @@ pub fn should_record_msg(msg: &ApiMsg) -> bool {
match msg {
&ApiMsg::AddRawFont(..) |
&ApiMsg::AddNativeFont(..) |
&ApiMsg::DeleteFont(..) |
&ApiMsg::AddImage(..) |
&ApiMsg::GenerateFrame(..) |
&ApiMsg::UpdateImage(..) |

View File

@ -18,6 +18,7 @@ use std::sync::mpsc::Sender;
use texture_cache::TextureCache;
use thread_profiler::register_thread_with_profiler;
use threadpool::ThreadPool;
use webrender_traits::{DeviceUintPoint, DeviceUintRect, DeviceUintSize};
use webrender_traits::{ApiMsg, AuxiliaryLists, BuiltDisplayList, IdNamespace, ImageData};
use webrender_traits::{PipelineId, RenderNotifier, RenderDispatcher, WebGLCommand, WebGLContextId};
use webrender_traits::channel::{PayloadHelperMethods, PayloadReceiver, PayloadSender, MsgReceiver};
@ -35,6 +36,9 @@ pub struct RenderBackend {
result_tx: Sender<ResultMsg>,
device_pixel_ratio: f32,
page_zoom_factor: f32,
window_size: DeviceUintSize,
inner_rect: DeviceUintRect,
next_namespace_id: IdNamespace,
resource_cache: ResourceCache,
@ -69,7 +73,8 @@ impl RenderBackend {
recorder: Option<Box<ApiRecordingReceiver>>,
main_thread_dispatcher: Arc<Mutex<Option<Box<RenderDispatcher>>>>,
blob_image_renderer: Option<Box<BlobImageRenderer>>,
vr_compositor_handler: Arc<Mutex<Option<Box<VRCompositorHandler>>>>) -> RenderBackend {
vr_compositor_handler: Arc<Mutex<Option<Box<VRCompositorHandler>>>>,
initial_window_size: DeviceUintSize) -> RenderBackend {
let resource_cache = ResourceCache::new(texture_cache, workers, blob_image_renderer, enable_aa);
@ -81,6 +86,7 @@ impl RenderBackend {
payload_tx: payload_tx,
result_tx: result_tx,
device_pixel_ratio: device_pixel_ratio,
page_zoom_factor: 1.0,
resource_cache: resource_cache,
scene: Scene::new(),
frame: Frame::new(config),
@ -92,7 +98,9 @@ impl RenderBackend {
recorder: recorder,
main_thread_dispatcher: main_thread_dispatcher,
next_webgl_id: 0,
vr_compositor_handler: vr_compositor_handler
vr_compositor_handler: vr_compositor_handler,
window_size: initial_window_size,
inner_rect: DeviceUintRect::new(DeviceUintPoint::zero(), initial_window_size),
}
}
@ -117,6 +125,9 @@ impl RenderBackend {
self.resource_cache
.add_font_template(id, FontTemplate::Native(native_font_handle));
}
ApiMsg::DeleteFont(id) => {
self.resource_cache.delete_font_template(id);
}
ApiMsg::GetGlyphDimensions(glyph_keys, tx) => {
let mut glyph_dimensions = Vec::with_capacity(glyph_keys.len());
for glyph_key in &glyph_keys {
@ -137,6 +148,13 @@ impl RenderBackend {
ApiMsg::DeleteImage(id) => {
self.resource_cache.delete_image_template(id);
}
ApiMsg::SetPageZoom(factor) => {
self.page_zoom_factor = factor.get();
}
ApiMsg::SetWindowParameters(window_size, inner_rect) => {
self.window_size = window_size;
self.inner_rect = inner_rect;
}
ApiMsg::CloneApi(sender) => {
let result = self.next_namespace_id;
@ -418,17 +436,23 @@ impl RenderBackend {
webgl_context.unbind();
}
self.frame.create(&self.scene, &mut self.resource_cache);
let device_pixel_ratio = self.device_pixel_ratio * self.page_zoom_factor;
self.frame.create(&self.scene,
&mut self.resource_cache,
self.window_size,
self.inner_rect,
device_pixel_ratio);
}
fn render(&mut self,
texture_cache_profile: &mut TextureCacheProfileCounters)
-> RendererFrame {
let device_pixel_ratio = self.device_pixel_ratio * self.page_zoom_factor;
let frame = self.frame.build(&mut self.resource_cache,
&self.scene.pipeline_auxiliary_lists,
self.device_pixel_ratio,
device_pixel_ratio,
texture_cache_profile);
frame
}

View File

@ -62,6 +62,7 @@ pub struct AlphaRenderTask {
screen_origin: DeviceIntPoint,
pub opaque_items: Vec<AlphaRenderItem>,
pub alpha_items: Vec<AlphaRenderItem>,
pub isolate_clear: bool,
}
#[derive(Debug, Copy, Clone)]
@ -144,6 +145,7 @@ pub struct RenderTask {
impl RenderTask {
pub fn new_alpha_batch(task_index: RenderTaskIndex,
screen_origin: DeviceIntPoint,
isolate_clear: bool,
location: RenderTaskLocation) -> RenderTask {
RenderTask {
id: RenderTaskId::Static(task_index),
@ -153,6 +155,7 @@ impl RenderTask {
screen_origin: screen_origin,
alpha_items: Vec::new(),
opaque_items: Vec::new(),
isolate_clear: isolate_clear,
}),
}
}

View File

@ -80,10 +80,6 @@ pub enum BlendMode {
None,
Alpha,
Multiply,
Max,
Min,
// Use the color of the text itself as a constant color blend factor.
Subpixel(ColorF),
}
@ -513,7 +509,8 @@ impl Renderer {
/// };
/// let (renderer, sender) = Renderer::new(opts);
/// ```
pub fn new(mut options: RendererOptions) -> Result<(Renderer, RenderApiSender), InitError> {
pub fn new(mut options: RendererOptions,
initial_window_size: DeviceUintSize) -> Result<(Renderer, RenderApiSender), InitError> {
let (api_tx, api_rx) = try!{ channel::msg_channel() };
let (payload_tx, payload_rx) = try!{ channel::payload_channel() };
let (result_tx, result_rx) = channel();
@ -805,7 +802,8 @@ impl Renderer {
recorder,
backend_main_thread_dispatcher,
blob_image_renderer,
backend_vr_compositor);
backend_vr_compositor,
initial_window_size);
backend.run(backend_profile_counters);
})};
@ -1206,7 +1204,6 @@ impl Renderer {
let shader = match batch.key.blend_mode {
BlendMode::Subpixel(..) => self.ps_text_run_subpixel.get(&mut self.device, transform_kind),
BlendMode::Alpha | BlendMode::None => self.ps_text_run.get(&mut self.device, transform_kind),
_ => unreachable!(),
};
(GPU_TAG_PRIM_TEXT_RUN, shader)
}
@ -1290,20 +1287,18 @@ impl Renderer {
let width = readback.data[2];
let height = readback.data[3];
// Need to invert the y coordinates when reading back from
// the framebuffer.
let y0 = if render_target.is_some() {
src_y as i32
} else {
target_dimensions.height as i32 - height as i32 - src_y as i32
};
let mut src = DeviceIntRect::new(DeviceIntPoint::new(src_x as i32, src_y as i32),
DeviceIntSize::new(width as i32, height as i32));
let mut dest = DeviceIntRect::new(DeviceIntPoint::new(dest_x as i32, dest_y as i32),
DeviceIntSize::new(width as i32, height as i32));
let src = DeviceIntRect::new(DeviceIntPoint::new(src_x as i32,
y0),
DeviceIntSize::new(width as i32, height as i32));
let dest = DeviceIntRect::new(DeviceIntPoint::new(dest_x as i32,
dest_y as i32),
DeviceIntSize::new(width as i32, height as i32));
// Need to invert the y coordinates and flip the image vertically when
// reading back from the framebuffer.
if render_target.is_none() {
src.origin.y = target_dimensions.height as i32 - src.size.height - src.origin.y;
dest.origin.y += dest.size.height;
dest.size.height = -dest.size.height;
}
self.device.blit_render_target(render_target,
Some(src),
@ -1381,6 +1376,11 @@ impl Renderer {
self.device.clear_target(clear_color, clear_depth);
let isolate_clear_color = Some([0.0, 0.0, 0.0, 0.0]);
for isolate_clear in &target.isolate_clears {
self.device.clear_target_rect(isolate_clear_color, None, *isolate_clear);
}
projection
};
@ -1501,18 +1501,6 @@ impl Renderer {
BlendMode::None => {
self.device.set_blend(false);
}
BlendMode::Multiply => {
self.device.set_blend(true);
self.device.set_blend_mode_multiply();
}
BlendMode::Max => {
self.device.set_blend(true);
self.device.set_blend_mode_max();
}
BlendMode::Min => {
self.device.set_blend(true);
self.device.set_blend_mode_min();
}
BlendMode::Alpha => {
self.device.set_blend(true);
self.device.set_blend_mode_alpha();
@ -1601,10 +1589,8 @@ impl Renderer {
// Some tests use a restricted viewport smaller than the main screen size.
// Ensure we clear the framebuffer in these tests.
// TODO(gw): Find a better solution for this?
let viewport_size = DeviceIntSize::new((frame.viewport_size.width * frame.device_pixel_ratio) as i32,
(frame.viewport_size.height * frame.device_pixel_ratio) as i32);
let needs_clear = viewport_size.width < framebuffer_size.width as i32 ||
viewport_size.height < framebuffer_size.height as i32;
let needs_clear = frame.window_size.width < framebuffer_size.width ||
frame.window_size.height < framebuffer_size.height;
self.device.disable_depth_write();
self.device.disable_stencil();

View File

@ -41,6 +41,8 @@ enum GlyphCacheMsg {
AddFont(FontKey, FontTemplate),
/// Request glyphs for a text run.
RequestGlyphs(FontKey, Au, ColorF, Vec<GlyphInstance>, FontRenderMode, Option<GlyphOptions>),
/// Remove an existing font.
DeleteFont(FontKey),
/// Finished requesting glyphs. Reply with new glyphs.
EndFrame,
}
@ -260,6 +262,13 @@ impl ResourceCache {
self.font_templates.insert(font_key, template);
}
// Remove a font template previously registered under `font_key`.
// The glyph cache thread is notified first, so every worker's font
// context drops its native handle for this key; only then is the
// template itself forgotten on this thread.
pub fn delete_font_template(&mut self, font_key: FontKey) {
    self.glyph_cache_tx
        .send(GlyphCacheMsg::DeleteFont(font_key))
        .unwrap();
    self.font_templates.remove(&font_key);
}
pub fn add_image_template(&mut self,
image_key: ImageKey,
descriptor: ImageDescriptor,
@ -839,6 +848,23 @@ fn spawn_glyph_cache_thread(workers: Arc<Mutex<ThreadPool>>) -> (Sender<GlyphCac
});
}
}
GlyphCacheMsg::DeleteFont(font_key) => {
profile_scope!("DeleteFont");
// Delete a font from the font context in each worker thread.
let barrier = Arc::new(Barrier::new(worker_count));
for _ in 0..worker_count {
let barrier = barrier.clone();
workers.lock().unwrap().execute(move || {
FONT_CONTEXT.with(|font_context| {
let mut font_context = font_context.borrow_mut();
font_context.delete_font(&font_key);
});
barrier.wait();
});
}
}
GlyphCacheMsg::RequestGlyphs(key, size, color, glyph_instances, render_mode, glyph_options) => {
profile_scope!("RequestGlyphs");

View File

@ -41,7 +41,14 @@ impl SceneProperties {
}
/// Get the current value for a transform property.
pub fn resolve_layout_transform(&self, property: &PropertyBinding<LayoutTransform>) -> LayoutTransform {
pub fn resolve_layout_transform(&self,
property: Option<&PropertyBinding<LayoutTransform>>)
-> LayoutTransform {
let property = match property {
Some(property) => property,
None => return LayoutTransform::identity(),
};
match *property {
PropertyBinding::Value(matrix) => matrix,
PropertyBinding::Binding(ref key) => {

View File

@ -23,9 +23,9 @@ use std::hash::BuildHasherDefault;
use texture_cache::TexturePage;
use util::{TransformedRect, TransformedRectKind};
use webrender_traits::{AuxiliaryLists, ColorF, DeviceIntPoint, DeviceIntRect, DeviceUintPoint};
use webrender_traits::{DeviceUintSize, FontRenderMode, ImageRendering, LayerRect, LayerSize};
use webrender_traits::{LayerToScrollTransform, LayerToWorldTransform, MixBlendMode, PipelineId};
use webrender_traits::{ScrollLayerId, WorldPoint4D, WorldToLayerTransform};
use webrender_traits::{DeviceUintSize, FontRenderMode, ImageRendering, LayerPoint, LayerRect};
use webrender_traits::{LayerToWorldTransform, MixBlendMode, PipelineId, ScrollLayerId};
use webrender_traits::{WorldPoint4D, WorldToLayerTransform};
// Special sentinel value recognized by the shader. It is considered to be
// a dummy task that doesn't mask out anything.
@ -398,7 +398,7 @@ pub enum PrimitiveRunCmd {
PushScrollLayer(ScrollLayerIndex),
PopScrollLayer,
PrimitiveRun(PrimitiveIndex, usize),
PrimitiveRun(PrimitiveIndex, usize, ScrollLayerId),
}
#[derive(Debug, Copy, Clone)]
@ -870,6 +870,7 @@ pub struct RenderTarget {
pub vertical_blurs: Vec<BlurCommand>,
pub horizontal_blurs: Vec<BlurCommand>,
pub readbacks: Vec<DeviceIntRect>,
pub isolate_clears: Vec<DeviceIntRect>,
page_allocator: TexturePage,
}
@ -884,6 +885,7 @@ impl RenderTarget {
vertical_blurs: Vec::new(),
horizontal_blurs: Vec::new(),
readbacks: Vec::new(),
isolate_clears: Vec::new(),
page_allocator: TexturePage::new(CacheTextureId(0), size),
}
}
@ -909,6 +911,16 @@ impl RenderTarget {
opaque_items: info.opaque_items,
alpha_items: info.alpha_items,
});
if info.isolate_clear {
let location = match task.location {
RenderTaskLocation::Dynamic(origin, size) => {
DeviceIntRect::new(origin.unwrap().0, size)
}
RenderTaskLocation::Fixed => panic!()
};
self.isolate_clears.push(location);
}
}
RenderTaskKind::VerticalBlur(_, prim_index) => {
// Find the child render task that we are applying
@ -1301,53 +1313,79 @@ pub struct StackingContextIndex(pub usize);
#[derive(Debug)]
pub struct StackingContext {
pub pipeline_id: PipelineId,
pub local_transform: LayerToScrollTransform,
// Offset in the parent reference frame to the origin of this stacking
// context's coordinate system.
pub reference_frame_offset: LayerPoint,
// Bounds of this stacking context in its own coordinate system.
pub local_rect: LayerRect,
pub bounding_rect: DeviceIntRect,
pub composite_ops: CompositeOps,
pub clip_scroll_groups: Vec<ClipScrollGroupIndex>,
// Signifies that this stacking context should be drawn in a separate render pass
// with a transparent background and then composited back to its parent. Used to
// support mix-blend-mode in certain cases.
pub should_isolate: bool,
// Set for the root stacking context of a display list or an iframe. Used for determining
// when to isolate a mix-blend-mode composite.
pub is_page_root: bool,
pub is_visible: bool,
}
impl StackingContext {
pub fn new(pipeline_id: PipelineId,
local_transform: LayerToScrollTransform,
reference_frame_offset: LayerPoint,
local_rect: LayerRect,
composite_ops: CompositeOps,
clip_scroll_group_index: ClipScrollGroupIndex)
is_page_root: bool,
composite_ops: CompositeOps)
-> StackingContext {
StackingContext {
pipeline_id: pipeline_id,
local_transform: local_transform,
reference_frame_offset: reference_frame_offset,
local_rect: local_rect,
bounding_rect: DeviceIntRect::zero(),
composite_ops: composite_ops,
clip_scroll_groups: vec![clip_scroll_group_index],
clip_scroll_groups: Vec::new(),
should_isolate: false,
is_page_root: is_page_root,
is_visible: false,
}
}
pub fn clip_scroll_group(&self) -> ClipScrollGroupIndex {
pub fn clip_scroll_group(&self, scroll_layer_id: ScrollLayerId) -> ClipScrollGroupIndex {
// Currently there is only one scrolled stacking context per context,
// but eventually this will be selected from the vector based on the
// scroll layer of this primitive.
self.clip_scroll_groups[0]
for group in &self.clip_scroll_groups {
if group.1 == scroll_layer_id {
return *group;
}
}
unreachable!("Looking for non-existent ClipScrollGroup");
}
// A stacking context contributes to the scene unless its composite ops
// (e.g. a zero-opacity filter) guarantee that nothing it draws can be
// visible, in which case it can be culled.
pub fn can_contribute_to_scene(&self) -> bool {
    let invisible = self.composite_ops.will_make_invisible();
    !invisible
}
/// Returns true if a clip scroll group for `id` has already been added
/// to this stacking context.
pub fn has_clip_scroll_group(&self, id: ScrollLayerId) -> bool {
    // `any()` short-circuits on the first match regardless of iteration
    // direction, so the previous `.rev()` changed nothing observable;
    // a plain forward scan is equivalent and drops the extra adaptor.
    self.clip_scroll_groups.iter().any(|index| index.1 == id)
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd)]
pub struct ClipScrollGroupIndex(pub usize);
#[derive(Debug, Copy, Clone, Eq, PartialEq, Hash)]
pub struct ClipScrollGroupIndex(pub usize, pub ScrollLayerId);
#[derive(Debug)]
pub struct ClipScrollGroup {
pub stacking_context_index: StackingContextIndex,
pub scroll_layer_id: ScrollLayerId,
pub packed_layer_index: PackedLayerIndex,
pub pipeline_id: PipelineId,
pub xf_rect: Option<TransformedRect>,
}
@ -1393,6 +1431,31 @@ impl PackedLayer {
pub fn empty() -> PackedLayer {
Default::default()
}
// Store the layer-to-world transform and cache its inverse.
//
// Panics if `transform` is singular: `inverse()` returns None for a
// non-invertible matrix and the `unwrap()` here treats that as a bug.
pub fn set_transform(&mut self, transform: LayerToWorldTransform) {
    self.transform = transform;
    self.inv_transform = self.transform.inverse().unwrap();
}
// Compute the screen-space footprint of `local_rect` under this layer's
// current transform and store its vertices / local clip rect.
//
// Returns Some(transformed rect) only when the rect is present, non-empty,
// and its bounding rect intersects `screen_rect`. Otherwise returns None
// and leaves the previously stored vertices and clip rect untouched.
// Callers are expected to have set the transform first (see set_transform).
pub fn set_rect(&mut self,
                local_rect: Option<LayerRect>,
                screen_rect: &DeviceIntRect,
                device_pixel_ratio: f32)
                -> Option<TransformedRect> {
    // A missing or empty rect can never be visible.
    let local_rect = match local_rect {
        Some(rect) if !rect.is_empty() => rect,
        _ => return None,
    };
    let xf_rect = TransformedRect::new(&local_rect, &self.transform, device_pixel_ratio);
    // Cull rects whose device-space bounds fall entirely off screen.
    if !xf_rect.bounding_rect.intersects(screen_rect) {
        return None;
    }
    self.screen_vertices = xf_rect.vertices.clone();
    self.local_clip_rect = local_rect;
    Some(xf_rect)
}
}
#[derive(Debug, Clone)]
@ -1437,7 +1500,7 @@ impl CompositeOps {
/// A rendering-oriented representation of frame::Frame built by the render backend
/// and presented to the renderer.
pub struct Frame {
pub viewport_size: LayerSize,
pub window_size: DeviceUintSize,
pub background_color: Option<ColorF>,
pub device_pixel_ratio: f32,
pub cache_size: DeviceUintSize,

View File

@ -5,13 +5,12 @@ authors = ["The Mozilla Project Developers"]
license = "MPL-2.0"
[dependencies]
webrender_traits = {path = "../webrender_traits", version = "0.20.0"}
webrender_traits = {path = "../webrender_traits", version = "0.23.1"}
euclid = "0.11"
app_units = "0.4"
gleam = "0.2"
[dependencies.webrender]
path = "../webrender"
version = "0.19.0"
version = "0.22.1"
default-features = false
features = ["codegen"]

View File

@ -21,7 +21,8 @@ public:
bool* aUseANGLE,
RefPtr<widget::CompositorWidget>&& aWidget,
layers::SynchronousTask* aTask,
bool aEnableProfiler)
bool aEnableProfiler,
LayoutDeviceIntSize aSize)
: mWrApi(aApi)
, mMaxTextureSize(aMaxTextureSize)
, mUseANGLE(aUseANGLE)
@ -29,6 +30,7 @@ public:
, mCompositorWidget(Move(aWidget))
, mTask(aTask)
, mEnableProfiler(aEnableProfiler)
, mSize(aSize)
{
MOZ_COUNT_CTOR(NewRenderer);
}
@ -51,7 +53,8 @@ public:
*mUseANGLE = gl->IsANGLE();
WrRenderer* wrRenderer = nullptr;
if (!wr_window_new(aWindowId, gl.get(), this->mEnableProfiler, mWrApi, &wrRenderer)) {
if (!wr_window_new(aWindowId, mSize.width, mSize.height, gl.get(),
this->mEnableProfiler, mWrApi, &wrRenderer)) {
return;
}
MOZ_ASSERT(wrRenderer);
@ -79,6 +82,7 @@ private:
RefPtr<widget::CompositorWidget> mCompositorWidget;
layers::SynchronousTask* mTask;
bool mEnableProfiler;
LayoutDeviceIntSize mSize;
};
class RemoveRenderer : public RendererEvent
@ -110,7 +114,8 @@ private:
already_AddRefed<WebRenderAPI>
WebRenderAPI::Create(bool aEnableProfiler,
layers::CompositorBridgeParentBase* aBridge,
RefPtr<widget::CompositorWidget>&& aWidget)
RefPtr<widget::CompositorWidget>&& aWidget,
LayoutDeviceIntSize aSize)
{
MOZ_ASSERT(aBridge);
MOZ_ASSERT(aWidget);
@ -127,7 +132,7 @@ WebRenderAPI::Create(bool aEnableProfiler,
// the next time we need to access the WrApi object.
layers::SynchronousTask task("Create Renderer");
auto event = MakeUnique<NewRenderer>(&wrApi, aBridge, &maxTextureSize, &useANGLE,
Move(aWidget), &task, aEnableProfiler);
Move(aWidget), &task, aEnableProfiler, aSize);
RenderThread::Get()->RunEvent(id, Move(event));
task.Wait();

View File

@ -39,7 +39,8 @@ public:
/// This can be called on the compositor thread only.
static already_AddRefed<WebRenderAPI> Create(bool aEnableProfiler,
layers::CompositorBridgeParentBase* aBridge,
RefPtr<widget::CompositorWidget>&& aWidget);
RefPtr<widget::CompositorWidget>&& aWidget,
LayoutDeviceIntSize aSize);
wr::WindowId GetId() const { return mId; }

View File

@ -282,6 +282,8 @@ pub extern fn wr_api_generate_frame(api: &mut RenderApi) {
// Call MakeCurrent before this.
#[no_mangle]
pub extern fn wr_window_new(window_id: WrWindowId,
window_width: u32,
window_height: u32,
gl_context: *mut c_void,
enable_profiler: bool,
out_api: &mut *mut RenderApi,
@ -310,7 +312,8 @@ pub extern fn wr_window_new(window_id: WrWindowId,
.. Default::default()
};
let (renderer, sender) = match Renderer::new(opts) {
let window_size = DeviceUintSize::new(window_width, window_height);
let (renderer, sender) = match Renderer::new(opts, window_size) {
Ok((renderer, sender)) => { (renderer, sender) }
Err(e) => {
println!(" Failed to create a Renderer: {:?}", e);
@ -361,8 +364,8 @@ pub extern fn wr_dp_begin(state: &mut WrState, width: u32, height: u32) {
bounds,
ClipRegion::simple(&bounds),
0,
PropertyBinding::Value(LayoutTransform::identity()),
LayoutTransform::identity(),
None,
None,
webrender_traits::MixBlendMode::Normal,
Vec::new(),
);
@ -637,8 +640,8 @@ pub extern fn wr_dp_push_stacking_context(state:&mut WrState, bounds: WrRect, ov
bounds,
clip_region2,
state.z_index,
PropertyBinding::Value(*transform),
LayoutTransform::identity(),
Some(PropertyBinding::Value(*transform)),
None,
mix_blend_mode,
filters);
state.frame_builder.dl_builder.push_scroll_layer(clip_region, bounds.size, ServoScrollRootId(1));

View File

@ -427,6 +427,8 @@ wr_rendered_epochs_delete(WrRenderedEpochs* pipeline_epochs) WR_DESTRUCTOR_SAFE_
WR_INLINE bool
wr_window_new(WrWindowId window_id,
uint32_t window_width,
uint32_t window_height,
void* aGLContext,
bool enable_profiler,
WrAPI** out_api,

View File

@ -1,15 +1,12 @@
[package]
name = "webrender_traits"
version = "0.20.0"
version = "0.23.1"
authors = ["Glenn Watson <gw@intuitionlibrary.com>"]
license = "MPL-2.0"
repository = "https://github.com/servo/webrender"
build = "build.rs"
[features]
default = ["codegen"]
nightly = ["euclid/unstable", "serde/unstable"]
codegen = ["serde_codegen", "serde_codegen/with-syntex"]
ipc = ["ipc-channel"]
[dependencies]
@ -21,15 +18,10 @@ heapsize = "0.3.6"
ipc-channel = {version = "0.7", optional = true}
offscreen_gl_context = {version = "0.6", features = ["serde"]}
serde = "0.9"
serde_derive = {version = "0.9", optional = true}
serde_derive = "0.9"
[target.'cfg(target_os = "macos")'.dependencies]
core-graphics = "0.7"
[target.'cfg(target_os = "windows")'.dependencies]
servo-dwrote = "0.2"
[build-dependencies.serde_codegen]
version = "0.9"
default_features = false
optional = true
dwrote = "0.3"

View File

@ -1,46 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Build-time codegen path: expand the serde annotations in src/types.rs
// into generated Rust placed in OUT_DIR (used when the serde_codegen
// feature is selected instead of serde_derive).
#[cfg(all(feature = "serde_codegen", not(feature = "serde_derive")))]
mod inner {
    extern crate serde_codegen;
    use std::env;
    use std::path::Path;
    pub fn main() {
        // OUT_DIR is always provided by cargo for build scripts.
        let out_dir = env::var_os("OUT_DIR").unwrap();
        let src = Path::new("src/types.rs");
        let dst = Path::new(&out_dir).join("types.rs");
        serde_codegen::expand(&src, &dst).unwrap();
        // Re-run codegen only when the input file changes.
        println!("cargo:rerun-if-changed=src/types.rs");
    }
}
// Derive path: serde_derive expands at compile time via proc macros,
// so the build script has nothing to generate.
#[cfg(all(feature = "serde_derive", not(feature = "serde_codegen")))]
mod inner {
    pub fn main() {}
}
// Enabling both codegen paths at once is a configuration error; fail
// the build with an actionable message.
#[cfg(all(feature = "serde_codegen", feature = "serde_derive"))]
mod inner {
    pub fn main() {
        panic!("serde_codegen and serde_derive are both used. \
You probably forgot --no-default-features.")
    }
}
// Neither codegen path enabled: fail the build with an actionable message.
#[cfg(not(any(feature = "serde_codegen", feature = "serde_derive")))]
mod inner {
    pub fn main() {
        // The original passed two adjacent string literals to panic!().
        // Unlike C, Rust does not auto-concatenate adjacent string
        // literals, so that form does not compile. Use a `\` line
        // continuation instead, matching the sibling cfg branch above.
        panic!("Neither serde_codegen nor serde_derive are used. \
                You probably want --features serde_derive --no-default-features.")
    }
}
// Dispatch to whichever cfg-selected `inner` implementation is active.
fn main() {
    inner::main();
}

View File

@ -6,14 +6,15 @@ use byteorder::{LittleEndian, WriteBytesExt};
use channel::{self, MsgSender, PayloadHelperMethods, PayloadSender};
use offscreen_gl_context::{GLContextAttributes, GLLimits};
use std::cell::Cell;
use {ApiMsg, ColorF, DisplayListBuilder, Epoch, ImageDescriptor};
use {ApiMsg, ColorF, Epoch, ImageDescriptor};
use {FontKey, IdNamespace, ImageKey, NativeFontHandle, PipelineId};
use {RenderApiSender, ResourceId, ScrollEventPhase, ScrollLayerState, ScrollLocation, ServoScrollRootId};
use {GlyphKey, GlyphDimensions, ImageData, WebGLContextId, WebGLCommand, TileSize};
use {DeviceIntSize, DynamicProperties, LayoutPoint, LayoutSize, WorldPoint, PropertyBindingKey, PropertyBindingId};
use {DeviceUintRect, DeviceUintSize};
use {BuiltDisplayList, AuxiliaryLists};
use VRCompositorCommand;
use ExternalEvent;
use {ExternalEvent, PageZoomFactor};
use std::marker::PhantomData;
impl RenderApiSender {
@ -70,6 +71,11 @@ impl RenderApi {
self.api_sender.send(msg).unwrap();
}
/// Ask the render backend to delete the font template registered under
/// `key`.
///
/// Panics if the backend channel has been closed.
pub fn delete_font(&self, key: FontKey) {
    self.api_sender.send(ApiMsg::DeleteFont(key)).unwrap();
}
/// Gets the dimensions for the supplied glyph keys
///
/// Note: Internally, the internal texture cache doesn't store
@ -193,6 +199,18 @@ impl RenderApi {
self.api_sender.send(msg).unwrap();
}
/// Set the desktop-style page zoom factor. The backend multiplies it
/// with the device pixel ratio when building frames.
///
/// Panics if the backend channel has been closed.
pub fn set_page_zoom(&self, page_zoom: PageZoomFactor) {
    self.api_sender
        .send(ApiMsg::SetPageZoom(page_zoom))
        .unwrap();
}
/// Notify the backend of the current window size and the inner
/// (content) rect within that window.
///
/// Panics if the backend channel has been closed.
pub fn set_window_parameters(&self,
                             window_size: DeviceUintSize,
                             inner_rect: DeviceUintRect) {
    self.api_sender
        .send(ApiMsg::SetWindowParameters(window_size, inner_rect))
        .unwrap();
}
pub fn tick_scrolling_bounce_animations(&self) {
let msg = ApiMsg::TickScrollingBounce;
self.api_sender.send(msg).unwrap();

View File

@ -284,8 +284,8 @@ impl DisplayListBuilder {
bounds: LayoutRect,
clip: ClipRegion,
z_index: i32,
transform: PropertyBinding<LayoutTransform>,
perspective: LayoutTransform,
transform: Option<PropertyBinding<LayoutTransform>>,
perspective: Option<LayoutTransform>,
mix_blend_mode: MixBlendMode,
filters: Vec<FilterOp>) {
let stacking_context = StackingContext {
@ -355,6 +355,40 @@ impl DisplayListBuilder {
self.list.push(item);
}
// Don't use this function. It will go away.
// It exists as a hack in Gecko to retain sub-parts of display lists so
// that they can be regenerated without rebuilding Gecko display items.
pub fn push_built_display_list(&mut self, dl: BuiltDisplayList, aux: AuxiliaryLists) {
    use SpecificDisplayItem::*;
    // Every ItemRange inside the copied items indexes into the SOURCE
    // list's auxiliary lists, so each one must be relocated into this
    // builder's auxiliary lists as we copy. (A custom derive that let
    // ItemRanges relocate themselves would avoid this hand-written match.)
    for i in dl.all_display_items() {
        // Copy the item so the ranges can be rewritten in place.
        let mut i = *i;
        match i.item {
            Text(ref mut item) => {
                item.glyphs = self.auxiliary_lists_builder.add_glyph_instances(aux.glyph_instances(&item.glyphs));
            }
            Gradient(ref mut item) => {
                item.stops = self.auxiliary_lists_builder.add_gradient_stops(aux.gradient_stops(&item.stops));
            }
            RadialGradient(ref mut item) => {
                item.stops = self.auxiliary_lists_builder.add_gradient_stops(aux.gradient_stops(&item.stops));
            }
            PushStackingContext(ref mut item) => {
                item.stacking_context.filters = self.auxiliary_lists_builder.add_filters(aux.filters(&item.stacking_context.filters));
            }
            Iframe(_) | PushScrollLayer(_) => {
                // Relocating these is not supported; hitting one is a bug
                // in the caller.
                panic!();
            }
            _ => {}
        }
        // Every item carries a clip region whose complex-clip range must
        // also be relocated, regardless of item kind.
        i.clip.complex = self.auxiliary_lists_builder.add_complex_clip_regions(aux.complex_clip_regions(&i.clip.complex));
        self.list.push(i);
    }
}
pub fn new_clip_region(&mut self,
rect: &LayoutRect,
complex: Vec<ComplexClipRegion>,

View File

@ -3,7 +3,6 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#![cfg_attr(feature = "nightly", feature(nonzero))]
#![cfg_attr(feature = "serde_derive", feature(rustc_attrs, structural_match))]
extern crate app_units;
extern crate byteorder;
@ -17,7 +16,6 @@ extern crate heapsize;
extern crate ipc_channel;
extern crate offscreen_gl_context;
extern crate serde;
#[cfg(feature = "serde_derive")]
#[macro_use]
extern crate serde_derive;
@ -27,10 +25,6 @@ extern crate core_graphics;
#[cfg(target_os = "windows")]
extern crate dwrote;
#[cfg(feature = "serde_codegen")]
include!(concat!(env!("OUT_DIR"), "/types.rs"));
#[cfg(feature = "serde_derive")]
include!("types.rs");
mod units;

View File

@ -10,8 +10,8 @@ impl StackingContext {
pub fn new(scroll_policy: ScrollPolicy,
bounds: LayoutRect,
z_index: i32,
transform: PropertyBinding<LayoutTransform>,
perspective: LayoutTransform,
transform: Option<PropertyBinding<LayoutTransform>>,
perspective: Option<LayoutTransform>,
mix_blend_mode: MixBlendMode,
filters: Vec<FilterOp>,
auxiliary_lists_builder: &mut AuxiliaryListsBuilder)

View File

@ -30,6 +30,7 @@ pub type TileSize = u16;
pub enum ApiMsg {
AddRawFont(FontKey, Vec<u8>),
AddNativeFont(FontKey, NativeFontHandle),
DeleteFont(FontKey),
/// Gets the glyph dimensions
GetGlyphDimensions(Vec<GlyphKey>, MsgSender<Vec<Option<GlyphDimensions>>>),
/// Adds an image from the resource cache.
@ -50,7 +51,9 @@ pub enum ApiMsg {
BuiltDisplayListDescriptor,
AuxiliaryListsDescriptor,
bool),
SetPageZoom(PageZoomFactor),
SetRootPipeline(PipelineId),
SetWindowParameters(DeviceUintSize, DeviceUintRect),
Scroll(ScrollLocation, WorldPoint, ScrollEventPhase),
ScrollLayersWithScrollId(LayoutPoint, PipelineId, ServoScrollRootId),
TickScrollingBounce,
@ -900,8 +903,8 @@ pub struct StackingContext {
pub scroll_policy: ScrollPolicy,
pub bounds: LayoutRect,
pub z_index: i32,
pub transform: PropertyBinding<LayoutTransform>,
pub perspective: LayoutTransform,
pub transform: Option<PropertyBinding<LayoutTransform>>,
pub perspective: Option<LayoutTransform>,
pub mix_blend_mode: MixBlendMode,
pub filters: ItemRange,
}
@ -1186,6 +1189,22 @@ pub enum VRCompositorCommand {
Release(VRCompositorId)
}
/// Represents a desktop style page zoom factor.
///
/// The wrapped value is a plain multiplier: the render backend multiplies
/// it with the device pixel ratio when building and rendering frames.
#[derive(Clone, Copy, Serialize, Deserialize, Debug)]
pub struct PageZoomFactor(f32);
impl PageZoomFactor {
    /// Construct a new page zoom factor from an untyped scale.
    pub fn new(scale: f32) -> PageZoomFactor {
        PageZoomFactor(scale)
    }
    /// Get the page zoom factor as an untyped float.
    pub fn get(&self) -> f32 {
        self.0
    }
}
// Trait object that handles WebVR commands.
// Receives the texture_id associated to the WebGLContext.
pub trait VRCompositorHandler: Send {
@ -1197,6 +1216,7 @@ impl fmt::Debug for ApiMsg {
match self {
&ApiMsg::AddRawFont(..) => { write!(f, "ApiMsg::AddRawFont") }
&ApiMsg::AddNativeFont(..) => { write!(f, "ApiMsg::AddNativeFont") }
&ApiMsg::DeleteFont(..) => { write!(f, "ApiMsg::DeleteFont") }
&ApiMsg::GetGlyphDimensions(..) => { write!(f, "ApiMsg::GetGlyphDimensions") }
&ApiMsg::AddImage(..) => { write!(f, "ApiMsg::AddImage") }
&ApiMsg::UpdateImage(..) => { write!(f, "ApiMsg::UpdateImage") }
@ -1216,6 +1236,8 @@ impl fmt::Debug for ApiMsg {
&ApiMsg::VRCompositorCommand(..) => { write!(f, "ApiMsg::VRCompositorCommand") }
&ApiMsg::ExternalEvent(..) => { write!(f, "ApiMsg::ExternalEvent") }
&ApiMsg::ShutDown => { write!(f, "ApiMsg::ShutDown") }
&ApiMsg::SetPageZoom(..) => { write!(f, "ApiMsg::SetPageZoom") }
&ApiMsg::SetWindowParameters(..) => { write!(f, "ApiMsg::SetWindowParameters") }
}
}
}

View File

@ -1810,7 +1810,7 @@ fuzzy-if(skiaContent,1,5) == 956513-1.svg 956513-1-ref.svg
== 957770-1.svg 957770-1-ref.svg
== 960277-1.html 960277-1-ref.html
fuzzy-if(skiaContent,1,80) == 961887-1.html 961887-1-ref.html
fails-if(webrender) == 961887-2.html 961887-2-ref.html
== 961887-2.html 961887-2-ref.html
== 961887-3.html 961887-3-ref.html
pref(layout.css.overflow-clip-box.enabled,true) fuzzy(50,145) fuzzy-if(asyncPan&&!layersGPUAccelerated,102,3712) == 966992-1.html 966992-1-ref.html
skip-if(Android) == 966510-1.html 966510-1-ref.html # scrollable elements other than the root probably won't work well on android until bug 776030 is fixed

View File

@ -48,7 +48,7 @@ fuzzy(2,14400) pref(layout.css.mix-blend-mode.enabled,true) == mix-blend-mode-ch
pref(layout.css.mix-blend-mode.enabled,true) == mix-blend-mode-nested-976533.html mix-blend-mode-nested-976533-ref.html
pref(layout.css.mix-blend-mode.enabled,true) == mix-blend-mode-culling-1207041.html mix-blend-mode-culling-1207041-ref.html
pref(layout.css.mix-blend-mode.enabled,true) == mix-blend-mode-dest-alpha-1135271.html mix-blend-mode-dest-alpha-1135271-ref.html
fails-if(webrender) == clipped-mixblendmode-containing-unclipped-stuff.html clipped-mixblendmode-containing-unclipped-stuff-ref.html
== clipped-mixblendmode-containing-unclipped-stuff.html clipped-mixblendmode-containing-unclipped-stuff-ref.html
fuzzy(1,6800) == clipped-opacity-containing-unclipped-mixblendmode.html clipped-opacity-containing-unclipped-mixblendmode-ref.html
# Test plan 5.3.1 Blending between the background layers and the background color for an element with background-blend-mode
@ -85,7 +85,7 @@ pref(layout.css.background-blend-mode.enabled,true) == background-blending-backg
# Test plan 5.3.11 background-blend-mode for an element with background-attachement
pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed.html background-blending-background-attachement-fixed-ref.html
pref(layout.css.background-blend-mode.enabled,true) fails-if(webrender) == background-blending-background-attachement-fixed-scroll.html background-blending-background-attachement-fixed-scroll-ref.html
pref(layout.css.background-blend-mode.enabled,true) == background-blending-background-attachement-fixed-scroll.html background-blending-background-attachement-fixed-scroll-ref.html
pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-image.html background-blend-mode-body-image-ref.html
fuzzy-if(Android,4,768) fuzzy-if(gtkWidget,1,132) fuzzy-if(skiaContent,1,800) pref(layout.css.background-blend-mode.enabled,true) == background-blend-mode-body-transparent-image.html background-blend-mode-body-transparent-image-ref.html

View File

@ -432,7 +432,7 @@ pref(layout.css.mix-blend-mode.enabled,true) == blend-normal.svg blend-normal-re
#skip-if(Android) pref(layout.css.mix-blend-mode.enabled,true) == blend-saturation.svg blend-saturation-ref.svg
#skip-if(Android) pref(layout.css.mix-blend-mode.enabled,true) == blend-screen.svg blend-screen-ref.svg
#skip-if(Android) pref(layout.css.mix-blend-mode.enabled,true) == blend-soft-light.svg blend-soft-light-ref.svg
skip-if(Android) pref(layout.css.mix-blend-mode.enabled,true) fails-if(webrender) == blend-difference-stacking.html blend-difference-stacking-ref.html
skip-if(Android) pref(layout.css.mix-blend-mode.enabled,true) == blend-difference-stacking.html blend-difference-stacking-ref.html
# test case for Fragment URLs
# https://drafts.csswg.org/css-values/#local-urls

View File

@ -0,0 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"172610b244a5ee8a8e2f1f045058b8abf9291d84bb76bf8779d2fd420419c2d6","Cargo.toml":"c0894413ac3abce23bb93d1284031b29dd00ef7d2f5fcb8573208e0c33280538","README.md":"d69d75705e2582721cbfb2d3b4b2af052c71679057a0b2ac53a22c03f1755bba","appveyor.yml":"2c7b2468dc69bef84860b8900024cb6e1a1c52f6fe1232e8ccd83caaf7c231ca","src/bitmap_render_target.rs":"d3b229f85a9804ac52976431657727b410e7d5253283df046e46d98c196f0a3a","src/com_helpers.rs":"fccb4b36379ae3454a88aa32a8e5c09e46ef5f5626266dde1fe5f40a992de39c","src/comptr.rs":"218435689f505769686e07cfc5428852dda90b849a0d48e670f632307f5edc7c","src/font.rs":"9bdf3134c6ad3639eab3da4419c9b43aad2673797f6fdc65841da2c82e1f3af4","src/font_collection.rs":"969fa3abf141dc3504774886f4783fda4a74cd5a198c643f8a77fc1af4e75258","src/font_face.rs":"9506ca579345ab2b6b5615fc75f8f431e2bb0dbd93123d1d2a21a73c851a5427","src/font_family.rs":"403da9f8f9903cbe7f9f79636497b273f9885e200f53af99f9d4e483f11d6889","src/font_file.rs":"60ad02fc25765a2c113175ea372e98a2be0d84aa65fef9246b6a0192e63ff708","src/font_file_loader_impl.rs":"0d304ad99ff1e6874510a1498223329d798ff75b417e3db7e823a695003dfe92","src/gdi_interop.rs":"98922996afc5b8c8304cb65e7c965419003825dfa172a3e11fe69bf3d768551c","src/glyph_run_analysis.rs":"d30d8b41b047815ab5770c730b7a6d09939f2347b4a4257b87bebec08a5794fe","src/helpers.rs":"5d6f164468234ca8806dc1cea117b42dbfae80cc4c9ae965cb0556efdb364682","src/lib.rs":"07dae7e9a6b8e2970917eade968490e2af90110047a0e16f539647269b12f439","src/rendering_params.rs":"be1d1c433f76926c285d8ecdb747c5d9cc6a6c10c1a1890c0760cd99755ed471","src/test.rs":"d77e45f8866abeea070cbbafd4cbde62d875292e8d191310a04c70091978547c","src/types.rs":"784235c15d61fb0d001373575169aa473c92af18dcbc1709a5b2bbaa3a7ceb22"},"package":"74114b6b49d6731835da7a28a3642651451e315f7f9b9d04e907e65a45681796"}

19
third_party/rust/dwrote/Cargo.toml vendored Normal file
View File

@ -0,0 +1,19 @@
[package]
name = "dwrote"
description = "Lightweight binding to DirectWrite."
repository = "https://github.com/vvuk/dwrote-rs"
license = "MPL-2.0"
version = "0.3.0"
authors = ["Vladimir Vukicevic <vladimir@pobox.com>"]
[lib]
name = "dwrote"
[dependencies]
libc = "0.2"
lazy_static = "0.2"
winapi = "0.2"
kernel32-sys = "0.2"
gdi32-sys = "0.2"
serde = "0.9"
serde_derive = "0.9"

13
third_party/rust/dwrote/appveyor.yml vendored Normal file
View File

@ -0,0 +1,13 @@
environment:
PATH: 'C:\msys64\mingw64\bin;C:\msys64\usr\bin;%PATH%;C:\Rust\bin'
install:
- ps: Start-FileDownload "https://static.rust-lang.org/dist/rust-nightly-x86_64-pc-windows-gnu.msi"
- msiexec /passive /i "rust-nightly-x86_64-pc-windows-gnu.msi" ADDLOCAL=Rustc,Cargo,Std INSTALLDIR=C:\Rust
- rustc -V
- cargo -V
build: false
test_script:
- cargo test --verbose

View File

@ -4,7 +4,6 @@
#![allow(non_upper_case_globals)]
#[cfg(feature = "serde_derive")]
#[macro_use]
extern crate serde_derive;
@ -17,10 +16,6 @@ extern crate kernel32;
extern crate libc;
extern crate serde;
#[cfg(feature = "serde_codegen")]
include!(concat!(env!("OUT_DIR"), "/types.rs"));
#[cfg(feature = "serde_derive")]
include!("types.rs");
use winapi::DWRITE_FACTORY_TYPE_SHARED;

View File

@ -1 +0,0 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"dea9bd4a6e93fc97a4ba26460497c35d3225c4c935f84c5a559094768c25f616","src/bound.rs":"d1f94299052acaeeae0c011c2304b072dec76c7c32469d2b8881c2e380346496","src/de.rs":"c1a4060eeb40de7ac96828cf35dc538ee0df7ba501388092f7e75d31542b11ba","src/lib.rs":"47db5228d2bc20d2d1d2403323de3ad4578f95b9baa5495aa7a095a2eae525dd","src/ser.rs":"361cd76e182d8ddb60560aab7a648dd012673b89e1090b59ff9565ec3c54143c"},"package":"1f94de585a73dfc312ca77194209278a587bf90d3edc6c2d0fc479b0ed71d1f0"}

View File

@ -1,32 +0,0 @@
[package]
name = "serde_codegen"
version = "0.9.0"
authors = ["Erick Tryzelaar <erick.tryzelaar@gmail.com>"]
license = "MIT/Apache-2.0"
description = "Macros to auto-generate implementations for the serde framework"
homepage = "https://serde.rs"
repository = "https://github.com/serde-rs/serde"
documentation = "https://serde.rs/codegen.html"
keywords = ["serde", "serialization"]
include = ["Cargo.toml", "src/**/*.rs"]
[features]
default = ["with-syntex"]
unstable = []
unstable-testing = ["clippy"]
with-syntex = [
"syntex",
"syntex_syntax",
]
with-syn = []
[dependencies]
clippy = { version = "^0.*", optional = true }
quote = "0.3.8"
serde_codegen_internals = { version = "=0.11.3", default-features = false, path = "../serde_codegen_internals" }
syn = { version = "0.10", features = ["aster", "visit"] }
syntex = { version = "^0.54.0", optional = true }
syntex_syntax = { version = "^0.54.0", optional = true }
[badges]
travis-ci = { repository = "serde-rs/serde" }

View File

@ -1,121 +0,0 @@
use std::collections::HashSet;
use syn::{self, aster, visit};
use internals::ast::Item;
use internals::attr;
// Strip the default from every type parameter: in the generated impls the
// parameters appear in positions where a default would be parsed as an
// associated type binding ("error: associated type bindings are not
// allowed here").
pub fn without_defaults(generics: &syn::Generics) -> syn::Generics {
    let mut stripped = generics.clone();
    stripped.ty_params = stripped
        .ty_params
        .into_iter()
        .map(|mut ty_param| {
            ty_param.default = None;
            ty_param
        })
        .collect();
    stripped
}
/// Return a copy of `generics` with the given `where` predicates appended.
pub fn with_where_predicates(
    generics: &syn::Generics,
    predicates: &[syn::WherePredicate],
) -> syn::Generics {
    let base = aster::from_generics(generics.clone());
    let extended = base.with_predicates(predicates.to_vec());
    extended.build()
}
/// Return a copy of `generics` extended with every `where` predicate that
/// `from_field` yields for any field of `item` (e.g. predicates supplied via
/// a `#[serde(bound = "...")]` attribute on individual fields).
pub fn with_where_predicates_from_fields<F>(
    item: &Item,
    generics: &syn::Generics,
    from_field: F,
) -> syn::Generics
    where F: Fn(&attr::Field) -> Option<&[syn::WherePredicate]>,
{
    // Gather the per-field predicates up front; fields without an explicit
    // bound attribute contribute nothing.
    let field_predicates: Vec<syn::WherePredicate> = item.body.all_fields()
        .filter_map(|field| from_field(&field.attrs))
        .flat_map(|predicates| predicates.to_vec())
        .collect();
    aster::from_generics(generics.clone())
        .with_predicates(field_predicates)
        .build()
}
// Puts the given bound on any generic type parameters that are used in fields
// for which filter returns true.
//
// For example, the following struct needs the bound `A: Serialize, B: Serialize`.
//
// struct S<'b, A, B: 'b, C> {
// a: A,
// b: Option<&'b B>
// #[serde(skip_serializing)]
// c: C,
// }
pub fn with_bound<F>(
item: &Item,
generics: &syn::Generics,
filter: F,
bound: &syn::Path,
) -> syn::Generics
where F: Fn(&attr::Field) -> bool,
{
// AST visitor that records which of the item's own type parameters occur
// in the types it is walked over.
struct FindTyParams {
// Set of all generic type parameters on the current struct (A, B, C in
// the example). Initialized up front.
all_ty_params: HashSet<syn::Ident>,
// Set of generic type parameters used in fields for which filter
// returns true (A and B in the example). Filled in as the visitor sees
// them.
relevant_ty_params: HashSet<syn::Ident>,
}
impl visit::Visitor for FindTyParams {
fn visit_path(&mut self, path: &syn::Path) {
if let Some(seg) = path.segments.last() {
if seg.ident == "PhantomData" {
// Hardcoded exception, because PhantomData<T> implements
// Serialize and Deserialize whether or not T implements it.
return;
}
}
// A bare single-segment, non-global path (`T` rather than `::T` or
// `m::T`) is the only form that can name a type parameter.
if !path.global && path.segments.len() == 1 {
let id = path.segments[0].ident.clone();
if self.all_ty_params.contains(&id) {
self.relevant_ty_params.insert(id);
}
}
// Recurse so parameters nested in generic arguments (e.g. Vec<T>)
// are found too.
visit::walk_path(self, path);
}
}
let all_ty_params: HashSet<_> = generics.ty_params.iter()
.map(|ty_param| ty_param.ident.clone())
.collect();
// Only walk the types of fields the caller cares about (e.g. fields that
// are actually serialized).
let relevant_tys = item.body.all_fields()
.filter(|&field| filter(&field.attrs))
.map(|field| &field.ty);
let mut visitor = FindTyParams {
all_ty_params: all_ty_params,
relevant_ty_params: HashSet::new(),
};
for ty in relevant_tys {
visit::walk_ty(&mut visitor, ty);
}
// Emit one `P: Bound` predicate per relevant parameter. Iterating over
// generics.ty_params (rather than the set) keeps the predicates in
// declaration order, which keeps the generated code deterministic.
aster::from_generics(generics.clone())
.with_predicates(
generics.ty_params.iter()
.map(|ty_param| ty_param.ident.clone())
.filter(|id| visitor.relevant_ty_params.contains(id))
.map(|id| aster::where_predicate()
// the type parameter that is being bounded e.g. T
.bound().build(aster::ty().id(id))
// the bound e.g. Serialize
.bound().trait_(bound.clone()).build()
.build()))
.build()
}

File diff suppressed because it is too large Load Diff

View File

@ -1,171 +0,0 @@
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", feature(plugin))]
#![cfg_attr(feature = "clippy", allow(too_many_arguments))]
#![cfg_attr(feature = "clippy", allow(used_underscore_binding))]
// The `quote!` macro requires deep recursion.
#![recursion_limit = "192"]
extern crate serde_codegen_internals as internals;
#[cfg(feature = "with-syntex")]
extern crate syntex;
#[cfg(feature = "with-syntex")]
extern crate syntex_syntax as syntax;
extern crate syn;
#[macro_use]
extern crate quote;
#[cfg(feature = "with-syntex")]
use std::path::Path;
mod bound;
mod de;
mod ser;
#[cfg(feature = "with-syntex")]
// Build the syntex registry that drives pre-procedural-macro code expansion:
// it registers the two derive decorators and a post-expansion pass that
// removes the `#[serde(...)]` helper attributes from the expanded crate.
fn syntex_registry() -> syntex::Registry {
use syntax::{ast, fold};
/// Strip the serde attributes from the crate.
#[cfg(feature = "with-syntex")]
fn strip_attributes(krate: ast::Crate) -> ast::Crate {
/// Helper folder that strips the serde attributes after the extensions have been expanded.
struct StripAttributeFolder;
impl fold::Folder for StripAttributeFolder {
fn fold_attribute(&mut self, attr: ast::Attribute) -> Option<ast::Attribute> {
// Only list-style `#[serde(...)]` attributes are removed; other
// attributes pass through untouched.
if attr.value.name == "serde" {
if let ast::MetaItemKind::List(..) = attr.value.node {
return None;
}
}
Some(attr)
}
fn fold_mac(&mut self, mac: ast::Mac) -> ast::Mac {
// Default no-op macro folding; required because Folder's default
// implementation panics on macros.
fold::noop_fold_mac(mac, self)
}
}
fold::Folder::fold_crate(&mut StripAttributeFolder, krate)
}
let mut reg = syntex::Registry::new();
reg.add_attr("feature(custom_derive)");
reg.add_attr("feature(custom_attribute)");
// `#[derive(Serialize)]` / `#[derive(Deserialize)]` dispatch to the shim
// wrappers around the code generators.
reg.add_decorator("derive_Serialize", shim::expand_derive_serialize);
reg.add_decorator("derive_Deserialize", shim::expand_derive_deserialize);
reg.add_post_expansion_pass(strip_attributes);
reg
}
/// Expand serde derives in the given source string and return the expanded
/// source. The work runs through `with_extra_stack` because expansion can
/// recurse deeply.
#[cfg(feature = "with-syntex")]
pub fn expand_str(src: &str) -> Result<String, syntex::Error> {
    let owned_src = src.to_owned();
    syntex::with_extra_stack(move || syntex_registry().expand_str("", "", &owned_src))
}
/// Expand serde derives in the file at `src`, writing the expanded source to
/// `dst`. Like `expand_str`, expansion runs on a helper with extra stack.
#[cfg(feature = "with-syntex")]
pub fn expand<S, D>(src: S, dst: D) -> Result<(), syntex::Error>
    where S: AsRef<Path>,
          D: AsRef<Path>,
{
    let src_path = src.as_ref().to_owned();
    let dst_path = dst.as_ref().to_owned();
    syntex::with_extra_stack(move || syntex_registry().expand("", src_path, dst_path))
}
// Generates a syntex decorator entry point (`$func`) that bridges from the
// old compiler-plugin world (syntax::ast) into the syn-based code generator
// `$pkg::$func`: it pretty-prints the annotated item, re-parses it with syn,
// runs the generator, and parses the generated impl back into the crate.
macro_rules! shim {
($name:ident $pkg:ident :: $func:ident) => {
pub fn $func(
cx: &mut ::syntax::ext::base::ExtCtxt,
span: ::syntax::codemap::Span,
meta_item: &::syntax::ast::MetaItem,
annotatable: &::syntax::ext::base::Annotatable,
push: &mut FnMut(::syntax::ext::base::Annotatable)
) {
// The derive may only decorate items (structs/enums); report an
// error on anything else and bail out.
let item = match *annotatable {
::syntax::ext::base::Annotatable::Item(ref item) => item,
_ => {
cx.span_err(
meta_item.span,
concat!("`#[derive(",
stringify!($name),
")]` may only be applied to structs and enums"));
return;
}
};
use syntax::{attr, ast, visit};
// Mark all `#[serde(...)]` attributes used up front so the compiler
// does not warn about them before the strip pass removes them.
struct MarkSerdeAttributesUsed;
impl<'a> visit::Visitor<'a> for MarkSerdeAttributesUsed {
fn visit_attribute(&mut self, attr: &ast::Attribute) {
if attr.value.name == "serde" {
if let ast::MetaItemKind::List(..) = attr.value.node {
attr::mark_used(attr);
}
}
}
}
visit::walk_item(&mut MarkSerdeAttributesUsed, item);
use syntax::print::pprust;
// Round-trip: syntax AST -> source text -> syn AST.
let s = pprust::item_to_string(item);
use {syn, $pkg};
let syn_item = syn::parse_macro_input(&s).unwrap();
let expanded = match $pkg::$func(&syn_item) {
Ok(expanded) => expanded.to_string(),
Err(msg) => {
cx.span_err(span, &msg);
return;
}
};
use syntax::parse;
// Parse the generated impl back into a syntax::ast item and push it
// into the expansion output.
let name = stringify!($name).to_string();
let sess = cx.parse_sess;
let impl_item = parse::parse_item_from_source_str(name, expanded, sess);
push(::syntax::ext::base::Annotatable::Item(impl_item.unwrap().unwrap()));
}
};
}
#[cfg(feature = "with-syntex")]
// Instantiate the syntex decorator entry points for both derives.
mod shim {
shim!(Serialize ser::expand_derive_serialize);
shim!(Deserialize de::expand_derive_deserialize);
}
#[cfg(feature = "with-syn")]
#[doc(hidden)]
/// Not public API. Use the serde_derive crate.
pub fn expand_derive_serialize(item: &str) -> Result<quote::Tokens, String> {
    // Parse the derive input with syn, then hand it to the generator.
    let parsed_item = syn::parse_macro_input(item).unwrap();
    ser::expand_derive_serialize(&parsed_item)
}
#[cfg(feature = "with-syn")]
#[doc(hidden)]
/// Not public API. Use the serde_derive crate.
pub fn expand_derive_deserialize(item: &str) -> Result<quote::Tokens, String> {
    // Parse the derive input with syn, then hand it to the generator.
    let parsed_item = syn::parse_macro_input(item).unwrap();
    de::expand_derive_deserialize(&parsed_item)
}

View File

@ -1,570 +0,0 @@
use syn::{self, aster, Ident};
use quote::Tokens;
use bound;
use internals::ast::{Body, Field, Item, Style, Variant};
use internals::{self, attr};
// Entry point: expand `#[derive(Serialize)]` for the given parsed item into
// the tokens of a `_serde::Serialize` impl, wrapped in a dummy const so the
// `extern crate serde as _serde` import does not leak into the caller's scope.
pub fn expand_derive_serialize(item: &syn::MacroInput) -> Result<Tokens, String> {
let ctxt = internals::Ctxt::new();
let item = Item::from_ast(&ctxt, item);
// Surface any attribute-parsing errors collected while building the Item.
try!(ctxt.check());
// Generics for the impl: item generics minus defaults, plus Serialize
// bounds on serialized type parameters.
let impl_generics = build_impl_generics(&item);
let ty = aster::ty().path()
.segment(item.ident.clone()).with_generics(impl_generics.clone()).build()
.build();
let body = serialize_body(&item,
&impl_generics,
ty.clone());
let where_clause = &impl_generics.where_clause;
// Unique-per-type const name so multiple derives in one module don't clash.
let dummy_const = Ident::new(format!("_IMPL_SERIALIZE_FOR_{}", item.ident));
Ok(quote! {
#[allow(non_upper_case_globals, unused_attributes, unused_qualifications)]
const #dummy_const: () = {
extern crate serde as _serde;
#[automatically_derived]
impl #impl_generics _serde::Serialize for #ty #where_clause {
fn serialize<__S>(&self, _serializer: __S) -> _serde::export::Result<__S::Ok, __S::Error>
where __S: _serde::Serializer
{
#body
}
}
};
})
}
// Generics for the generated impl: every generic from the input, plus a
// `T: Serialize` bound for each generic field type that we serialize
// ourselves (unless the item or field supplies its own `bound` attribute).
fn build_impl_generics(item: &Item) -> syn::Generics {
    let generics = bound::without_defaults(item.generics);
    let generics = bound::with_where_predicates_from_fields(
        item, &generics,
        |attrs| attrs.ser_bound());
    // An item-level `#[serde(bound = "...")]` replaces the inferred bounds.
    if let Some(predicates) = item.attrs.ser_bound() {
        bound::with_where_predicates(&generics, predicates)
    } else {
        bound::with_bound(item, &generics,
                          needs_serialize_bound,
                          &aster::path().ids(&["_serde", "Serialize"]).build())
    }
}
// A field needs an inferred `T: Serialize` bound only when we serialize it
// ourselves: skipped fields and `serialize_with` fields are not serialized
// by us, and a field-level `bound` attribute supplies its own bound.
fn needs_serialize_bound(attrs: &attr::Field) -> bool {
    let bound_handled_elsewhere = attrs.skip_serializing()
        || attrs.serialize_with().is_some()
        || attrs.ser_bound().is_some();
    !bound_handled_elsewhere
}
// Dispatch on the item's shape (enum vs. the four struct styles) to the
// matching body generator. Panics on impossible shapes, which would indicate
// a bug in the Item construction rather than bad user input.
fn serialize_body(
item: &Item,
impl_generics: &syn::Generics,
ty: syn::Ty,
) -> Tokens {
match item.body {
Body::Enum(ref variants) => {
serialize_item_enum(
&item.ident,
impl_generics,
ty,
variants,
&item.attrs)
}
Body::Struct(Style::Struct, ref fields) => {
// Brace structs must have named fields by construction.
if fields.iter().any(|field| field.ident.is_none()) {
panic!("struct has unnamed fields");
}
serialize_struct(
impl_generics,
ty,
fields,
&item.attrs)
}
Body::Struct(Style::Tuple, ref fields) => {
// Tuple structs must not have named fields by construction.
if fields.iter().any(|field| field.ident.is_some()) {
panic!("tuple struct has named fields");
}
serialize_tuple_struct(
impl_generics,
ty,
fields,
&item.attrs)
}
Body::Struct(Style::Newtype, ref fields) => {
// Newtype style guarantees exactly one field.
serialize_newtype_struct(
impl_generics,
ty,
&fields[0],
&item.attrs)
}
Body::Struct(Style::Unit, _) => {
serialize_unit_struct(
&item.attrs)
}
}
}
/// Body for a unit struct: a single `serialize_unit_struct` call with the
/// (possibly renamed) type name.
fn serialize_unit_struct(item_attrs: &attr::Item) -> Tokens {
    let name = item_attrs.name().serialize_name();
    quote!(_serializer.serialize_unit_struct(#name))
}
/// Body for a newtype struct: serialize the single field `self.0`, routed
/// through `serialize_with` wrapping when the field requests it.
fn serialize_newtype_struct(
    impl_generics: &syn::Generics,
    item_ty: syn::Ty,
    field: &Field,
    item_attrs: &attr::Item,
) -> Tokens {
    let type_name = item_attrs.name().serialize_name();
    let field_expr = match field.attrs.serialize_with() {
        Some(path) => wrap_serialize_with(
            &item_ty, impl_generics, field.ty, path, quote!(&self.0)),
        None => quote!(&self.0),
    };
    quote!(_serializer.serialize_newtype_struct(#type_name, #field_expr))
}
/// Body for a tuple struct: begin a tuple-struct serialization, emit one
/// `serialize_field` statement per field, then end it.
fn serialize_tuple_struct(
    impl_generics: &syn::Generics,
    ty: syn::Ty,
    fields: &[Field],
    item_attrs: &attr::Item,
) -> Tokens {
    let type_name = item_attrs.name().serialize_name();
    let field_stmts = serialize_tuple_struct_visitor(
        ty.clone(),
        fields,
        impl_generics,
        false,
        quote!(_serde::ser::SerializeTupleStruct::serialize_field),
    );
    // With zero fields the state variable is never mutated; drop the `mut`
    // to avoid an unused_mut warning in the generated code.
    let len = field_stmts.len();
    let let_mut = mut_if(len > 0);
    quote! {
        let #let_mut __serde_state = try!(_serializer.serialize_tuple_struct(#type_name, #len));
        #(#field_stmts)*
        _serde::ser::SerializeTupleStruct::end(__serde_state)
    }
}
// Body for a brace struct: begin a struct serialization, emit one
// `serialize_field` statement per non-skipped field, then end it. The length
// argument is computed *at runtime* in the generated code because
// `skip_serializing_if` decisions depend on runtime field values.
fn serialize_struct(
impl_generics: &syn::Generics,
ty: syn::Ty,
fields: &[Field],
item_attrs: &attr::Item,
) -> Tokens {
let serialize_fields = serialize_struct_visitor(
ty.clone(),
fields,
impl_generics,
false,
quote!(_serde::ser::SerializeStruct::serialize_field),
);
let type_name = item_attrs.name().serialize_name();
let mut serialized_fields = fields.iter()
.filter(|&field| !field.attrs.skip_serializing())
.peekable();
// No statically serialized fields means __serde_state is never mutated;
// omit `mut` to avoid an unused_mut warning in the generated code.
let let_mut = mut_if(serialized_fields.peek().is_some());
// Build a `0 + 1 + (if skip(f) { 0 } else { 1 }) + ...` expression that
// evaluates to the actual number of serialized fields at runtime.
let len = serialized_fields
.map(|field| {
let ident = field.ident.clone().expect("struct has unnamed fields");
let field_expr = quote!(&self.#ident);
match field.attrs.skip_serializing_if() {
Some(path) => quote!(if #path(#field_expr) { 0 } else { 1 }),
None => quote!(1),
}
})
.fold(quote!(0), |sum, expr| quote!(#sum + #expr));
quote! {
let #let_mut __serde_state = try!(_serializer.serialize_struct(#type_name, #len));
#(#serialize_fields)*
_serde::ser::SerializeStruct::end(__serde_state)
}
}
/// Body for an enum: a `match *self` with one arm per variant, each arm
/// generated by `serialize_variant`.
fn serialize_item_enum(
    type_ident: &syn::Ident,
    impl_generics: &syn::Generics,
    ty: syn::Ty,
    variants: &[Variant],
    item_attrs: &attr::Item,
) -> Tokens {
    let mut arms = Vec::with_capacity(variants.len());
    for (variant_index, variant) in variants.iter().enumerate() {
        arms.push(serialize_variant(
            type_ident,
            impl_generics,
            ty.clone(),
            variant,
            variant_index,
            item_attrs,
        ));
    }
    quote! {
        match *self {
            #(#arms)*
        }
    }
}
// Generate one match arm for an enum variant. A `skip_serializing` variant
// gets an arm that returns a custom error; otherwise the arm dispatches to
// the style-specific variant serializer (unit / newtype / tuple / struct).
fn serialize_variant(
type_ident: &syn::Ident,
generics: &syn::Generics,
ty: syn::Ty,
variant: &Variant,
variant_index: usize,
item_attrs: &attr::Item,
) -> Tokens {
let type_name = item_attrs.name().serialize_name();
let variant_ident = variant.ident.clone();
let variant_name = variant.attrs.name().serialize_name();
if variant.attrs.skip_serializing() {
let skipped_msg = format!("the enum variant {}::{} cannot be serialized",
type_ident, variant_ident);
let skipped_err = quote! {
Err(_serde::ser::Error::custom(#skipped_msg))
};
// Pattern that matches the variant's payload without binding it.
let fields_pat = match variant.style {
Style::Unit => quote!(),
Style::Newtype | Style::Tuple => quote!( (..) ),
Style::Struct => quote!( {..} ),
};
quote! {
#type_ident::#variant_ident #fields_pat => #skipped_err,
}
} else { // variant wasn't skipped
match variant.style {
Style::Unit => {
quote! {
#type_ident::#variant_ident =>
_serde::Serializer::serialize_unit_variant(
_serializer,
#type_name,
#variant_index,
#variant_name,
),
}
},
Style::Newtype => {
// The single payload is bound as `__simple_value`, which the
// generated block refers to.
let block = serialize_newtype_variant(
type_name,
variant_index,
variant_name,
ty,
generics,
&variant.fields[0],
);
quote! {
#type_ident::#variant_ident(ref __simple_value) => #block,
}
},
Style::Tuple => {
// Bind the payload fields as __field0, __field1, ... matching
// the names serialize_tuple_struct_visitor generates.
let field_names = (0 .. variant.fields.len())
.map(|i| Ident::new(format!("__field{}", i)));
let block = serialize_tuple_variant(
type_name,
variant_index,
variant_name,
generics,
ty,
&variant.fields,
);
quote! {
#type_ident::#variant_ident(#(ref #field_names),*) => { #block }
}
}
Style::Struct => {
// Bind the payload by its field names; the generated block
// refers to the bindings directly.
let fields = variant.fields.iter()
.map(|f| f.ident.clone().expect("struct variant has unnamed fields"));
let block = serialize_struct_variant(
variant_index,
variant_name,
generics,
ty,
&variant.fields,
item_attrs,
);
quote! {
#type_ident::#variant_ident { #(ref #fields),* } => { #block }
}
}
}
}
}
/// Block for a newtype enum variant: serialize the payload (already bound as
/// `__simple_value` by the caller's match arm), routed through
/// `serialize_with` wrapping when the field requests it.
fn serialize_newtype_variant(
    type_name: String,
    variant_index: usize,
    variant_name: String,
    item_ty: syn::Ty,
    generics: &syn::Generics,
    field: &Field,
) -> Tokens {
    let field_expr = match field.attrs.serialize_with() {
        Some(path) => wrap_serialize_with(
            &item_ty, generics, field.ty, path, quote!(__simple_value)),
        None => quote!(__simple_value),
    };
    quote! {
        _serde::Serializer::serialize_newtype_variant(
            _serializer,
            #type_name,
            #variant_index,
            #variant_name,
            #field_expr,
        )
    }
}
/// Block for a tuple enum variant: begin a tuple-variant serialization, emit
/// one `serialize_field` per field (bound as `__fieldN` by the match arm),
/// then end it.
fn serialize_tuple_variant(
    type_name: String,
    variant_index: usize,
    variant_name: String,
    generics: &syn::Generics,
    structure_ty: syn::Ty,
    fields: &[Field],
) -> Tokens {
    let field_stmts = serialize_tuple_struct_visitor(
        structure_ty,
        fields,
        generics,
        true,
        quote!(_serde::ser::SerializeTupleVariant::serialize_field),
    );
    // Omit `mut` when there are no fields, avoiding an unused_mut warning.
    let len = field_stmts.len();
    let let_mut = mut_if(len > 0);
    quote! {
        let #let_mut __serde_state = try!(_serializer.serialize_tuple_variant(
            #type_name,
            #variant_index,
            #variant_name,
            #len));
        #(#field_stmts)*
        _serde::ser::SerializeTupleVariant::end(__serde_state)
    }
}
// Block for a struct enum variant: begin a struct-variant serialization,
// emit one `serialize_field` per non-skipped field (bound by name in the
// match arm), then end it. Like serialize_struct, the length is a runtime
// expression because of `skip_serializing_if`.
fn serialize_struct_variant(
variant_index: usize,
variant_name: String,
generics: &syn::Generics,
ty: syn::Ty,
fields: &[Field],
item_attrs: &attr::Item,
) -> Tokens {
let serialize_fields = serialize_struct_visitor(
ty.clone(),
fields,
generics,
true,
quote!(_serde::ser::SerializeStructVariant::serialize_field),
);
let item_name = item_attrs.name().serialize_name();
let mut serialized_fields = fields.iter()
.filter(|&field| !field.attrs.skip_serializing())
.peekable();
// Omit `mut` when no field is ever serialized (avoids unused_mut).
let let_mut = mut_if(serialized_fields.peek().is_some());
// Runtime length: 0 + (1 or skip-dependent 0/1) per serialized field.
let len = serialized_fields
.map(|field| {
let ident = field.ident.clone().expect("struct has unnamed fields");
match field.attrs.skip_serializing_if() {
Some(path) => quote!(if #path(#ident) { 0 } else { 1 }),
None => quote!(1),
}
})
.fold(quote!(0), |sum, expr| quote!(#sum + #expr));
quote! {
let #let_mut __serde_state = try!(_serializer.serialize_struct_variant(
#item_name,
#variant_index,
#variant_name,
#len,
));
#(#serialize_fields)*
_serde::ser::SerializeStructVariant::end(__serde_state)
}
}
// Generate one `try!(func(&mut __serde_state, <field>))` statement per tuple
// field. `is_enum` selects the field expression: `__fieldN` bindings for an
// enum variant arm, `&self.N` for a tuple struct. `func` is the fully
// qualified serialize_field path for the relevant serializer trait.
fn serialize_tuple_struct_visitor(
structure_ty: syn::Ty,
fields: &[Field],
generics: &syn::Generics,
is_enum: bool,
func: Tokens,
) -> Vec<Tokens> {
fields.iter()
.enumerate()
.map(|(i, field)| {
let mut field_expr = if is_enum {
let id = Ident::new(format!("__field{}", i));
quote!(#id)
} else {
let i = Ident::new(i);
quote!(&self.#i)
};
// NOTE: the skip_serializing_if condition is built from the *raw*
// field expression, deliberately captured before any serialize_with
// wrapping below rewrites field_expr.
let skip = field.attrs.skip_serializing_if()
.map(|path| quote!(#path(#field_expr)));
if let Some(path) = field.attrs.serialize_with() {
field_expr = wrap_serialize_with(
&structure_ty, generics, field.ty, path, field_expr);
}
let ser = quote! {
try!(#func(&mut __serde_state, #field_expr));
};
match skip {
None => ser,
Some(skip) => quote!(if !#skip { #ser }),
}
})
.collect()
}
// Generate one `try!(func(&mut __serde_state, "key", <field>))` statement per
// non-skipped named field. `is_enum` selects the field expression: a bare
// `ident` binding for an enum variant arm, `&self.ident` for a struct.
fn serialize_struct_visitor(
structure_ty: syn::Ty,
fields: &[Field],
generics: &syn::Generics,
is_enum: bool,
func: Tokens,
) -> Vec<Tokens> {
fields.iter()
.filter(|&field| !field.attrs.skip_serializing())
.map(|field| {
let ident = field.ident.clone().expect("struct has unnamed field");
let mut field_expr = if is_enum {
quote!(#ident)
} else {
quote!(&self.#ident)
};
// Serialized key is the (possibly renamed) field name.
let key_expr = field.attrs.name().serialize_name();
// NOTE: the skip_serializing_if condition uses the raw field
// expression, captured before serialize_with wrapping rewrites it.
let skip = field.attrs.skip_serializing_if()
.map(|path| quote!(#path(#field_expr)));
if let Some(path) = field.attrs.serialize_with() {
field_expr = wrap_serialize_with(
&structure_ty, generics, field.ty, path, field_expr)
}
let ser = quote! {
try!(#func(&mut __serde_state, #key_expr, #field_expr));
};
match skip {
None => ser,
Some(skip) => quote!(if !#skip { #ser }),
}
})
.collect()
}
// Wrap a `serialize_with` field in a local `__SerializeWith` adapter struct
// whose Serialize impl calls the user-supplied `path` function. The adapter
// borrows the field for a fresh `'__a` lifetime and carries PhantomData of
// the item type so all of the item's generic parameters stay used.
fn wrap_serialize_with(
item_ty: &syn::Ty,
generics: &syn::Generics,
field_ty: &syn::Ty,
path: &syn::Path,
value: Tokens,
) -> Tokens {
let where_clause = &generics.where_clause;
// Item generics plus the borrow lifetime '__a bounding them.
let wrapper_generics = aster::from_generics(generics.clone())
.add_lifetime_bound("'__a")
.lifetime_name("'__a")
.build();
let wrapper_ty = aster::path()
.segment("__SerializeWith")
.with_generics(wrapper_generics.clone())
.build()
.build();
quote!({
struct __SerializeWith #wrapper_generics #where_clause {
value: &'__a #field_ty,
phantom: ::std::marker::PhantomData<#item_ty>,
}
impl #wrapper_generics _serde::Serialize for #wrapper_ty #where_clause {
fn serialize<__S>(&self, __s: __S) -> _serde::export::Result<__S::Ok, __S::Error>
where __S: _serde::Serializer
{
#path(self.value, __s)
}
}
&__SerializeWith {
value: #value,
phantom: ::std::marker::PhantomData::<#item_ty>,
}
})
}
// Serialization of an empty struct results in code like:
//
// let mut __serde_state = try!(serializer.serialize_struct("S", 0));
// _serde::ser::SerializeStruct::end(__serde_state)
//
// where we want to omit the `mut` to avoid an unused_mut warning.
fn mut_if(is_mut: bool) -> Option<Tokens> {
    match is_mut {
        true => Some(quote!(mut)),
        false => None,
    }
}

View File

@ -1 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"d95e5e29244853cb6c744f76f8829d6404fb517b69ea79c9b9f807ae0845c6c5","src/ast.rs":"0b96616b2becc71ac85ffb0efcf6c6d0b451c7e2e4934df19193962e7fc9316a","src/attr.rs":"f248f768cde166c5c9e2bb03335fc70148a0460e3434d3cb470ebb9ac41b3948","src/ctxt.rs":"80795ca96772fdef304584795966bbf28c24b788590c83e44efca3bb45a7d06a","src/lib.rs":"a5b9dae1488c7c50f5ed630bf473f5ca13d44f795faf380152fef10bfc7b9eee"},"package":"afad7924a009f859f380e4a2e3a509a845c2ac66435fcead74a4d983b21ae806"}
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"f11d07b974965dc94833195275fa536a0d7790e16ad3389d08f1e198202073fa","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"ebe318a04cf4e547e0f3ab97f1345ecb553358ee13ea81f99e3323e37d70ccdf","src/ast.rs":"7eedebeb0d2b76bda66c0b414a2a40ac19ff1d8604a84cd5d207b76d2d57de97","src/attr.rs":"77daffa2fd682d85debac843c5e69ebf1a1061a680c5d8e590ecf8fcf63976b7","src/case.rs":"44e5506efb5a99478b357f1b2ee1baf40a44b8630997cd62d657b1c677752932","src/ctxt.rs":"3650e9cfc8b58e65137bc5e5ee250117c5abe4a543807a908adabf1a1f57825a","src/lib.rs":"e620949a156816b0e326481d1070febc449cf72849bffd3ca15c473ff3af13e8"},"package":"4d52006899f910528a10631e5b727973fe668f3228109d1707ccf5bad5490b6e"}

View File

@ -1,6 +1,6 @@
[package]
name = "serde_codegen_internals"
version = "0.11.3"
version = "0.14.1"
authors = ["Erick Tryzelaar <erick.tryzelaar@gmail.com>"]
license = "MIT/Apache-2.0"
description = "AST representation used by Serde codegen. Unstable."
@ -8,11 +8,11 @@ homepage = "https://serde.rs"
repository = "https://github.com/serde-rs/serde"
documentation = "https://docs.serde.rs/serde_codegen_internals/"
keywords = ["serde", "serialization"]
include = ["Cargo.toml", "src/**/*.rs"]
[features]
unstable-testing = ["clippy"]
readme = "../README.md"
include = ["Cargo.toml", "src/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
[dependencies]
clippy = { version = "^0.*", optional = true }
syn = "0.10"
syn = { version = "0.11", default-features = false, features = ["parsing"] }
[badges]
travis-ci = { repository = "serde-rs/serde" }

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,25 @@
Copyright (c) 2014 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

View File

@ -0,0 +1,71 @@
# Serde &emsp; [![Build Status](https://api.travis-ci.org/serde-rs/serde.svg?branch=master)](https://travis-ci.org/serde-rs/serde) [![Latest Version](https://img.shields.io/crates/v/serde.svg)](https://crates.io/crates/serde)
**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
---
You may be looking for:
- [An overview of Serde](https://serde.rs/)
- [Data formats supported by Serde](https://serde.rs/#data-formats)
- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/codegen.html)
- [Examples](https://serde.rs/examples.html)
- [API documentation](https://docs.serde.rs/serde/)
- [Release notes](https://github.com/serde-rs/serde/releases)
## Serde in action
```rust
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[derive(Serialize, Deserialize, Debug)]
struct Point {
x: i32,
y: i32,
}
fn main() {
let point = Point { x: 1, y: 2 };
// Convert the Point to a JSON string.
let serialized = serde_json::to_string(&point).unwrap();
// Prints serialized = {"x":1,"y":2}
println!("serialized = {}", serialized);
// Convert the JSON string back to a Point.
let deserialized: Point = serde_json::from_str(&serialized).unwrap();
// Prints deserialized = Point { x: 1, y: 2 }
println!("deserialized = {:?}", deserialized);
}
```
## Getting help
Serde developers live in the #serde channel on
[`irc.mozilla.org`](https://wiki.mozilla.org/IRC). The #rust channel is also a
good resource with generally faster response time but less specific knowledge
about Serde. If IRC is not your thing or you don't get a good response, we are
happy to respond to [GitHub issues](https://github.com/serde-rs/serde/issues/new)
as well.
## License
Serde is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View File

@ -38,16 +38,30 @@ impl<'a> Item<'a> {
pub fn from_ast(cx: &Ctxt, item: &'a syn::MacroInput) -> Item<'a> {
let attrs = attr::Item::from_ast(cx, item);
let body = match item.body {
syn::Body::Enum(ref variants) => {
Body::Enum(enum_from_ast(cx, variants))
}
let mut body = match item.body {
syn::Body::Enum(ref variants) => Body::Enum(enum_from_ast(cx, variants)),
syn::Body::Struct(ref variant_data) => {
let (style, fields) = struct_from_ast(cx, variant_data);
Body::Struct(style, fields)
}
};
match body {
Body::Enum(ref mut variants) => {
for ref mut variant in variants {
variant.attrs.rename_by_rule(attrs.rename_all());
for ref mut field in &mut variant.fields {
field.attrs.rename_by_rule(variant.attrs.rename_all());
}
}
}
Body::Struct(_, ref mut fields) => {
for field in fields {
field.attrs.rename_by_rule(attrs.rename_all());
}
}
}
Item {
ident: item.ident.clone(),
attrs: attrs,
@ -58,15 +72,13 @@ impl<'a> Item<'a> {
}
impl<'a> Body<'a> {
pub fn all_fields(&'a self) -> Box<Iterator<Item=&'a Field<'a>> + 'a> {
pub fn all_fields(&'a self) -> Box<Iterator<Item = &'a Field<'a>> + 'a> {
match *self {
Body::Enum(ref variants) => {
Box::new(variants.iter()
.flat_map(|variant| variant.fields.iter()))
}
Body::Struct(_, ref fields) => {
Box::new(fields.iter())
.flat_map(|variant| variant.fields.iter()))
}
Body::Struct(_, ref fields) => Box::new(fields.iter()),
}
}
}
@ -87,18 +99,12 @@ fn enum_from_ast<'a>(cx: &Ctxt, variants: &'a [syn::Variant]) -> Vec<Variant<'a>
fn struct_from_ast<'a>(cx: &Ctxt, data: &'a syn::VariantData) -> (Style, Vec<Field<'a>>) {
match *data {
syn::VariantData::Struct(ref fields) => {
(Style::Struct, fields_from_ast(cx, fields))
}
syn::VariantData::Struct(ref fields) => (Style::Struct, fields_from_ast(cx, fields)),
syn::VariantData::Tuple(ref fields) if fields.len() == 1 => {
(Style::Newtype, fields_from_ast(cx, fields))
}
syn::VariantData::Tuple(ref fields) => {
(Style::Tuple, fields_from_ast(cx, fields))
}
syn::VariantData::Unit => {
(Style::Unit, Vec::new())
}
syn::VariantData::Tuple(ref fields) => (Style::Tuple, fields_from_ast(cx, fields)),
syn::VariantData::Unit => (Style::Unit, Vec::new()),
}
}

View File

@ -2,6 +2,7 @@ use Ctxt;
use syn;
use syn::MetaItem::{List, NameValue, Word};
use syn::NestedMetaItem::{Literal, MetaItem};
use std::str::FromStr;
// This module handles parsing of `#[serde(...)]` attributes. The entrypoints
// are `attr::Item::from_ast`, `attr::Variant::from_ast`, and
@ -11,6 +12,8 @@ use syn::NestedMetaItem::{Literal, MetaItem};
// user will see errors simultaneously for all bad attributes in the crate
// rather than just the first.
pub use case::RenameRule;
struct Attr<'c, T> {
cx: &'c Ctxt,
name: &'static str,
@ -90,8 +93,43 @@ impl Name {
pub struct Item {
name: Name,
deny_unknown_fields: bool,
default: Default,
rename_all: RenameRule,
ser_bound: Option<Vec<syn::WherePredicate>>,
de_bound: Option<Vec<syn::WherePredicate>>,
tag: EnumTag,
}
/// Styles of representing an enum.
#[derive(Debug)]
pub enum EnumTag {
/// The default.
///
/// ```json
/// {"variant1": {"key1": "value1", "key2": "value2"}}
/// ```
External,
/// `#[serde(tag = "type")]`
///
/// ```json
/// {"type": "variant1", "key1": "value1", "key2": "value2"}
/// ```
Internal { tag: String },
/// `#[serde(tag = "t", content = "c")]`
///
/// ```json
/// {"t": "variant1", "c": {"key1": "value1", "key2": "value2"}}
/// ```
Adjacent { tag: String, content: String },
/// `#[serde(untagged)]`
///
/// ```json
/// {"key1": "value1", "key2": "value2"}
/// ```
None,
}
impl Item {
@ -100,8 +138,13 @@ impl Item {
let mut ser_name = Attr::none(cx, "rename");
let mut de_name = Attr::none(cx, "rename");
let mut deny_unknown_fields = BoolAttr::none(cx, "deny_unknown_fields");
let mut default = Attr::none(cx, "default");
let mut rename_all = Attr::none(cx, "rename_all");
let mut ser_bound = Attr::none(cx, "bound");
let mut de_bound = Attr::none(cx, "bound");
let mut untagged = BoolAttr::none(cx, "untagged");
let mut internal_tag = Attr::none(cx, "tag");
let mut content = Attr::none(cx, "content");
for meta_items in item.attrs.iter().filter_map(get_serde_meta_items) {
for meta_item in meta_items {
@ -122,14 +165,57 @@ impl Item {
}
}
// Parse `#[serde(rename_all="foo")]`
MetaItem(NameValue(ref name, ref lit)) if name == "rename_all" => {
if let Ok(s) = get_string_from_lit(cx, name.as_ref(), name.as_ref(), lit) {
match RenameRule::from_str(&s) {
Ok(rename_rule) => rename_all.set(rename_rule),
Err(()) => {
cx.error(format!("unknown rename rule for #[serde(rename_all \
= {:?})]",
s))
}
}
}
}
// Parse `#[serde(deny_unknown_fields)]`
MetaItem(Word(ref name)) if name == "deny_unknown_fields" => {
deny_unknown_fields.set_true();
}
// Parse `#[serde(default)]`
MetaItem(Word(ref name)) if name == "default" => {
match item.body {
syn::Body::Struct(syn::VariantData::Struct(_)) => {
default.set(Default::Default);
}
_ => {
cx.error("#[serde(default)] can only be used on structs \
with named fields")
}
}
}
// Parse `#[serde(default="...")]`
MetaItem(NameValue(ref name, ref lit)) if name == "default" => {
if let Ok(path) = parse_lit_into_path(cx, name.as_ref(), lit) {
match item.body {
syn::Body::Struct(syn::VariantData::Struct(_)) => {
default.set(Default::Path(path));
}
_ => {
cx.error("#[serde(default = \"...\")] can only be used \
on structs with named fields")
}
}
}
}
// Parse `#[serde(bound="D: Serialize")]`
MetaItem(NameValue(ref name, ref lit)) if name == "bound" => {
if let Ok(where_predicates) = parse_lit_into_where(cx, name.as_ref(), name.as_ref(), lit) {
if let Ok(where_predicates) =
parse_lit_into_where(cx, name.as_ref(), name.as_ref(), lit) {
ser_bound.set(where_predicates.clone());
de_bound.set(where_predicates);
}
@ -143,26 +229,116 @@ impl Item {
}
}
// Parse `#[serde(untagged)]`
MetaItem(Word(ref name)) if name == "untagged" => {
match item.body {
syn::Body::Enum(_) => {
untagged.set_true();
}
syn::Body::Struct(_) => {
cx.error("#[serde(untagged)] can only be used on enums")
}
}
}
// Parse `#[serde(tag = "type")]`
MetaItem(NameValue(ref name, ref lit)) if name == "tag" => {
if let Ok(s) = get_string_from_lit(cx, name.as_ref(), name.as_ref(), lit) {
match item.body {
syn::Body::Enum(_) => {
internal_tag.set(s);
}
syn::Body::Struct(_) => {
cx.error("#[serde(tag = \"...\")] can only be used on enums")
}
}
}
}
// Parse `#[serde(content = "c")]`
MetaItem(NameValue(ref name, ref lit)) if name == "content" => {
if let Ok(s) = get_string_from_lit(cx, name.as_ref(), name.as_ref(), lit) {
match item.body {
syn::Body::Enum(_) => {
content.set(s);
}
syn::Body::Struct(_) => {
cx.error("#[serde(content = \"...\")] can only be used on \
enums")
}
}
}
}
MetaItem(ref meta_item) => {
cx.error(format!("unknown serde container attribute `{}`",
meta_item.name()));
}
Literal(_) => {
cx.error(format!("unexpected literal in serde container attribute"));
cx.error("unexpected literal in serde container attribute");
}
}
}
}
let tag = match (untagged.get(), internal_tag.get(), content.get()) {
(false, None, None) => EnumTag::External,
(true, None, None) => EnumTag::None,
(false, Some(tag), None) => {
// Check that there are no tuple variants.
if let syn::Body::Enum(ref variants) = item.body {
for variant in variants {
match variant.data {
syn::VariantData::Struct(_) |
syn::VariantData::Unit => {}
syn::VariantData::Tuple(ref fields) => {
if fields.len() != 1 {
cx.error("#[serde(tag = \"...\")] cannot be used with tuple \
variants");
break;
}
}
}
}
}
EnumTag::Internal { tag: tag }
}
(true, Some(_), None) => {
cx.error("enum cannot be both untagged and internally tagged");
EnumTag::External // doesn't matter, will error
}
(false, None, Some(_)) => {
cx.error("#[serde(tag = \"...\", content = \"...\")] must be used together");
EnumTag::External
}
(true, None, Some(_)) => {
cx.error("untagged enum cannot have #[serde(content = \"...\")]");
EnumTag::External
}
(false, Some(tag), Some(content)) => {
EnumTag::Adjacent {
tag: tag,
content: content,
}
}
(true, Some(_), Some(_)) => {
cx.error("untagged enum cannot have #[serde(tag = \"...\", content = \"...\")]");
EnumTag::External
}
};
Item {
name: Name {
serialize: ser_name.get().unwrap_or_else(|| item.ident.to_string()),
deserialize: de_name.get().unwrap_or_else(|| item.ident.to_string()),
},
deny_unknown_fields: deny_unknown_fields.get(),
default: default.get().unwrap_or(Default::None),
rename_all: rename_all.get().unwrap_or(RenameRule::None),
ser_bound: ser_bound.get(),
de_bound: de_bound.get(),
tag: tag,
}
}
@ -170,10 +346,18 @@ impl Item {
&self.name
}
pub fn rename_all(&self) -> &RenameRule {
&self.rename_all
}
pub fn deny_unknown_fields(&self) -> bool {
self.deny_unknown_fields
}
pub fn default(&self) -> &Default {
&self.default
}
pub fn ser_bound(&self) -> Option<&[syn::WherePredicate]> {
self.ser_bound.as_ref().map(|vec| &vec[..])
}
@ -181,12 +365,19 @@ impl Item {
pub fn de_bound(&self) -> Option<&[syn::WherePredicate]> {
self.de_bound.as_ref().map(|vec| &vec[..])
}
pub fn tag(&self) -> &EnumTag {
&self.tag
}
}
/// Represents variant attribute information
#[derive(Debug)]
pub struct Variant {
name: Name,
ser_renamed: bool,
de_renamed: bool,
rename_all: RenameRule,
skip_deserializing: bool,
skip_serializing: bool,
}
@ -197,6 +388,7 @@ impl Variant {
let mut de_name = Attr::none(cx, "rename");
let mut skip_deserializing = BoolAttr::none(cx, "skip_deserializing");
let mut skip_serializing = BoolAttr::none(cx, "skip_serializing");
let mut rename_all = Attr::none(cx, "rename_all");
for meta_items in variant.attrs.iter().filter_map(get_serde_meta_items) {
for meta_item in meta_items {
@ -216,6 +408,21 @@ impl Variant {
de_name.set_opt(de);
}
}
// Parse `#[serde(rename_all="foo")]`
MetaItem(NameValue(ref name, ref lit)) if name == "rename_all" => {
if let Ok(s) = get_string_from_lit(cx, name.as_ref(), name.as_ref(), lit) {
match RenameRule::from_str(&s) {
Ok(rename_rule) => rename_all.set(rename_rule),
Err(()) => {
cx.error(format!("unknown rename rule for #[serde(rename_all \
= {:?})]",
s))
}
}
}
}
// Parse `#[serde(skip_deserializing)]`
MetaItem(Word(ref name)) if name == "skip_deserializing" => {
skip_deserializing.set_true();
@ -226,22 +433,28 @@ impl Variant {
}
MetaItem(ref meta_item) => {
cx.error(format!("unknown serde variant attribute `{}`",
meta_item.name()));
cx.error(format!("unknown serde variant attribute `{}`", meta_item.name()));
}
Literal(_) => {
cx.error(format!("unexpected literal in serde variant attribute"));
cx.error("unexpected literal in serde variant attribute");
}
}
}
}
let ser_name = ser_name.get();
let ser_renamed = ser_name.is_some();
let de_name = de_name.get();
let de_renamed = de_name.is_some();
Variant {
name: Name {
serialize: ser_name.get().unwrap_or_else(|| variant.ident.to_string()),
deserialize: de_name.get().unwrap_or_else(|| variant.ident.to_string()),
serialize: ser_name.unwrap_or_else(|| variant.ident.to_string()),
deserialize: de_name.unwrap_or_else(|| variant.ident.to_string()),
},
ser_renamed: ser_renamed,
de_renamed: de_renamed,
rename_all: rename_all.get().unwrap_or(RenameRule::None),
skip_deserializing: skip_deserializing.get(),
skip_serializing: skip_serializing.get(),
}
@ -251,6 +464,19 @@ impl Variant {
&self.name
}
pub fn rename_by_rule(&mut self, rule: &RenameRule) {
if !self.ser_renamed {
self.name.serialize = rule.apply_to_variant(&self.name.serialize);
}
if !self.de_renamed {
self.name.deserialize = rule.apply_to_variant(&self.name.deserialize);
}
}
pub fn rename_all(&self) -> &RenameRule {
&self.rename_all
}
pub fn skip_deserializing(&self) -> bool {
self.skip_deserializing
}
@ -264,10 +490,12 @@ impl Variant {
#[derive(Debug)]
pub struct Field {
name: Name,
ser_renamed: bool,
de_renamed: bool,
skip_serializing: bool,
skip_deserializing: bool,
skip_serializing_if: Option<syn::Path>,
default: FieldDefault,
default: Default,
serialize_with: Option<syn::Path>,
deserialize_with: Option<syn::Path>,
ser_bound: Option<Vec<syn::WherePredicate>>,
@ -276,7 +504,7 @@ pub struct Field {
/// Represents the default to use for a field when deserializing.
#[derive(Debug, PartialEq)]
pub enum FieldDefault {
pub enum Default {
/// Field must always be specified because it does not have a default.
None,
/// The default is given by `std::default::Default::default()`.
@ -287,9 +515,7 @@ pub enum FieldDefault {
impl Field {
/// Extract out the `#[serde(...)]` attributes from a struct field.
pub fn from_ast(cx: &Ctxt,
index: usize,
field: &syn::Field) -> Self {
pub fn from_ast(cx: &Ctxt, index: usize, field: &syn::Field) -> Self {
let mut ser_name = Attr::none(cx, "rename");
let mut de_name = Attr::none(cx, "rename");
let mut skip_serializing = BoolAttr::none(cx, "skip_serializing");
@ -327,13 +553,13 @@ impl Field {
// Parse `#[serde(default)]`
MetaItem(Word(ref name)) if name == "default" => {
default.set(FieldDefault::Default);
default.set(Default::Default);
}
// Parse `#[serde(default="...")]`
MetaItem(NameValue(ref name, ref lit)) if name == "default" => {
if let Ok(path) = parse_lit_into_path(cx, name.as_ref(), lit) {
default.set(FieldDefault::Path(path));
default.set(Default::Path(path));
}
}
@ -368,9 +594,22 @@ impl Field {
}
}
// Parse `#[serde(with="...")]`
MetaItem(NameValue(ref name, ref lit)) if name == "with" => {
if let Ok(path) = parse_lit_into_path(cx, name.as_ref(), lit) {
let mut ser_path = path.clone();
ser_path.segments.push("serialize".into());
serialize_with.set(ser_path);
let mut de_path = path;
de_path.segments.push("deserialize".into());
deserialize_with.set(de_path);
}
}
// Parse `#[serde(bound="D: Serialize")]`
MetaItem(NameValue(ref name, ref lit)) if name == "bound" => {
if let Ok(where_predicates) = parse_lit_into_where(cx, name.as_ref(), name.as_ref(), lit) {
if let Ok(where_predicates) =
parse_lit_into_where(cx, name.as_ref(), name.as_ref(), lit) {
ser_bound.set(where_predicates.clone());
de_bound.set(where_predicates);
}
@ -385,12 +624,11 @@ impl Field {
}
MetaItem(ref meta_item) => {
cx.error(format!("unknown serde field attribute `{}`",
meta_item.name()));
cx.error(format!("unknown serde field attribute `{}`", meta_item.name()));
}
Literal(_) => {
cx.error(format!("unexpected literal in serde field attribute"));
cx.error("unexpected literal in serde field attribute");
}
}
}
@ -399,18 +637,24 @@ impl Field {
// Is skip_deserializing, initialize the field to Default::default()
// unless a different default is specified by `#[serde(default="...")]`
if skip_deserializing.0.value.is_some() {
default.set_if_none(FieldDefault::Default);
default.set_if_none(Default::Default);
}
let ser_name = ser_name.get();
let ser_renamed = ser_name.is_some();
let de_name = de_name.get();
let de_renamed = de_name.is_some();
Field {
name: Name {
serialize: ser_name.get().unwrap_or(ident.clone()),
deserialize: de_name.get().unwrap_or(ident),
serialize: ser_name.unwrap_or_else(|| ident.clone()),
deserialize: de_name.unwrap_or(ident),
},
ser_renamed: ser_renamed,
de_renamed: de_renamed,
skip_serializing: skip_serializing.get(),
skip_deserializing: skip_deserializing.get(),
skip_serializing_if: skip_serializing_if.get(),
default: default.get().unwrap_or(FieldDefault::None),
default: default.get().unwrap_or(Default::None),
serialize_with: serialize_with.get(),
deserialize_with: deserialize_with.get(),
ser_bound: ser_bound.get(),
@ -422,6 +666,15 @@ impl Field {
&self.name
}
pub fn rename_by_rule(&mut self, rule: &RenameRule) {
if !self.ser_renamed {
self.name.serialize = rule.apply_to_field(&self.name.serialize);
}
if !self.de_renamed {
self.name.deserialize = rule.apply_to_field(&self.name.deserialize);
}
}
pub fn skip_serializing(&self) -> bool {
self.skip_serializing
}
@ -434,7 +687,7 @@ impl Field {
self.skip_serializing_if.as_ref()
}
pub fn default(&self) -> &FieldDefault {
pub fn default(&self) -> &Default {
&self.default
}
@ -457,13 +710,12 @@ impl Field {
type SerAndDe<T> = (Option<T>, Option<T>);
fn get_ser_and_de<T, F>(
cx: &Ctxt,
attr_name: &'static str,
items: &[syn::NestedMetaItem],
f: F
) -> Result<SerAndDe<T>, ()>
where F: Fn(&Ctxt, &str, &str, &syn::Lit) -> Result<T, ()>,
fn get_ser_and_de<T, F>(cx: &Ctxt,
attr_name: &'static str,
items: &[syn::NestedMetaItem],
f: F)
-> Result<SerAndDe<T>, ()>
where F: Fn(&Ctxt, &str, &str, &syn::Lit) -> Result<T, ()>
{
let mut ser_item = Attr::none(cx, attr_name);
let mut de_item = Attr::none(cx, attr_name);
@ -483,7 +735,8 @@ fn get_ser_and_de<T, F>(
}
_ => {
cx.error(format!("malformed {0} attribute, expected `{0}(serialize = ..., deserialize = ...)`",
cx.error(format!("malformed {0} attribute, expected `{0}(serialize = ..., \
deserialize = ...)`",
attr_name));
return Err(());
}
@ -493,35 +746,34 @@ fn get_ser_and_de<T, F>(
Ok((ser_item.get(), de_item.get()))
}
fn get_renames(
cx: &Ctxt,
items: &[syn::NestedMetaItem],
) -> Result<SerAndDe<String>, ()> {
fn get_renames(cx: &Ctxt, items: &[syn::NestedMetaItem]) -> Result<SerAndDe<String>, ()> {
get_ser_and_de(cx, "rename", items, get_string_from_lit)
}
fn get_where_predicates(
cx: &Ctxt,
items: &[syn::NestedMetaItem],
) -> Result<SerAndDe<Vec<syn::WherePredicate>>, ()> {
fn get_where_predicates(cx: &Ctxt,
items: &[syn::NestedMetaItem])
-> Result<SerAndDe<Vec<syn::WherePredicate>>, ()> {
get_ser_and_de(cx, "bound", items, parse_lit_into_where)
}
pub fn get_serde_meta_items(attr: &syn::Attribute) -> Option<Vec<syn::NestedMetaItem>> {
match attr.value {
List(ref name, ref items) if name == "serde" => {
Some(items.iter().cloned().collect())
}
_ => None
List(ref name, ref items) if name == "serde" => Some(items.iter().cloned().collect()),
_ => None,
}
}
fn get_string_from_lit(cx: &Ctxt, attr_name: &str, meta_item_name: &str, lit: &syn::Lit) -> Result<String, ()> {
fn get_string_from_lit(cx: &Ctxt,
attr_name: &str,
meta_item_name: &str,
lit: &syn::Lit)
-> Result<String, ()> {
if let syn::Lit::Str(ref s, _) = *lit {
Ok(s.clone())
} else {
cx.error(format!("expected serde {} attribute to be a string: `{} = \"...\"`",
attr_name, meta_item_name));
attr_name,
meta_item_name));
Err(())
}
}
@ -531,7 +783,11 @@ fn parse_lit_into_path(cx: &Ctxt, attr_name: &str, lit: &syn::Lit) -> Result<syn
syn::parse_path(&string).map_err(|err| cx.error(err))
}
fn parse_lit_into_where(cx: &Ctxt, attr_name: &str, meta_item_name: &str, lit: &syn::Lit) -> Result<Vec<syn::WherePredicate>, ()> {
fn parse_lit_into_where(cx: &Ctxt,
attr_name: &str,
meta_item_name: &str,
lit: &syn::Lit)
-> Result<Vec<syn::WherePredicate>, ()> {
let string = try!(get_string_from_lit(cx, attr_name, meta_item_name, lit));
if string.is_empty() {
return Ok(Vec::new());

View File

@ -0,0 +1,115 @@
use std::ascii::AsciiExt;
use std::str::FromStr;
use self::RenameRule::*;
/// A casing convention applied to variant or field names by
/// `#[serde(rename_all = "...")]`.
#[derive(Debug, PartialEq)]
pub enum RenameRule {
    /// Don't apply a default rename rule.
    None,
    /// Rename direct children to "PascalCase" style, as typically used for enum variants.
    PascalCase,
    /// Rename direct children to "camelCase" style.
    CamelCase,
    /// Rename direct children to "snake_case" style, as commonly used for fields.
    SnakeCase,
    /// Rename direct children to "SCREAMING_SNAKE_CASE" style, as commonly used for constants.
    ScreamingSnakeCase,
    /// Rename direct children to "kebab-case" style.
    KebabCase,
}
impl RenameRule {
    /// Apply this rule to the name of an enum variant, which is assumed to be
    /// written in PascalCase in the Rust source.
    pub fn apply_to_variant(&self, variant: &str) -> String {
        match *self {
            RenameRule::None | RenameRule::PascalCase => variant.to_owned(),
            RenameRule::CamelCase => {
                // Lowercase only the leading character. Unlike the byte slice
                // `variant[..1]`, this does not panic on an empty string or on
                // a multi-byte first character.
                match variant.chars().next() {
                    Some(first) => {
                        first.to_ascii_lowercase().to_string() + &variant[first.len_utf8()..]
                    }
                    None => String::new(),
                }
            }
            RenameRule::SnakeCase => {
                let mut snake = String::new();
                for (i, ch) in variant.char_indices() {
                    // Insert `_` before every uppercase letter except a leading one.
                    if i > 0 && ch.is_uppercase() {
                        snake.push('_');
                    }
                    snake.push(ch.to_ascii_lowercase());
                }
                snake
            }
            RenameRule::ScreamingSnakeCase => {
                RenameRule::SnakeCase.apply_to_variant(variant).to_ascii_uppercase()
            }
            RenameRule::KebabCase => {
                RenameRule::SnakeCase.apply_to_variant(variant).replace('_', "-")
            }
        }
    }
    /// Apply this rule to the name of a struct field, which is assumed to be
    /// written in snake_case in the Rust source.
    pub fn apply_to_field(&self, field: &str) -> String {
        match *self {
            RenameRule::None | RenameRule::SnakeCase => field.to_owned(),
            RenameRule::PascalCase => {
                let mut pascal = String::new();
                let mut capitalize = true;
                for ch in field.chars() {
                    if ch == '_' {
                        // Underscores are dropped; the next letter is capitalized.
                        capitalize = true;
                    } else if capitalize {
                        pascal.push(ch.to_ascii_uppercase());
                        capitalize = false;
                    } else {
                        pascal.push(ch);
                    }
                }
                pascal
            }
            RenameRule::CamelCase => {
                let pascal = RenameRule::PascalCase.apply_to_field(field);
                // Lowercase the leading character; an empty input stays empty
                // instead of panicking on `pascal[..1]`.
                match pascal.chars().next() {
                    Some(first) => {
                        first.to_ascii_lowercase().to_string() + &pascal[first.len_utf8()..]
                    }
                    None => String::new(),
                }
            }
            RenameRule::ScreamingSnakeCase => field.to_ascii_uppercase(),
            RenameRule::KebabCase => field.replace('_', "-"),
        }
    }
}
impl FromStr for RenameRule {
    type Err = ();
    /// Parse the string value of a `#[serde(rename_all = "...")]` attribute.
    fn from_str(rename_all_str: &str) -> Result<Self, Self::Err> {
        match rename_all_str {
            "PascalCase" => Ok(RenameRule::PascalCase),
            "camelCase" => Ok(RenameRule::CamelCase),
            "snake_case" => Ok(RenameRule::SnakeCase),
            "SCREAMING_SNAKE_CASE" => Ok(RenameRule::ScreamingSnakeCase),
            "kebab-case" => Ok(RenameRule::KebabCase),
            _ => Err(()),
        }
    }
}
#[test]
fn rename_variants() {
    // (original, camelCase, snake_case, SCREAMING_SNAKE_CASE, kebab-case)
    let cases = [("Outcome", "outcome", "outcome", "OUTCOME", "outcome"),
                 ("VeryTasty", "veryTasty", "very_tasty", "VERY_TASTY", "very-tasty"),
                 ("A", "a", "a", "A", "a"),
                 ("Z42", "z42", "z42", "Z42", "z42")];
    for &(original, camel, snake, screaming, kebab) in cases.iter() {
        // `None` and `PascalCase` leave PascalCase variant names untouched.
        assert_eq!(None.apply_to_variant(original), original);
        assert_eq!(PascalCase.apply_to_variant(original), original);
        assert_eq!(CamelCase.apply_to_variant(original), camel);
        assert_eq!(SnakeCase.apply_to_variant(original), snake);
        assert_eq!(ScreamingSnakeCase.apply_to_variant(original), screaming);
        assert_eq!(KebabCase.apply_to_variant(original), kebab);
    }
}
#[test]
fn rename_fields() {
    // (original, PascalCase, camelCase, SCREAMING_SNAKE_CASE, kebab-case)
    let cases = [("outcome", "Outcome", "outcome", "OUTCOME", "outcome"),
                 ("very_tasty", "VeryTasty", "veryTasty", "VERY_TASTY", "very-tasty"),
                 ("a", "A", "a", "A", "a"),
                 ("z42", "Z42", "z42", "Z42", "z42")];
    for &(original, pascal, camel, screaming, kebab) in cases.iter() {
        // `None` and `SnakeCase` leave snake_case field names untouched.
        assert_eq!(None.apply_to_field(original), original);
        assert_eq!(PascalCase.apply_to_field(original), pascal);
        assert_eq!(CamelCase.apply_to_field(original), camel);
        assert_eq!(SnakeCase.apply_to_field(original), original);
        assert_eq!(ScreamingSnakeCase.apply_to_field(original), screaming);
        assert_eq!(KebabCase.apply_to_field(original), kebab);
    }
}

View File

@ -8,9 +8,7 @@ pub struct Ctxt {
impl Ctxt {
pub fn new() -> Self {
Ctxt {
errors: RefCell::new(Some(Vec::new())),
}
Ctxt { errors: RefCell::new(Some(Vec::new())) }
}
pub fn error<T: Display>(&self, msg: T) {

View File

@ -1,6 +1,3 @@
#![cfg_attr(feature = "clippy", plugin(clippy))]
#![cfg_attr(feature = "clippy", feature(plugin))]
extern crate syn;
pub mod ast;
@ -8,3 +5,5 @@ pub mod attr;
mod ctxt;
pub use ctxt::Ctxt;
mod case;

View File

@ -0,0 +1 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"b48eda1c232a1fb0725482abbe38616d3e9df3ba207c6581b2c918b6976cdcbf","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"6485b8ed310d3f0340bf1ad1f47645069ce4069dcc6bb46c7d5c6faf41de1fdb","README.md":"ebe318a04cf4e547e0f3ab97f1345ecb553358ee13ea81f99e3323e37d70ccdf","src/bound.rs":"9af80108627e7065366bd065e5125fbf292a3aac4e4014977decc42f659d3af2","src/de.rs":"51cedf6f6711b90829cb625f0e43ed765380321e1add80646cda3561dfe3d074","src/fragment.rs":"a4d5286453b280d457579bd17b4deb3f9ce2e5f6c3e446c4ef61774b87045589","src/lib.rs":"53826d40b094cb4c39d5b99dcc4ce1f84e96cec8d7e51e158b00036d18c7442a","src/ser.rs":"19aab089d33b23e46805b5d58185b1471b6d2ede5fe5f272767ce95ff1d48a31"},"package":"f15ea24bd037b2d64646b4d934fa99c649be66e3f7b29fb595a5543b212b1452"}

View File

@ -0,0 +1,27 @@
[package]
name = "serde_derive"
version = "0.9.11"
authors = ["Erick Tryzelaar <erick.tryzelaar@gmail.com>"]
license = "MIT/Apache-2.0"
description = "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]"
homepage = "https://serde.rs"
repository = "https://github.com/serde-rs/serde"
documentation = "https://serde.rs/codegen.html"
keywords = ["serde", "serialization", "no_std"]
readme = "../README.md"
include = ["Cargo.toml", "src/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
[features]
unstable = []
[badges]
travis-ci = { repository = "serde-rs/serde" }
[lib]
name = "serde_derive"
proc-macro = true
[dependencies]
quote = "0.3.8"
serde_codegen_internals = { version = "=0.14.1", default-features = false, path = "../serde_codegen_internals" }
syn = { version = "0.11", features = ["visit"] }

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,25 @@
Copyright (c) 2014 The Rust Project Developers
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

71
third_party/rust/serde_derive/README.md vendored Normal file
View File

@ -0,0 +1,71 @@
# Serde &emsp; [![Build Status](https://api.travis-ci.org/serde-rs/serde.svg?branch=master)](https://travis-ci.org/serde-rs/serde) [![Latest Version](https://img.shields.io/crates/v/serde.svg)](https://crates.io/crates/serde)
**Serde is a framework for *ser*ializing and *de*serializing Rust data structures efficiently and generically.**
---
You may be looking for:
- [An overview of Serde](https://serde.rs/)
- [Data formats supported by Serde](https://serde.rs/#data-formats)
- [Setting up `#[derive(Serialize, Deserialize)]`](https://serde.rs/codegen.html)
- [Examples](https://serde.rs/examples.html)
- [API documentation](https://docs.serde.rs/serde/)
- [Release notes](https://github.com/serde-rs/serde/releases)
## Serde in action
```rust
#[macro_use]
extern crate serde_derive;
extern crate serde_json;
#[derive(Serialize, Deserialize, Debug)]
struct Point {
x: i32,
y: i32,
}
fn main() {
let point = Point { x: 1, y: 2 };
// Convert the Point to a JSON string.
let serialized = serde_json::to_string(&point).unwrap();
// Prints serialized = {"x":1,"y":2}
println!("serialized = {}", serialized);
// Convert the JSON string back to a Point.
let deserialized: Point = serde_json::from_str(&serialized).unwrap();
// Prints deserialized = Point { x: 1, y: 2 }
println!("deserialized = {:?}", deserialized);
}
```
## Getting help
Serde developers live in the #serde channel on
[`irc.mozilla.org`](https://wiki.mozilla.org/IRC). The #rust channel is also a
good resource with generally faster response time but less specific knowledge
about Serde. If IRC is not your thing or you don't get a good response, we are
happy to respond to [GitHub issues](https://github.com/serde-rs/serde/issues/new)
as well.
## License
Serde is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
http://www.apache.org/licenses/LICENSE-2.0)
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
http://opensource.org/licenses/MIT)
at your option.
### Contribution
Unless you explicitly state otherwise, any contribution intentionally submitted
for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
dual licensed as above, without any additional terms or conditions.

View File

@ -0,0 +1,228 @@
use std::collections::HashSet;
use syn::{self, visit};
use internals::ast::Item;
use internals::attr;
// Build a `syn::Path` from `::`-separated identifier tokens, e.g.
// `path!(_serde::Serialize)`. The second arm (leading `::`) produces a
// global (absolute) path; the first produces a relative one.
macro_rules! path {
    ($first:ident $(:: $rest:ident)*) => {
        syn::Path {
            global: false,
            segments: vec![
                stringify!($first).into(),
                $(
                    stringify!($rest).into(),
                )*
            ],
        }
    };
    (::$first:ident $(:: $rest:ident)*) => {
        syn::Path {
            global: true,
            segments: vec![
                stringify!($first).into(),
                $(
                    stringify!($rest).into(),
                )*
            ],
        }
    };
}
// Remove the default from every type parameter because in the generated impls
// they look like associated types: "error: associated type bindings are not
// allowed here".
pub fn without_defaults(generics: &syn::Generics) -> syn::Generics {
    let mut stripped = generics.clone();
    for ty_param in &mut stripped.ty_params {
        ty_param.default = None;
    }
    stripped
}
// Return a copy of `generics` with the given predicates appended to its
// `where` clause.
pub fn with_where_predicates(generics: &syn::Generics,
                             predicates: &[syn::WherePredicate])
                             -> syn::Generics {
    let mut extended = generics.clone();
    for predicate in predicates {
        extended.where_clause.predicates.push(predicate.clone());
    }
    extended
}
// Return a copy of `generics` whose `where` clause additionally contains
// every predicate that `from_field` extracts from a field's attributes
// (i.e. explicit per-field `#[serde(bound = "...")]` predicates).
pub fn with_where_predicates_from_fields<F>(item: &Item,
                                            generics: &syn::Generics,
                                            from_field: F)
                                            -> syn::Generics
    where F: Fn(&attr::Field) -> Option<&[syn::WherePredicate]>
{
    let mut generics = generics.clone();
    for field in item.body.all_fields() {
        if let Some(predicates) = from_field(&field.attrs) {
            generics.where_clause.predicates.extend_from_slice(predicates);
        }
    }
    generics
}
// Puts the given bound on any generic type parameters that are used in fields
// for which filter returns true.
//
// For example, the following struct needs the bound `A: Serialize, B: Serialize`.
//
//     struct S<'b, A, B: 'b, C> {
//         a: A,
//         b: Option<&'b B>
//         #[serde(skip_serializing)]
//         c: C,
//     }
pub fn with_bound<F>(item: &Item,
                     generics: &syn::Generics,
                     filter: F,
                     bound: &syn::Path)
                     -> syn::Generics
    where F: Fn(&attr::Field) -> bool
{
    struct FindTyParams {
        // Set of all generic type parameters on the current struct (A, B, C in
        // the example). Initialized up front.
        all_ty_params: HashSet<syn::Ident>,
        // Set of generic type parameters used in fields for which filter
        // returns true (A and B in the example). Filled in as the visitor sees
        // them.
        relevant_ty_params: HashSet<syn::Ident>,
    }
    impl visit::Visitor for FindTyParams {
        fn visit_path(&mut self, path: &syn::Path) {
            if let Some(seg) = path.segments.last() {
                if seg.ident == "PhantomData" {
                    // Hardcoded exception, because PhantomData<T> implements
                    // Serialize and Deserialize whether or not T implements it.
                    return;
                }
            }
            // A lone, non-global path segment may simply be a generic type
            // parameter (e.g. `T`); record it if it is one of the struct's.
            if !path.global && path.segments.len() == 1 {
                let id = path.segments[0].ident.clone();
                if self.all_ty_params.contains(&id) {
                    self.relevant_ty_params.insert(id);
                }
            }
            visit::walk_path(self, path);
        }
    }
    let all_ty_params: HashSet<_> = generics.ty_params
        .iter()
        .map(|ty_param| ty_param.ident.clone())
        .collect();
    // Types of the fields that pass `filter` (the fields we serialize).
    let relevant_tys = item.body
        .all_fields()
        .filter(|&field| filter(&field.attrs))
        .map(|field| &field.ty);
    let mut visitor = FindTyParams {
        all_ty_params: all_ty_params,
        relevant_ty_params: HashSet::new(),
    };
    for ty in relevant_tys {
        visit::walk_ty(&mut visitor, ty);
    }
    // Emit `Id: bound` for each type parameter that appeared in a relevant
    // field, in the declaration order of the parameters.
    let new_predicates = generics.ty_params
        .iter()
        .map(|ty_param| ty_param.ident.clone())
        .filter(|id| visitor.relevant_ty_params.contains(id))
        .map(|id| {
            syn::WherePredicate::BoundPredicate(syn::WhereBoundPredicate {
                bound_lifetimes: Vec::new(),
                // the type parameter that is being bounded e.g. T
                bounded_ty: syn::Ty::Path(None, id.into()),
                // the bound e.g. Serialize
                bounds: vec![syn::TyParamBound::Trait(
                    syn::PolyTraitRef {
                        bound_lifetimes: Vec::new(),
                        trait_ref: bound.clone(),
                    },
                    syn::TraitBoundModifier::None
                )],
            })
        });
    let mut generics = generics.clone();
    generics.where_clause.predicates.extend(new_predicates);
    generics
}
// Return a copy of `generics` with the predicate `Self: bound` added, where
// `Self` is spelled out as the item's full type (e.g. `MyStruct<'a, T>`).
pub fn with_self_bound(item: &Item,
                       generics: &syn::Generics,
                       bound: &syn::Path)
                       -> syn::Generics {
    // The trait bound, e.g. `Default`.
    let trait_bound = syn::TyParamBound::Trait(
        syn::PolyTraitRef {
            bound_lifetimes: Vec::new(),
            trait_ref: bound.clone(),
        },
        syn::TraitBoundModifier::None,
    );
    let predicate = syn::WherePredicate::BoundPredicate(syn::WhereBoundPredicate {
        bound_lifetimes: Vec::new(),
        // The type that is being bounded, e.g. `MyStruct<'a, T>`.
        bounded_ty: type_of_item(item),
        bounds: vec![trait_bound],
    });
    let mut generics = generics.clone();
    generics.where_clause.predicates.push(predicate);
    generics
}
// Return a copy of `generics` that declares the given lifetime and bounds
// every pre-existing lifetime and type parameter by it.
pub fn with_lifetime_bound(generics: &syn::Generics,
                           lifetime: &str)
                           -> syn::Generics {
    let mut bounded = generics.clone();
    // Outlive-bound every existing lifetime parameter.
    for lifetime_def in &mut bounded.lifetimes {
        lifetime_def.bounds.push(syn::Lifetime::new(lifetime));
    }
    // Outlive-bound every type parameter.
    for ty_param in &mut bounded.ty_params {
        ty_param.bounds.push(syn::TyParamBound::Region(syn::Lifetime::new(lifetime)));
    }
    // Declare the new lifetime itself, carrying no bounds of its own.
    let new_def = syn::LifetimeDef {
        attrs: Vec::new(),
        lifetime: syn::Lifetime::new(lifetime),
        bounds: Vec::new(),
    };
    bounded.lifetimes.push(new_def);
    bounded
}
// Build the syn type for the item itself, including all of its generic
// parameters, e.g. `MyStruct<'a, T>`.
fn type_of_item(item: &Item) -> syn::Ty {
    let lifetimes: Vec<_> = item.generics
        .lifetimes
        .iter()
        .map(|def| def.lifetime.clone())
        .collect();
    let types: Vec<_> = item.generics
        .ty_params
        .iter()
        .map(|param| syn::Ty::Path(None, param.ident.clone().into()))
        .collect();
    let segment = syn::PathSegment {
        ident: item.ident.clone(),
        parameters: syn::PathParameters::AngleBracketed(syn::AngleBracketedParameterData {
            lifetimes: lifetimes,
            types: types,
            bindings: Vec::new(),
        }),
    };
    syn::Ty::Path(None,
                  syn::Path {
                      global: false,
                      segments: vec![segment],
                  })
}

1404
third_party/rust/serde_derive/src/de.rs vendored Normal file

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,67 @@
use quote::{Tokens, ToTokens};
/// A chunk of generated code, tagged with how it may be interpolated into
/// surrounding generated code.
pub enum Fragment {
    /// Tokens that can be used as an expression.
    Expr(Tokens),
    /// Tokens that can be used inside a block. The surrounding curly braces are
    /// not part of these tokens.
    Block(Tokens),
}
// Shorthand for wrapping `quote!` tokens in `Fragment::Expr`.
macro_rules! quote_expr {
    ($($tt:tt)*) => {
        $crate::fragment::Fragment::Expr(quote!($($tt)*))
    }
}
// Shorthand for wrapping `quote!` tokens in `Fragment::Block`.
macro_rules! quote_block {
    ($($tt:tt)*) => {
        $crate::fragment::Fragment::Block(quote!($($tt)*))
    }
}
/// Interpolate a fragment in place of an expression. This involves surrounding
/// Block fragments in curly braces.
pub struct Expr(pub Fragment);
impl ToTokens for Expr {
    fn to_tokens(&self, dst: &mut Tokens) {
        match self.0 {
            // Blocks become expressions by wrapping them in `{ ... }`.
            Fragment::Block(ref body) => {
                dst.append("{");
                body.to_tokens(dst);
                dst.append("}");
            }
            Fragment::Expr(ref body) => body.to_tokens(dst),
        }
    }
}
/// Interpolate a fragment as the statements of a block.
pub struct Stmts(pub Fragment);
impl ToTokens for Stmts {
    fn to_tokens(&self, dst: &mut Tokens) {
        // Both variants are emitted verbatim in statement position.
        match self.0 {
            Fragment::Block(ref body) => body.to_tokens(dst),
            Fragment::Expr(ref body) => body.to_tokens(dst),
        }
    }
}
/// Interpolate a fragment as the value part of a `match` expression. This
/// involves putting a comma after expressions and curly braces around blocks.
pub struct Match(pub Fragment);
impl ToTokens for Match {
    fn to_tokens(&self, dst: &mut Tokens) {
        match self.0 {
            // Blocks need braces so the match arm has a delimited body.
            Fragment::Block(ref body) => {
                dst.append("{");
                body.to_tokens(dst);
                dst.append("}");
            }
            // Expressions need a trailing comma to terminate the arm.
            Fragment::Expr(ref body) => {
                body.to_tokens(dst);
                dst.append(",");
            }
        }
    }
}

View File

@ -0,0 +1,40 @@
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
#![cfg_attr(feature = "cargo-clippy", allow(used_underscore_binding))]
// The `quote!` macro requires deep recursion.
#![recursion_limit = "192"]
extern crate syn;
#[macro_use]
extern crate quote;
extern crate serde_codegen_internals as internals;
extern crate proc_macro;
use proc_macro::TokenStream;
#[macro_use]
mod bound;
#[macro_use]
mod fragment;
mod ser;
mod de;
/// Proc-macro entry point for `#[derive(Serialize)]`.
#[proc_macro_derive(Serialize, attributes(serde))]
pub fn derive_serialize(input: TokenStream) -> TokenStream {
    // The token stream comes from the compiler, so a parse failure here is a
    // bug rather than user error; unwrap is acceptable.
    let input = syn::parse_derive_input(&input.to_string()).unwrap();
    match ser::expand_derive_serialize(&input) {
        Ok(expanded) => expanded.parse().unwrap(),
        // `panic!(msg)` with a non-literal String is deprecated (a hard error
        // in the 2021 edition); format the message explicitly instead.
        Err(msg) => panic!("{}", msg),
    }
}
/// Proc-macro entry point for `#[derive(Deserialize)]`.
#[proc_macro_derive(Deserialize, attributes(serde))]
pub fn derive_deserialize(input: TokenStream) -> TokenStream {
    // The token stream comes from the compiler, so a parse failure here is a
    // bug rather than user error; unwrap is acceptable.
    let input = syn::parse_derive_input(&input.to_string()).unwrap();
    match de::expand_derive_deserialize(&input) {
        Ok(expanded) => expanded.parse().unwrap(),
        // `panic!(msg)` with a non-literal String is deprecated (a hard error
        // in the 2021 edition); format the message explicitly instead.
        Err(msg) => panic!("{}", msg),
    }
}

782
third_party/rust/serde_derive/src/ser.rs vendored Normal file
View File

@ -0,0 +1,782 @@
use syn::{self, Ident};
use quote::Tokens;
use bound;
use fragment::{Fragment, Stmts, Match};
use internals::ast::{Body, Field, Item, Style, Variant};
use internals::{self, attr};
/// Expand a parsed `#[derive(Serialize)]` input into the `impl Serialize`
/// tokens, or return an error string describing invalid `#[serde(...)]`
/// attributes.
pub fn expand_derive_serialize(item: &syn::DeriveInput) -> Result<Tokens, String> {
    let ctxt = internals::Ctxt::new();
    let item = Item::from_ast(&ctxt, item);
    // Surface any attribute errors collected while building the internal AST.
    try!(ctxt.check());
    let ident = &item.ident;
    let generics = build_generics(&item);
    let (impl_generics, ty_generics, where_clause) = generics.split_for_impl();
    // The generated impl is wrapped in a uniquely named const so that the
    // `extern crate serde as _serde` inside it cannot collide with user code.
    let dummy_const = Ident::new(format!("_IMPL_SERIALIZE_FOR_{}", ident));
    let body = Stmts(serialize_body(&item, &generics));
    Ok(quote! {
        #[allow(non_upper_case_globals, unused_attributes, unused_qualifications)]
        const #dummy_const: () = {
            extern crate serde as _serde;
            #[automatically_derived]
            impl #impl_generics _serde::Serialize for #ident #ty_generics #where_clause {
                fn serialize<__S>(&self, _serializer: __S) -> _serde::export::Result<__S::Ok, __S::Error>
                    where __S: _serde::Serializer
                {
                    #body
                }
            }
        };
    })
}
// All the generics in the input, plus a bound `T: Serialize` for each generic
// field type that will be serialized by us.
fn build_generics(item: &Item) -> syn::Generics {
    let generics = bound::without_defaults(item.generics);
    let generics =
        bound::with_where_predicates_from_fields(item, &generics, attr::Field::ser_bound);
    // An item-level `#[serde(bound = "...")]` overrides the inferred bounds.
    if let Some(predicates) = item.attrs.ser_bound() {
        bound::with_where_predicates(&generics, predicates)
    } else {
        bound::with_bound(item,
                          &generics,
                          needs_serialize_bound,
                          &path!(_serde::Serialize))
    }
}
// Fields with a `skip_serializing` or `serialize_with` attribute are not
// serialized by us so we do not generate a bound. Fields with a `bound`
// attribute specify their own bound so we do not generate one. All other fields
// may need a `T: Serialize` bound where T is the type of the field.
fn needs_serialize_bound(attrs: &attr::Field) -> bool {
    !(attrs.skip_serializing()
      || attrs.serialize_with().is_some()
      || attrs.ser_bound().is_some())
}
/// Dispatch on the shape of the input type (enum / brace struct / tuple
/// struct / newtype / unit) and generate the matching serialization code.
fn serialize_body(item: &Item, generics: &syn::Generics) -> Fragment {
    match item.body {
        Body::Enum(ref variants) => {
            serialize_item_enum(&item.ident, generics, variants, &item.attrs)
        }
        Body::Struct(Style::Struct, ref fields) => {
            // Brace-style structs must have named fields; a violation here is
            // a bug in AST construction, not a user error.
            if fields.iter().any(|field| field.ident.is_none()) {
                panic!("struct has unnamed fields");
            }
            serialize_struct(&item.ident, generics, fields, &item.attrs)
        }
        Body::Struct(Style::Tuple, ref fields) => {
            if fields.iter().any(|field| field.ident.is_some()) {
                panic!("tuple struct has named fields");
            }
            serialize_tuple_struct(&item.ident, generics, fields, &item.attrs)
        }
        Body::Struct(Style::Newtype, ref fields) => {
            serialize_newtype_struct(&item.ident, generics, &fields[0], &item.attrs)
        }
        Body::Struct(Style::Unit, _) => serialize_unit_struct(&item.attrs),
    }
}
/// Generate serialization for a unit struct: only the (possibly renamed)
/// type name is emitted.
fn serialize_unit_struct(item_attrs: &attr::Item) -> Fragment {
    let type_name = item_attrs.name().serialize_name();
    quote_expr! {
        _serde::Serializer::serialize_unit_struct(_serializer, #type_name)
    }
}
/// Generate serialization for a newtype struct, delegating to the inner
/// value (or to a user-supplied `serialize_with` function).
fn serialize_newtype_struct(ident: &syn::Ident,
                            generics: &syn::Generics,
                            field: &Field,
                            item_attrs: &attr::Item)
                            -> Fragment {
    let type_name = item_attrs.name().serialize_name();
    let mut field_expr = quote!(&self.0);
    if let Some(path) = field.attrs.serialize_with() {
        // `serialize_with` replaces the direct field access with a wrapper
        // value that calls the user's function.
        field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
    }
    quote_expr! {
        _serde::Serializer::serialize_newtype_struct(_serializer, #type_name, #field_expr)
    }
}
/// Generate serialization for a tuple struct via the `SerializeTupleStruct`
/// API: begin, serialize each field in order, end.
fn serialize_tuple_struct(ident: &syn::Ident,
                          generics: &syn::Generics,
                          fields: &[Field],
                          item_attrs: &attr::Item)
                          -> Fragment {
    let serialize_stmts =
        serialize_tuple_struct_visitor(ident,
                                       fields,
                                       generics,
                                       false,
                                       quote!(_serde::ser::SerializeTupleStruct::serialize_field));
    let type_name = item_attrs.name().serialize_name();
    let len = serialize_stmts.len();
    // Omit `mut` when there are no fields, to avoid an `unused_mut` warning
    // in the generated code.
    let let_mut = mut_if(len > 0);
    quote_block! {
        let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple_struct(_serializer, #type_name, #len));
        #(#serialize_stmts)*
        _serde::ser::SerializeTupleStruct::end(__serde_state)
    }
}
/// Generate serialization for a brace-style struct via the `SerializeStruct`
/// API: begin with a computed length, serialize each field, end.
fn serialize_struct(ident: &syn::Ident,
                    generics: &syn::Generics,
                    fields: &[Field],
                    item_attrs: &attr::Item)
                    -> Fragment {
    let serialize_fields =
        serialize_struct_visitor(ident,
                                 fields,
                                 generics,
                                 false,
                                 quote!(_serde::ser::SerializeStruct::serialize_field));
    let type_name = item_attrs.name().serialize_name();
    let mut serialized_fields = fields.iter()
        .filter(|&field| !field.attrs.skip_serializing())
        .peekable();
    // Omit `mut` when every field is statically skipped, to avoid an
    // `unused_mut` warning in the generated code.
    let let_mut = mut_if(serialized_fields.peek().is_some());
    // The length is a runtime expression because `skip_serializing_if` can
    // exclude fields on a per-value basis: each field contributes 0 or 1.
    let len = serialized_fields.map(|field| {
            let ident = field.ident.clone().expect("struct has unnamed fields");
            let field_expr = quote!(&self.#ident);
            match field.attrs.skip_serializing_if() {
                Some(path) => quote!(if #path(#field_expr) { 0 } else { 1 }),
                None => quote!(1),
            }
        })
        .fold(quote!(0), |sum, expr| quote!(#sum + #expr));
    quote_block! {
        let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct(_serializer, #type_name, #len));
        #(#serialize_fields)*
        _serde::ser::SerializeStruct::end(__serde_state)
    }
}
/// Emits the serialization `match *self { ... }` over all variants of an enum.
///
/// Arms are generated in declaration order; the positional index of each
/// variant is forwarded so externally tagged variants can report it.
fn serialize_item_enum(ident: &syn::Ident,
                       generics: &syn::Generics,
                       variants: &[Variant],
                       item_attrs: &attr::Item)
                       -> Fragment {
    let mut arms = Vec::with_capacity(variants.len());
    for (variant_index, variant) in variants.iter().enumerate() {
        arms.push(serialize_variant(ident, generics, variant, variant_index, item_attrs));
    }
    quote_expr! {
        match *self {
            #(#arms)*
        }
    }
}
/// Generates one `match` arm that serializes a single enum variant.
///
/// A variant marked `#[serde(skip_serializing)]` becomes an arm returning a
/// custom error naming the variant. Otherwise the arm pattern destructures
/// the variant's fields by reference and the arm body is chosen according to
/// the enum's tagging mode (external, internal, adjacent, or untagged).
fn serialize_variant(ident: &syn::Ident,
                     generics: &syn::Generics,
                     variant: &Variant,
                     variant_index: usize,
                     item_attrs: &attr::Item)
                     -> Tokens {
    let variant_ident = variant.ident.clone();
    if variant.attrs.skip_serializing() {
        let skipped_msg = format!("the enum variant {}::{} cannot be serialized",
                                  ident, variant_ident);
        let skipped_err = quote! {
            _serde::export::Err(_serde::ser::Error::custom(#skipped_msg))
        };
        // Pattern that matches the variant without binding any of its fields.
        let fields_pat = match variant.style {
            Style::Unit => quote!(),
            Style::Newtype | Style::Tuple => quote!( (..) ),
            Style::Struct => quote!( {..} ),
        };
        quote! {
            #ident::#variant_ident #fields_pat => #skipped_err,
        }
    } else {
        // variant wasn't skipped
        // Destructuring pattern binding every field by reference: unit
        // variants bind nothing, newtype/tuple variants bind `__fieldN`,
        // struct variants bind their field names directly.
        let case = match variant.style {
            Style::Unit => {
                quote! {
                    #ident::#variant_ident
                }
            }
            Style::Newtype => {
                quote! {
                    #ident::#variant_ident(ref __field0)
                }
            }
            Style::Tuple => {
                let field_names = (0..variant.fields.len())
                    .map(|i| Ident::new(format!("__field{}", i)));
                quote! {
                    #ident::#variant_ident(#(ref #field_names),*)
                }
            }
            Style::Struct => {
                let fields = variant.fields
                    .iter()
                    .map(|f| f.ident.clone().expect("struct variant has unnamed fields"));
                quote! {
                    #ident::#variant_ident { #(ref #fields),* }
                }
            }
        };
        // Arm body, dispatched on the enum-level tagging representation.
        let body = Match(match *item_attrs.tag() {
            attr::EnumTag::External => {
                serialize_externally_tagged_variant(ident,
                                                    generics,
                                                    variant,
                                                    variant_index,
                                                    item_attrs)
            }
            attr::EnumTag::Internal { ref tag } => {
                serialize_internally_tagged_variant(ident,
                                                    generics,
                                                    variant,
                                                    item_attrs,
                                                    tag)
            }
            attr::EnumTag::Adjacent { ref tag, ref content } => {
                serialize_adjacently_tagged_variant(ident,
                                                    generics,
                                                    variant,
                                                    item_attrs,
                                                    tag,
                                                    content)
            }
            attr::EnumTag::None => serialize_untagged_variant(ident, generics, variant, item_attrs),
        });
        quote! {
            #case => #body
        }
    }
}
/// Serializes one variant of an externally tagged enum — serde's default
/// representation, where the variant name wraps the content.
fn serialize_externally_tagged_variant(ident: &syn::Ident,
                                       generics: &syn::Generics,
                                       variant: &Variant,
                                       variant_index: usize,
                                       item_attrs: &attr::Item)
                                       -> Fragment {
    let type_name = item_attrs.name().serialize_name();
    let variant_name = variant.attrs.name().serialize_name();
    match variant.style {
        Style::Unit => {
            quote_expr! {
                _serde::Serializer::serialize_unit_variant(
                    _serializer,
                    #type_name,
                    #variant_index,
                    #variant_name,
                )
            }
        }
        Style::Newtype => {
            let field = &variant.fields[0];
            // `__field0` is bound by the enclosing match arm's pattern.
            let mut field_expr = quote!(__field0);
            if let Some(path) = field.attrs.serialize_with() {
                field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
            }
            quote_expr! {
                _serde::Serializer::serialize_newtype_variant(
                    _serializer,
                    #type_name,
                    #variant_index,
                    #variant_name,
                    #field_expr,
                )
            }
        }
        Style::Tuple => {
            serialize_tuple_variant(TupleVariant::ExternallyTagged {
                                        type_name: type_name,
                                        variant_index: variant_index,
                                        variant_name: variant_name,
                                    },
                                    ident,
                                    generics,
                                    &variant.fields)
        }
        Style::Struct => {
            serialize_struct_variant(StructVariant::ExternallyTagged {
                                         variant_index: variant_index,
                                         variant_name: variant_name,
                                     },
                                     ident,
                                     generics,
                                     &variant.fields,
                                     &type_name)
        }
    }
}
/// Serializes one variant of an internally tagged enum
/// (`#[serde(tag = "...")]`), where the tag appears as a field inside the
/// serialized content.
fn serialize_internally_tagged_variant(ident: &syn::Ident,
                                       generics: &syn::Generics,
                                       variant: &Variant,
                                       item_attrs: &attr::Item,
                                       tag: &str)
                                       -> Fragment {
    let type_name = item_attrs.name().serialize_name();
    let variant_name = variant.attrs.name().serialize_name();
    // Raw Rust identifier strings, forwarded to the runtime helper below.
    let enum_ident_str = ident.as_ref();
    let variant_ident_str = variant.ident.as_ref();
    match variant.style {
        Style::Unit => {
            // A unit variant serializes as a one-field struct holding only the tag.
            quote_block! {
                let mut __struct = try!(_serde::Serializer::serialize_struct(
                    _serializer, #type_name, 1));
                try!(_serde::ser::SerializeStruct::serialize_field(
                    &mut __struct, #tag, #variant_name));
                _serde::ser::SerializeStruct::end(__struct)
            }
        }
        Style::Newtype => {
            let field = &variant.fields[0];
            // `__field0` is bound by the enclosing match arm's pattern.
            let mut field_expr = quote!(__field0);
            if let Some(path) = field.attrs.serialize_with() {
                field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
            }
            // Delegates to a runtime helper that injects the tag into the
            // inner value's own serialized form.
            quote_expr! {
                _serde::ser::private::serialize_tagged_newtype(
                    _serializer,
                    #enum_ident_str,
                    #variant_ident_str,
                    #tag,
                    #variant_name,
                    #field_expr,
                )
            }
        }
        Style::Struct => {
            serialize_struct_variant(StructVariant::InternallyTagged {
                                         tag: tag,
                                         variant_name: variant_name,
                                     },
                                     ident,
                                     generics,
                                     &variant.fields,
                                     &type_name)
        }
        // Tuple variants cannot be internally tagged; rejected before codegen.
        Style::Tuple => unreachable!("checked in serde_codegen_internals"),
    }
}
/// Serializes one variant of an adjacently tagged enum
/// (`#[serde(tag = "...", content = "...")]`), emitting a two-field struct
/// whose first field is the tag and whose second field holds the content.
fn serialize_adjacently_tagged_variant(ident: &syn::Ident,
                                       generics: &syn::Generics,
                                       variant: &Variant,
                                       item_attrs: &attr::Item,
                                       tag: &str,
                                       content: &str)
                                       -> Fragment {
    let type_name = item_attrs.name().serialize_name();
    let variant_name = variant.attrs.name().serialize_name();
    // Serialization of the variant's content, executed inside the generated
    // `__AdjacentlyTagged` wrapper's `serialize` method below.
    let inner = Stmts(match variant.style {
        Style::Unit => {
            // Unit variants carry no content: emit only the tag field and
            // return early — no wrapper struct is needed.
            return quote_block! {
                let mut __struct = try!(_serde::Serializer::serialize_struct(
                    _serializer, #type_name, 1));
                try!(_serde::ser::SerializeStruct::serialize_field(
                    &mut __struct, #tag, #variant_name));
                _serde::ser::SerializeStruct::end(__struct)
            };
        }
        Style::Newtype => {
            let field = &variant.fields[0];
            let mut field_expr = quote!(__field0);
            if let Some(path) = field.attrs.serialize_with() {
                field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
            }
            quote_expr! {
                _serde::Serialize::serialize(#field_expr, _serializer)
            }
        }
        Style::Tuple => {
            serialize_tuple_variant(TupleVariant::Untagged,
                                    ident,
                                    generics,
                                    &variant.fields)
        }
        Style::Struct => {
            serialize_struct_variant(StructVariant::Untagged,
                                     ident,
                                     generics,
                                     &variant.fields,
                                     &variant_name)
        }
    });
    let fields_ty = variant.fields.iter().map(|f| &f.ty);
    // Names of the bindings introduced by the match arm's pattern; bound by
    // reference (`let ref`) because they are interpolated twice below.
    let ref fields_ident: Vec<_> = match variant.style {
        Style::Unit => unreachable!(),
        Style::Newtype => vec![Ident::new("__field0")],
        Style::Tuple => {
            (0..variant.fields.len())
                .map(|i| Ident::new(format!("__field{}", i)))
                .collect()
        }
        Style::Struct => {
            variant.fields
                .iter()
                .map(|f| f.ident.clone().expect("struct variant has unnamed fields"))
                .collect()
        }
    };
    let (_, ty_generics, where_clause) = generics.split_for_impl();
    // The wrapper borrows every field for a fresh `'__a` lifetime.
    let wrapper_generics = bound::with_lifetime_bound(generics, "'__a");
    let (wrapper_impl_generics, wrapper_ty_generics, _) = wrapper_generics.split_for_impl();
    quote_block! {
        struct __AdjacentlyTagged #wrapper_generics #where_clause {
            data: (#(&'__a #fields_ty,)*),
            phantom: _serde::export::PhantomData<#ident #ty_generics>,
        }
        impl #wrapper_impl_generics _serde::Serialize for __AdjacentlyTagged #wrapper_ty_generics #where_clause {
            fn serialize<__S>(&self, _serializer: __S) -> _serde::export::Result<__S::Ok, __S::Error>
                where __S: _serde::Serializer
            {
                let (#(#fields_ident,)*) = self.data;
                #inner
            }
        }
        let mut __struct = try!(_serde::Serializer::serialize_struct(
            _serializer, #type_name, 2));
        try!(_serde::ser::SerializeStruct::serialize_field(
            &mut __struct, #tag, #variant_name));
        try!(_serde::ser::SerializeStruct::serialize_field(
            &mut __struct, #content, &__AdjacentlyTagged {
                data: (#(#fields_ident,)*),
                phantom: _serde::export::PhantomData::<#ident #ty_generics>,
            }));
        _serde::ser::SerializeStruct::end(__struct)
    }
}
/// Serializes one variant of an untagged enum (`#[serde(untagged)]`): the
/// content is emitted with no record of which variant produced it.
fn serialize_untagged_variant(ident: &syn::Ident,
                              generics: &syn::Generics,
                              variant: &Variant,
                              item_attrs: &attr::Item)
                              -> Fragment {
    match variant.style {
        Style::Unit => {
            quote_expr! {
                _serde::Serializer::serialize_unit(_serializer)
            }
        }
        Style::Newtype => {
            let field = &variant.fields[0];
            // `__field0` is bound by the enclosing match arm's pattern.
            let mut field_expr = quote!(__field0);
            if let Some(path) = field.attrs.serialize_with() {
                field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
            }
            quote_expr! {
                _serde::Serialize::serialize(#field_expr, _serializer)
            }
        }
        Style::Tuple => {
            serialize_tuple_variant(TupleVariant::Untagged, ident, generics, &variant.fields)
        }
        Style::Struct => {
            let type_name = item_attrs.name().serialize_name();
            serialize_struct_variant(StructVariant::Untagged,
                                     ident,
                                     generics,
                                     &variant.fields,
                                     &type_name)
        }
    }
}
/// Tagging context for serializing a tuple variant's fields.
enum TupleVariant {
    /// Default enum representation; emits `serialize_tuple_variant`.
    ExternallyTagged {
        type_name: String,
        variant_index: usize,
        variant_name: String,
    },
    /// Untagged or adjacently-tagged content; emits a plain `serialize_tuple`.
    Untagged,
}
/// Serializes the fields of a tuple variant under the given tagging context.
///
/// Externally tagged variants use the `SerializeTupleVariant` API; untagged
/// (and adjacently tagged) content is emitted as a plain tuple.
fn serialize_tuple_variant(context: TupleVariant,
                           ident: &syn::Ident,
                           generics: &syn::Generics,
                           fields: &[Field])
                           -> Fragment {
    // Per-element serialize method matching the chosen Serializer API.
    let method = match context {
        TupleVariant::ExternallyTagged { .. } => {
            quote!(_serde::ser::SerializeTupleVariant::serialize_field)
        }
        TupleVariant::Untagged => quote!(_serde::ser::SerializeTuple::serialize_element),
    };
    let serialize_stmts =
        serialize_tuple_struct_visitor(ident, fields, generics, true, method);
    let len = serialize_stmts.len();
    // Omit `mut` when there are no elements to avoid an unused-mut warning.
    let let_mut = mut_if(len > 0);
    match context {
        TupleVariant::ExternallyTagged { type_name, variant_index, variant_name } => {
            quote_block! {
                let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple_variant(
                    _serializer,
                    #type_name,
                    #variant_index,
                    #variant_name,
                    #len));
                #(#serialize_stmts)*
                _serde::ser::SerializeTupleVariant::end(__serde_state)
            }
        }
        TupleVariant::Untagged => {
            quote_block! {
                let #let_mut __serde_state = try!(_serde::Serializer::serialize_tuple(
                    _serializer,
                    #len));
                #(#serialize_stmts)*
                _serde::ser::SerializeTuple::end(__serde_state)
            }
        }
    }
}
/// Tagging context for serializing a struct variant's fields.
enum StructVariant<'a> {
    /// Default representation; emits `serialize_struct_variant`.
    ExternallyTagged {
        variant_index: usize,
        variant_name: String,
    },
    /// `#[serde(tag = "...")]`; emits a struct with an extra leading tag field.
    InternallyTagged { tag: &'a str, variant_name: String },
    /// Untagged or adjacently-tagged content; emits a plain struct.
    Untagged,
}
/// Serializes the fields of a struct variant under the given tagging context.
///
/// `name` is the serialized type or variant name handed to the Serializer.
/// The length expression is computed at runtime so that fields carrying
/// `skip_serializing_if` attributes are counted only when actually written.
fn serialize_struct_variant<'a>(context: StructVariant<'a>,
                                ident: &syn::Ident,
                                generics: &syn::Generics,
                                fields: &[Field],
                                name: &str)
                                -> Fragment {
    // Per-field serialize method matching the chosen Serializer API.
    let method = match context {
        StructVariant::ExternallyTagged { .. } => {
            quote!(_serde::ser::SerializeStructVariant::serialize_field)
        }
        StructVariant::InternallyTagged { .. } |
        StructVariant::Untagged => quote!(_serde::ser::SerializeStruct::serialize_field),
    };
    let serialize_fields = serialize_struct_visitor(ident, fields, generics, true, method);
    let mut serialized_fields = fields.iter()
        .filter(|&field| !field.attrs.skip_serializing())
        .peekable();
    // Omit `mut` when no field can ever be serialized.
    let let_mut = mut_if(serialized_fields.peek().is_some());
    // Runtime length: `0 + t1 + ...`, each term 1 or a skip_serializing_if check.
    let len = serialized_fields.map(|field| {
            let ident = field.ident.clone().expect("struct has unnamed fields");
            match field.attrs.skip_serializing_if() {
                Some(path) => quote!(if #path(#ident) { 0 } else { 1 }),
                None => quote!(1),
            }
        })
        .fold(quote!(0), |sum, expr| quote!(#sum + #expr));
    match context {
        StructVariant::ExternallyTagged { variant_index, variant_name } => {
            quote_block! {
                let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct_variant(
                    _serializer,
                    #name,
                    #variant_index,
                    #variant_name,
                    #len,
                ));
                #(#serialize_fields)*
                _serde::ser::SerializeStructVariant::end(__serde_state)
            }
        }
        StructVariant::InternallyTagged { tag, variant_name } => {
            // The tag is written as an extra leading field, hence `#len + 1`.
            quote_block! {
                let mut __serde_state = try!(_serde::Serializer::serialize_struct(
                    _serializer,
                    #name,
                    #len + 1,
                ));
                try!(_serde::ser::SerializeStruct::serialize_field(
                    &mut __serde_state,
                    #tag,
                    #variant_name,
                ));
                #(#serialize_fields)*
                _serde::ser::SerializeStruct::end(__serde_state)
            }
        }
        StructVariant::Untagged => {
            quote_block! {
                let #let_mut __serde_state = try!(_serde::Serializer::serialize_struct(
                    _serializer,
                    #name,
                    #len,
                ));
                #(#serialize_fields)*
                _serde::ser::SerializeStruct::end(__serde_state)
            }
        }
    }
}
/// Builds one serialize statement per tuple field.
///
/// When `is_enum` is true, fields are read from match-pattern bindings named
/// `__fieldN`; otherwise they are read as `&self.N`. `func` is the fully
/// qualified `serialize_field`/`serialize_element` method to invoke.
fn serialize_tuple_struct_visitor(ident: &syn::Ident,
                                  fields: &[Field],
                                  generics: &syn::Generics,
                                  is_enum: bool,
                                  func: Tokens)
                                  -> Vec<Tokens> {
    let mut stmts = Vec::with_capacity(fields.len());
    for (i, field) in fields.iter().enumerate() {
        // Access expression for this element, before any `serialize_with` wrapping.
        let mut field_expr = if is_enum {
            let id = Ident::new(format!("__field{}", i));
            quote!(#id)
        } else {
            let index = Ident::new(i);
            quote!(&self.#index)
        };
        // `skip_serializing_if` must test the raw value, not the wrapped one,
        // so the skip expression is captured before wrapping.
        let skip = field.attrs
            .skip_serializing_if()
            .map(|path| quote!(#path(#field_expr)));
        if let Some(path) = field.attrs.serialize_with() {
            field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
        }
        let ser = quote! {
            try!(#func(&mut __serde_state, #field_expr));
        };
        stmts.push(match skip {
            None => ser,
            Some(skip) => quote!(if !#skip { #ser }),
        });
    }
    stmts
}
/// Builds one serialize statement per named struct field, skipping fields
/// marked `#[serde(skip_serializing)]` entirely.
///
/// When `is_enum` is true, fields are read from match-pattern bindings with
/// their own names; otherwise they are read through `self`.
fn serialize_struct_visitor(ident: &syn::Ident,
                            fields: &[Field],
                            generics: &syn::Generics,
                            is_enum: bool,
                            func: Tokens)
                            -> Vec<Tokens> {
    let mut stmts = Vec::new();
    for field in fields.iter().filter(|&field| !field.attrs.skip_serializing()) {
        let field_ident = field.ident.clone().expect("struct has unnamed field");
        // Access expression for this field, before any `serialize_with` wrapping.
        let mut field_expr = if is_enum {
            quote!(#field_ident)
        } else {
            quote!(&self.#field_ident)
        };
        let key_expr = field.attrs.name().serialize_name();
        // `skip_serializing_if` must test the raw value, not the wrapped one,
        // so the skip expression is captured before wrapping.
        let skip = field.attrs
            .skip_serializing_if()
            .map(|path| quote!(#path(#field_expr)));
        if let Some(path) = field.attrs.serialize_with() {
            field_expr = wrap_serialize_with(ident, generics, field.ty, path, field_expr);
        }
        let ser = quote! {
            try!(#func(&mut __serde_state, #key_expr, #field_expr));
        };
        stmts.push(match skip {
            None => ser,
            Some(skip) => quote!(if !#skip { #ser }),
        });
    }
    stmts
}
/// Wraps `value` in a locally defined `__SerializeWith` struct whose
/// `Serialize` impl forwards to the user-supplied `serialize_with` function.
///
/// The wrapper borrows the value for a fresh `'__a` lifetime and carries a
/// `PhantomData` of the containing type so the original generic parameters
/// (and their where-clause) remain applicable to the wrapped value's type.
fn wrap_serialize_with(ident: &syn::Ident,
                       generics: &syn::Generics,
                       field_ty: &syn::Ty,
                       serialize_with: &syn::Path,
                       value: Tokens)
                       -> Tokens {
    let (_, ty_generics, where_clause) = generics.split_for_impl();
    let wrapper_generics = bound::with_lifetime_bound(generics, "'__a");
    let (wrapper_impl_generics, wrapper_ty_generics, _) = wrapper_generics.split_for_impl();
    quote!({
        struct __SerializeWith #wrapper_impl_generics #where_clause {
            value: &'__a #field_ty,
            phantom: _serde::export::PhantomData<#ident #ty_generics>,
        }
        impl #wrapper_impl_generics _serde::Serialize for __SerializeWith #wrapper_ty_generics #where_clause {
            fn serialize<__S>(&self, __s: __S) -> _serde::export::Result<__S::Ok, __S::Error>
                where __S: _serde::Serializer
            {
                #serialize_with(self.value, __s)
            }
        }
        &__SerializeWith {
            value: #value,
            phantom: _serde::export::PhantomData::<#ident #ty_generics>,
        }
    })
}
/// Produces the `mut` keyword token when `is_mut` is true, nothing otherwise.
///
/// Serialization of an empty struct results in code like:
///
///     let mut __serde_state = try!(serializer.serialize_struct("S", 0));
///     _serde::ser::SerializeStruct::end(__serde_state)
///
/// where we want to omit the `mut` to avoid a warning.
fn mut_if(is_mut: bool) -> Option<Tokens> {
    match is_mut {
        true => Some(quote!(mut)),
        false => None,
    }
}

View File

@ -1 +0,0 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",".gitignore":"172610b244a5ee8a8e2f1f045058b8abf9291d84bb76bf8779d2fd420419c2d6","Cargo.toml":"dea6c3d3d4b1440983aa6e3d218b52c630283c8ef52102fa5443c67d954e14f0","README.md":"d69d75705e2582721cbfb2d3b4b2af052c71679057a0b2ac53a22c03f1755bba","build.rs":"b40ce243f62825724b4a45092a8e658d71fa952a6840b83f3bee58e719a56d3b","src/bitmap_render_target.rs":"d3b229f85a9804ac52976431657727b410e7d5253283df046e46d98c196f0a3a","src/com_helpers.rs":"fccb4b36379ae3454a88aa32a8e5c09e46ef5f5626266dde1fe5f40a992de39c","src/comptr.rs":"218435689f505769686e07cfc5428852dda90b849a0d48e670f632307f5edc7c","src/font.rs":"9bdf3134c6ad3639eab3da4419c9b43aad2673797f6fdc65841da2c82e1f3af4","src/font_collection.rs":"969fa3abf141dc3504774886f4783fda4a74cd5a198c643f8a77fc1af4e75258","src/font_face.rs":"9506ca579345ab2b6b5615fc75f8f431e2bb0dbd93123d1d2a21a73c851a5427","src/font_family.rs":"403da9f8f9903cbe7f9f79636497b273f9885e200f53af99f9d4e483f11d6889","src/font_file.rs":"60ad02fc25765a2c113175ea372e98a2be0d84aa65fef9246b6a0192e63ff708","src/font_file_loader_impl.rs":"0d304ad99ff1e6874510a1498223329d798ff75b417e3db7e823a695003dfe92","src/gdi_interop.rs":"98922996afc5b8c8304cb65e7c965419003825dfa172a3e11fe69bf3d768551c","src/glyph_run_analysis.rs":"d30d8b41b047815ab5770c730b7a6d09939f2347b4a4257b87bebec08a5794fe","src/helpers.rs":"5d6f164468234ca8806dc1cea117b42dbfae80cc4c9ae965cb0556efdb364682","src/lib.rs":"7dc9d6bbe0ca46082b5c7d049b230388ff2681831506b2798d7ab6e478e18693","src/rendering_params.rs":"be1d1c433f76926c285d8ecdb747c5d9cc6a6c10c1a1890c0760cd99755ed471","src/test.rs":"d77e45f8866abeea070cbbafd4cbde62d875292e8d191310a04c70091978547c","src/types.rs":"784235c15d61fb0d001373575169aa473c92af18dcbc1709a5b2bbaa3a7ceb22"},"package":"9f013da79c3fb2a9653534b064cd2ca62e10f8b6d19ed8fdc885cb2873412789"}

View File

@ -1,30 +0,0 @@
# Vendored manifest for the servo-dwrote crate: lightweight DirectWrite
# (Windows text/font API) bindings used by webrender.
[package]
name = "servo-dwrote"
description = "Lightweight binding to DirectWrite."
repository = "https://github.com/servo/dwrote-rs"
license = "MPL-2.0"
version = "0.2.0"
authors = ["Vladimir Vukicevic <vladimir@pobox.com>"]
build = "build.rs"
# The library target keeps the original `dwrote` name despite the package rename.
[lib]
name = "dwrote"
# Exactly one of the codegen (stable, syntex-based) or serde_derive code paths
# should be active at a time; build.rs enforces this.
[features]
default = ["codegen"]
nightly = ["serde/unstable"]
codegen = ["serde_codegen", "serde_codegen/with-syntex"]
[dependencies]
libc = "0.2"
lazy_static = "0.2"
winapi = "0.2"
kernel32-sys = "0.2"
gdi32-sys = "0.2"
serde = "0.9"
serde_derive = {version = "0.9", optional = true}
[build-dependencies.serde_codegen]
version = "0.9"
# NOTE(review): `default_features` is the deprecated spelling; Cargo's
# preferred key is `default-features` (both are accepted).
default_features = false
optional = true

View File

@ -1,46 +0,0 @@
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Exactly one of the `serde_codegen` (stable Rust, syntex expansion) and
// `serde_derive` (proc-macro) features must be enabled; the cfg attributes
// below select the matching `inner` module at compile time.
#[cfg(all(feature = "serde_codegen", not(feature = "serde_derive")))]
mod inner {
    extern crate serde_codegen;
    use std::env;
    use std::path::Path;
    /// Expands `src/types.rs` through serde_codegen into `$OUT_DIR/types.rs`.
    pub fn main() {
        let out_dir = env::var_os("OUT_DIR").unwrap();
        let src = Path::new("src/types.rs");
        let dst = Path::new(&out_dir).join("types.rs");
        serde_codegen::expand(&src, &dst).unwrap();
        println!("cargo:rerun-if-changed=src/types.rs");
    }
}
#[cfg(all(feature = "serde_derive", not(feature = "serde_codegen")))]
mod inner {
    /// Nothing to do: serde_derive expands during normal compilation.
    pub fn main() {}
}
#[cfg(all(feature = "serde_codegen", feature = "serde_derive"))]
mod inner {
    pub fn main() {
        panic!("serde_codegen and serde_derive are both used. \
                You probably forgot --no-default-features.")
    }
}
#[cfg(not(any(feature = "serde_codegen", feature = "serde_derive")))]
mod inner {
    pub fn main() {
        // BUG FIX: this message was previously written as two adjacent string
        // literals, which do not concatenate in Rust and made `panic!` fail to
        // compile whenever this cfg branch was active. Use one continued
        // string literal, matching the style of the branch above.
        panic!("Neither serde_codegen nor serde_derive are used. \
                You probably want --features serde_derive --no-default-features.")
    }
}
fn main() {
    inner::main();
}

View File

@ -1 +0,0 @@
{"files":{".cargo-ok":"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855","Cargo.toml":"f703ce140afaec1a35ce733f6bc3d0ce45a6256095572d0763c815fbf39f4f11","src/aster/generics.rs":"77eb19443af0dff5debb18d064733cc8721a42ad7e993a33352cdeff2b5f9f85","src/aster/ident.rs":"e9d082664f008a56bd854011310b4258ab072740ba82e57495b6e8a868a5f36b","src/aster/invoke.rs":"2b1b993973ab4f5c8fa6d6a286576b2542edce21fe9904f5133c470c072e6d3f","src/aster/lifetime.rs":"d83f4c1a48e3580caa028cfabde6ace232efc95d70af6dc9cfcca48317db9ad7","src/aster/mod.rs":"12220f73b0021e72b4c50f6a513cff174b9c7267209aa23f183043d96ccc9ab7","src/aster/path.rs":"60865b0f952077307c1a66810a4b6dafde43e76a417a433a8343960e7de474e4","src/aster/qpath.rs":"885c94b29ab8ee45c72a682221e241d1f0dd09c659809fe77279b5dd8a4bc645","src/aster/ty.rs":"90649aad98617c09ffc43a38aeb823a3298c41bf5e10f0ef3500b71c81021c2f","src/aster/ty_param.rs":"7ced1e6ca0c98ef468d507d3f07bfcb1171395cd66ff5c3e1b091fe7e8b9a562","src/aster/where_predicate.rs":"5fb8ec3fcb67bcc1d9bb7b64cf2f5beb601aac6502d6db30c0cdf8641fa248d1","src/attr.rs":"2ba436bdd439511be10baf9ad45226ade678176a7fd45a087367e1ad2b43e07a","src/constant.rs":"90535a2320e0dc8ab623a9bffa770bdf697baef2884a7d9224b31daf422ea5a0","src/data.rs":"0119c67821f846e67d792bea638ae7f7f5d7e2f5e5a0c145d8ba8766d6ddb0f9","src/escape.rs":"e035b1f6ce3255e868fddb62ee90a95a2f3caf2db73786a2b179b92e9e337539","src/expr.rs":"02e8d346bef099974d06d74945be92fe6391111b94154df4981d44f1594d5579","src/generics.rs":"a300acff4c6e61d2fe9344db23f5e176e7abb02529bc348d9180f41ad0a4caf6","src/helper.rs":"9693d5c78f2d627a90d689a5d4bee1061eddcb646ae6dff3b2e4fd7cfbb33845","src/ident.rs":"83142b0107baba3137aad3b7d5c7b468ab53bf837bd9544d117d6644080d2705","src/item.rs":"63f2cd9a01c279405196d90a7d1cc530896157352163fb44f6b2a713657058b8","src/krate.rs":"324073a42389eb1c26a9d0f325b4f1cdd37d00a9bcaf07fdee77af54909a452d","src/lib.rs":"ef584db9ac9b7308224798d3983cbf201df7f0da1735fe5ce408f20fb3df763e","src/lit.rs":"2615fc6041f11b67a7c
d62012f36eb215fd1fdf6649b6b64d728625148f53c7b","src/mac.rs":"45c44bd7abcbdaea6572bb4721bdc57b02b967ea9865172fe10e029e51e51a42","src/macro_input.rs":"93b999877879076e1f47502d96aa18aad82117d072044ca9de825c8a9bfa60b8","src/nom.rs":"642149bf322b762e02183ac1fed641df7f03ac53334c869a64707de4e9c5e68c","src/op.rs":"232f84ba605ed50e70ee02169dd551548872135cf56f155637917ec3bf810ce1","src/registry.rs":"b709f2a0f372efd8dec8fd46d6d71fb3b56a0261789e6de048a41a5e70144421","src/space.rs":"de9cb71e831c1d66f0bf2f3f219c3455d1979ca89f89b198d3b324e0cd50faf8","src/ty.rs":"97cfcb904a5fd68a42ebd2e5f86466d92e0785b1491d80c2a8d396ccec1b742a","src/visit.rs":"d7dcf429cc1a05821a66a4b38e7856eec45a9b2215f625d95030c3688eda26ca"},"package":"58fd09df59565db3399efbba34ba8a2fec1307511ebd245d0061ff9d42691673"}

View File

@ -1,32 +0,0 @@
# Vendored manifest for syn 0.10: a nom-based Rust source parser used by
# serde's code generation.
[package]
name = "syn"
version = "0.10.8" # don't forget to update version in readme for breaking changes
authors = ["David Tolnay <dtolnay@gmail.com>"]
license = "MIT/Apache-2.0"
description = "Nom parser for Rust source code"
repository = "https://github.com/dtolnay/syn"
documentation = "https://dtolnay.github.io/syn/syn/"
include = ["Cargo.toml", "src/**/*.rs"]
# Each feature gates a module and pulls in only the optional dependencies
# that module needs.
[features]
default = ["parsing", "printing"]
aster = []
expand = ["full", "parsing", "printing"]
full = []
parsing = ["unicode-xid"]
pretty = ["syntex_syntax"]
printing = ["quote"]
visit = []
[dependencies]
clippy = { version = "0.*", optional = true }
quote = { version = "0.3.0", optional = true }
syntex_syntax = { version = "0.50.0", optional = true }
unicode-xid = { version = "0.0.4", optional = true }
[dev-dependencies]
syntex_pos = "0.50.0"
syntex_syntax = "0.50.0"
tempdir = "0.3.5"
time = "0.1.35"
walkdir = "1.0.1"

View File

@ -1,233 +0,0 @@
use {Generics, Ident, LifetimeDef, TyParam, WhereClause, WherePredicate};
use aster::invoke::{Identity, Invoke};
use aster::lifetime::{IntoLifetime, LifetimeDefBuilder, IntoLifetimeDef};
use aster::path::IntoPath;
use aster::ty_param::TyParamBuilder;
use aster::where_predicate::WherePredicateBuilder;
/// Incrementally assembles a `Generics` value (lifetimes, type parameters,
/// and where-clause predicates), invoking `callback` with the result when
/// `build` is called.
pub struct GenericsBuilder<F = Identity> {
    callback: F,
    lifetimes: Vec<LifetimeDef>,
    ty_params: Vec<TyParam>,
    predicates: Vec<WherePredicate>,
}
impl GenericsBuilder {
    /// Creates an empty builder using the no-op `Identity` callback.
    pub fn new() -> Self {
        Self::with_callback(Identity)
    }
    /// Seeds a builder from an existing `Generics`, with the `Identity` callback.
    pub fn from_generics(generics: Generics) -> Self {
        Self::from_generics_with_callback(generics, Identity)
    }
}
impl<F> GenericsBuilder<F>
    where F: Invoke<Generics>
{
    /// Creates an empty builder that reports its result to `callback`.
    pub fn with_callback(callback: F) -> Self {
        GenericsBuilder {
            callback: callback,
            lifetimes: Vec::new(),
            ty_params: Vec::new(),
            predicates: Vec::new(),
        }
    }
    /// Seeds the builder with the contents of an existing `Generics`.
    pub fn from_generics_with_callback(generics: Generics, callback: F) -> Self {
        GenericsBuilder {
            callback: callback,
            lifetimes: generics.lifetimes,
            ty_params: generics.ty_params,
            predicates: generics.where_clause.predicates,
        }
    }
    /// Merges all lifetimes, type parameters, and predicates of `generics`.
    pub fn with(self, generics: Generics) -> Self {
        self.with_lifetimes(generics.lifetimes.into_iter())
            .with_ty_params(generics.ty_params.into_iter())
            .with_predicates(generics.where_clause.predicates.into_iter())
    }
    /// Appends each item of `iter` as a lifetime definition.
    pub fn with_lifetimes<I, L>(mut self, iter: I) -> Self
        where I: IntoIterator<Item = L>,
              L: IntoLifetimeDef
    {
        let iter = iter.into_iter().map(|lifetime_def| lifetime_def.into_lifetime_def());
        self.lifetimes.extend(iter);
        self
    }
    /// Appends a bound-less lifetime definition for each name in `iter`.
    pub fn with_lifetime_names<I, N>(mut self, iter: I) -> Self
        where I: IntoIterator<Item = N>,
              N: Into<Ident>
    {
        for name in iter {
            self = self.lifetime_name(name);
        }
        self
    }
    /// Appends a single lifetime definition.
    pub fn with_lifetime(mut self, lifetime: LifetimeDef) -> Self {
        self.lifetimes.push(lifetime);
        self
    }
    /// Appends a bound-less lifetime definition named `name`.
    pub fn lifetime_name<N>(self, name: N) -> Self
        where N: Into<Ident>
    {
        self.lifetime(name).build()
    }
    /// Opens a sub-builder for a lifetime named `name`; its `build()` returns here.
    pub fn lifetime<N>(self, name: N) -> LifetimeDefBuilder<Self>
        where N: Into<Ident>
    {
        LifetimeDefBuilder::with_callback(name, self)
    }
    /// Appends every type parameter in `iter`.
    pub fn with_ty_params<I>(mut self, iter: I) -> Self
        where I: IntoIterator<Item = TyParam>
    {
        self.ty_params.extend(iter);
        self
    }
    /// Appends a bound-less type parameter for each identifier in `iter`.
    pub fn with_ty_param_ids<I, T>(mut self, iter: I) -> Self
        where I: IntoIterator<Item = T>,
              T: Into<Ident>
    {
        for id in iter {
            self = self.ty_param_id(id);
        }
        self
    }
    /// Appends a single type parameter.
    pub fn with_ty_param(mut self, ty_param: TyParam) -> Self {
        self.ty_params.push(ty_param);
        self
    }
    /// Appends a bound-less type parameter named `id`.
    pub fn ty_param_id<I>(self, id: I) -> Self
        where I: Into<Ident>
    {
        self.ty_param(id).build()
    }
    /// Opens a sub-builder for a type parameter named `id`.
    pub fn ty_param<I>(self, id: I) -> TyParamBuilder<Self>
        where I: Into<Ident>
    {
        TyParamBuilder::with_callback(id, self)
    }
    /// Appends every where-clause predicate in `iter`.
    pub fn with_predicates<I>(mut self, iter: I) -> Self
        where I: IntoIterator<Item = WherePredicate>
    {
        self.predicates.extend(iter);
        self
    }
    /// Appends a single where-clause predicate.
    pub fn with_predicate(mut self, predicate: WherePredicate) -> Self {
        self.predicates.push(predicate);
        self
    }
    /// Opens a sub-builder for a where-clause predicate.
    pub fn predicate(self) -> WherePredicateBuilder<Self> {
        WherePredicateBuilder::with_callback(self)
    }
    /// Adds `lifetime` as an outlives-bound on every lifetime and type parameter.
    pub fn add_lifetime_bound<L>(mut self, lifetime: L) -> Self
        where L: IntoLifetime
    {
        let lifetime = lifetime.into_lifetime();
        for lifetime_def in &mut self.lifetimes {
            lifetime_def.bounds.push(lifetime.clone());
        }
        for ty_param in &mut self.ty_params {
            *ty_param = TyParamBuilder::from_ty_param(ty_param.clone())
                .lifetime_bound(lifetime.clone())
                .build();
        }
        self
    }
    /// Adds the trait at `path` as a bound on every type parameter.
    pub fn add_ty_param_bound<P>(mut self, path: P) -> Self
        where P: IntoPath
    {
        let path = path.into_path();
        for ty_param in &mut self.ty_params {
            *ty_param = TyParamBuilder::from_ty_param(ty_param.clone())
                .trait_bound(path.clone())
                .build()
                .build();
        }
        self
    }
    /// Removes all bounds and all where-clause predicates.
    pub fn strip_bounds(self) -> Self {
        self.strip_lifetimes()
            .strip_ty_params()
            .strip_predicates()
    }
    /// Clears the bounds of every lifetime definition.
    pub fn strip_lifetimes(mut self) -> Self {
        for lifetime in &mut self.lifetimes {
            lifetime.bounds = vec![];
        }
        self
    }
    /// Clears the bounds of every type parameter.
    pub fn strip_ty_params(mut self) -> Self {
        for ty_param in &mut self.ty_params {
            ty_param.bounds = vec![];
        }
        self
    }
    /// Clears all where-clause predicates.
    pub fn strip_predicates(mut self) -> Self {
        self.predicates = vec![];
        self
    }
    /// Assembles the final `Generics` and hands it to the callback.
    pub fn build(self) -> F::Result {
        self.callback.invoke(Generics {
            lifetimes: self.lifetimes,
            ty_params: self.ty_params,
            where_clause: WhereClause { predicates: self.predicates },
        })
    }
}
// Callback plumbing: the lifetime / type-parameter / predicate sub-builders
// hand their finished item back here, and the builder simply accumulates it.
impl<F> Invoke<LifetimeDef> for GenericsBuilder<F>
    where F: Invoke<Generics>
{
    type Result = Self;
    fn invoke(self, item: LifetimeDef) -> Self {
        self.with_lifetime(item)
    }
}
impl<F> Invoke<TyParam> for GenericsBuilder<F>
    where F: Invoke<Generics>
{
    type Result = Self;
    fn invoke(self, item: TyParam) -> Self {
        self.with_ty_param(item)
    }
}
impl<F> Invoke<WherePredicate> for GenericsBuilder<F>
    where F: Invoke<Generics>
{
    type Result = Self;
    fn invoke(self, item: WherePredicate) -> Self {
        self.with_predicate(item)
    }
}

View File

@ -1,39 +0,0 @@
use Ident;
/// Conversion of a borrowed value into an owned `Ident`.
pub trait ToIdent {
    fn to_ident(&self) -> Ident;
}
/// An `Ident` converts by cloning itself.
impl ToIdent for Ident {
    fn to_ident(&self) -> Ident {
        self.clone()
    }
}
/// A string slice converts via `into()`.
impl<'a> ToIdent for &'a str {
    fn to_ident(&self) -> Ident {
        (**self).into()
    }
}
/// An owned `String` is cloned and converted via `into()`.
impl ToIdent for String {
    fn to_ident(&self) -> Ident {
        self.clone().into()
    }
}
/// Shared references delegate to the referenced value's conversion.
impl<'a, T> ToIdent for &'a T
    where T: ToIdent
{
    fn to_ident(&self) -> Ident {
        (**self).to_ident()
    }
}
/// Mutable references delegate to the referenced value's conversion.
impl<'a, T> ToIdent for &'a mut T
    where T: ToIdent
{
    fn to_ident(&self) -> Ident {
        (**self).to_ident()
    }
}

Some files were not shown because too many files have changed in this diff Show More