servo: Merge #13741 - Remove some unused support for the legacy network stack in script (from servo:fetch-doc-unused); r=Manishearth

Source-Repo: https://github.com/servo/servo
Source-Revision: 740464df8af6e0f167bafa1a760078e420172eb7
Author: Ms2ger
Date: 2016-10-13 09:55:00 -05:00
Parent: ef12bb5f10
Commit: 3e9051996f
3 changed files with 4 additions and 43 deletions

components/script/document_loader.rs

@@ -8,9 +8,7 @@
 use dom::bindings::js::JS;
 use dom::document::Document;
 use ipc_channel::ipc::IpcSender;
-use msg::constellation_msg::{PipelineId, ReferrerPolicy};
-use net_traits::{AsyncResponseTarget, CoreResourceMsg, load_async};
-use net_traits::{FetchResponseMsg, LoadContext, ResourceThreads, IpcSend};
+use net_traits::{CoreResourceMsg, FetchResponseMsg, ResourceThreads, IpcSend};
 use net_traits::request::RequestInit;
 use std::thread;
 use url::Url;
@@ -36,20 +34,10 @@ impl LoadType {
             LoadType::PageSource(ref url) => url,
         }
     }
-
-    fn to_load_context(&self) -> LoadContext {
-        match *self {
-            LoadType::Image(_) => LoadContext::Image,
-            LoadType::Script(_) => LoadContext::Script,
-            LoadType::Subframe(_) | LoadType::PageSource(_) => LoadContext::Browsing,
-            LoadType::Stylesheet(_) => LoadContext::Style,
-            LoadType::Media(_) => LoadContext::AudioVideo,
-        }
-    }
 }
 
 /// Canary value ensuring that manually added blocking loads (ie. ones that weren't
-/// created via DocumentLoader::{load_async, fetch_async}) are always removed by the time
+/// created via DocumentLoader::fetch_async) are always removed by the time
 /// that the owner is destroyed.
 #[derive(JSTraceable, HeapSizeOf)]
 #[must_root]
@@ -95,24 +83,21 @@ impl Drop for LoadBlocker {
 #[derive(JSTraceable, HeapSizeOf)]
 pub struct DocumentLoader {
     resource_threads: ResourceThreads,
-    pipeline: Option<PipelineId>,
     blocking_loads: Vec<LoadType>,
     events_inhibited: bool,
 }
 
 impl DocumentLoader {
     pub fn new(existing: &DocumentLoader) -> DocumentLoader {
-        DocumentLoader::new_with_threads(existing.resource_threads.clone(), None, None)
+        DocumentLoader::new_with_threads(existing.resource_threads.clone(), None)
     }
 
     pub fn new_with_threads(resource_threads: ResourceThreads,
-                            pipeline: Option<PipelineId>,
                             initial_load: Option<Url>) -> DocumentLoader {
         let initial_loads = initial_load.into_iter().map(LoadType::PageSource).collect();
 
         DocumentLoader {
             resource_threads: resource_threads,
-            pipeline: pipeline,
             blocking_loads: initial_loads,
             events_inhibited: false,
         }
@@ -123,24 +108,6 @@ impl DocumentLoader {
         self.blocking_loads.push(load);
     }
 
-    /// Create and initiate a new network request.
-    pub fn load_async(&mut self,
-                      load: LoadType,
-                      listener: AsyncResponseTarget,
-                      referrer: &Document,
-                      referrer_policy: Option<ReferrerPolicy>) {
-        let context = load.to_load_context();
-        let url = load.url().clone();
-        self.add_blocking_load(load);
-        load_async(context,
-                   self.resource_threads.sender(),
-                   url,
-                   self.pipeline,
-                   referrer_policy.or(referrer.get_referrer_policy()),
-                   Some(referrer.url().clone()),
-                   listener);
-    }
-
     /// Initiate a new fetch.
     pub fn fetch_async(&mut self,
                        load: LoadType,
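The LoadBlocker comment retained above describes a drop-guard style canary: a blocking load is registered when the guard is created and must have been cleared by the time its owner is destroyed. Below is a minimal, self-contained sketch of that idea; the Load, Loader, and Blocker types are hypothetical stand-ins written for illustration, not Servo's actual LoadType, DocumentLoader, or LoadBlocker definitions.

use std::cell::RefCell;
use std::rc::Rc;

// Hypothetical stand-in for a pending load; the real LoadType carries a Url per variant.
#[derive(Clone, PartialEq)]
struct Load(String);

// Hypothetical stand-in for DocumentLoader: tracks the loads that block the
// document's `load` event, like `blocking_loads` in the diff above.
#[derive(Default)]
struct Loader {
    blocking_loads: Vec<Load>,
}

impl Loader {
    fn add_blocking_load(&mut self, load: Load) {
        self.blocking_loads.push(load);
    }

    fn finish_load(&mut self, load: &Load) {
        let index = self.blocking_loads.iter().position(|l| l == load)
            .expect("unknown blocking load");
        self.blocking_loads.remove(index);
    }
}

// Hypothetical canary in the spirit of LoadBlocker: it registers the load on
// creation and insists (here via a panic) that it was cleared before being dropped.
struct Blocker {
    loader: Rc<RefCell<Loader>>,
    load: Option<Load>,
}

impl Blocker {
    fn new(loader: Rc<RefCell<Loader>>, load: Load) -> Blocker {
        loader.borrow_mut().add_blocking_load(load.clone());
        Blocker { loader: loader, load: Some(load) }
    }

    fn clear(&mut self) {
        if let Some(load) = self.load.take() {
            self.loader.borrow_mut().finish_load(&load);
        }
    }
}

impl Drop for Blocker {
    fn drop(&mut self) {
        assert!(self.load.is_none(), "blocking load was never cleared");
    }
}

fn main() {
    let loader = Rc::new(RefCell::new(Loader::default()));
    let mut blocker = Blocker::new(loader.clone(), Load("https://example.com/a.png".into()));
    // ... later, once the response has been fully processed ...
    blocker.clear();
    assert!(loader.borrow().blocking_loads.is_empty());
}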

components/script/dom/document.rs

@@ -97,7 +97,7 @@ use js::jsapi::JS_GetRuntime;
 use msg::constellation_msg::{ALT, CONTROL, SHIFT, SUPER};
 use msg::constellation_msg::{Key, KeyModifiers, KeyState};
 use msg::constellation_msg::{PipelineId, ReferrerPolicy};
-use net_traits::{AsyncResponseTarget, FetchResponseMsg, IpcSend};
+use net_traits::{FetchResponseMsg, IpcSend};
 use net_traits::CookieSource::NonHTTP;
 use net_traits::CoreResourceMsg::{GetCookiesForUrl, SetCookiesForUrl};
 use net_traits::request::RequestInit;
@@ -1472,11 +1472,6 @@ impl Document {
                            ReflowReason::RequestAnimationFrame);
     }
 
-    pub fn load_async(&self, load: LoadType, listener: AsyncResponseTarget, referrer_policy: Option<ReferrerPolicy>) {
-        let mut loader = self.loader.borrow_mut();
-        loader.load_async(load, listener, self, referrer_policy);
-    }
-
     pub fn fetch_async(&self, load: LoadType,
                        request: RequestInit,
                        fetch_target: IpcSender<FetchResponseMsg>) {
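With the listener-based load_async wrapper gone, the fetch_async method kept as context above is the remaining entry point: callers pass the LoadType to block the load event on, a RequestInit describing the request, and an IPC sender that will receive fetch progress messages. The following is a rough, self-contained sketch of that message-passing shape, using std::sync::mpsc and made-up message variants as stand-ins for Servo's ipc_channel and net_traits::FetchResponseMsg types.

use std::sync::mpsc;
use std::thread;

// Simplified stand-in for FetchResponseMsg; the real enum carries response
// metadata, body chunks, and error information rather than plain strings/bytes.
enum FetchMsg {
    Response(String), // headers / metadata
    Chunk(Vec<u8>),   // a piece of the body
    Eof,              // fetch finished
}

// Stand-in for the fetch machinery: in Servo this runs on the net side, and the
// sender handed to fetch_async is an IpcSender<FetchResponseMsg>.
fn spawn_fake_fetch(url: String, target: mpsc::Sender<FetchMsg>) {
    thread::spawn(move || {
        target.send(FetchMsg::Response(format!("200 OK for {}", url))).unwrap();
        target.send(FetchMsg::Chunk(b"<html></html>".to_vec())).unwrap();
        target.send(FetchMsg::Eof).unwrap();
    });
}

fn main() {
    // The caller keeps the receiving end and hands the sender to the fetch,
    // mirroring the fetch_target parameter of fetch_async.
    let (sender, receiver) = mpsc::channel();
    spawn_fake_fetch("https://example.com/".to_string(), sender);

    let mut body = Vec::new();
    loop {
        match receiver.recv().unwrap() {
            FetchMsg::Response(status) => println!("metadata: {}", status),
            FetchMsg::Chunk(mut bytes) => body.append(&mut bytes),
            FetchMsg::Eof => break,
        }
    }
    println!("received {} body bytes", body.len());
}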

components/script/script_thread.rs

@@ -1729,7 +1729,6 @@ impl ScriptThread {
         });
 
         let loader = DocumentLoader::new_with_threads(self.resource_threads.clone(),
-                                                      Some(browsing_context.pipeline_id()),
                                                       Some(incomplete.url.clone()));
 
         let is_html_document = match metadata.content_type {
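The call above simply drops the pipeline argument; new_with_threads still turns its optional initial URL into the starting set of blocking loads with initial_load.into_iter().map(LoadType::PageSource).collect(), as shown in the document_loader.rs hunk. A small self-contained illustration of that Option-to-Vec idiom, with a stand-in LoadType since the real variant wraps url::Url:

// Stand-in for LoadType::PageSource(Url); the real variant wraps url::Url.
#[derive(Debug, PartialEq)]
enum LoadType {
    PageSource(String),
}

fn initial_loads(initial_load: Option<String>) -> Vec<LoadType> {
    // Option::into_iter yields zero or one item, so the resulting Vec has one
    // PageSource entry when an initial URL is given and is empty otherwise.
    initial_load.into_iter().map(LoadType::PageSource).collect()
}

fn main() {
    assert_eq!(initial_loads(Some("https://example.com/".to_string())),
               vec![LoadType::PageSource("https://example.com/".to_string())]);
    assert!(initial_loads(None).is_empty());
}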