Backed out changeset a396ec8f44fd (bug 1577439) for failing valgrind-test on a CLOSED TREE.

This commit is contained in:
Gurzau Raul 2019-09-07 19:03:27 +03:00
parent 9f2152b92f
commit 9470d77271
6 changed files with 32 additions and 94 deletions

View File

@ -64,7 +64,7 @@ pub type BloomFilter = CountingBloomFilter<BloomStorageU8>;
/// Similarly, using a KeySize of 10 would lead to a 4% false
/// positive rate for N == 100 and to quite bad false positive
/// rates for larger N.
#[derive(Clone, Default)]
#[derive(Clone)]
pub struct CountingBloomFilter<S>
where
S: BloomStorage,
@ -79,7 +79,9 @@ where
/// Creates a new bloom filter.
#[inline]
pub fn new() -> Self {
Default::default()
CountingBloomFilter {
storage: Default::default(),
}
}
#[inline]

View File

@ -13,20 +13,13 @@ use owning_ref::OwningHandle;
use selectors::bloom::BloomFilter;
use servo_arc::Arc;
use smallvec::SmallVec;
use std::mem::ManuallyDrop;
thread_local! {
/// Bloom filters are large allocations, so we store them in thread-local storage
/// such that they can be reused across style traversals. StyleBloom is responsible
/// for ensuring that the bloom filter is zeroed when it is dropped.
///
/// We intentionally leak this from TLS because we don't have the guarantee
/// of TLS destructors to run in worker threads.
///
/// We could change this once https://github.com/rayon-rs/rayon/issues/688
/// is fixed, hopefully.
static BLOOM_KEY: ManuallyDrop<Arc<AtomicRefCell<BloomFilter>>> =
ManuallyDrop::new(Arc::new_leaked(Default::default()));
static BLOOM_KEY: Arc<AtomicRefCell<BloomFilter>> =
Arc::new_leaked(AtomicRefCell::new(BloomFilter::new()));
}
/// A struct that allows us to fast-reject deep descendant selectors avoiding
@ -135,7 +128,7 @@ impl<E: TElement> StyleBloom<E> {
// See https://github.com/servo/servo/pull/18420#issuecomment-328769322
#[inline(never)]
pub fn new() -> Self {
let bloom_arc = BLOOM_KEY.with(|b| Arc::clone(&*b));
let bloom_arc = BLOOM_KEY.with(|b| b.clone());
let filter =
OwningHandle::new_with_fn(bloom_arc, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
debug_assert!(
@ -143,7 +136,7 @@ impl<E: TElement> StyleBloom<E> {
"Forgot to zero the bloom filter last time"
);
StyleBloom {
filter,
filter: filter,
elements: Default::default(),
pushed_hashes: Default::default(),
}

View File

@ -12,8 +12,6 @@ use crate::shared_lock::SharedRwLock;
use crate::thread_state;
use rayon;
use std::env;
use std::sync::atomic::{AtomicUsize, Ordering};
use parking_lot::{RwLock, RwLockReadGuard};
/// Global style data
pub struct GlobalStyleData {
@ -24,28 +22,20 @@ pub struct GlobalStyleData {
pub options: StyleSystemOptions,
}
/// Global thread pool.
/// Global thread pool
pub struct StyleThreadPool {
/// How many threads parallel styling can use.
pub num_threads: usize,
/// The parallel styling thread pool.
///
/// For leak-checking purposes, we want to terminate the thread-pool, which
/// waits for all the async jobs to complete. Thus the RwLock.
style_thread_pool: RwLock<Option<rayon::ThreadPool>>,
pub style_thread_pool: Option<rayon::ThreadPool>,
}
/// Builds the display name for the style worker thread at position `index`,
/// e.g. `"StyleThread#0"`. Used when spawning the rayon worker threads.
fn thread_name(index: usize) -> String {
    let mut name = String::from("StyleThread#");
    name.push_str(&index.to_string());
    name
}
// A counter so that we can wait for shutdown of all threads. See
// StyleThreadPool::shutdown.
static ALIVE_WORKER_THREADS: AtomicUsize = AtomicUsize::new(0);
fn thread_startup(_index: usize) {
ALIVE_WORKER_THREADS.fetch_add(1, Ordering::Relaxed);
thread_state::initialize_layout_worker_thread();
#[cfg(feature = "gecko")]
unsafe {
@ -65,43 +55,6 @@ fn thread_shutdown(_: usize) {
bindings::Gecko_UnregisterProfilerThread();
bindings::Gecko_SetJemallocThreadLocalArena(false);
}
ALIVE_WORKER_THREADS.fetch_sub(1, Ordering::Relaxed);
}
impl StyleThreadPool {
/// Shuts down the thread pool, waiting for all work to complete.
pub fn shutdown() {
// Fast path: if no worker thread was ever started (counter is only
// incremented in thread_startup), there is nothing to wait for.
if ALIVE_WORKER_THREADS.load(Ordering::Relaxed) == 0 {
return;
}
{
// Drop the pool.
// Taking the Option out of the RwLock drops the rayon::ThreadPool,
// which asks its workers to exit; each worker decrements
// ALIVE_WORKER_THREADS in thread_shutdown as it dies.
let _ = STYLE_THREAD_POOL.style_thread_pool.write().take();
}
// Spin until all our threads are done. This will usually be pretty
// fast, as on shutdown there should be basically no threads left
// running.
//
// This still _technically_ doesn't give us the guarantee of TLS
// destructors running on the worker threads. For that we'd need help
// from rayon to properly join the threads.
//
// See https://github.com/rayon-rs/rayon/issues/688
//
// So we instead intentionally leak TLS stuff (see BLOOM_KEY and co) for
// now until that's fixed.
while ALIVE_WORKER_THREADS.load(Ordering::Relaxed) != 0 {
std::thread::yield_now();
}
}
/// Returns a reference to the thread pool.
///
/// We only really want to give read-only access to the pool, except
/// for shutdown().
// Callers hold the read guard for the duration of pool use; shutdown()
// takes the write lock, so it blocks until all readers are done.
pub fn pool(&self) -> RwLockReadGuard<Option<rayon::ThreadPool>> {
self.style_thread_pool.read()
}
}
lazy_static! {
@ -160,11 +113,10 @@ lazy_static! {
};
StyleThreadPool {
num_threads,
style_thread_pool: RwLock::new(pool),
num_threads: num_threads,
style_thread_pool: pool,
}
};
/// Global style data
pub static ref GLOBAL_STYLE_DATA: GlobalStyleData = GlobalStyleData {
shared_lock: SharedRwLock::new_leaked(),

View File

@ -82,7 +82,7 @@ use servo_arc::Arc;
use smallbitvec::SmallBitVec;
use smallvec::SmallVec;
use std::marker::PhantomData;
use std::mem::{self, ManuallyDrop};
use std::mem;
use std::ops::Deref;
use std::ptr::NonNull;
use uluru::{Entry, LRUCache};
@ -477,9 +477,10 @@ type TypelessSharingCache = SharingCacheBase<FakeCandidate>;
type StoredSharingCache = Arc<AtomicRefCell<TypelessSharingCache>>;
thread_local! {
// See the comment on bloom.rs about why do we leak this.
static SHARING_CACHE_KEY: ManuallyDrop<StoredSharingCache> =
ManuallyDrop::new(Arc::new_leaked(Default::default()));
// TODO(emilio): Looks like a few of these should just be Rc<RefCell<>> or
// something. No need for atomics in the thread-local code.
static SHARING_CACHE_KEY: StoredSharingCache =
Arc::new_leaked(AtomicRefCell::new(TypelessSharingCache::default()));
}
/// An LRU cache of the last few nodes seen, so that we can aggressively try to
@ -532,7 +533,7 @@ impl<E: TElement> StyleSharingCache<E> {
mem::align_of::<SharingCache<E>>(),
mem::align_of::<TypelessSharingCache>()
);
let cache_arc = SHARING_CACHE_KEY.with(|c| Arc::clone(&*c));
let cache_arc = SHARING_CACHE_KEY.with(|c| c.clone());
let cache =
OwningHandle::new_with_fn(cache_arc, |x| unsafe { x.as_ref() }.unwrap().borrow_mut());
debug_assert!(cache.is_empty());

View File

@ -96,7 +96,7 @@ use style::gecko_bindings::sugar::ownership::{
HasBoxFFI, HasSimpleFFI, Owned, OwnedOrNull, Strong,
};
use style::gecko_bindings::sugar::refptr::RefPtr;
use style::global_style_data::{GlobalStyleData, GLOBAL_STYLE_DATA, StyleThreadPool, STYLE_THREAD_POOL};
use style::global_style_data::{GlobalStyleData, GLOBAL_STYLE_DATA, STYLE_THREAD_POOL};
use style::invalidation::element::restyle_hints::RestyleHint;
use style::media_queries::MediaList;
use style::parser::{self, Parse, ParserContext};
@ -182,6 +182,12 @@ pub unsafe extern "C" fn Servo_Initialize(dummy_url_data: *mut URLExtraData) {
DUMMY_URL_DATA = dummy_url_data;
}
// FFI entry point called by Gecko when a cooperative thread starts up.
// It only tags the current thread's state as LAYOUT; no other setup happens
// here. NOTE(review): presumably invoked once per cooperative thread before
// any style work runs on it — confirm against the C++ caller.
#[no_mangle]
pub extern "C" fn Servo_InitializeCooperativeThread() {
// Pretend that we're a Servo Layout thread to make some assertions happy.
thread_state::initialize(thread_state::ThreadState::LAYOUT);
}
#[no_mangle]
pub unsafe extern "C" fn Servo_Shutdown() {
DUMMY_URL_DATA = ptr::null_mut();
@ -247,10 +253,8 @@ fn traverse_subtree(
debug!("Traversing subtree from {:?}", element);
let thread_pool_holder = &*STYLE_THREAD_POOL;
let pool;
let thread_pool = if traversal_flags.contains(TraversalFlags::ParallelTraversal) {
pool = thread_pool_holder.pool();
pool.as_ref()
thread_pool_holder.style_thread_pool.as_ref()
} else {
None
};
@ -1202,6 +1206,11 @@ pub extern "C" fn Servo_Property_IsDiscreteAnimatable(property: nsCSSPropertyID)
}
}
// FFI entry point: reports the number of worker threads the global style
// thread pool was configured with (STYLE_THREAD_POOL.num_threads).
// The `as u32` cast narrows from usize; realistic thread counts are tiny,
// so truncation is not a concern in practice.
#[no_mangle]
pub extern "C" fn Servo_StyleWorkerThreadCount() -> u32 {
STYLE_THREAD_POOL.num_threads as u32
}
#[no_mangle]
pub extern "C" fn Servo_Element_ClearData(element: &RawGeckoElement) {
unsafe { GeckoElement(element).clear_data() };
@ -1406,7 +1415,7 @@ pub unsafe extern "C" fn Servo_StyleSheet_FromUTF8BytesAsync(
should_record_use_counters,
);
if let Some(thread_pool) = STYLE_THREAD_POOL.pool().as_ref() {
if let Some(thread_pool) = STYLE_THREAD_POOL.style_thread_pool.as_ref() {
thread_pool.spawn(|| {
profiler_label!(Parse);
async_parser.parse();
@ -1416,12 +1425,6 @@ pub unsafe extern "C" fn Servo_StyleSheet_FromUTF8BytesAsync(
}
}
// FFI entry point: tears down the global style thread pool, blocking until
// all outstanding work completes (see StyleThreadPool::shutdown).
// Must be called on the main thread outside of any Servo traversal — the
// debug_assert enforces exactly that in debug builds.
#[no_mangle]
pub unsafe extern "C" fn Servo_ShutdownThreadPool() {
debug_assert!(is_main_thread() && !is_in_servo_traversal());
StyleThreadPool::shutdown();
}
#[no_mangle]
pub unsafe extern "C" fn Servo_StyleSheet_FromSharedData(
extra_data: *mut URLExtraData,

View File

@ -88,7 +88,6 @@
#include "mozilla/ClearOnShutdown.h"
#include "mozilla/CountingAllocatorBase.h"
#include "mozilla/UniquePtr.h"
#include "mozilla/ServoStyleConsts.h"
#include "mozilla/ipc/GeckoChildProcessHost.h"
@ -769,19 +768,7 @@ nsresult ShutdownXPCOM(nsIServiceManager* aServMgr) {
GkRust_Shutdown();
#ifdef NS_FREE_PERMANENT_DATA
// By the time we're shutting down, there may still be async parse tasks going
// on in the Servo thread-pool. This is fairly uncommon, though not
// impossible. CSS parsing heavily uses the atom table, so obviously it's not
// fine to get rid of it.
//
// In leak-checking / ASAN / etc. builds, shut down the servo thread-pool,
// which will wait for all the work to be done. For other builds, we don't
// really want to wait on shutdown for possibly slow tasks. So just leak the
// atom table in those.
Servo_ShutdownThreadPool();
NS_ShutdownAtomTable();
#endif
NS_IF_RELEASE(gDebug);