From 1975b4203f2fefefd5c2cf1abf3ccb9ff9969360 Mon Sep 17 00:00:00 2001
From: Jon Coppeard
Date: Wed, 3 Jun 2020 08:10:45 +0000
Subject: [PATCH] Bug 1470369 - Don't collect the nursery every GC slice during
 sweeping r=sfink

The main problem here is that we sweep weak caches off-thread, and when we
finish sweeping a hash table the Enum class' destructor can rehash or resize
the table, causing store buffer entries to be added or removed (since the
table may now contain nursery pointers).

To address this, the patch adds a store buffer lock and establishes that all
off-thread store buffer access from inside the GC must take place with this
lock held.

The changes to GCHashSet/Map are a little gross; perhaps it would be better to
add an explicit API to hash tables to allow us to postpone the rehash/resize
operations, but I haven't done that here.

Other complications are:

The TypeSetRef generic buffer entries can contain pointers into TI data that
is moved during sweeping. We therefore do need to collect the nursery if any
of those are present. This was relatively rare in testing.

Finally, swapping objects can result in pointers into dying objects being put
in the whole cell store buffer (because we do tricks with skipping barriers
when we remap wrappers to not keep otherwise dead wrappers alive). We need to
collect the nursery if these are present to prevent them from being accessed
after the dying objects are finalized.

Differential Revision: https://phabricator.services.mozilla.com/D77831
---
 js/public/GCHashTable.h         | 50 ++++++++++++++++++++---
 js/public/SweepingAPI.h         | 27 ++++++++++++-
 js/src/gc/GC.cpp                | 72 ++++++++++++++++++++++++---------
 js/src/gc/GCRuntime.h           |  7 +++-
 js/src/gc/Marking.cpp           |  5 +--
 js/src/gc/NurseryAwareHashMap.h |  1 -
 js/src/gc/StoreBuffer.cpp       | 23 +++++++++--
 js/src/gc/StoreBuffer.h         | 52 +++++++++++++++++++++---
 js/src/gc/Verifier.cpp          |  2 -
 js/src/gc/WeakMap-inl.h         |  4 +-
 js/src/threading/Mutex.cpp      |  5 ++-
 js/src/threading/Mutex.h        |  1 +
 js/src/vm/ArrayBufferObject.cpp |  1 -
 js/src/vm/JSObject.cpp          |  8 +++-
 js/src/vm/MutexIDs.h            |  1 +
 js/src/vm/TypeInference.cpp     |  3 ++
 16 files changed, 212 insertions(+), 50 deletions(-)

diff --git a/js/public/GCHashTable.h b/js/public/GCHashTable.h
index 72e8f56e713f..310c9fe50227 100644
--- a/js/public/GCHashTable.h
+++ b/js/public/GCHashTable.h
@@ -76,7 +76,12 @@ class GCHashMap : public js::HashMap {
   bool needsSweep() const { return !this->empty(); }

   void sweep() {
-    for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+    typename Base::Enum e(*this);
+    sweepEntries(e);
+  }
+
+  void sweepEntries(typename Base::Enum& e) {
+    for (; !e.empty(); e.popFront()) {
       if (MapSweepPolicy::needsSweep(&e.front().mutableKey(),
                                      &e.front().value())) {
         e.removeFront();
@@ -270,7 +275,12 @@ class GCHashSet : public js::HashSet {
   bool needsSweep() const { return !this->empty(); }

   void sweep() {
-    for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+    typename Base::Enum e(*this);
+    sweepEntries(e);
+  }
+
+  void sweepEntries(typename Base::Enum& e) {
+    for (; !e.empty(); e.popFront()) {
       if (GCPolicy::needsSweep(&e.mutableFront())) {
         e.removeFront();
       }
@@ -410,9 +420,22 @@ class WeakCache>

   bool needsSweep() override { return map.needsSweep(); }

-  size_t sweep() override {
+  size_t sweep(js::gc::StoreBuffer* sbToLock) override {
     size_t steps = map.count();
-    map.sweep();
+
+    // Create an Enum and sweep the table entries.
+    mozilla::Maybe e;
+    e.emplace(map);
+    map.sweepEntries(e.ref());
+
+    // Potentially take a lock while the Enum's destructor is called as this can
+    // rehash/resize the table and access the store buffer.
+    mozilla::Maybe lock;
+    if (sbToLock) {
+      lock.emplace(sbToLock);
+    }
+    e.reset();
+
     return steps;
   }

@@ -593,9 +616,24 @@ class WeakCache>
         set(std::forward(args)...),
         needsBarrier(false) {}

-  size_t sweep() override {
+  size_t sweep(js::gc::StoreBuffer* sbToLock) override {
     size_t steps = set.count();
-    set.sweep();
+
+    // Create an Enum and sweep the table entries. It's not necessary to take
+    // the store buffer lock yet.
+    mozilla::Maybe e;
+    e.emplace(set);
+    set.sweepEntries(e.ref());
+
+    // Destroy the Enum, potentially rehashing or resizing the table. Since this
+    // can access the store buffer, we need to take a lock for this if we're
+    // called off main thread.
+    mozilla::Maybe lock;
+    if (sbToLock) {
+      lock.emplace(sbToLock);
+    }
+    e.reset();
+
     return steps;
   }

diff --git a/js/public/SweepingAPI.h b/js/public/SweepingAPI.h
index baf42d5c9b8f..ffb7d4bfa760 100644
--- a/js/public/SweepingAPI.h
+++ b/js/public/SweepingAPI.h
@@ -9,6 +9,29 @@
 #include "js/HeapAPI.h"

+namespace js {
+namespace gc {
+
+class StoreBuffer;
+
+JS_PUBLIC_API void LockStoreBuffer(StoreBuffer* sb);
+JS_PUBLIC_API void UnlockStoreBuffer(StoreBuffer* sb);
+
+class AutoLockStoreBuffer {
+  StoreBuffer* sb;
+
+ public:
+  explicit AutoLockStoreBuffer(StoreBuffer* sb) : sb(sb) {
+    LockStoreBuffer(sb);
+  }
+  ~AutoLockStoreBuffer() {
+    UnlockStoreBuffer(sb);
+  }
+};
+
+}  // namespace gc
+}  // namespace js
+
 namespace JS {
 namespace detail {
 class WeakCacheBase;
@@ -32,7 +55,7 @@ class WeakCacheBase : public mozilla::LinkedListElement {
   WeakCacheBase(WeakCacheBase&& other) = default;
   virtual ~WeakCacheBase() = default;

-  virtual size_t sweep() = 0;
+  virtual size_t sweep(js::gc::StoreBuffer* sbToLock) = 0;
   virtual bool needsSweep() = 0;

   virtual bool setNeedsIncrementalBarrier(bool needs) {
@@ -68,7 +91,7 @@ class WeakCache : protected detail::WeakCacheBase,
   const T& get() const { return cache; }
   T& get() { return cache; }

-  size_t sweep() override {
+  size_t sweep(js::gc::StoreBuffer* sbToLock) override {
     GCPolicy::sweep(&cache);
     return 0;
   }
diff --git a/js/src/gc/GC.cpp b/js/src/gc/GC.cpp
index 8ca52165a4c2..f43879fb4c7e 100644
--- a/js/src/gc/GC.cpp
+++ b/js/src/gc/GC.cpp
@@ -2167,9 +2167,12 @@ void GCRuntime::sweepZoneAfterCompacting(MovingTracer* trc, Zone* zone) {
   sweepTypesAfterCompacting(zone);
   sweepFinalizationRegistries(zone);
   zone->weakRefMap().sweep();
-  zone->sweepWeakMaps();
-  for (auto* cache : zone->weakCaches()) {
-    cache->sweep();
+
+  {
+    zone->sweepWeakMaps();
+    for (auto* cache : zone->weakCaches()) {
+      cache->sweep(nullptr);
+    }
   }

   if (jit::JitZone* jitZone = zone->jitZone()) {
@@ -2365,7 +2368,7 @@ void GCRuntime::updateTypeDescrObjects(MovingTracer* trc, Zone* zone) {
   // need to be updated. Do not update any non-reserved slots, since they might
   // point back to unprocessed descriptor objects.
-  zone->typeDescrObjects().sweep();
+  zone->typeDescrObjects().sweep(nullptr);

   for (auto r = zone->typeDescrObjects().all(); !r.empty(); r.popFront()) {
     NativeObject* obj = &r.front()->as();
@@ -2547,7 +2550,7 @@ void GCRuntime::updateRuntimePointersToRelocatedCells(AutoGCSession& session) {
   DebugAPI::sweepAll(rt->defaultFreeOp());
   jit::JitRuntime::TraceWeakJitcodeGlobalTable(rt, &trc);
   for (JS::detail::WeakCacheBase* cache : rt->weakCaches()) {
-    cache->sweep();
+    cache->sweep(nullptr);
   }

   // Type inference may put more blocks here to free.
@@ -4424,7 +4427,7 @@ bool Compartment::findSweepGroupEdges() {
     // zone is not still being marked when we start sweeping the wrapped zone.
     // As an optimization, if the wrapped object is already marked black there
     // is no danger of later marking and we can skip this.
-    if (key->asTenured().isMarkedBlack()) {
+    if (key->isMarkedBlack()) {
      continue;
     }

@@ -4960,7 +4963,7 @@ class ImmediateSweepWeakCacheTask : public GCParallelTask {

   void run() override {
     AutoSetThreadIsSweeping threadIsSweeping(zone);
-    cache.sweep();
+    cache.sweep(&gc->storeBuffer());
   }
 };

@@ -5040,6 +5043,9 @@ void GCRuntime::sweepWeakMaps() {
      oomUnsafe.crash("clearing weak keys in beginSweepingSweepGroup()");
     }

+    // Lock the store buffer since this may access it when rehashing or
+    // resizing the tables.
+    AutoLockStoreBuffer lock(&storeBuffer());
     zone->sweepWeakMaps();
   }
 }
@@ -5240,7 +5246,7 @@ static void SweepAllWeakCachesOnMainThread(JSRuntime* rt) {
     if (cache->needsIncrementalBarrier()) {
       cache->setNeedsIncrementalBarrier(false);
     }
-    cache->sweep();
+    cache->sweep(nullptr);
     return true;
   });
 }
@@ -5254,6 +5260,8 @@ IncrementalProgress GCRuntime::beginSweepingSweepGroup(JSFreeOp* fop,

   using namespace gcstats;

+  MOZ_ASSERT(!storeBuffer().hasTypeSetPointers());
+
   AutoSCC scc(stats(), sweepGroupIndex);

   bool sweepingAtoms = false;
@@ -5598,6 +5606,8 @@ IncrementalProgress GCRuntime::sweepTypeInformation(JSFreeOp* fop,
   // the sweep group finishes we won't be able to determine which things in
   // the zone are live.

+  MOZ_ASSERT(!storeBuffer().hasTypeSetPointers());
+
   gcstats::AutoPhase ap1(stats(), gcstats::PhaseKind::SWEEP_COMPARTMENTS);
   gcstats::AutoPhase ap2(stats(), gcstats::PhaseKind::SWEEP_TYPES);

@@ -5685,7 +5695,7 @@ static size_t IncrementalSweepWeakCache(GCRuntime* gc,
   JS::detail::WeakCacheBase* cache = item.cache;
   MOZ_ASSERT(cache->needsIncrementalBarrier());

-  size_t steps = cache->sweep();
+  size_t steps = cache->sweep(&gc->storeBuffer());
   cache->setNeedsIncrementalBarrier(false);

   return steps;
@@ -6555,16 +6565,16 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,

   MOZ_ASSERT_IF(isIncrementalGCInProgress(), isIncremental);

+  isIncremental = !budget.isUnlimited();
+
   /*
    * Non-incremental collection expects that the nursery is empty.
    */
-  if (!isIncremental) {
+  if (!isIncremental && !isIncrementalGCInProgress()) {
     MOZ_ASSERT(nursery().isEmpty());
     storeBuffer().checkEmpty();
   }

-  isIncremental = !budget.isUnlimited();
-
   if (useZeal && hasIncrementalTwoSliceZealMode()) {
     /*
      * Yields between slices occurs at predetermined points in these modes;
@@ -6663,14 +6673,12 @@ void GCRuntime::incrementalSlice(SliceBudget& budget,

       incrementalState = State::Sweep;
       lastMarkSlice = false;
+      beginSweepPhase(reason, session);

       [[fallthrough]];

     case State::Sweep:
-      MOZ_ASSERT(nursery().isEmpty());
-      storeBuffer().checkEmpty();
-
       rt->mainContextFromOwnThread()->traceWrapperGCRooters(&marker);

       if (performSweepActions(budget) == NotFinished) {
@@ -7103,25 +7111,49 @@ bool GCRuntime::shouldCollectNurseryForSlice(bool nonincrementalByAPI,
     return false;
   }

+  if (nursery().shouldCollect()) {
+    return true;
+  }
+
+  bool nonIncremental = nonincrementalByAPI || budget.isUnlimited();
+
+  bool shouldCollectForSweeping = storeBuffer().hasTypeSetPointers() ||
+                                  storeBuffer().mayHavePointersToDeadCells();
+
   switch (incrementalState) {
     case State::NotActive:
-    case State::Sweep:
-    case State::Compact:
       return true;
     case State::Mark:
+      return (mightSweepInThisSlice(nonIncremental) &&
+              shouldCollectForSweeping) ||
+             mightCompactInThisSlice(nonIncremental);
+    case State::Sweep:
+      return shouldCollectForSweeping ||
+             mightCompactInThisSlice(nonIncremental);
+    case State::Finalize:
+      return mightCompactInThisSlice(nonIncremental);
+    case State::Compact:
+      return true;
     case State::Decommit:
-      return (nonincrementalByAPI || budget.isUnlimited() || lastMarkSlice ||
-              nursery().shouldCollect() || hasIncrementalTwoSliceZealMode());
     case State::Finish:
       return false;
-    case State::MarkRoots:
+    default:
       MOZ_CRASH("Unexpected GC state");
   }

   return false;
 }

+inline bool GCRuntime::mightSweepInThisSlice(bool nonIncremental) {
+  MOZ_ASSERT(incrementalState < State::Sweep);
+  return nonIncremental || lastMarkSlice || hasIncrementalTwoSliceZealMode();
+}
+
+inline bool GCRuntime::mightCompactInThisSlice(bool nonIncremental) {
+  MOZ_ASSERT(incrementalState < State::Compact);
+  return isCompacting && (nonIncremental || hasIncrementalTwoSliceZealMode());
+}
+
 #ifdef JS_GC_ZEAL
 static bool IsDeterministicGCReason(JS::GCReason reason) {
   switch (reason) {
diff --git a/js/src/gc/GCRuntime.h b/js/src/gc/GCRuntime.h
index f09d627b00c6..ae489ebed27a 100644
--- a/js/src/gc/GCRuntime.h
+++ b/js/src/gc/GCRuntime.h
@@ -705,6 +705,8 @@ class GCRuntime {
                                    JS::GCReason reason, AutoGCSession& session);
   MOZ_MUST_USE bool shouldCollectNurseryForSlice(bool nonincrementalByAPI,
                                                  SliceBudget& budget);
+  bool mightSweepInThisSlice(bool nonIncremental);
+  bool mightCompactInThisSlice(bool nonIncremental);
   void collectNursery(JS::GCReason reason, gcstats::PhaseKind phase);

   friend class AutoCallGCCallbacks;
@@ -1200,7 +1202,10 @@ class GCRuntime {

  private:
   MainThreadData nursery_;
-  MainThreadData storeBuffer_;
+
+  // The store buffer used to track tenured to nursery edges for generational
+  // GC. This is accessed off main thread when sweeping WeakCaches.
+  MainThreadOrGCTaskData storeBuffer_;

   mozilla::TimeStamp lastLastDitchTime;

diff --git a/js/src/gc/Marking.cpp b/js/src/gc/Marking.cpp
index 0e662783e7bb..5dce761b9160 100644
--- a/js/src/gc/Marking.cpp
+++ b/js/src/gc/Marking.cpp
@@ -2700,8 +2700,6 @@ void GCMarker::repush(JSObject* obj) {
 }

 bool GCMarker::enterWeakMarkingMode() {
-  MOZ_ASSERT(runtime()->gc.nursery().isEmpty());
-
   MOZ_ASSERT(weakMapAction() == ExpandWeakMaps);
   MOZ_ASSERT(state != MarkingState::WeakMarking);
   if (state == MarkingState::IterativeMarking) {
@@ -3745,8 +3743,7 @@ inline bool SweepingTracer::sweepEdge(T** thingp) {
   // Bug 1501334 : IsAboutToBeFinalized doesn't work for atoms
   // Bug 1071218 : Refactor Debugger::sweepAll and
   // JitRuntime::SweepJitcodeGlobalTable to work per sweep group
-  TenuredCell& tenured = thing->asTenured();
-  if (!tenured.isMarkedAny()) {
+  if (!thing->isMarkedAny()) {
     *thingp = nullptr;
     return false;
   }
diff --git a/js/src/gc/NurseryAwareHashMap.h b/js/src/gc/NurseryAwareHashMap.h
index ec7ea0a10187..8a6751b6caa0 100644
--- a/js/src/gc/NurseryAwareHashMap.h
+++ b/js/src/gc/NurseryAwareHashMap.h
@@ -170,7 +170,6 @@ class NurseryAwareHashMap {
   }

   void sweep() {
-    MOZ_ASSERT(nurseryEntries.empty());
     map.sweep();
   }

diff --git a/js/src/gc/StoreBuffer.cpp b/js/src/gc/StoreBuffer.cpp
index 8a7ec7b3eb8a..d9544ff7551e 100644
--- a/js/src/gc/StoreBuffer.cpp
+++ b/js/src/gc/StoreBuffer.cpp
@@ -10,6 +10,8 @@

 #include "gc/Statistics.h"
 #include "vm/ArgumentsObject.h"
+#include "vm/JSContext.h"
+#include "vm/MutexIDs.h"
 #include "vm/Runtime.h"

 #include "gc/GC-inl.h"
@@ -17,6 +19,16 @@
 using namespace js;
 using namespace js::gc;

+JS_PUBLIC_API void js::gc::LockStoreBuffer(StoreBuffer* sb) {
+  MOZ_ASSERT(sb);
+  sb->lock();
+}
+
+JS_PUBLIC_API void js::gc::UnlockStoreBuffer(StoreBuffer* sb) {
+  MOZ_ASSERT(sb);
+  sb->unlock();
+}
+
 bool StoreBuffer::WholeCellBuffer::init() {
   MOZ_ASSERT(!head_);
   if (!storage_) {
@@ -55,18 +67,21 @@ void StoreBuffer::GenericBuffer::trace(JSTracer* trc) {
 }

 StoreBuffer::StoreBuffer(JSRuntime* rt, const Nursery& nursery)
-    : bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
+    : lock_(mutexid::StoreBuffer),
+      bufferVal(this, JS::GCReason::FULL_VALUE_BUFFER),
       bufStrCell(this, JS::GCReason::FULL_CELL_PTR_STR_BUFFER),
       bufBigIntCell(this, JS::GCReason::FULL_CELL_PTR_BIGINT_BUFFER),
       bufObjCell(this, JS::GCReason::FULL_CELL_PTR_OBJ_BUFFER),
       bufferSlot(this, JS::GCReason::FULL_SLOT_BUFFER),
       bufferWholeCell(this),
       bufferGeneric(this),
-      cancelIonCompilations_(false),
       runtime_(rt),
       nursery_(nursery),
       aboutToOverflow_(false),
-      enabled_(false)
+      enabled_(false),
+      cancelIonCompilations_(false),
+      hasTypeSetPointers_(false),
+      mayHavePointersToDeadCells_(false)
 #ifdef DEBUG
       ,
       mEntered(false)
 #endif
@@ -118,6 +133,8 @@ void StoreBuffer::clear() {

   aboutToOverflow_ = false;
   cancelIonCompilations_ = false;
+  hasTypeSetPointers_ = false;
+  mayHavePointersToDeadCells_ = false;

   bufferVal.clear();
   bufStrCell.clear();
diff --git a/js/src/gc/StoreBuffer.h b/js/src/gc/StoreBuffer.h
index 97899c7175fd..dd0f172d137c 100644
--- a/js/src/gc/StoreBuffer.h
+++ b/js/src/gc/StoreBuffer.h
@@ -19,6 +19,7 @@
 #include "js/AllocPolicy.h"
 #include "js/MemoryMetrics.h"
 #include "js/UniquePtr.h"
+#include "threading/Mutex.h"

 namespace js {
 namespace gc {
@@ -26,6 +27,10 @@ namespace gc {
 class Arena;
 class ArenaCellSet;

+#ifdef DEBUG
+extern bool CurrentThreadHasLockedGC();
+#endif
+
 /*
  * BufferableRef represents an abstract reference for use in the generational
 * GC's remembered set. Entries in the store buffer that cannot be represented
@@ -378,10 +383,23 @@ class StoreBuffer {
     } Hasher;
   };

+  // The GC runs tasks that may access the store buffer in parallel and so
+  // must take a lock. The mutator may only access the store buffer from the
+  // main thread.
+  inline void CheckAccess() const {
+#ifdef DEBUG
+    if (JS::RuntimeHeapIsBusy()) {
+      MOZ_ASSERT((CurrentThreadCanAccessRuntime(runtime_) && !lock_.isHeld()) ||
+                 lock_.ownedByCurrentThread());
+    } else {
+      MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+    }
+#endif
+  }
+
   template void unput(Buffer& buffer, const Edge& edge) {
-    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
-    MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+    CheckAccess();
     if (!isEnabled()) {
       return;
     }
@@ -391,8 +409,7 @@ class StoreBuffer {

   template void put(Buffer& buffer, const Edge& edge) {
-    MOZ_ASSERT(!JS::RuntimeHeapIsBusy());
-    MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
+    CheckAccess();
     if (!isEnabled()) {
       return;
     }
@@ -402,6 +419,8 @@ class StoreBuffer {
     }
   }

+  Mutex lock_;
+
   MonoTypeBuffer bufferVal;
   MonoTypeBuffer bufStrCell;
   MonoTypeBuffer bufBigIntCell;
@@ -409,13 +428,15 @@ class StoreBuffer {
   MonoTypeBuffer bufferSlot;
   WholeCellBuffer bufferWholeCell;
   GenericBuffer bufferGeneric;
-  bool cancelIonCompilations_;

   JSRuntime* runtime_;
   const Nursery& nursery_;

   bool aboutToOverflow_;
   bool enabled_;
+  bool cancelIonCompilations_;
+  bool hasTypeSetPointers_;
+  bool mayHavePointersToDeadCells_;
 #ifdef DEBUG
   bool mEntered; /* For ReentrancyGuard. */
 #endif
@@ -436,6 +457,21 @@ class StoreBuffer {

   bool cancelIonCompilations() const { return cancelIonCompilations_; }

+  /*
+   * Type inference data structures are moved during sweeping, so if we are
+   * to sweep them then we must make sure that the store buffer has no pointers
+   * into them.
+   */
+  bool hasTypeSetPointers() const { return hasTypeSetPointers_; }
+
+  /*
+   * Brain transplants may add whole cell buffer entries for dead cells. We must
+   * evict the nursery prior to sweeping arenas if any such entries are present.
+   */
+  bool mayHavePointersToDeadCells() const {
+    return mayHavePointersToDeadCells_;
+  }
+
   /* Insert a single edge into the buffer/remembered set. */
   void putValue(JS::Value* vp) { put(bufferVal, ValueEdge(vp)); }
   void unputValue(JS::Value* vp) { unput(bufferVal, ValueEdge(vp)); }
@@ -467,6 +503,8 @@ class StoreBuffer {
   }

   void setShouldCancelIonCompilations() { cancelIonCompilations_ = true; }
+  void setHasTypeSetPointers() { hasTypeSetPointers_ = true; }
+  void setMayHavePointersToDeadCells() { mayHavePointersToDeadCells_ = true; }

   /* Methods to trace the source of all edges in the store buffer. */
   void traceValues(TenuringTracer& mover) { bufferVal.trace(mover); }
@@ -486,6 +524,10 @@ class StoreBuffer {
                               JS::GCSizes* sizes);

   void checkEmpty() const;
+
+  // For use by the GC only.
+  void lock() { lock_.lock(); }
+  void unlock() { lock_.unlock(); }
 };

 // A set of cells in an arena used to implement the whole cell store buffer.
diff --git a/js/src/gc/Verifier.cpp b/js/src/gc/Verifier.cpp
index d7c1fbf780e4..8b1b2a7021a7 100644
--- a/js/src/gc/Verifier.cpp
+++ b/js/src/gc/Verifier.cpp
@@ -497,7 +497,6 @@ void js::gc::MarkingValidator::nonIncrementalMark(AutoGCSession& session) {
   JSRuntime* runtime = gc->rt;
   GCMarker* gcmarker = &gc->marker;

-  MOZ_ASSERT(gc->nursery().isEmpty());
   MOZ_ASSERT(!gcmarker->isWeakMarking());

   /* Wait for off-thread parsing which can allocate. */
@@ -671,7 +670,6 @@ void js::gc::MarkingValidator::validate() {
     return;
   }

-  MOZ_ASSERT(gc->nursery().isEmpty());
   MOZ_ASSERT(!gc->marker.isWeakMarking());

   gc->waitBackgroundSweepEnd();
diff --git a/js/src/gc/WeakMap-inl.h b/js/src/gc/WeakMap-inl.h
index 6d5d89ece696..4aaee871e969 100644
--- a/js/src/gc/WeakMap-inl.h
+++ b/js/src/gc/WeakMap-inl.h
@@ -354,8 +354,8 @@ void WeakMap::traceMappings(WeakMapTracer* tracer) {
 template void WeakMap::assertEntriesNotAboutToBeFinalized() {
   for (Range r = Base::all(); !r.empty(); r.popFront()) {
-    K k(r.front().key());
-    MOZ_ASSERT(!gc::IsAboutToBeFinalized(&k));
+    auto k = gc::detail::ExtractUnbarriered(r.front().key());
+    MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&k));
     JSObject* delegate = gc::detail::GetDelegate(k);
     if (delegate) {
       MOZ_ASSERT(!gc::IsAboutToBeFinalizedUnbarriered(&delegate),
diff --git a/js/src/threading/Mutex.cpp b/js/src/threading/Mutex.cpp
index d99d5e18f3cd..d72668d70bb3 100644
--- a/js/src/threading/Mutex.cpp
+++ b/js/src/threading/Mutex.cpp
@@ -59,10 +59,13 @@ void js::Mutex::preUnlockChecks() {
   owningThread_.reset();
 }

+bool js::Mutex::isHeld() const { return owningThread_.isSome(); }
+
 bool js::Mutex::ownedByCurrentThread() const {
   // First determine this using the owningThread_ property, then check it via
   // the mutex stack.
-  bool check = ThreadId::ThisThreadId() == owningThread_.value();
+  bool check = owningThread_.isSome() &&
+               ThreadId::ThisThreadId() == owningThread_.value();

   Mutex* stack = HeldMutexStack.get();

diff --git a/js/src/threading/Mutex.h b/js/src/threading/Mutex.h
index c7ecdc668c96..dadc421bee1a 100644
--- a/js/src/threading/Mutex.h
+++ b/js/src/threading/Mutex.h
@@ -75,6 +75,7 @@ class Mutex {

 #ifdef DEBUG
  public:
+  bool isHeld() const;
   bool ownedByCurrentThread() const;

  private:
diff --git a/js/src/vm/ArrayBufferObject.cpp b/js/src/vm/ArrayBufferObject.cpp
index 3129a8c42953..6bd173f3e5f7 100644
--- a/js/src/vm/ArrayBufferObject.cpp
+++ b/js/src/vm/ArrayBufferObject.cpp
@@ -1619,7 +1619,6 @@ bool InnerViewTable::sweepEntry(JSObject** pkey, ViewVector& views) {
 }

 void InnerViewTable::sweep() {
-  MOZ_ASSERT(nurseryKeys.empty());
   map.sweep();
 }

diff --git a/js/src/vm/JSObject.cpp b/js/src/vm/JSObject.cpp
index f7a35eaef4fc..397283bc7e5e 100644
--- a/js/src/vm/JSObject.cpp
+++ b/js/src/vm/JSObject.cpp
@@ -1606,8 +1606,12 @@ void JSObject::swap(JSContext* cx, HandleObject a, HandleObject b) {
    * nursery pointers in either object.
    */
   MOZ_ASSERT(!IsInsideNursery(a) && !IsInsideNursery(b));
-  cx->runtime()->gc.storeBuffer().putWholeCell(a);
-  cx->runtime()->gc.storeBuffer().putWholeCell(b);
+  gc::StoreBuffer& storeBuffer = cx->runtime()->gc.storeBuffer();
+  storeBuffer.putWholeCell(a);
+  storeBuffer.putWholeCell(b);
+  if (a->zone()->wasGCStarted() || b->zone()->wasGCStarted()) {
+    storeBuffer.setMayHavePointersToDeadCells();
+  }

   unsigned r = NotifyGCPreSwap(a, b);

diff --git a/js/src/vm/MutexIDs.h b/js/src/vm/MutexIDs.h
index 954bfa2c7b38..7b49c735b336 100644
--- a/js/src/vm/MutexIDs.h
+++ b/js/src/vm/MutexIDs.h
@@ -30,6 +30,7 @@
   _(GlobalHelperThreadState, 300)     \
                                       \
   _(GCLock, 400)                      \
+  _(StoreBuffer, 400)                 \
                                       \
   _(SharedImmutableStringsCache, 500) \
   _(FutexThread, 500)                 \
diff --git a/js/src/vm/TypeInference.cpp b/js/src/vm/TypeInference.cpp
index 18753255c012..234837fd74b2 100644
--- a/js/src/vm/TypeInference.cpp
+++ b/js/src/vm/TypeInference.cpp
@@ -733,6 +733,7 @@ void ConstraintTypeSet::postWriteBarrier(JSContext* cx, Type type) {
     if (gc::StoreBuffer* sb = type.singletonNoBarrier()->storeBuffer()) {
       sb->putGeneric(TypeSetRef(cx->zone(), this));
       sb->setShouldCancelIonCompilations();
+      sb->setHasTypeSetPointers();
     }
   }
 }
@@ -4575,6 +4576,8 @@ TypeZone::~TypeZone() {

 void TypeZone::beginSweep() {
   MOZ_ASSERT(zone()->isGCSweepingOrCompacting());
+  MOZ_ASSERT(
+      !zone()->runtimeFromMainThread()->gc.storeBuffer().hasTypeSetPointers());

   // Clear the analysis pool, but don't release its data yet. While sweeping
   // types any live data will be allocated into the pool.
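
To make the locking pattern in the WeakCache::sweep() changes above easier to
follow, here is a minimal, self-contained C++ sketch of the same idea: sweep
the table entries first without any lock, then perform only the step that may
rehash or resize the table (and hence touch the store buffer) under the store
buffer lock when running off the main thread. This is not SpiderMonkey code;
the names SketchStoreBuffer, SketchWeakCache and isDead are invented for
illustration, with std::mutex standing in for the Mutex this patch adds to
StoreBuffer.

#include <cstddef>
#include <mutex>
#include <optional>
#include <unordered_set>

// Stand-in for js::gc::StoreBuffer with its internal Mutex.
struct SketchStoreBuffer {
  std::mutex mutex;
};

// Stand-in for a weak cache whose sweep() may run on a GC helper thread.
class SketchWeakCache {
  std::unordered_set<int> entries_;

  // Placeholder for the real liveness check done by GCPolicy::needsSweep.
  static bool isDead(int entry) { return entry % 2 == 0; }

 public:
  void put(int entry) { entries_.insert(entry); }

  // Analogous to WeakCache::sweep(StoreBuffer* sbToLock): sbToLock is null on
  // the main thread and non-null when sweeping runs off-thread.
  size_t sweep(SketchStoreBuffer* sbToLock) {
    size_t steps = entries_.size();

    // Phase 1: drop dead entries. No lock is needed; this does not touch the
    // store buffer.
    for (auto it = entries_.begin(); it != entries_.end();) {
      if (isDead(*it)) {
        it = entries_.erase(it);
      } else {
        ++it;
      }
    }

    // Phase 2: the step that may rehash/resize the table (the Enum destructor
    // in the real patch) can add or remove store buffer entries, so it runs
    // under the store buffer lock when called off the main thread.
    std::optional<std::lock_guard<std::mutex>> lock;
    if (sbToLock) {
      lock.emplace(sbToLock->mutex);
    }
    entries_.rehash(0);  // Placeholder for the rehash/resize done by ~Enum.

    return steps;
  }
};

As in the patch, the bulk of the sweeping work runs without the lock held;
only the short rehash/resize step can contend with the main thread's store
buffer writes.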