Bug 1337117 - Remove references to main thread in the JS engine, r=jandem.

--HG--
extra : rebase_source : 466336ed48fb8636d1ef428195408411ed81473d
Brian Hackett 2017-02-11 05:27:32 -07:00
parent dd8879be51
commit 539e47cd96
86 changed files with 340 additions and 332 deletions


@ -697,7 +697,7 @@ ExposeScriptToActiveJS(JSScript* script)
static MOZ_ALWAYS_INLINE void
MarkStringAsLive(Zone* zone, JSString* string)
{
JSRuntime* rt = JS::shadow::Zone::asShadowZone(zone)->runtimeFromMainThread();
JSRuntime* rt = JS::shadow::Zone::asShadowZone(zone)->runtimeFromActiveCooperatingThread();
js::gc::MarkGCThingAsLive(rt, GCCellPtr(string));
}
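For context: ExposeScriptToActiveJS, visible in the hunk header above, is the entry point an embedder calls when a JSScript recovered from a weak reference or cache has to be treated as live again, and MarkStringAsLive is the analogous step for strings. A minimal, hypothetical caller is sketched below; MaybeReuseCachedScript is invented for illustration, and the unqualified call assumes the namespace of this header.

// Sketch only: re-expose a script pulled out of a weak cache so the GC
// treats it as reachable from the active cooperating thread again.
static void
MaybeReuseCachedScript(JSScript* script)
{
    if (script)
        ExposeScriptToActiveJS(script);
}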


@ -124,7 +124,7 @@ struct Zone
return barrierTracer_;
}
JSRuntime* runtimeFromMainThread() const {
JSRuntime* runtimeFromActiveCooperatingThread() const {
MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
return runtime_;
}


@ -55,11 +55,11 @@ namespace oom {
* To make testing OOM in certain helper threads more effective,
* allow restricting the OOM testing to a certain helper thread
* type. This allows us to fail e.g. in off-thread script parsing
* without causing an OOM in the main thread first.
* without causing an OOM in the active thread first.
*/
enum ThreadType {
THREAD_TYPE_NONE = 0, // 0
THREAD_TYPE_MAIN, // 1
THREAD_TYPE_COOPERATING, // 1
THREAD_TYPE_WASM, // 2
THREAD_TYPE_ION, // 3
THREAD_TYPE_PARSE, // 4


@ -1045,7 +1045,7 @@ js::intl_Collator(JSContext* cx, unsigned argc, Value* vp)
void
CollatorObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
const Value& slot = obj->as<CollatorObject>().getReservedSlot(CollatorObject::UCOLLATOR_SLOT);
if (UCollator* coll = static_cast<UCollator*>(slot.toPrivate()))
@ -1478,7 +1478,7 @@ js::intl_NumberFormat(JSContext* cx, unsigned argc, Value* vp)
void
NumberFormatObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
const Value& slot =
obj->as<NumberFormatObject>().getReservedSlot(NumberFormatObject::UNUMBER_FORMAT_SLOT);
@ -2417,7 +2417,7 @@ js::intl_DateTimeFormat(JSContext* cx, unsigned argc, Value* vp)
void
DateTimeFormatObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
const Value& slot =
obj->as<DateTimeFormatObject>().getReservedSlot(DateTimeFormatObject::UDATE_FORMAT_SLOT);
@ -3458,7 +3458,7 @@ PluralRules(JSContext* cx, unsigned argc, Value* vp)
void
PluralRulesObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
const Value& slot =
obj->as<PluralRulesObject>().getReservedSlot(PluralRulesObject::UPLURAL_RULES_SLOT);


@ -209,7 +209,7 @@ MapIteratorObject::create(JSContext* cx, HandleObject mapobj, ValueMap* data,
void
MapIteratorObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
fop->delete_(MapIteratorObjectRange(static_cast<NativeObject*>(obj)));
}
@ -548,7 +548,7 @@ MapObject::create(JSContext* cx, HandleObject proto /* = nullptr */)
void
MapObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
if (ValueMap* map = obj->as<MapObject>().getData())
fop->delete_(map);
}
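All of the finalizer changes in this patch follow the same two-assert pattern: finalizers that must run on the runtime's owning thread assert fop->onActiveCooperatingThread() (formerly onMainThread()), while finalizers that are safe to run during background sweeping assert fop->maybeOnHelperThread() (formerly maybeOffMainThread()), as later hunks for ModuleObject, WeakMap and ErrorObject show. A minimal sketch of both variants, with the hypothetical MyThing/MyData names standing in for a real class:

// Sketch only: MyData and both finalizers are illustrative, not from the patch.
struct MyData { /* embedder-owned state */ };

static void
MyThing_finalize(js::FreeOp* fop, JSObject* obj)
{
    // Foreground finalization: must run on the active cooperating thread.
    MOZ_ASSERT(fop->onActiveCooperatingThread());
    fop->delete_(static_cast<MyData*>(JS_GetPrivate(obj)));
}

static void
MyThing_finalizeBackground(js::FreeOp* fop, JSObject* obj)
{
    // Background-capable finalization: may also run on a GC helper thread.
    MOZ_ASSERT(fop->maybeOnHelperThread());
    fop->delete_(static_cast<MyData*>(JS_GetPrivate(obj)));
}
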
@ -946,7 +946,7 @@ SetIteratorObject::create(JSContext* cx, HandleObject setobj, ValueSet* data,
void
SetIteratorObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
fop->delete_(SetIteratorObjectRange(static_cast<NativeObject*>(obj)));
}
@ -1140,7 +1140,7 @@ SetObject::trace(JSTracer* trc, JSObject* obj)
void
SetObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
SetObject* setobj = static_cast<SetObject*>(obj);
if (ValueSet* set = setobj->getData())
fop->delete_(set);


@ -608,7 +608,7 @@ ModuleObject::create(JSContext* cx)
/* static */ void
ModuleObject::finalize(js::FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
ModuleObject* self = &obj->as<ModuleObject>();
if (self->hasImportBindings())
fop->delete_(&self->importBindings());


@ -1343,7 +1343,7 @@ SetupOOMFailure(JSContext* cx, bool failAlways, unsigned argc, Value* vp)
return false;
}
uint32_t targetThread = js::oom::THREAD_TYPE_MAIN;
uint32_t targetThread = js::oom::THREAD_TYPE_COOPERATING;
if (args.length() > 1 && !ToUint32(cx, args[1], &targetThread))
return false;
@ -1417,13 +1417,13 @@ OOMTest(JSContext* cx, unsigned argc, Value* vp)
bool verbose = EnvVarIsDefined("OOM_VERBOSE");
unsigned threadStart = oom::THREAD_TYPE_MAIN;
unsigned threadStart = oom::THREAD_TYPE_COOPERATING;
unsigned threadEnd = oom::THREAD_TYPE_MAX;
// Test a single thread type if specified by the OOM_THREAD environment variable.
int threadOption = 0;
if (EnvVarAsInt("OOM_THREAD", &threadOption)) {
if (threadOption < oom::THREAD_TYPE_MAIN || threadOption > oom::THREAD_TYPE_MAX) {
if (threadOption < oom::THREAD_TYPE_COOPERATING || threadOption > oom::THREAD_TYPE_MAX) {
JS_ReportErrorASCII(cx, "OOM_THREAD value out of range.");
return false;
}
@ -4594,18 +4594,16 @@ gc::ZealModeHelpText),
JS_FN_HELP("helperThreadCount", HelperThreadCount, 0, 0,
"helperThreadCount()",
" Returns the number of helper threads available for off-main-thread tasks."),
" Returns the number of helper threads available for off-thread tasks."),
#ifdef JS_TRACE_LOGGING
JS_FN_HELP("startTraceLogger", EnableTraceLogger, 0, 0,
"startTraceLogger()",
" Start logging the mainThread.\n"
" Note: tracelogging starts automatically. Disable it by setting environment variable\n"
" TLOPTIONS=disableMainThread"),
" Start logging this thread.\n"),
JS_FN_HELP("stopTraceLogger", DisableTraceLogger, 0, 0,
"stopTraceLogger()",
" Stop logging the mainThread."),
" Stop logging this thread."),
#endif
JS_FN_HELP("reportOutOfMemory", ReportOutOfMemory, 0, 0,


@ -233,7 +233,7 @@ WeakMap_trace(JSTracer* trc, JSObject* obj)
static void
WeakMap_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
if (ObjectValueMap* map = obj->as<WeakMapObject>().getMap()) {
#ifdef DEBUG
map->~ObjectValueMap();


@ -518,12 +518,12 @@ frontend::CreateScriptSourceObject(JSContext* cx, const ReadOnlyCompileOptions&
}
// CompileScript independently returns the ScriptSourceObject (SSO) for the
// compile. This is used by off-main-thread script compilation (OMT-SC).
// compile. This is used by off-thread script compilation (OT-SC).
//
// OMT-SC cannot initialize the SSO when it is first constructed because the
// OT-SC cannot initialize the SSO when it is first constructed because the
// SSO is allocated initially in a separate compartment.
//
// After OMT-SC, the separate compartment is merged with the main compartment,
// After OT-SC, the separate compartment is merged with the main compartment,
// at which point the JSScripts created become observable by the debugger via
// memory-space scanning.
//
@ -610,7 +610,7 @@ frontend::CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
return nullptr;
// This happens in GlobalHelperThreadState::finishModuleParseTask() when a
// module is compiled off main thread.
// module is compiled off thread.
if (!ModuleObject::Freeze(cx, module))
return nullptr;


@ -3489,7 +3489,7 @@ void
BytecodeEmitter::tellDebuggerAboutCompiledScript(JSContext* cx)
{
// Note: when parsing off thread the resulting scripts need to be handed to
// the debugger after rejoining to the main thread.
// the debugger after rejoining to the active thread.
if (cx->helperThread())
return;


@ -3129,7 +3129,7 @@ Parser<ParseHandler>::functionDefinition(Node pn, InHandling inHandling,
RootedObject proto(context);
if (generatorKind == StarGenerator) {
// If we are off the main thread, the generator meta-objects have
// If we are off thread, the generator meta-objects have
// already been created by js::StartOffThreadParseTask, so cx will not
// be necessary.
JSContext* cx = context->helperThread() ? nullptr : context;
@ -7984,7 +7984,7 @@ Parser<ParseHandler>::generatorComprehensionLambda(unsigned begin)
ParseContext* outerpc = pc;
// If we are off the main thread, the generator meta-objects have
// If we are off thread, the generator meta-objects have
// already been created by js::StartOffThreadParseScript, so cx will not
// be necessary.
RootedObject proto(context);


@ -648,8 +648,9 @@ TokenStream::reportCompileErrorNumberVA(uint32_t offset, unsigned flags, unsigne
warning = false;
}
// On the main thread, report the error immediately. When compiling off
// thread, save the error so that the main thread can report it later.
// On the active thread, report the error immediately. When compiling off
// thread, save the error so that the thread finishing the parse can report
// it later.
CompileError tempErr;
CompileError* tempErrPtr = &tempErr;
if (cx->helperThread() && !cx->addPendingCompileError(&tempErrPtr))


@ -39,7 +39,7 @@ js::Allocate(JSContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap he
MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());
// Off-main-thread alloc cannot trigger GC or make runtime assertions.
// Off-thread alloc cannot trigger GC or make runtime assertions.
if (cx->helperThread()) {
JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
if (MOZ_UNLIKELY(allowGC && !obj))
@ -284,15 +284,15 @@ GCRuntime::refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind, size_
cx->arenas()->checkEmptyFreeList(thingKind);
if (!cx->helperThread())
return refillFreeListFromMainThread(cx, thingKind, thingSize);
return refillFreeListFromActiveCooperatingThread(cx, thingKind, thingSize);
return refillFreeListOffMainThread(cx, thingKind);
return refillFreeListFromHelperThread(cx, thingKind);
}
/* static */ TenuredCell*
GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
GCRuntime::refillFreeListFromActiveCooperatingThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
{
// It should not be possible to allocate on the main thread while we are
// It should not be possible to allocate on the active thread while we are
// inside a GC.
Zone *zone = cx->zone();
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");
@ -302,9 +302,9 @@ GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size
}
/* static */ TenuredCell*
GCRuntime::refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind)
GCRuntime::refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind)
{
// A GC may be happening on the main thread, but zones used by off thread
// A GC may be happening on the active thread, but zones used by off thread
// tasks are never collected.
Zone* zone = cx->zone();
MOZ_ASSERT(!zone->wasGCStarted());
@ -321,7 +321,7 @@ GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)
*/
zone->arenas.checkEmptyFreeList(thingKind);
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromActiveCooperatingThread();
MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
MOZ_ASSERT_IF(!JS::CurrentThreadIsHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());


@ -22,9 +22,9 @@
namespace js {
bool
RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromActiveCooperatingThread()));
return JS::CurrentThreadIsHeapMajorCollecting();
}
@ -147,9 +147,9 @@ MovableCellHasher<T>::hash(const Lookup& l)
return 0;
// We have to access the zone from-any-thread here: a worker thread may be
// cloning a self-hosted object from the main-thread-runtime-owned self-
// hosting zone into the off-main-thread runtime. The zone's uid lock will
// protect against multiple workers doing this simultaneously.
// cloning a self-hosted object from the main runtime's self-hosting zone
// into another runtime. The zone's uid lock will protect against multiple
// workers doing this simultaneously.
MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
l->zoneFromAnyThread()->isSelfHostingZone());


@ -72,8 +72,8 @@ class ChunkPool
};
};
// Performs extra allocation off the main thread so that when memory is
// required on the main thread it will already be available and waiting.
// Performs extra allocation off thread so that when memory is required on the
// active thread it will already be available and waiting.
class BackgroundAllocTask : public GCParallelTask
{
// Guarded by the GC lock.
@ -867,9 +867,9 @@ class GCRuntime
static void checkIncrementalZoneState(JSContext* cx, T* t);
static TenuredCell* refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind);
static TenuredCell* refillFreeListFromActiveCooperatingThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListFromHelperThread(JSContext* cx, AllocKind thingKind);
/*
* Return the list of chunks that can be released outside the GC lock.
@ -1276,7 +1276,7 @@ class GCRuntime
ActiveThreadData<bool> arenasEmptyAtShutdown;
#endif
/* Synchronize GC heap access among GC helper threads and main threads. */
/* Synchronize GC heap access among GC helper threads and active threads. */
friend class js::AutoLockGC;
js::Mutex lock;


@ -32,19 +32,13 @@
struct JSRuntime;
namespace JS {
namespace shadow {
struct Runtime;
} // namespace shadow
} // namespace JS
namespace js {
class AutoLockGC;
class FreeOp;
extern bool
RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone);
RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone);
#ifdef DEBUG
@ -256,13 +250,11 @@ struct Cell
MOZ_ALWAYS_INLINE const TenuredCell& asTenured() const;
MOZ_ALWAYS_INLINE TenuredCell& asTenured();
inline JSRuntime* runtimeFromMainThread() const;
inline JS::shadow::Runtime* shadowRuntimeFromMainThread() const;
inline JSRuntime* runtimeFromActiveCooperatingThread() const;
// Note: Unrestricted access to the runtime of a GC thing from an arbitrary
// thread can easily lead to races. Use this method very carefully.
inline JSRuntime* runtimeFromAnyThread() const;
inline JS::shadow::Runtime* shadowRuntimeFromAnyThread() const;
// May be overridden by GC thing kinds that have a compartment pointer.
inline JSCompartment* maybeCompartment() const { return nullptr; }
@ -1070,7 +1062,7 @@ class HeapUsage
* The approximate number of bytes in use on the GC heap, to the nearest
* ArenaSize. This does not include any malloc data. It also does not
* include not-actively-used addresses that are still reserved at the OS
* level for GC usage. It is atomic because it is updated by both the main
* level for GC usage. It is atomic because it is updated by both the active
* and GC helper threads.
*/
mozilla::Atomic<size_t, mozilla::ReleaseAcquire> gcBytes_;
@ -1141,31 +1133,19 @@ Cell::asTenured()
}
inline JSRuntime*
Cell::runtimeFromMainThread() const
Cell::runtimeFromActiveCooperatingThread() const
{
JSRuntime* rt = chunk()->trailer.runtime;
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
return rt;
}
inline JS::shadow::Runtime*
Cell::shadowRuntimeFromMainThread() const
{
return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromMainThread());
}
inline JSRuntime*
Cell::runtimeFromAnyThread() const
{
return chunk()->trailer.runtime;
}
inline JS::shadow::Runtime*
Cell::shadowRuntimeFromAnyThread() const
{
return reinterpret_cast<JS::shadow::Runtime*>(runtimeFromAnyThread());
}
inline uintptr_t
Cell::address() const
{
@ -1305,22 +1285,22 @@ TenuredCell::readBarrier(TenuredCell* thing)
// at the moment this can happen e.g. when rekeying tables containing
// read-barriered GC things after a moving GC.
//
// TODO: Fix this and assert we're not collecting if we're on the main
// TODO: Fix this and assert we're not collecting if we're on the active
// thread.
JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
if (shadowZone->needsIncrementalBarrier()) {
// Barriers are only enabled on the main thread and are disabled while collecting.
MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
// Barriers are only enabled on the active thread and are disabled while collecting.
MOZ_ASSERT(!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone));
Cell* tmp = thing;
TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "read barrier");
MOZ_ASSERT(tmp == thing);
}
if (thing->isMarked(GRAY)) {
// There shouldn't be anything marked grey unless we're on the main thread.
// There shouldn't be anything marked grey unless we're on the active thread.
MOZ_ASSERT(CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread()));
if (!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone))
if (!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone))
UnmarkGrayCellRecursively(thing, thing->getTraceKind());
}
}
@ -1340,10 +1320,10 @@ TenuredCell::writeBarrierPre(TenuredCell* thing)
// those on the Atoms Zone. Normally, we never enter a parse task when
// collecting in the atoms zone, so will filter out atoms below.
// Unfortunately, if we try that when verifying pre-barriers, we'd never be
// able to handle OMT parse tasks at all as we switch on the verifier any
// time we're not doing GC. This would cause us to deadlock, as OMT parsing
// able to handle off thread parse tasks at all as we switch on the verifier any
// time we're not doing GC. This would cause us to deadlock, as off thread parsing
// is meant to resume after GC work completes. Instead we filter out any
// OMT barriers that reach us and assert that they would normally not be
// off thread barriers that reach us and assert that they would normally not be
// possible.
if (!CurrentThreadCanAccessRuntime(thing->runtimeFromAnyThread())) {
AssertSafeToSkipBarrier(thing);
@ -1353,7 +1333,7 @@ TenuredCell::writeBarrierPre(TenuredCell* thing)
JS::shadow::Zone* shadowZone = thing->shadowZoneFromAnyThread();
if (shadowZone->needsIncrementalBarrier()) {
MOZ_ASSERT(!RuntimeFromMainThreadIsHeapMajorCollecting(shadowZone));
MOZ_ASSERT(!RuntimeFromActiveCooperatingThreadIsHeapMajorCollecting(shadowZone));
Cell* tmp = thing;
TraceManuallyBarrieredGenericPointerEdge(shadowZone->barrierTracer(), &tmp, "pre barrier");
MOZ_ASSERT(tmp == thing);


@ -124,7 +124,7 @@ js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
void
js::IterateGrayObjectsUnderCC(Zone* zone, GCThingCallback cellCallback, void* data)
{
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromActiveCooperatingThread();
MOZ_ASSERT(JS::CurrentThreadIsHeapCycleCollecting());
MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
::IterateGrayObjects(zone, cellCallback, data);


@ -253,7 +253,7 @@ js::CheckTracedThing(JSTracer* trc, T* thing)
* either free or uninitialized in which case we check the free list.
*
* Further complications are that background sweeping may be running and
* concurrently modifiying the free list and that tracing is done off main
* concurrently modifying the free list and that tracing is done off
* thread during compacting GC and reading the contents of the thing by
* IsThingPoisoned would be racy in this case.
*/
@ -3000,7 +3000,7 @@ TypedUnmarkGrayCellRecursively(T* t)
{
MOZ_ASSERT(t);
JSRuntime* rt = t->runtimeFromMainThread();
JSRuntime* rt = t->runtimeFromActiveCooperatingThread();
MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());


@ -801,7 +801,7 @@ js::Nursery::freeMallocedBuffers()
}
if (!started)
freeMallocedBuffersTask->runFromMainThread(zoneGroup()->runtime);
freeMallocedBuffersTask->runFromActiveCooperatingThread(zoneGroup()->runtime);
MOZ_ASSERT(mallocedBuffers.empty());
}


@ -477,7 +477,7 @@ struct MOZ_RAII AutoPhase
~AutoPhase() {
if (enabled) {
// Bug 1309651 - we only record mainthread time (including time
// Bug 1309651 - we only record active thread time (including time
// spent waiting to join with helper threads), but should start
// recording total work on helper threads sometime by calling
// endParallelPhase here if task is nonnull.


@ -105,7 +105,7 @@ Zone::setNeedsIncrementalBarrier(bool needs, ShouldUpdateJit updateJit)
jitUsingBarriers_ = needs;
}
MOZ_ASSERT_IF(needs && isAtomsZone(), !runtimeFromMainThread()->exclusiveThreadsPresent());
MOZ_ASSERT_IF(needs && isAtomsZone(), !runtimeFromActiveCooperatingThread()->exclusiveThreadsPresent());
MOZ_ASSERT_IF(needs, canCollect());
needsIncrementalBarrier_ = needs;
}
@ -274,8 +274,8 @@ Zone::discardJitCode(FreeOp* fop, bool discardBaselineCode)
/*
* Free all control flow graphs that are cached on BaselineScripts.
* Assuming this happens on the mainthread and all control flow
* graph reads happen on the mainthread, this is save.
* Assuming this happens on the active thread and all control flow
* graph reads happen on the active thread, this is safe.
*/
jitZone()->cfgSpace()->lifoAlloc().freeAll();
}
@ -295,7 +295,7 @@ Zone::gcNumber()
{
// Zones in use by exclusive threads are not collected, and threads using
// them cannot access the main runtime's gcNumber without racing.
return usedByExclusiveThread ? 0 : runtimeFromMainThread()->gc.gcNumber();
return usedByExclusiveThread ? 0 : runtimeFromActiveCooperatingThread()->gc.gcNumber();
}
js::jit::JitZone*


@ -190,7 +190,7 @@ struct Zone : public JS::shadow::Zone,
void* reallocPtr = nullptr) {
if (!js::CurrentThreadCanAccessRuntime(runtime_))
return nullptr;
return runtimeFromMainThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
return runtimeFromActiveCooperatingThread()->onOutOfMemory(allocFunc, nbytes, reallocPtr);
}
void reportAllocationOverflow() { js::ReportAllocationOverflow(nullptr); }
@ -226,7 +226,7 @@ struct Zone : public JS::shadow::Zone,
}
bool isCollecting() const {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromActiveCooperatingThread()));
return isCollectingFromAnyThread();
}
@ -267,7 +267,7 @@ struct Zone : public JS::shadow::Zone,
bool compileBarriers() const { return compileBarriers(needsIncrementalBarrier()); }
bool compileBarriers(bool needsIncrementalBarrier) const {
return needsIncrementalBarrier ||
runtimeFromMainThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
runtimeFromActiveCooperatingThread()->hasZealMode(js::gc::ZealMode::VerifierPre);
}
enum ShouldUpdateJit { DontUpdateJit, UpdateJit };
@ -539,7 +539,7 @@ struct Zone : public JS::shadow::Zone,
void transferUniqueId(js::gc::Cell* tgt, js::gc::Cell* src) {
MOZ_ASSERT(src != tgt);
MOZ_ASSERT(!IsInsideNursery(tgt));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromActiveCooperatingThread()));
MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
uniqueIds().rekeyIfMoved(src, tgt);
}
@ -557,7 +557,7 @@ struct Zone : public JS::shadow::Zone,
for (js::gc::UniqueIdMap::Enum e(source->uniqueIds()); !e.empty(); e.popFront()) {
MOZ_ASSERT(!uniqueIds().has(e.front().key()));
if (!uniqueIds().put(e.front().key(), e.front().value()))
oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
oomUnsafe.crash("failed to transfer unique ids from off-thread");
}
source->uniqueIds().clear();
}


@ -1015,7 +1015,7 @@ CanOptimizeDenseOrUnboxedArraySetElem(JSObject* obj, uint32_t index,
return false;
// Unboxed arrays need to be able to emit floating point code.
if (obj->is<UnboxedArrayObject>() && !obj->runtimeFromMainThread()->jitSupportsFloatingPoint)
if (obj->is<UnboxedArrayObject>() && !obj->runtimeFromActiveCooperatingThread()->jitSupportsFloatingPoint)
return false;
Shape* shape = obj->maybeShape();


@ -536,7 +536,7 @@ class CompileInfo
AnalysisMode analysisMode_;
// Whether a script needs an arguments object is unstable over compilation
// since the arguments optimization could be marked as failed on the main
// since the arguments optimization could be marked as failed on the active
// thread, so cache a value here and use it throughout for consistency.
bool scriptNeedsArgsObj_;


@ -251,7 +251,7 @@ const GlobalObject*
CompileCompartment::maybeGlobal()
{
// This uses unsafeUnbarrieredMaybeGlobal() so as not to trigger the read
// barrier on the global from off the main thread. This is safe because we
// barrier on the global from off thread. This is safe because we
// abort Ion compilation when we GC.
return compartment()->unsafeUnbarrieredMaybeGlobal();
}


@ -16,7 +16,7 @@ class JitRuntime;
// During Ion compilation we need access to various bits of the current
// compartment, runtime and so forth. However, since compilation can run off
// thread while the main thread is actively mutating the VM, this access needs
// thread while the active thread is mutating the VM, this access needs
// to be restricted. The classes below give the compiler an interface to access
// all necessary information in a threadsafe fashion.


@ -374,7 +374,7 @@ void
JitZoneGroup::patchIonBackedges(JSContext* cx, BackedgeTarget target)
{
if (target == BackedgeLoopHeader) {
// We must be on the main thread. The caller must use
// We must be on the active thread. The caller must use
// AutoPreventBackedgePatching to ensure we don't reenter.
MOZ_ASSERT(cx->runtime()->jitRuntime()->preventBackedgePatching());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
@ -1387,7 +1387,7 @@ IonScript::unlinkFromRuntime(FreeOp* fop)
void
jit::ToggleBarriers(JS::Zone* zone, bool needs)
{
JSRuntime* rt = zone->runtimeFromMainThread();
JSRuntime* rt = zone->runtimeFromActiveCooperatingThread();
if (!rt->hasJitRuntime())
return;
@ -2430,8 +2430,8 @@ CheckScriptSize(JSContext* cx, JSScript* script)
uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE ||
numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
if (script->length() > MAX_ACTIVE_THREAD_SCRIPT_SIZE ||
numLocalsAndArgs > MAX_ACTIVE_THREAD_LOCALS_AND_ARGS)
{
if (!OffThreadCompilationAvailable(cx)) {
JitSpew(JitSpew_IonAbort, "Script too large (%" PRIuSIZE " bytes) (%u locals/args)",
@ -2549,9 +2549,9 @@ bool
jit::OffThreadCompilationAvailable(JSContext* cx)
{
// Even if off thread compilation is enabled, compilation must still occur
// on the main thread in some cases.
// on the active thread in some cases.
//
// Require cpuCount > 1 so that Ion compilation jobs and main-thread
// Require cpuCount > 1 so that Ion compilation jobs and active-thread
// execution are not competing for the same resources.
return cx->runtime()->canUseOffthreadIonCompilation()
&& HelperThreadState().cpuCount > 1


@ -75,7 +75,7 @@ class JitContext
JitContext();
~JitContext();
// Running context when executing on the main thread. Not available during
// Running context when executing on the active thread. Not available during
// compilation.
JSContext* cx;


@ -7009,7 +7009,7 @@ static size_t
NumFixedSlots(JSObject* object)
{
// Note: we can't use object->numFixedSlots() here, as this will read the
// shape and can race with the main thread if we are building off thread.
// shape and can race with the active thread if we are building off thread.
// The allocation kind and object class (which goes through the type) can
// be read freely, however.
gc::AllocKind kind = object->asTenured().getAllocKind();
@ -13076,7 +13076,7 @@ JSObject*
IonBuilder::checkNurseryObject(JSObject* obj)
{
// If we try to use any nursery pointers during compilation, make sure that
// the main thread will cancel this compilation before performing a minor
// the active thread will cancel this compilation before performing a minor
// GC. All constants used during compilation should either go through this
// function or should come from a type set (which has a similar barrier).
if (obj && IsInsideNursery(obj)) {


@ -40,8 +40,8 @@ OptimizationInfo::initNormalOptimizationInfo()
registerAllocator_ = RegisterAllocator_Backtracking;
inlineMaxBytecodePerCallSiteMainThread_ = 550;
inlineMaxBytecodePerCallSiteOffThread_ = 1100;
inlineMaxBytecodePerCallSiteActiveCooperatingThread_ = 550;
inlineMaxBytecodePerCallSiteHelperThread_ = 1100;
inlineMaxCalleeInlinedBytecodeLength_ = 3550;
inlineMaxTotalBytecodeLength_ = 85000;
inliningMaxCallerBytecodeLength_ = 1600;
@ -93,17 +93,17 @@ OptimizationInfo::compilerWarmUpThreshold(JSScript* script, jsbytecode* pc) cons
warmUpThreshold = JitOptions.forcedDefaultIonSmallFunctionWarmUpThreshold.ref();
}
// If the script is too large to compile on the main thread, we can still
// If the script is too large to compile on the active thread, we can still
// compile it off thread. In these cases, increase the warm-up counter
// threshold to improve the compilation's type information and hopefully
// avoid later recompilation.
if (script->length() > MAX_MAIN_THREAD_SCRIPT_SIZE)
warmUpThreshold *= (script->length() / (double) MAX_MAIN_THREAD_SCRIPT_SIZE);
if (script->length() > MAX_ACTIVE_THREAD_SCRIPT_SIZE)
warmUpThreshold *= (script->length() / (double) MAX_ACTIVE_THREAD_SCRIPT_SIZE);
uint32_t numLocalsAndArgs = NumLocalsAndArgs(script);
if (numLocalsAndArgs > MAX_MAIN_THREAD_LOCALS_AND_ARGS)
warmUpThreshold *= (numLocalsAndArgs / (double) MAX_MAIN_THREAD_LOCALS_AND_ARGS);
if (numLocalsAndArgs > MAX_ACTIVE_THREAD_LOCALS_AND_ARGS)
warmUpThreshold *= (numLocalsAndArgs / (double) MAX_ACTIVE_THREAD_LOCALS_AND_ARGS);
if (!pc || JitOptions.eagerCompilation)
return warmUpThreshold;
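To make the scaling above concrete, using the MAX_ACTIVE_THREAD_SCRIPT_SIZE (2 * 1000) and MAX_ACTIVE_THREAD_LOCALS_AND_ARGS (256) constants defined later in this patch: a 6,000-byte script gets its warm-up threshold multiplied by 6000 / 2000 = 3, and a script with 512 locals and arguments gets a further factor of 512 / 256 = 2, so such a script would need six times the usual warm-up count before an off-thread Ion compile is attempted.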


@ -98,9 +98,9 @@ class OptimizationInfo
// The maximum total bytecode size of an inline call site. We use a lower
// value if off-thread compilation is not available, to avoid stalling the
// main thread.
uint32_t inlineMaxBytecodePerCallSiteOffThread_;
uint32_t inlineMaxBytecodePerCallSiteMainThread_;
// active thread.
uint32_t inlineMaxBytecodePerCallSiteHelperThread_;
uint32_t inlineMaxBytecodePerCallSiteActiveCooperatingThread_;
// The maximum value we allow for baselineScript->inlinedBytecodeLength_
// when inlining.
@ -248,8 +248,8 @@ class OptimizationInfo
uint32_t inlineMaxBytecodePerCallSite(bool offThread) const {
return (offThread || !JitOptions.limitScriptSize)
? inlineMaxBytecodePerCallSiteOffThread_
: inlineMaxBytecodePerCallSiteMainThread_;
? inlineMaxBytecodePerCallSiteHelperThread_
: inlineMaxBytecodePerCallSiteActiveCooperatingThread_;
}
uint16_t inlineMaxCalleeInlinedBytecodeLength() const {


@ -330,7 +330,7 @@ class JitZoneGroup
ZoneGroupData<BackedgeTarget> backedgeTarget_;
// List of all backedges in all Ion code. The backedge edge list is accessed
// asynchronously when the main thread is paused and preventBackedgePatching_
// asynchronously when the active thread is paused and preventBackedgePatching_
// is false. Thus, the list must only be mutated while preventBackedgePatching_
// is true.
ZoneGroupData<InlineList<PatchableBackedge>> backedgeList_;
@ -501,7 +501,7 @@ class JitCompartment
}
// This function is used to call the read barrier, to mark the SIMD template
// type as used. This function can only be called from the main thread.
// type as used. This function can only be called from the active thread.
void registerSimdTemplateObjectFor(SimdType type) {
ReadBarrieredObject& tpl = simdTemplateObjects_[type];
MOZ_ASSERT(tpl.unbarrieredGet());
@ -650,7 +650,7 @@ class MOZ_STACK_CLASS AutoWritableJitCode
: AutoWritableJitCode(TlsContext.get()->runtime(), addr, size)
{}
explicit AutoWritableJitCode(JitCode* code)
: AutoWritableJitCode(code->runtimeFromMainThread(), code->raw(), code->bufferSize())
: AutoWritableJitCode(code->runtimeFromActiveCooperatingThread(), code->raw(), code->bufferSize())
{}
~AutoWritableJitCode() {
if (!ExecutableAllocator::makeExecutable(addr_, size_))


@ -2760,7 +2760,7 @@ JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()
JSRuntime* rt = TlsContext.get()->runtime();
// Don't verify on non-main-thread.
// Don't verify while off thread.
if (!CurrentThreadCanAccessRuntime(rt))
return true;


@ -16,9 +16,9 @@ namespace js {
namespace jit {
// Longer scripts can only be compiled off thread, as these compilations
// can be expensive and stall the main thread for too long.
static const uint32_t MAX_MAIN_THREAD_SCRIPT_SIZE = 2 * 1000;
static const uint32_t MAX_MAIN_THREAD_LOCALS_AND_ARGS = 256;
// can be expensive and stall the active thread for too long.
static const uint32_t MAX_ACTIVE_THREAD_SCRIPT_SIZE = 2 * 1000;
static const uint32_t MAX_ACTIVE_THREAD_LOCALS_AND_ARGS = 256;
// Possible register allocators which may be used.
enum IonRegisterAllocator {


@ -211,7 +211,7 @@ IonSpewer::beginFunction()
{
// If we are doing a synchronous logging then we spew everything as we go,
// as this is useful in case of failure during the compilation. On the other
// hand, it is recommended to disabled off main thread compilation.
// hand, it is recommended to disable off thread compilation.
if (!getAsyncLogging() && !firstFunction_) {
LockGuard<Mutex> guard(outputLock_);
jsonOutput_.put(","); // separate functions


@ -454,8 +454,7 @@ JitcodeGlobalTable::lookupForSamplerInfallible(void* ptr, JSRuntime* rt, uint32_
// barrier is not needed. Any JS frames sampled during the sweep phase of
// the GC must be on stack, and on-stack frames must already be marked at
// the beginning of the sweep phase. It's not possible to assert this here
// as we may not be running on the main thread when called from the gecko
// profiler.
// as we may not be on the active thread when called from the gecko profiler.
return *entry;
}


@ -892,12 +892,12 @@ bool
jit::IonCompilationCanUseNurseryPointers()
{
// If we are doing backend compilation, which could occur on a helper
// thread but might actually be on the main thread, check the flag set on
// thread but might actually be on the active thread, check the flag set on
// the JSContext by AutoEnterIonCompilation.
if (CurrentThreadIsIonCompiling())
return !CurrentThreadIsIonCompilingSafeForMinorGC();
// Otherwise, we must be on the main thread during MIR construction. The
// Otherwise, we must be on the active thread during MIR construction. The
// store buffer must have been notified that minor GCs must cancel pending
// or in progress Ion compilations.
JSContext* cx = TlsContext.get();


@ -3986,7 +3986,7 @@ class MInitElemGetterSetter
};
// WrappedFunction wraps a JSFunction so it can safely be used off-thread.
// In particular, a function's flags can be modified on the main thread as
// In particular, a function's flags can be modified on the active thread as
// functions are relazified and delazified, so we must be careful not to access
// these flags off-thread.
class WrappedFunction : public TempObject
@ -8351,7 +8351,7 @@ struct LambdaFunctionInfo
{
// The functions used in lambdas are the canonical original function in
// the script, and are immutable except for delazification. Record this
// information while still on the main thread to avoid races.
// information while still on the active thread to avoid races.
CompilerFunction fun;
uint16_t flags;
uint16_t nargs;


@ -80,7 +80,7 @@ class MIRGenerator
abortFmt(AbortReason r, const char* message, va_list ap);
// Collect the evaluation result of phases after IonBuilder, such that
// off-main-thread compilation can report what error got encountered.
// off-thread compilation can report what error got encountered.
void setOffThreadStatus(AbortReasonOr<Ok> result) {
MOZ_ASSERT(offThreadStatus_.isOk());
offThreadStatus_ = result;
@ -113,7 +113,7 @@ class MIRGenerator
safeForMinorGC_ = false;
}
// Whether the main thread is trying to cancel this build.
// Whether the active thread is trying to cancel this build.
bool shouldCancel(const char* why) {
maybePause();
return cancelBuild_;


@ -163,7 +163,7 @@ MacroAssembler::guardObjectType(Register obj, const TypeSet* types,
MOZ_ASSERT_IF(types->getObjectCount() > 0, scratch != InvalidReg);
// Note: this method elides read barriers on values read from type sets, as
// this may be called off the main thread during Ion compilation. This is
// this may be called off thread during Ion compilation. This is
// safe to do as the final JitCode object will be allocated during the
// incremental GC (or the compilation canceled before we start sweeping),
// see CodeGenerator::link. Other callers should use TypeSet::readBarrier


@ -1190,7 +1190,7 @@ AssertValidObjectPtr(JSContext* cx, JSObject* obj)
// Check what we can, so that we'll hopefully assert/crash if we get a
// bogus object (pointer).
MOZ_ASSERT(obj->compartment() == cx->compartment());
MOZ_ASSERT(obj->runtimeFromMainThread() == cx->runtime());
MOZ_ASSERT(obj->runtimeFromActiveCooperatingThread() == cx->runtime());
MOZ_ASSERT_IF(!obj->hasLazyGroup() && obj->maybeShape(),
obj->group()->clasp() == obj->maybeShape()->getObjectClass());


@ -89,7 +89,7 @@ BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
}
// Compute the snapshot offset from the bailout ID.
JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
JSRuntime* rt = activation->compartment()->runtimeFromActiveCooperatingThread();
JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
uintptr_t tableOffset = bailout->tableOffset();
uintptr_t tableStart = reinterpret_cast<uintptr_t>(Assembler::BailoutTableStart(code->raw()));


@ -32,7 +32,7 @@ BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
}
// Compute the snapshot offset from the bailout ID.
JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
JSRuntime* rt = activation->compartment()->runtimeFromActiveCooperatingThread();
JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
uintptr_t tableOffset = bailout->tableOffset();
uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());


@ -85,7 +85,7 @@ BailoutFrameInfo::BailoutFrameInfo(const JitActivationIterator& activations,
}
// Compute the snapshot offset from the bailout ID.
JSRuntime* rt = activation->compartment()->runtimeFromMainThread();
JSRuntime* rt = activation->compartment()->runtimeFromActiveCooperatingThread();
JitCode* code = rt->jitRuntime()->getBailoutTable(bailout->frameClass());
uintptr_t tableOffset = bailout->tableOffset();
uintptr_t tableStart = reinterpret_cast<uintptr_t>(code->raw());


@ -378,7 +378,7 @@ BEGIN_TEST(testHashMapLookupWithDefaultOOM)
{
uint32_t timeToFail;
for (timeToFail = 1; timeToFail < 1000; timeToFail++) {
js::oom::SimulateOOMAfter(timeToFail, js::oom::THREAD_TYPE_MAIN, false);
js::oom::SimulateOOMAfter(timeToFail, js::oom::THREAD_TYPE_COOPERATING, false);
LookupWithDefaultUntilResize();
}
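The jsapi-test above is the canonical use of the renamed THREAD_TYPE_COOPERATING enumerator: simulate an OOM at the Nth allocation on the cooperating (formerly "main") thread and check that the code under test copes. A stripped-down sketch of that loop, with OperationUnderTest standing in for whatever allocation path a test exercises:

// Sketch only: OperationUnderTest is hypothetical; the SimulateOOMAfter call
// mirrors the one in the test above (fail at the Nth allocation, once).
for (uint32_t n = 1; n < 1000; n++) {
    js::oom::SimulateOOMAfter(n, js::oom::THREAD_TYPE_COOPERATING, false);
    OperationUnderTest();
}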


@ -36,7 +36,7 @@ const uint32_t maxAllocsPerTest = 100;
testName = name; \
printf("Test %s: started\n", testName); \
for (oomAfter = 1; oomAfter < maxAllocsPerTest; ++oomAfter) { \
js::oom::SimulateOOMAfter(oomAfter, js::oom::THREAD_TYPE_MAIN, true)
js::oom::SimulateOOMAfter(oomAfter, js::oom::THREAD_TYPE_COOPERATING, true)
#define OOM_TEST_FINISHED \
{ \


@ -4108,13 +4108,13 @@ JS::CanCompileOffThread(JSContext* cx, const ReadOnlyCompileOptions& options, si
// These are heuristics which the caller may choose to ignore (e.g., for
// testing purposes).
if (!options.forceAsync) {
// Compiling off the main thread inolves creating a new Zone and other
// Compiling off the active thread involves creating a new Zone and other
// significant overheads. Don't bother if the script is tiny.
if (length < TINY_LENGTH)
return false;
// If the parsing task would have to wait for GC to complete, it'll probably
// be faster to just start it synchronously on the main thread unless the
// be faster to just start it synchronously on the active thread unless the
// script is huge.
if (OffThreadParsingMustWaitForGC(cx->runtime()) && length < HUGE_LENGTH)
return false;


@ -4158,9 +4158,9 @@ CanCompileOffThread(JSContext* cx, const ReadOnlyCompileOptions& options, size_t
*
* After successfully triggering an off thread compile of a script, the
* callback will eventually be invoked with the specified data and a token
* for the compilation. The callback will be invoked while off the main thread,
* for the compilation. The callback will be invoked while off thread,
* so must ensure that its operations are thread safe. Afterwards, one of the
* following functions must be invoked on the main thread:
* following functions must be invoked on the runtime's active thread:
*
* - FinishOffThreadScript, to get the result script (or nullptr on failure).
* - CancelOffThreadScript, to free the resources without creating a script.
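A compressed sketch of the lifecycle this comment describes. CanCompileOffThread and FinishOffThreadScript come straight from the surrounding text; the CompileOffThread signature and callback shape are recalled from this era of the API and should be treated as assumptions, and NotifyActiveThread/CompileSynchronously are hypothetical embedder helpers.

// Sketch only: start an off-thread compile when the heuristics allow it,
// then finish on the runtime's active thread.
static void
OnCompileDone(void* token, void* data)
{
    // Invoked off thread: hand the token back to the active thread,
    // e.g. by posting an event to its event loop (hypothetical helper).
    NotifyActiveThread(token, data);
}

static bool
CompileMaybeOffThread(JSContext* cx, const JS::ReadOnlyCompileOptions& options,
                      const char16_t* chars, size_t length)
{
    if (!JS::CanCompileOffThread(cx, options, length))
        return CompileSynchronously(cx, options, chars, length);  // hypothetical fallback
    return JS::CompileOffThread(cx, options, chars, length, OnCompileDone, nullptr);
}

// Later, on the active thread, using the token received in OnCompileDone:
//   JS::RootedScript script(cx, JS::FinishOffThreadScript(cx, token));
//   if (!script) { /* compilation failed; the error has been reported */ }
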
@ -6000,7 +6000,7 @@ DecodeInterpretedFunction(JSContext* cx, TranscodeBuffer& buffer, JS::MutableHan
// Register an encoder on the given script source, such that all functions can
// be encoded as they are parsed. This strategy is used to avoid blocking the
// main thread in a non-interruptible way.
// active thread in a non-interruptible way.
//
// The |script| argument of |StartIncrementalEncoding| and
// |FinishIncrementalEncoding| should be the top-level script returned either as


@ -449,12 +449,12 @@ struct JSContext : public JS::RootingContext,
js::ThreadLocalData<bool> ionCompilingSafeForMinorGC;
// Whether this thread is currently performing GC. This thread could be the
// main thread or a helper thread while the main thread is running the
// active thread or a helper thread while the active thread is running the
// collector.
js::ThreadLocalData<bool> performingGC;
// Whether this thread is currently sweeping GC things. This thread could
// be the main thread or a helper thread while the main thread is running
// be the active thread or a helper thread while the active thread is running
// the mutator. This is used to assert that destruction of GCPtr only
// happens when we are sweeping.
js::ThreadLocalData<bool> gcSweeping;
@ -516,9 +516,9 @@ struct JSContext : public JS::RootingContext,
// with AutoDisableCompactingGC which uses this counter.
js::ThreadLocalData<unsigned> compactingDisabledCount;
// Count of AutoKeepAtoms instances on the main thread's stack. When any
// Count of AutoKeepAtoms instances on the current thread's stack. When any
// instances exist, atoms in the runtime will not be collected. Threads
// off the main thread do not increment this value, but the presence
// parsing off the active thread do not increment this value, but the presence
// of any such threads also inhibits collection of atoms. We don't scan the
// stacks of exclusive threads, so we need to avoid collecting their
// objects in another way. The only GC thing pointers they have are to
@ -779,9 +779,9 @@ struct JSContext : public JS::RootingContext,
RequestInterruptCanWait
};
// Any thread can call requestInterrupt() to request that the main JS thread
// Any thread can call requestInterrupt() to request that this thread
// stop running and call the interrupt callback (allowing the interrupt
// callback to halt execution). To stop the main JS thread, requestInterrupt
// callback to halt execution). To stop this thread, requestInterrupt
// sets two fields: interrupt_ (set to true) and jitStackLimit_ (set to
// UINTPTR_MAX). The JS engine must continually poll one of these fields
// and call handleInterrupt if either field has the interrupt value. (The
@ -792,7 +792,7 @@ struct JSContext : public JS::RootingContext,
//
// Note that the writes to interrupt_ and jitStackLimit_ use a Relaxed
// Atomic so, while the writes are guaranteed to eventually be visible to
// the main thread, it can happen in any order. handleInterrupt calls the
// this thread, it can happen in any order. handleInterrupt calls the
// interrupt callback if either is set, so it really doesn't matter as long
// as the JS engine is continually polling at least one field. In corner
// cases, this relaxed ordering could lead to an interrupt handler being
@ -1134,9 +1134,9 @@ class MOZ_RAII AutoLockForExclusiveAccess
if (runtime->numExclusiveThreads) {
runtime->exclusiveAccessLock.lock();
} else {
MOZ_ASSERT(!runtime->mainThreadHasExclusiveAccess);
MOZ_ASSERT(!runtime->activeThreadHasExclusiveAccess);
#ifdef DEBUG
runtime->mainThreadHasExclusiveAccess = true;
runtime->activeThreadHasExclusiveAccess = true;
#endif
}
}
@ -1154,9 +1154,9 @@ class MOZ_RAII AutoLockForExclusiveAccess
if (runtime->numExclusiveThreads) {
runtime->exclusiveAccessLock.unlock();
} else {
MOZ_ASSERT(runtime->mainThreadHasExclusiveAccess);
MOZ_ASSERT(runtime->activeThreadHasExclusiveAccess);
#ifdef DEBUG
runtime->mainThreadHasExclusiveAccess = false;
runtime->activeThreadHasExclusiveAccess = false;
#endif
}
}
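For context, this guard is used as a plain RAII lock around runtime-wide state such as the atoms table; the assertions above are what track, in DEBUG builds, whether the active thread currently holds that exclusive access. A hedged usage sketch (the JSContext* constructor is recalled from this era, not shown in the hunk):

{
    // Sketch only: while held, exclusive (helper) threads cannot touch the
    // shared state; with no exclusive threads it just flips the DEBUG flag.
    js::AutoLockForExclusiveAccess lock(cx);
    // ... read or mutate runtime-wide state here ...
}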


@ -103,7 +103,7 @@ JSCompartment::~JSCompartment()
reportTelemetry();
// Write the code coverage information in a file.
JSRuntime* rt = runtimeFromMainThread();
JSRuntime* rt = runtimeFromActiveCooperatingThread();
if (rt->lcovOutput().isEnabled())
rt->lcovOutput().writeLCovResult(lcovOutput);
@ -1124,7 +1124,7 @@ JSCompartment::updateDebuggerObservesFlag(unsigned flag)
flag == DebuggerObservesCoverage ||
flag == DebuggerObservesAsmJS);
GlobalObject* global = zone()->runtimeFromMainThread()->gc.isForegroundSweeping()
GlobalObject* global = zone()->runtimeFromActiveCooperatingThread()->gc.isForegroundSweeping()
? unsafeUnbarrieredMaybeGlobal()
: maybeGlobal();
const GlobalObject::DebuggerVector* v = global->getDebuggers();


@ -395,7 +395,7 @@ struct JSCompartment
JS::CompartmentBehaviors& behaviors() { return behaviors_; }
const JS::CompartmentBehaviors& behaviors() const { return behaviors_; }
JSRuntime* runtimeFromMainThread() const {
JSRuntime* runtimeFromActiveCooperatingThread() const {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
return runtime_;
}


@ -324,7 +324,7 @@ js::ComputeStackString(JSContext* cx)
static void
exn_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
if (JSErrorReport* report = obj->as<ErrorObject>().getErrorReport())
fop->free_(report);
}


@ -169,7 +169,7 @@ JS_SetCompartmentPrincipals(JSCompartment* compartment, JSPrincipals* principals
// Any compartment with the trusted principals -- and there can be
// multiple -- is a system compartment.
const JSPrincipals* trusted = compartment->runtimeFromMainThread()->trustedPrincipals();
const JSPrincipals* trusted = compartment->runtimeFromActiveCooperatingThread()->trustedPrincipals();
bool isSystem = principals && principals == trusted;
// Clear out the old principals, if any.
@ -390,7 +390,7 @@ js::NotifyAnimationActivity(JSObject* obj)
{
int64_t timeNow = PRMJ_Now();
obj->compartment()->lastAnimationTime = timeNow;
obj->runtimeFromMainThread()->lastAnimationTime = timeNow;
obj->runtimeFromActiveCooperatingThread()->lastAnimationTime = timeNow;
}
JS_FRIEND_API(uint32_t)


@ -618,7 +618,7 @@ js::XDRInterpretedFunction(XDRState<mode>* xdr, HandleScope enclosingScope,
if (mode == XDR_DECODE) {
RootedObject proto(cx);
if (firstword & IsStarGenerator) {
// If we are off the main thread, the generator meta-objects have
// If we are off thread, the generator meta-objects have
// already been created by js::StartOffThreadParseTask, so
// JSContext* will not be necessary.
JSContext* context = cx->helperThread() ? nullptr : cx;


@ -324,7 +324,7 @@ struct js::gc::FinalizePhase
};
/*
* Finalization order for GC things swept incrementally on the main thrad.
* Finalization order for GC things swept incrementally on the active thread.
*/
static const FinalizePhase IncrementalFinalizePhases[] = {
{
@ -508,12 +508,12 @@ FinalizeTypedArenas(FreeOp* fop,
{
// When operating in the foreground, take the lock at the top.
Maybe<AutoLockGC> maybeLock;
if (fop->onMainThread())
if (fop->onActiveCooperatingThread())
maybeLock.emplace(fop->runtime());
// During background sweeping free arenas are released later on in
// sweepBackgroundThings().
MOZ_ASSERT_IF(!fop->onMainThread(), keepArenas == ArenaLists::KEEP_ARENAS);
MOZ_ASSERT_IF(!fop->onActiveCooperatingThread(), keepArenas == ArenaLists::KEEP_ARENAS);
size_t thingSize = Arena::thingSize(thingKind);
size_t thingsPerArena = Arena::thingsPerArena(thingKind);
@ -2004,7 +2004,7 @@ bool
ArenaLists::relocateArenas(Zone* zone, Arena*& relocatedListOut, JS::gcreason::Reason reason,
SliceBudget& sliceBudget, gcstats::Statistics& stats)
{
// This is only called from the main thread while we are doing a GC, so
// This is only called from the active thread while we are doing a GC, so
// there is no need to lock.
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
MOZ_ASSERT(runtime_->gc.isHeapCompacting());
@ -2133,7 +2133,7 @@ MovingTracer::onScopeEdge(Scope** scopep)
void
Zone::prepareForCompacting()
{
FreeOp* fop = runtimeFromMainThread()->defaultFreeOp();
FreeOp* fop = runtimeFromActiveCooperatingThread()->defaultFreeOp();
discardJitCode(fop);
}
@ -2415,7 +2415,7 @@ GCRuntime::updateCellPointers(MovingTracer* trc, Zone* zone, AllocKinds kinds, s
}
}
fgTask->runFromMainThread(rt);
fgTask->runFromActiveCooperatingThread(rt);
{
AutoLockHelperThreadState lock;
@ -2981,7 +2981,7 @@ bool
GCRuntime::triggerGC(JS::gcreason::Reason reason)
{
/*
* Don't trigger GCs if this is being called off the main thread from
* Don't trigger GCs if this is being called off the active thread from
* onTooMuchMalloc().
*/
if (!CurrentThreadCanAccessRuntime(rt))
@ -3122,7 +3122,7 @@ GCRuntime::startDecommit()
MOZ_ASSERT(!chunk->info.numArenasFreeCommitted);
// Since we release the GC lock while doing the decommit syscall below,
// it is dangerous to iterate the available list directly, as the main
// it is dangerous to iterate the available list directly, as the active
// thread could modify it concurrently. Instead, we build and pass an
// explicit Vector containing the Chunks we want to visit.
MOZ_ASSERT(availableChunks(lock).verify());
@ -3138,7 +3138,7 @@ GCRuntime::startDecommit()
if (sweepOnBackgroundThread && decommitTask.start())
return;
decommitTask.runFromMainThread(rt);
decommitTask.runFromActiveCooperatingThread(rt);
}
void
@ -3203,7 +3203,7 @@ GCRuntime::sweepBackgroundThings(ZoneList& zones, LifoAlloc& freeBlocks)
AutoLockGC lock(rt);
// Release swept arenas, dropping and reacquiring the lock every so often to
// avoid blocking the main thread from allocating chunks.
// avoid blocking the active thread from allocating chunks.
static const size_t LockReleasePeriod = 32;
size_t releaseCount = 0;
Arena* next;
@ -3374,7 +3374,7 @@ GCHelperState::waitBackgroundSweepEnd()
void
GCHelperState::doSweep(AutoLockGC& lock)
{
// The main thread may call queueZonesForBackgroundSweep() while this is
// The active thread may call queueZonesForBackgroundSweep() while this is
// running so we must check there is no more work to do before exiting.
do {
@ -3457,7 +3457,7 @@ JS::Zone::sweepUniqueIds(js::FreeOp* fop)
void
Zone::sweepCompartments(FreeOp* fop, bool keepAtleastOne, bool destroyingRuntime)
{
JSRuntime* rt = runtimeFromMainThread();
JSRuntime* rt = runtimeFromActiveCooperatingThread();
JSDestroyCompartmentCallback callback = rt->destroyCompartmentCallback;
JSCompartment** read = compartments().begin();
@ -3759,7 +3759,7 @@ RelazifyFunctions(Zone* zone, AllocKind kind)
AutoAssertEmptyNursery empty(TlsContext.get());
JSRuntime* rt = zone->runtimeFromMainThread();
JSRuntime* rt = zone->runtimeFromActiveCooperatingThread();
for (auto i = zone->cellIter<JSObject>(kind, empty); !i.done(); i.next()) {
JSFunction* fun = &i->as<JSFunction>();
if (fun->hasScript())
@ -3845,9 +3845,9 @@ GCRuntime::beginMarkPhase(JS::gcreason::Reason reason, AutoLockForExclusiveAcces
* Note that only affects the first slice of an incremental GC since root
* marking is completed before we return to the mutator.
*
* Off-main-thread parsing is inhibited after the start of GC which prevents
* Off-thread parsing is inhibited after the start of GC which prevents
* races between creating atoms during parsing and sweeping atoms on the
* main thread.
* active thread.
*
* Otherwise, we always schedule a GC in the atoms zone so that atoms which
* the other collected zones are using are marked, and we can update the
@ -4477,7 +4477,7 @@ Zone::findOutgoingEdges(ZoneComponentFinder& finder)
* Any compartment may have a pointer to an atom in the atoms
* compartment, and these aren't in the cross compartment map.
*/
JSRuntime* rt = runtimeFromMainThread();
JSRuntime* rt = runtimeFromActiveCooperatingThread();
Zone* atomsZone = rt->atomsCompartment(finder.lock)->zone();
if (atomsZone->isGCMarking())
finder.addEdgeTo(atomsZone);
@ -4971,7 +4971,7 @@ GCRuntime::startTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperT
if (!task.startWithLockHeld(locked)) {
AutoUnlockHelperThreadState unlock(locked);
gcstats::AutoPhase ap(stats(), phase);
task.runFromMainThread(rt);
task.runFromActiveCooperatingThread(rt);
}
}
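startTask is the canonical form of an idiom that recurs throughout this patch (startDecommit, updateCellPointers, freeMallocedBuffers): try to hand a GCParallelTask to a helper thread, and fall back to running it synchronously on the active cooperating thread when that fails. Schematically, with task standing for any GCParallelTask and rt for the runtime:

// Sketch only: mirrors GCRuntime::startTask above.
AutoLockHelperThreadState lock;
if (!task.startWithLockHeld(lock)) {
    // No helper picked the task up: drop the helper-thread lock and do the
    // work right here on the active cooperating thread instead.
    AutoUnlockHelperThreadState unlock(lock);
    task.runFromActiveCooperatingThread(rt);
}
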
@ -4985,12 +4985,12 @@ GCRuntime::joinTask(GCParallelTask& task, gcstats::Phase phase, AutoLockHelperTh
using WeakCacheTaskVector = mozilla::Vector<SweepWeakCacheTask, 0, SystemAllocPolicy>;
static void
SweepWeakCachesFromMainThread(JSRuntime* rt)
SweepWeakCachesFromActiveCooperatingThread(JSRuntime* rt)
{
for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
for (JS::WeakCache<void*>* cache : zone->weakCaches()) {
SweepWeakCacheTask task(rt, *cache);
task.runFromMainThread(rt);
task.runFromActiveCooperatingThread(rt);
}
}
}
@ -5002,7 +5002,7 @@ PrepareWeakCacheTasks(JSRuntime* rt)
for (GCZoneGroupIter zone(rt); !zone.done(); zone.next()) {
for (JS::WeakCache<void*>* cache : zone->weakCaches()) {
if (!out.append(SweepWeakCacheTask(rt, *cache))) {
SweepWeakCachesFromMainThread(rt);
SweepWeakCachesFromActiveCooperatingThread(rt);
return WeakCacheTaskVector();
}
}
@ -5098,7 +5098,7 @@ GCRuntime::beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock)
startTask(task, gcstats::PHASE_SWEEP_MISC, helperLock);
}
// The remainder of the of the tasks run in parallel on the main
// The remainder of the tasks run in parallel on the active
// thread until we join, below.
{
gcstats::AutoPhase ap(stats(), gcstats::PHASE_SWEEP_MISC);
@ -5156,7 +5156,7 @@ GCRuntime::beginSweepingZoneGroup(AutoLockForExclusiveAccess& lock)
}
}
// Rejoin our off-main-thread tasks.
// Rejoin our off-thread tasks.
if (sweepingAtoms) {
AutoLockHelperThreadState helperLock;
joinTask(sweepAtomsTask, gcstats::PHASE_SWEEP_ATOMS, helperLock);
@ -6262,7 +6262,7 @@ GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::
if (!isIncrementalGCInProgress())
incMajorGcNumber();
// It's ok if threads other than the main thread have suppressGC set, as
// It's ok if threads other than the active thread have suppressGC set, as
// they are operating on zones which will not be collected from here.
MOZ_ASSERT(!TlsContext.get()->suppressGC);
@ -6286,7 +6286,7 @@ GCRuntime::gcCycle(bool nonincrementalByAPI, SliceBudget& budget, JS::gcreason::
allocTask.cancel(GCParallelTask::CancelAndWait);
}
// We don't allow off-main-thread parsing to start while we're doing an
// We don't allow off-thread parsing to start while we're doing an
// incremental GC.
MOZ_ASSERT_IF(rt->activeGCInAtomsZone(), !rt->exclusiveThreadsPresent());
@ -6877,7 +6877,7 @@ gc::MergeCompartments(JSCompartment* source, JSCompartment* target)
MOZ_ASSERT(source->creationOptions().addonIdOrNull() ==
target->creationOptions().addonIdOrNull());
JSContext* cx = source->runtimeFromMainThread()->activeContextFromOwnThread();
JSContext* cx = source->runtimeFromActiveCooperatingThread()->activeContextFromOwnThread();
AutoPrepareForTracing prepare(cx, SkipAtoms);
@ -7835,7 +7835,7 @@ js::gc::detail::CellIsMarkedGrayIfKnown(const Cell* cell)
// that is not being collected. Gray targets of CCWs that are marked black
// by a barrier will eventually be marked black in the next GC slice.
auto tc = &cell->asTenured();
auto rt = tc->runtimeFromMainThread();
auto rt = tc->runtimeFromActiveCooperatingThread();
if (!rt->gc.areGrayBitsValid() ||
(rt->gc.isIncrementalGCInProgress() && !tc->zone()->wasGCStarted()))
{

View File

@ -170,7 +170,7 @@ CanBeFinalizedInBackground(AllocKind kind, const Class* clasp)
MOZ_ASSERT(IsObjectAllocKind(kind));
/* If the class has no finalizer or a finalizer that is safe to call on
* a different thread, we change the alloc kind. For example,
* AllocKind::OBJECT0 calls the finalizer on the main thread,
* AllocKind::OBJECT0 calls the finalizer on the active thread,
* AllocKind::OBJECT0_BACKGROUND calls the finalizer on the gcHelperThread.
* IsBackgroundFinalized is called to prevent recursively incrementing
* the alloc kind; kind may already be a background finalize kind.
@ -855,10 +855,10 @@ NotifyGCPostSwap(JSObject* a, JSObject* b, unsigned preResult);
/*
* Helper state for use when JS helper threads sweep and allocate GC thing kinds
* that can be swept and allocated off the main thread.
* that can be swept and allocated off thread.
*
* In non-threadsafe builds, all actual sweeping and allocation is performed
* on the main thread, but GCHelperState encapsulates this from clients as
* on the active thread, but GCHelperState encapsulates this from clients as
* much as possible.
*/
class GCHelperState
@ -871,7 +871,7 @@ class GCHelperState
// Associated runtime.
JSRuntime* const rt;
// Condvar for notifying the main thread when work has finished. This is
// Condvar for notifying the active thread when work has finished. This is
// associated with the runtime's GC lock --- the worker thread state
// condvars can't be used here due to lock ordering issues.
js::ConditionVariable done;
@ -986,8 +986,8 @@ class GCParallelTask
bool startWithLockHeld(AutoLockHelperThreadState& locked);
void joinWithLockHeld(AutoLockHelperThreadState& locked);
// Instead of dispatching to a helper, run the task on the main thread.
void runFromMainThread(JSRuntime* rt);
// Instead of dispatching to a helper, run the task on the current thread.
void runFromActiveCooperatingThread(JSRuntime* rt);
// Dispatch a cancelation request.
enum CancelMode { CancelNoWait, CancelAndWait};
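A hedged usage sketch for this interface, mirroring the GCRuntime::startTask fallback earlier in this patch; the free function below and its locking discipline are illustrative, not the actual GC code.
// Illustrative only: try to hand the task to a helper thread, fall back to
// running it synchronously on the active cooperating thread, then join it.
void
StartOrRunGCTask(JSRuntime* rt, js::GCParallelTask& task)
{
    {
        js::AutoLockHelperThreadState lock;
        if (!task.startWithLockHeld(lock)) {
            // No helper thread available; drop the lock and run it here.
            js::AutoUnlockHelperThreadState unlock(lock);
            task.runFromActiveCooperatingThread(rt);
        }
    }
    js::AutoLockHelperThreadState lock;
    task.joinWithLockHeld(lock);
}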

View File

@ -479,7 +479,7 @@ class JSObject : public js::gc::Cell
//
// These cases are:
// 1) The off-thread parsing task uses a dummy global since it cannot
// share with the actual global being used concurrently on the main
// share with the actual global being used concurrently on the active
// thread.
// 2) A GC may occur when creating the GlobalObject, in which case the
// compartment global pointer may not yet be set. In this case there is

View File

@ -74,7 +74,7 @@ JSObject::finalize(js::FreeOp* fop)
#ifdef DEBUG
MOZ_ASSERT(isTenured());
if (!IsBackgroundFinalized(asTenured().getAllocKind())) {
/* Assert we're on the main thread. */
/* Assert we're on the active thread. */
MOZ_ASSERT(CurrentThreadCanAccessZone(zone()));
}
#endif
@ -299,7 +299,7 @@ SetNewObjectMetadata(JSContext* cx, JSObject* obj)
{
MOZ_ASSERT(!cx->compartment()->hasObjectPendingMetadata());
// The metadata builder is invoked for each object created on the main
// The metadata builder is invoked for each object created on the active
// thread, except when analysis/compilation is active, to avoid recursion.
if (!cx->helperThread()) {
if (MOZ_UNLIKELY((size_t)cx->compartment()->hasAllocationMetadataBuilder()) &&
@ -337,7 +337,7 @@ JSObject::create(JSContext* cx, js::gc::AllocKind kind, js::gc::InitialHeap heap
uint32_t finalizeFlags = flags & FinalizeMask;
// Classes with a finalizer must specify whether instances will be finalized
// on the main thread or in the background, except proxies whose behaviour
// on the active thread or in the background, except proxies whose behaviour
// depends on the target object.
if (clasp->hasFinalize() && !clasp->isProxy()) {
MOZ_ASSERT(finalizeFlags == JSCLASS_FOREGROUND_FINALIZE ||

View File

@ -1332,7 +1332,7 @@ ScriptSourceObject::trace(JSTracer* trc, JSObject* obj)
void
ScriptSourceObject::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
ScriptSourceObject* sso = &obj->as<ScriptSourceObject>();
// If code coverage is enabled, record the filename associated with this
@ -1807,8 +1807,8 @@ ScriptSource::setSourceCopy(JSContext* cx, SourceBufferHolder& srcBuf,
// helper threads:
// - If we are on a helper thread, there must be another helper thread to
// execute our compression task.
// - If we are on the main thread, there must be at least two helper
// threads since at most one helper thread can be blocking on the main
// - If we are on the active thread, there must be at least two helper
// threads since at most one helper thread can be blocking on the active
// thread (see HelperThreadState::canStartParseTask) which would cause a
// deadlock if there wasn't a second helper thread that could make
// progress on our compression task.
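A minimal sketch of the check this reasoning implies; the threadCount accessor on the global helper thread state is an assumption here, and the real decision in setSourceCopy may involve additional conditions.
// Illustrative only: per the reasoning above, both the helper-thread and the
// active-thread cases reduce to requiring at least two helper threads, so a
// blocked helper cannot starve the compression task.
static bool
CanCompressOffThread()
{
    return HelperThreadState().threadCount >= 2;   // assumed accessor
}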

View File

@ -360,8 +360,8 @@ class ScriptSource
uint32_t refs;
// Note: while ScriptSources may be compressed off thread, they are only
// modified by the main thread, and all members are always safe to access
// on the main thread.
// modified by the active thread, and all members are always safe to access
// on the active thread.
// Indicate which field in the |data| union is active.

View File

@ -70,7 +70,7 @@ SimulateOOMAfter(uint64_t allocations, uint32_t thread, bool always) {
void
ResetSimulatedOOM() {
if (targetThread != THREAD_TYPE_NONE && targetThread != THREAD_TYPE_MAIN)
if (targetThread != THREAD_TYPE_NONE && targetThread != THREAD_TYPE_COOPERATING)
HelperThreadState().waitForAllThreads();
targetThread = THREAD_TYPE_NONE;
maxAllocations = UINT64_MAX;

View File

@ -286,7 +286,7 @@ class WeakMap : public HashMap<Key, Value, HashPolicy, RuntimeAllocPolicy>,
if (!obj)
return nullptr;
MOZ_ASSERT(obj->runtimeFromMainThread() == zone()->runtimeFromMainThread());
MOZ_ASSERT(obj->runtimeFromActiveCooperatingThread() == zone()->runtimeFromActiveCooperatingThread());
return obj;
}
@ -304,7 +304,7 @@ class WeakMap : public HashMap<Key, Value, HashPolicy, RuntimeAllocPolicy>,
* Check if the delegate is marked with any color to properly handle
* gray marking when the key's delegate is black and the map is gray.
*/
return delegate && gc::IsMarkedUnbarriered(zone()->runtimeFromMainThread(), &delegate);
return delegate && gc::IsMarkedUnbarriered(zone()->runtimeFromActiveCooperatingThread(), &delegate);
}
bool keyNeedsMark(JSScript* script) const {

View File

@ -17,16 +17,16 @@ namespace js {
/* static */ mozilla::Atomic<size_t> AutoNoteSingleThreadedRegion::count(0);
template <AllowedBackgroundThread Background>
template <AllowedHelperThread Helper>
static inline bool
OnBackgroundThread()
OnHelperThread()
{
if (Background == AllowedBackgroundThread::IonCompile || Background == AllowedBackgroundThread::GCTaskOrIonCompile) {
if (Helper == AllowedHelperThread::IonCompile || Helper == AllowedHelperThread::GCTaskOrIonCompile) {
if (CurrentThreadIsIonCompiling())
return true;
}
if (Background == AllowedBackgroundThread::GCTask || Background == AllowedBackgroundThread::GCTaskOrIonCompile) {
if (Helper == AllowedHelperThread::GCTask || Helper == AllowedHelperThread::GCTaskOrIonCompile) {
if (TlsContext.get()->performingGC || TlsContext.get()->runtime()->gc.onBackgroundThread())
return true;
}
@ -34,16 +34,16 @@ OnBackgroundThread()
return false;
}
template <AllowedBackgroundThread Background>
template <AllowedHelperThread Helper>
void
CheckActiveThread<Background>::check() const
CheckActiveThread<Helper>::check() const
{
// When interrupting a thread on Windows, changes are made to the runtime
// and active thread's state from another thread while the active thread is
// suspended. We need a way to mark these accesses as being tantamount to
// accesses by the active thread. See bug 1323066.
#ifndef XP_WIN
if (OnBackgroundThread<Background>())
if (OnHelperThread<Helper>())
return;
JSContext* cx = TlsContext.get();
@ -51,20 +51,20 @@ CheckActiveThread<Background>::check() const
#endif // XP_WIN
}
template class CheckActiveThread<AllowedBackgroundThread::None>;
template class CheckActiveThread<AllowedBackgroundThread::GCTask>;
template class CheckActiveThread<AllowedBackgroundThread::IonCompile>;
template class CheckActiveThread<AllowedHelperThread::None>;
template class CheckActiveThread<AllowedHelperThread::GCTask>;
template class CheckActiveThread<AllowedHelperThread::IonCompile>;
template <AllowedBackgroundThread Background>
template <AllowedHelperThread Helper>
void
CheckZoneGroup<Background>::check() const
CheckZoneGroup<Helper>::check() const
{
if (OnBackgroundThread<Background>())
if (OnHelperThread<Helper>())
return;
if (group) {
// This check is disabled for now because helper thread parse tasks
// access data in the same zone group that the single main thread is
// access data in the same zone group that the single active thread is
// using. This will be fixed soon (bug 1323066).
//MOZ_ASSERT(group->context && group->context == TlsContext.get());
} else {
@ -74,16 +74,16 @@ CheckZoneGroup<Background>::check() const
}
}
template class CheckZoneGroup<AllowedBackgroundThread::None>;
template class CheckZoneGroup<AllowedBackgroundThread::GCTask>;
template class CheckZoneGroup<AllowedBackgroundThread::IonCompile>;
template class CheckZoneGroup<AllowedBackgroundThread::GCTaskOrIonCompile>;
template class CheckZoneGroup<AllowedHelperThread::None>;
template class CheckZoneGroup<AllowedHelperThread::GCTask>;
template class CheckZoneGroup<AllowedHelperThread::IonCompile>;
template class CheckZoneGroup<AllowedHelperThread::GCTaskOrIonCompile>;
template <GlobalLock Lock, AllowedBackgroundThread Background>
template <GlobalLock Lock, AllowedHelperThread Helper>
void
CheckGlobalLock<Lock, Background>::check() const
CheckGlobalLock<Lock, Helper>::check() const
{
if (OnBackgroundThread<Background>())
if (OnHelperThread<Helper>())
return;
switch (Lock) {
@ -99,10 +99,10 @@ CheckGlobalLock<Lock, Background>::check() const
}
}
template class CheckGlobalLock<GlobalLock::GCLock, AllowedBackgroundThread::None>;
template class CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedBackgroundThread::None>;
template class CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedBackgroundThread::GCTask>;
template class CheckGlobalLock<GlobalLock::HelperThreadLock, AllowedBackgroundThread::None>;
template class CheckGlobalLock<GlobalLock::GCLock, AllowedHelperThread::None>;
template class CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedHelperThread::None>;
template class CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedHelperThread::GCTask>;
template class CheckGlobalLock<GlobalLock::HelperThreadLock, AllowedHelperThread::None>;
#endif // DEBUG

View File

@ -191,9 +191,9 @@ class CheckThreadLocal
template <typename T>
using ThreadLocalData = ProtectedDataNoCheckArgs<CheckThreadLocal, T>;
// Enum describing which background threads (GC tasks or Ion compilations) may
// Enum describing which helper threads (GC tasks or Ion compilations) may
// access data even though they do not have exclusive access to any zone group.
enum class AllowedBackgroundThread
enum class AllowedHelperThread
{
None,
GCTask,
@ -201,7 +201,7 @@ enum class AllowedBackgroundThread
GCTaskOrIonCompile
};
template <AllowedBackgroundThread Background>
template <AllowedHelperThread Helper>
class CheckActiveThread
{
public:
@ -212,18 +212,18 @@ class CheckActiveThread
// active thread.
template <typename T>
using ActiveThreadData =
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::None>, T>;
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedHelperThread::None>, T>;
// Data which may only be accessed by the runtime's cooperatively scheduled
// active thread, or by various helper thread tasks.
template <typename T>
using ActiveThreadOrGCTaskData =
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::GCTask>, T>;
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedHelperThread::GCTask>, T>;
template <typename T>
using ActiveThreadOrIonCompileData =
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedBackgroundThread::IonCompile>, T>;
ProtectedDataNoCheckArgs<CheckActiveThread<AllowedHelperThread::IonCompile>, T>;
template <AllowedBackgroundThread Background>
template <AllowedHelperThread Helper>
class CheckZoneGroup
{
#ifdef DEBUG
@ -242,19 +242,19 @@ class CheckZoneGroup
// associated zone group.
template <typename T>
using ZoneGroupData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::None>, T>;
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedHelperThread::None>, T>;
// Data which may only be accessed by threads with exclusive access to the
// associated zone group, or by various helper thread tasks.
template <typename T>
using ZoneGroupOrGCTaskData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::GCTask>, T>;
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedHelperThread::GCTask>, T>;
template <typename T>
using ZoneGroupOrIonCompileData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::IonCompile>, T>;
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedHelperThread::IonCompile>, T>;
template <typename T>
using ZoneGroupOrGCTaskOrIonCompileData =
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedBackgroundThread::GCTaskOrIonCompile>, T>;
ProtectedDataZoneGroupArg<CheckZoneGroup<AllowedHelperThread::GCTaskOrIonCompile>, T>;
// Runtime wide locks which might protect some data.
enum class GlobalLock
@ -264,7 +264,7 @@ enum class GlobalLock
HelperThreadLock
};
template <GlobalLock Lock, AllowedBackgroundThread Background>
template <GlobalLock Lock, AllowedHelperThread Helper>
class CheckGlobalLock
{
#ifdef DEBUG
@ -276,24 +276,24 @@ class CheckGlobalLock
// Data which may only be accessed while holding the GC lock.
template <typename T>
using GCLockData =
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::GCLock, AllowedBackgroundThread::None>, T>;
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::GCLock, AllowedHelperThread::None>, T>;
// Data which may only be accessed while holding the exclusive access lock.
template <typename T>
using ExclusiveAccessLockData =
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedBackgroundThread::None>, T>;
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedHelperThread::None>, T>;
// Data which may only be accessed while holding the exclusive access lock or
// by GC helper thread tasks (at which point a foreground thread should be
// holding the exclusive access lock, though we do not check this).
template <typename T>
using ExclusiveAccessLockOrGCTaskData =
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedBackgroundThread::GCTask>, T>;
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedHelperThread::GCTask>, T>;
// Data which may only be accessed while holding the helper thread lock.
template <typename T>
using HelperThreadLockData =
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::HelperThreadLock, AllowedBackgroundThread::None>, T>;
ProtectedDataNoCheckArgs<CheckGlobalLock<GlobalLock::HelperThreadLock, AllowedHelperThread::None>, T>;
// Class for protected data that is only written to once. 'const' may sometimes
// be usable instead of this class, but in cases where the data cannot be set
@ -356,7 +356,7 @@ using WriteOnceData = ProtectedDataWriteOnce<CheckUnprotected, T>;
// Data that is written once, and only while holding the exclusive access lock.
template <typename T>
using ExclusiveAccessLockWriteOnceData =
ProtectedDataWriteOnce<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedBackgroundThread::None>, T>;
ProtectedDataWriteOnce<CheckGlobalLock<GlobalLock::ExclusiveAccessLock, AllowedHelperThread::None>, T>;
#undef DECLARE_ASSIGNMENT_OPERATOR
#undef DECLARE_ONE_BOOL_OPERATOR
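A minimal sketch of how these aliases are meant to appear in declarations; the struct and field names are illustrative, while JSRuntime itself uses the same pattern (see the ActiveThreadData<size_t> numCompartments member later in this patch).
// Illustrative declarations only: the wrapper chosen for each field encodes
// which threads may touch it, and the checks above fire in debug builds.
struct ExampleProtectedState
{
    // Only the active cooperating thread may read or write this.
    js::ActiveThreadData<size_t> scriptCount;

    // The active thread or a GC helper task may access this.
    js::ActiveThreadOrGCTaskData<bool> needsSweep;

    // Only accessed while holding the helper thread lock.
    js::HelperThreadLockData<size_t> pendingTasks;
};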

View File

@ -3299,7 +3299,7 @@ Debugger::findZoneEdges(Zone* zone, js::gc::ZoneComponentFinder& finder)
/* static */ void
Debugger::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
Debugger* dbg = fromJSObject(obj);
if (!dbg)
@ -8276,7 +8276,7 @@ DebuggerFrame_maybeDecrementFrameScriptStepModeCount(FreeOp* fop, AbstractFrameP
static void
DebuggerFrame_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
DebuggerFrame_freeScriptFrameIterData(fop, obj);
OnStepHandler* onStepHandler = obj->as<DebuggerFrame>().onStepHandler();
if (onStepHandler)

View File

@ -640,7 +640,7 @@ js::DefineToStringTag(JSContext *cx, HandleObject obj, JSAtom* tag)
static void
GlobalDebuggees_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
fop->delete_((GlobalObject::DebuggerVector*) obj->as<NativeObject>().getPrivate());
}

View File

@ -126,8 +126,8 @@ GetSelectorRuntime(CompilationSelector selector)
{
struct Matcher
{
JSRuntime* match(JSScript* script) { return script->runtimeFromMainThread(); }
JSRuntime* match(JSCompartment* comp) { return comp->runtimeFromMainThread(); }
JSRuntime* match(JSScript* script) { return script->runtimeFromActiveCooperatingThread(); }
JSRuntime* match(JSCompartment* comp) { return comp->runtimeFromActiveCooperatingThread(); }
JSRuntime* match(ZonesInState zbs) { return zbs.runtime; }
JSRuntime* match(JSRuntime* runtime) { return runtime; }
JSRuntime* match(AllCompilations all) { return nullptr; }
@ -267,7 +267,7 @@ js::HasOffThreadIonCompile(JSCompartment* comp)
return true;
}
jit::IonBuilder* builder = comp->runtimeFromMainThread()->ionLazyLinkList().getFirst();
jit::IonBuilder* builder = comp->runtimeFromActiveCooperatingThread()->ionLazyLinkList().getFirst();
while (builder) {
if (builder->script()->compartment() == comp)
return true;
@ -461,7 +461,7 @@ js::CancelOffThreadParses(JSRuntime* rt)
HelperThreadState().wait(lock, GlobalHelperThreadState::CONSUMER);
}
// Clean up any parse tasks which haven't been finished by the main thread.
// Clean up any parse tasks which haven't been finished by the active thread.
GlobalHelperThreadState::ParseTaskVector& finished = HelperThreadState().parseFinishedList(lock);
while (true) {
bool found = false;
@ -553,7 +553,7 @@ CreateGlobalForOffThreadParse(JSContext* cx, ParseTaskKind kind, const gc::AutoS
JS_SetCompartmentPrincipals(global->compartment(), currentCompartment->principals());
// Initialize all classes required for parsing while still on the main
// Initialize all classes required for parsing while still on the active
// thread, for both the target and the new global so that prototype
// pointers can be changed infallibly after parsing finishes.
if (!EnsureParserCreatedClasses(cx, kind))
@ -1180,7 +1180,7 @@ js::GCParallelTask::join()
}
void
js::GCParallelTask::runFromMainThread(JSRuntime* rt)
js::GCParallelTask::runFromActiveCooperatingThread(JSRuntime* rt)
{
MOZ_ASSERT(state == NotStarted);
MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(rt));
@ -1453,7 +1453,7 @@ HelperThread::ThreadMain(void* arg)
ThisThread::SetName("JS Helper");
//See bug 1104658.
//Set the FPU control word to be the same as the main thread's, or math
//Set the FPU control word to be the same as the active thread's, or math
//computations on this thread may use incorrect precision rules during
//Ion compilation.
FIX_FPU();
@ -1488,7 +1488,7 @@ HelperThread::handleWasmWorkload(AutoLockHelperThreadState& locked)
HelperThreadState().setWasmError(locked, Move(error));
}
// Notify the main thread in case it's waiting.
// Notify the active thread in case it's waiting.
HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
currentTask.reset();
}
@ -1517,7 +1517,7 @@ HelperThread::handlePromiseTaskWorkload(AutoLockHelperThreadState& locked)
}
}
// Notify the main thread in case it's waiting.
// Notify the active thread in case it's waiting.
HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
currentTask.reset();
}
@ -1570,7 +1570,7 @@ HelperThread::handleIonWorkload(AutoLockHelperThreadState& locked)
// so that the compiled code can be incorporated at the next interrupt
// callback. Don't interrupt Ion code for this, as this incorporation can
// be delayed indefinitely without affecting performance as long as the
// main thread is actually executing Ion code.
// active thread is actually executing Ion code.
//
// This must happen before the current task is reset. DestroyContext
// cancels in progress Ion compilations before destroying its target
@ -1583,7 +1583,7 @@ HelperThread::handleIonWorkload(AutoLockHelperThreadState& locked)
currentTask.reset();
pause = false;
// Notify the main thread in case it is waiting for the compilation to finish.
// Notify the active thread in case it is waiting for the compilation to finish.
HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
// When finishing Ion compilation jobs, we can start unpausing compilation
@ -1690,7 +1690,7 @@ HelperThread::handleParseWorkload(AutoLockHelperThreadState& locked, uintptr_t s
}
TlsContext.set(oldcx);
// The callback is invoked while we are still off the main thread.
// The callback is invoked while we are still off thread.
task->callback(task, task->callbackData);
// FinishOffThreadScript will need to be called on the script to
@ -1703,7 +1703,7 @@ HelperThread::handleParseWorkload(AutoLockHelperThreadState& locked, uintptr_t s
currentTask.reset();
// Notify the main thread in case it is waiting for the parse/emit to finish.
// Notify the active thread in case it is waiting for the parse/emit to finish.
HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
}
@ -1729,7 +1729,7 @@ HelperThread::handleCompressionWorkload(AutoLockHelperThreadState& locked)
task->helperThread = nullptr;
currentTask.reset();
// Notify the main thread in case it is waiting for the compression to finish.
// Notify the active thread in case it is waiting for the compression to finish.
HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER, locked);
}

View File

@ -5,7 +5,7 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/*
* Definitions for managing off-main-thread work using a process wide list
* Definitions for managing off-thread work using a process wide list
* of worklist items and pool of threads. Worklist items are engine internal,
* and are distinct from e.g. web workers.
*/
@ -229,7 +229,7 @@ class GlobalHelperThreadState
return Move(firstWasmError);
}
void noteWasmFailure(const AutoLockHelperThreadState&) {
// Be mindful to signal the main thread after calling this function.
// Be mindful to signal the active thread after calling this function.
numWasmFailedJobs++;
}
void setWasmError(const AutoLockHelperThreadState&, UniqueChars error) {
@ -252,7 +252,7 @@ class GlobalHelperThreadState
private:
/*
* Number of wasm jobs that encountered failure for the active module.
* Their parent is logically the main thread, and this number serves for harvesting.
* Their parent is logically the active thread, and this number serves for harvesting.
*/
uint32_t numWasmFailedJobs;
/*
@ -590,7 +590,7 @@ struct ParseTask
// Rooted pointer to the global object used by 'cx'.
JSObject* exclusiveContextGlobal;
// Callback invoked off the main thread when the parse finishes.
// Callback invoked off thread when the parse finishes.
JS::OffThreadCompileCallback callback;
void* callbackData;

View File

@ -97,7 +97,7 @@ JS::detail::InitWithFailureDiagnostic(bool isDebugBuild)
#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
RETURN_IF_FAIL(js::oom::InitThreadType());
js::oom::SetThreadType(js::oom::THREAD_TYPE_MAIN);
js::oom::SetThreadType(js::oom::THREAD_TYPE_COOPERATING);
#endif
RETURN_IF_FAIL(js::Mutex::Init());

View File

@ -268,8 +268,8 @@ js::NativeObject::numFixedSlotsForCompilation() const
{
// This is an alternative method for getting the number of fixed slots in an
// object. It requires more logic and memory accesses than numFixedSlots()
// but is safe to be called from the compilation thread, even if the main
// thread is actively mutating the VM.
// but is safe to be called from the compilation thread, even if the active
// thread is mutating the VM.
// The compiler does not have access to nursery things.
MOZ_ASSERT(!IsInsideNursery(this));
@ -1656,7 +1656,7 @@ js::NativeDefineProperty(JSContext* cx, HandleNativeObject obj, HandleId id,
if (!NativeDefineProperty(cx, obj, id, value, getter, setter, attrs, result))
return false;
if (!result) {
// Off-main-thread callers should not get here: they must call this
// Off-thread callers should not get here: they must call this
// function only with known-valid arguments. Populating a new
// PlainObject with configurable properties is fine.
MOZ_ASSERT(!cx->helperThread());

View File

@ -15,7 +15,7 @@ inline bool
ObjectGroup::needsSweep()
{
// Note: this can be called off thread during compacting GCs, in which case
// nothing will be running on the main thread.
// nothing will be running on the active thread.
return generation() != zoneFromAnyThread()->types.generation;
}

View File

@ -276,7 +276,7 @@ js::ForOfPIC::Chain::sweep(FreeOp* fop)
static void
ForOfPIC_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
if (ForOfPIC::Chain* chain = ForOfPIC::fromJSObject(&obj->as<NativeObject>()))
chain->sweep(fop);
}

View File

@ -22,7 +22,7 @@ using namespace js;
static void
resc_finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
RegExpStatics* res = static_cast<RegExpStatics*>(obj->as<RegExpStaticsObject>().getPrivate());
fop->delete_(res);
}

View File

@ -131,7 +131,7 @@ JSRuntime::JSRuntime(JSRuntime* parentRuntime)
windowProxyClass_(nullptr),
exclusiveAccessLock(mutexid::RuntimeExclusiveAccess),
#ifdef DEBUG
mainThreadHasExclusiveAccess(false),
activeThreadHasExclusiveAccess(false),
#endif
numExclusiveThreads(0),
numCompartments(0),
@ -276,7 +276,7 @@ JSRuntime::destroyRuntime()
/*
* Cancel any pending, in progress or completed Ion compilations and
* parse tasks. Waiting for wasm and compression tasks is done
* synchronously (on the main thread or during parse tasks), so no
* synchronously (on the active thread or during parse tasks), so no
* explicit canceling is needed for these.
*/
CancelOffThreadIonCompile(this);
@ -782,7 +782,7 @@ js::CurrentThreadCanAccessZone(Zone* zone)
if (CurrentThreadCanAccessRuntime(zone->runtime_))
return true;
// Only zones in use by an exclusive thread can be used off the main thread.
// Only zones in use by an exclusive thread can be used off thread.
// We don't keep track of which thread owns such zones though, so this check
// is imperfect.
return zone->usedByExclusiveThread;
@ -815,7 +815,7 @@ JSRuntime::IonBuilderList&
JSRuntime::ionLazyLinkList()
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(this),
"Should only be mutated by the main thread.");
"Should only be mutated by the active thread.");
return ionLazyLinkList_.ref();
}
@ -823,7 +823,7 @@ void
JSRuntime::ionLazyLinkListRemove(jit::IonBuilder* builder)
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(this),
"Should only be mutated by the main thread.");
"Should only be mutated by the active thread.");
MOZ_ASSERT(ionLazyLinkListSize_ > 0);
builder->removeFrom(ionLazyLinkList());
@ -836,7 +836,7 @@ void
JSRuntime::ionLazyLinkListAdd(jit::IonBuilder* builder)
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(this),
"Should only be mutated by the main thread.");
"Should only be mutated by the active thread.");
ionLazyLinkList().insertFront(builder);
ionLazyLinkListSize_++;
}

View File

@ -114,6 +114,36 @@ class Simulator;
#endif
} // namespace jit
// JS Engine Threading
//
// Multiple threads may interact with a JS runtime. JS has run-to-completion
// semantics, which means that scripts cannot observe changes in behavior
// due to activities performed on other threads (there is an exception to this
// for shared array buffers and related APIs).
//
// The main way we ensure that run-to-completion semantics are preserved is
// by dividing content into zone groups. Pieces of web content will be in
// the same zone group if they have the same tab/origin or can otherwise
// observe changes in each other via Window.opener and so forth. When a thread
// executes JS in a zone group, it acquires that group --- including exclusive
// access to most of the group's content --- and does not relinquish control of
// the zone group until the script finishes executing.
//
// Threads interacting with a runtime are divided into two categories:
//
// - Cooperating threads are capable of running JS. At most one cooperating
// thread may be |active| at a time in a runtime, but they may yield control
// to each other so that their execution is interleaved. As described above,
// each thread owns the zone groups it is operating on so that this
// interleaving does not cause observable changes in a script's behavior.
//
// - Helper threads do not run JS, and are controlled or triggered by activity
// in the cooperating threads. Helper threads may have exclusive access to
// zone groups created for them, for parsing and similar tasks, but their
// activities do not cause observable changes in script behaviors. Activity
// on helper threads may be referred to as happening 'off thread' or on a
// background thread in some parts of the VM.
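To make the distinction concrete, a minimal sketch of the kind of debug check this model implies; CurrentThreadCanAccessRuntime, TlsContext and JSContext::helperThread() appear elsewhere in this patch, but the helper function itself is illustrative and not part of the tree.
// Illustrative helper, not part of the patch: a cooperating thread that is
// currently active may access the runtime, while helper threads identify
// themselves through the context's helperThread() flag.
static void
AssertIsActiveCooperatingThread(JSRuntime* rt)
{
    MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(rt));
    MOZ_ASSERT(!js::TlsContext.get()->helperThread());
}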
/*
* A FreeOp can do one thing: free memory. For convenience, it has delete_
* methods that also call destructors.
@ -134,13 +164,13 @@ class FreeOp : public JSFreeOp
explicit FreeOp(JSRuntime* maybeRuntime);
~FreeOp();
bool onMainThread() const {
bool onActiveCooperatingThread() const {
return runtime_ != nullptr;
}
bool maybeOffMainThread() const {
// Sometimes background finalization happens on the main thread so
// runtime_ being null doesn't always mean we are off the main thread.
bool maybeOnHelperThread() const {
// Sometimes background finalization happens on the active thread so
// runtime_ being null doesn't always mean we are off thread.
return !runtime_;
}
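A hedged sketch of how these two predicates are used by the finalizers touched elsewhere in this patch; the function names here are illustrative.
// Foreground-only finalizers may assert the stronger condition; finalizers
// that can run during background sweeping may only assert the weaker one.
static void
ForegroundOnlyFinalize(js::FreeOp* fop, JSObject* obj)
{
    MOZ_ASSERT(fop->onActiveCooperatingThread());
    // Free data that must be released on the active thread.
}

static void
BackgroundCapableFinalize(js::FreeOp* fop, JSObject* obj)
{
    MOZ_ASSERT(fop->maybeOnHelperThread());
    // Only free data that is safe to release from a helper thread.
}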
@ -534,14 +564,14 @@ struct JSRuntime : public js::MallocProvider<JSRuntime>
* be accessed simultaneously by multiple threads.
*
* Locking this only occurs if there is actually a thread other than the
* main thread which could access such data.
* active thread which could access such data.
*/
js::Mutex exclusiveAccessLock;
#ifdef DEBUG
bool mainThreadHasExclusiveAccess;
bool activeThreadHasExclusiveAccess;
#endif
/* Number of non-main threads with exclusive access to some zone. */
/* Number of non-cooperating threads with exclusive access to some zone. */
js::UnprotectedData<size_t> numExclusiveThreads;
friend class js::AutoLockForExclusiveAccess;
@ -556,13 +586,13 @@ struct JSRuntime : public js::MallocProvider<JSRuntime>
#ifdef DEBUG
bool currentThreadHasExclusiveAccess() const {
return (!exclusiveThreadsPresent() && mainThreadHasExclusiveAccess) ||
return (!exclusiveThreadsPresent() && activeThreadHasExclusiveAccess) ||
exclusiveAccessLock.ownedByCurrentThread();
}
#endif
// How many compartments there are across all zones. This number includes
// off main thread context compartments, so it isn't necessarily equal to the
// off thread context compartments, so it isn't necessarily equal to the
// number of compartments visited by CompartmentsIter.
js::ActiveThreadData<size_t> numCompartments;

View File

@ -367,10 +367,10 @@ SavedFrame::protoAccessors[] = {
/* static */ void
SavedFrame::finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->onMainThread());
MOZ_ASSERT(fop->onActiveCooperatingThread());
JSPrincipals* p = obj->as<SavedFrame>().getPrincipals();
if (p) {
JSRuntime* rt = obj->runtimeFromMainThread();
JSRuntime* rt = obj->runtimeFromActiveCooperatingThread();
JS_DropPrincipals(rt->activeContextFromOwnThread(), p);
}
}

View File

@ -3069,7 +3069,7 @@ CloneObject(JSContext* cx, HandleNativeObject selfHostedObject)
// Object hash identities are owned by the hashed object, which may be on a
// different thread than the clone target. In theory, these objects are all
// tenured and will not be compacted; however, we simply avoid the issue
// altogether by skipping the cycle-detection when off the main thread.
// altogether by skipping the cycle-detection when off thread.
mozilla::Maybe<AutoCycleDetector> detect;
if (js::CurrentThreadCanAccessZone(selfHostedObject->zoneFromAnyThread())) {
detect.emplace(cx, selfHostedObject);

View File

@ -922,7 +922,7 @@ NativeObject::putProperty(JSContext* cx, HandleNativeObject obj, HandleId id,
if (hadSlot && !shape->hasSlot()) {
if (oldSlot < obj->slotSpan())
obj->freeSlot(cx, oldSlot);
/* Note: The optimization based on propertyRemovals is only relevant to the main thread. */
/* Note: The optimization based on propertyRemovals is only relevant to the active thread. */
if (!cx->helperThread())
++cx->propertyRemovals;
}
@ -1705,7 +1705,7 @@ EmptyShape::insertInitialShape(JSContext* cx, HandleShape shape, HandleObject pr
* nativeEmpty() result and generate the appropriate properties if found.
* Clearing the cache entry avoids this duplicate regeneration.
*
* Clearing is not necessary when this context is running off the main
* Clearing is not necessary when this context is running off
* thread, as it will not use the new object cache for allocations.
*/
if (!cx->helperThread())

View File

@ -299,7 +299,7 @@ SharedArrayBufferObject::rawBufferObject() const
void
SharedArrayBufferObject::Finalize(FreeOp* fop, JSObject* obj)
{
MOZ_ASSERT(fop->maybeOffMainThread());
MOZ_ASSERT(fop->maybeOnHelperThread());
SharedArrayBufferObject& buf = obj->as<SharedArrayBufferObject>();

View File

@ -47,7 +47,7 @@ JSString::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf)
// JSExternalString: Ask the embedding to tell us what's going on. If it
// doesn't want to say, don't count, the chars could be stored anywhere.
if (isExternal()) {
if (auto* cb = runtimeFromMainThread()->externalStringSizeofCallback.ref()) {
if (auto* cb = runtimeFromActiveCooperatingThread()->externalStringSizeofCallback.ref()) {
// Our callback isn't supposed to cause GC.
JS::AutoSuppressGCAnalysis nogc;
return cb(this, mallocSizeOf);

View File

@ -45,7 +45,7 @@ Symbol::new_(JSContext* cx, JS::SymbolCode code, JSString* description)
}
// Lock to allocate. If symbol allocation becomes a bottleneck, this can
// probably be replaced with an assertion that we're on the main thread.
// probably be replaced with an assertion that we're on the active thread.
AutoLockForExclusiveAccess lock(cx);
Symbol* sym;
{

View File

@ -847,7 +847,7 @@ TraceLoggerThreadState::init()
"\n"
"usage: TLOPTIONS=option,option,option,... where options can be:\n"
"\n"
" EnableMainThread Start logging the main thread immediately.\n"
" EnableCooperatingThread Start logging cooperating threads immediately.\n"
" EnableOffThread Start logging helper threads immediately.\n"
" EnableGraph Enable spewing the tracelogging graph to a file.\n"
" Errors Report errors during tracing to stderr.\n"
@ -857,10 +857,10 @@ TraceLoggerThreadState::init()
/*NOTREACHED*/
}
if (strstr(options, "EnableMainThread"))
mainThreadEnabled = true;
if (strstr(options, "EnableActiveThread"))
cooperatingThreadEnabled = true;
if (strstr(options, "EnableOffThread"))
offThreadEnabled = true;
helperThreadEnabled = true;
if (strstr(options, "EnableGraph"))
graphSpewingEnabled = true;
if (strstr(options, "Errors"))
@ -963,7 +963,7 @@ TraceLoggerThreadState::forCurrentThread(JSContext* maybecx)
if (graphSpewingEnabled)
logger->initGraph();
if (CurrentHelperThread() ? offThreadEnabled : mainThreadEnabled)
if (CurrentHelperThread() ? helperThreadEnabled : cooperatingThreadEnabled)
logger->enable();
}

View File

@ -318,8 +318,8 @@ class TraceLoggerThreadState
#endif
bool enabledTextIds[TraceLogger_Last];
bool mainThreadEnabled;
bool offThreadEnabled;
bool cooperatingThreadEnabled;
bool helperThreadEnabled;
bool graphSpewingEnabled;
bool spewErrors;
mozilla::LinkedList<TraceLoggerThread> threadLoggers;
@ -345,8 +345,8 @@ class TraceLoggerThreadState
#ifdef DEBUG
initialized(false),
#endif
mainThreadEnabled(false),
offThreadEnabled(false),
cooperatingThreadEnabled(false),
helperThreadEnabled(false),
graphSpewingEnabled(false),
spewErrors(false),
nextTextId(TraceLogger_Last),

View File

@ -1023,21 +1023,21 @@ TypeSet::intersectSets(TemporaryTypeSet* a, TemporaryTypeSet* b, LifoAlloc* allo
// Constraints generated during Ion compilation capture assumptions made about
// heap properties that will trigger invalidation of the resulting Ion code if
// the constraint is violated. Constraints can only be attached to type sets on
// the main thread, so to allow compilation to occur almost entirely off thread
// the active thread, so to allow compilation to occur almost entirely off thread
// the generation is split into two phases.
//
// During compilation, CompilerConstraint values are constructed in a list,
// recording the heap property type set which was read from and its expected
// contents, along with the assumption made about those contents.
//
// At the end of compilation, when linking the result on the main thread, the
// At the end of compilation, when linking the result on the active thread, the
// list of compiler constraints is read and converted to type constraints and
// attached to the type sets. If the property type sets have changed so that the
// assumptions no longer hold then the compilation is aborted and its result
// discarded.
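A self-contained toy model of this two-phase scheme; none of the names below are the real SpiderMonkey types, and a generation counter stands in for re-checking the recorded type-set contents.
#include <vector>

// Off thread, the compiler records what it observed about each heap property.
struct ToyTypeSet { int generation; };

struct ToyConstraint {
    ToyTypeSet* property;    // heap type set the compiler read from
    int expectedGeneration;  // snapshot taken during off-thread compilation
};

inline void
RecordConstraint(std::vector<ToyConstraint>& list, ToyTypeSet* property)
{
    list.push_back({property, property->generation});
}

// On the active thread, at link time, every assumption is re-checked; if any
// property changed underneath the compilation, the result is discarded.
inline bool
FinishConstraints(const std::vector<ToyConstraint>& list)
{
    for (const ToyConstraint& c : list) {
        if (c.property->generation != c.expectedGeneration)
            return false;    // assumptions violated: abort and discard
    }
    return true;             // safe to attach the real type constraints here
}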
// Superclass of all constraints generated during Ion compilation. These may
// be allocated off the main thread, using the current JIT context's allocator.
// be allocated off thread, using the current JIT context's allocator.
class CompilerConstraint
{
public:
@ -1046,7 +1046,7 @@ class CompilerConstraint
// Contents of the property at the point when the query was performed. This
// may differ from the actual property types later in compilation as the
// main thread performs side effects.
// active thread performs side effects.
TemporaryTypeSet* expected;
CompilerConstraint(LifoAlloc* alloc, const HeapTypeSetKey& property)
@ -1311,7 +1311,7 @@ TypeSet::ObjectKey::ensureTrackedProperty(JSContext* cx, jsid id)
{
// If we are accessing a lazily defined property which actually exists in
// the VM and has not been instantiated yet, instantiate it now if we are
// on the main thread and able to do so.
// on the active thread and able to do so.
if (!JSID_IS_VOID(id) && !JSID_IS_EMPTY(id)) {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
if (isSingleton()) {
@ -1535,7 +1535,7 @@ js::InvalidateCompilerOutputsForScript(JSContext* cx, HandleScript script)
static void
CheckDefinitePropertiesTypeSet(JSContext* cx, TemporaryTypeSet* frozen, StackTypeSet* actual)
{
// The definite properties analysis happens on the main thread, so no new
// The definite properties analysis happens on the active thread, so no new
// types can have been added to actual. The analysis may have updated the
// contents of |frozen| though with new speculative types, and these need
// to be reflected in |actual| for AddClearDefiniteFunctionUsesInScript
@ -3083,7 +3083,7 @@ ObjectGroup::clearNewScript(JSContext* cx, ObjectGroup* replacement /* = nullptr
}
}
} else {
// Threads off the main thread are not allowed to run scripts.
// Helper threads are not allowed to run scripts.
MOZ_ASSERT(!cx->activation());
}
@ -4324,7 +4324,7 @@ ObjectGroup::sweep(AutoClearTypeInferenceStateOnOOM* oom)
if (maybeUnboxedLayout()) {
// Remove unboxed layouts that are about to be finalized from the
// compartment wide list while we are still on the main thread.
// compartment wide list while we are still on the active thread.
ObjectGroup* group = this;
if (IsAboutToBeFinalizedUnbarriered(&group))
unboxedLayout().detachFromCompartment();
@ -4597,7 +4597,7 @@ AutoClearTypeInferenceStateOnOOM::~AutoClearTypeInferenceStateOnOOM()
zone->types.setSweepingTypes(false);
if (oom) {
JSRuntime* rt = zone->runtimeFromMainThread();
JSRuntime* rt = zone->runtimeFromActiveCooperatingThread();
js::CancelOffThreadIonCompile(rt);
zone->setPreservingCode(false);
zone->discardJitCode(rt->defaultFreeOp(), /* discardBaselineCode = */ false);

View File

@ -1186,10 +1186,10 @@ FinishDefinitePropertiesAnalysis(JSContext* cx, CompilerConstraintList* constrai
// Representation of a heap type property which may or may not be instantiated.
// Heap properties for singleton types are instantiated lazily as they are used
// by the compiler, but this is only done on the main thread. If we are
// by the compiler, but this is only done on the active thread. If we are
// compiling off thread and use a property which has not yet been instantiated,
// it will be treated as empty and non-configured and will be instantiated when
// rejoining to the main thread. If it is in fact not empty, the compilation
// rejoining to the active thread. If it is in fact not empty, the compilation
// will fail; to avoid this, we try to instantiate singleton property types
// during generation of baseline caches.
class HeapTypeSetKey

View File

@ -1319,10 +1319,10 @@ ProcessHasSignalHandlers()
# endif
#endif
// The interrupt handler allows the main thread to be paused from another
// The interrupt handler allows the active thread to be paused from another
// thread (see InterruptRunningJitCode).
#if defined(XP_WIN)
// Windows uses SuspendThread to stop the main thread from another thread.
// Windows uses SuspendThread to stop the active thread from another thread.
#else
struct sigaction interruptHandler;
interruptHandler.sa_flags = SA_SIGINFO;
@ -1419,7 +1419,7 @@ wasm::HaveSignalHandlers()
// handled by this function:
// 1. Ion loop backedges are patched to instead point to a stub that handles
// the interrupt;
// 2. if the main thread's pc is inside wasm code, the pc is updated to point
// 2. if the active thread's pc is inside wasm code, the pc is updated to point
// to a stub that handles the interrupt.
void
js::InterruptRunningJitCode(JSContext* cx)
@ -1443,10 +1443,10 @@ js::InterruptRunningJitCode(JSContext* cx)
return;
}
// We are not on the runtime's main thread, so to do 1 and 2 above, we need
// to halt the runtime's main thread first.
// We are not on the runtime's active thread, so to do 1 and 2 above, we need
// to halt the runtime's active thread first.
#if defined(XP_WIN)
// On Windows, we can simply suspend the main thread and work directly on
// On Windows, we can simply suspend the active thread and work directly on
// its context from this thread. SuspendThread can sporadically fail if the
// thread is in the middle of a syscall. Rather than retrying in a loop,
// just wait for the next request for interrupt.
@ -1462,7 +1462,7 @@ js::InterruptRunningJitCode(JSContext* cx)
}
cx->finishHandlingJitInterrupt();
#else
// On Unix, we instead deliver an async signal to the main thread which
// On Unix, we instead deliver an async signal to the active thread which
// halts the thread and calls our JitInterruptHandler (which has already
// been installed by EnsureSignalHandlersInstalled).
pthread_t thread = (pthread_t)cx->threadNative();