Bug 1325050 - Structure reorganization for multithreaded runtimes, r=jandem,jonco,h4writer,luke,lhansen,nbp.
--HG-- extra : rebase_source : 53c8fbae972fbf2f5e9428ce4d2965c8f86e942d
parent b583e128e0
commit db7742c7f9
@@ -12,10 +12,6 @@
 #include "js/TypeDecls.h"
 #include "js/Utility.h"

-namespace js {
-class ExclusiveContext;
-} // namespace js
-
 class JSFlatString;

 namespace JS {
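Throughout this commit, js::ExclusiveContext* parameters become JSContext*, which is why the forward declaration above is deleted: the type itself goes away and a single context type serves both main and helper threads. A minimal standalone sketch of what that collapse means for callers (all names here are illustrative stand-ins, not SpiderMonkey code, and the conversion is deliberately simplified to an ASCII cutoff):

    // Illustrative model only: one context type replaces the old
    // ExclusiveContext/JSContext split. Not SpiderMonkey source.
    #include <iostream>
    #include <string>

    struct JSContextModel { std::string name; };

    // After the change there is exactly one entry point instead of an
    // overload per context type.
    std::string lossyConvert(JSContextModel* cx, const std::u16string& s) {
        std::string out;
        for (char16_t c : s)
            out += (c < 128) ? char(c) : '?';  // simplified lossy narrowing
        return out;
    }

    int main() {
        JSContextModel cx{"main"};
        std::cout << lossyConvert(&cx, u"abc\u00e9") << '\n';  // "abc?"
    }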
@@ -222,11 +218,11 @@ class ConstTwoByteChars : public mozilla::Range<const char16_t>
  * This method cannot trigger GC.
  */
 extern Latin1CharsZ
-LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx,
+LossyTwoByteCharsToNewLatin1CharsZ(JSContext* cx,
                                    const mozilla::Range<const char16_t> tbchars);

 inline Latin1CharsZ
-LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx, const char16_t* begin, size_t length)
+LossyTwoByteCharsToNewLatin1CharsZ(JSContext* cx, const char16_t* begin, size_t length)
 {
     const mozilla::Range<const char16_t> tbchars(begin, length);
     return JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, tbchars);
@@ -234,7 +230,7 @@ LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx, const char16_t* beg

 template <typename CharT>
 extern UTF8CharsZ
-CharsToNewUTF8CharsZ(js::ExclusiveContext* maybeCx, const mozilla::Range<CharT> chars);
+CharsToNewUTF8CharsZ(JSContext* maybeCx, const mozilla::Range<CharT> chars);

 uint32_t
 Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length);

@@ -341,7 +341,7 @@ GetDebuggeeGlobals(JSContext* cx, JSObject& dbgObj, AutoObjectVector& vector);
 // execution.

 class MOZ_STACK_CLASS AutoEntryMonitor {
-    JSRuntime* runtime_;
+    JSContext* cx_;
     AutoEntryMonitor* savedMonitor_;

   public:
@@ -462,10 +462,10 @@ WasIncrementalGC(JSContext* cx);
 /** Ensure that generational GC is disabled within some scope. */
 class JS_PUBLIC_API(AutoDisableGenerationalGC)
 {
-    js::gc::GCRuntime* gc;
+    JSContext* cx;

   public:
-    explicit AutoDisableGenerationalGC(JSRuntime* rt);
+    explicit AutoDisableGenerationalGC(JSContext* cx);
     ~AutoDisableGenerationalGC();
 };

@@ -506,13 +506,10 @@ class JS_PUBLIC_API(AutoRequireNoGC)
  */
 class JS_PUBLIC_API(AutoAssertNoGC) : public AutoRequireNoGC
 {
-    js::gc::GCRuntime* gc;
-    size_t gcNumber;
+    JSContext* cx_;

   public:
-    AutoAssertNoGC();
-    explicit AutoAssertNoGC(JSRuntime* rt);
-    explicit AutoAssertNoGC(JSContext* cx);
+    explicit AutoAssertNoGC(JSContext* cx = nullptr);
     ~AutoAssertNoGC();
 };

@@ -603,15 +600,13 @@ class JS_PUBLIC_API(AutoAssertGCCallback) : public AutoSuppressGCAnalysis
 class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoAssertNoGC
 {
   public:
-    AutoCheckCannotGC() : AutoAssertNoGC() {}
-    explicit AutoCheckCannotGC(JSContext* cx) : AutoAssertNoGC(cx) {}
+    explicit AutoCheckCannotGC(JSContext* cx = nullptr) : AutoAssertNoGC(cx) {}
 } JS_HAZ_GC_INVALIDATED;
 #else
 class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoRequireNoGC
 {
   public:
-    AutoCheckCannotGC() {}
-    explicit AutoCheckCannotGC(JSContext* cx) {}
+    explicit AutoCheckCannotGC(JSContext* cx = nullptr) {}
 } JS_HAZ_GC_INVALIDATED;
 #endif

@@ -628,6 +623,9 @@ UnmarkGrayGCThingRecursively(GCCellPtr thing);
 namespace js {
 namespace gc {

+extern JS_FRIEND_API(bool)
+BarriersAreAllowedOnCurrentThread();
+
 static MOZ_ALWAYS_INLINE void
 ExposeGCThingToActiveJS(JS::GCCellPtr thing)
 {
@@ -644,10 +642,9 @@ ExposeGCThingToActiveJS(JS::GCCellPtr thing)
     if (thing.mayBeOwnedByOtherRuntime())
         return;

-    JS::shadow::Runtime* rt = detail::GetCellRuntime(thing.asCell());
-    MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+    MOZ_DIAGNOSTIC_ASSERT(BarriersAreAllowedOnCurrentThread());

-    if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+    if (IsIncrementalBarrierNeededOnTenuredGCThing(thing))
         JS::IncrementalReferenceBarrier(thing);
     else if (!thing.mayBeOwnedByOtherRuntime() && js::gc::detail::CellIsMarkedGray(thing.asCell()))
         JS::UnmarkGrayGCThingRecursively(thing);
@@ -666,10 +663,9 @@ MarkGCThingAsLive(JSRuntime* aRt, JS::GCCellPtr thing)
     if (thing.mayBeOwnedByOtherRuntime())
         return;

-    JS::shadow::Runtime* rt = JS::shadow::Runtime::asShadowRuntime(aRt);
-    MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+    MOZ_DIAGNOSTIC_ASSERT(BarriersAreAllowedOnCurrentThread());

-    if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+    if (IsIncrementalBarrierNeededOnTenuredGCThing(thing))
         JS::IncrementalReferenceBarrier(thing);
 }

@@ -106,20 +106,13 @@ struct Zone
     JSTracer* const barrierTracer_;     // A pointer to the JSRuntime's |gcMarker|.

   public:
-    // Stack GC roots for Rooted GC pointers.
-    js::RootedListHeads stackRoots_;
-    template <typename T> friend class JS::Rooted;
-
     bool needsIncrementalBarrier_;

     Zone(JSRuntime* runtime, JSTracer* barrierTracerArg)
       : runtime_(runtime),
         barrierTracer_(barrierTracerArg),
         needsIncrementalBarrier_(false)
-    {
-        for (auto& stackRootPtr : stackRoots_)
-            stackRootPtr = nullptr;
-    }
+    {}

     bool needsIncrementalBarrier() const {
         return needsIncrementalBarrier_;
@@ -302,15 +295,6 @@ GetGCThingZone(const uintptr_t addr)

 }

-static MOZ_ALWAYS_INLINE JS::shadow::Runtime*
-GetCellRuntime(const Cell* cell)
-{
-    MOZ_ASSERT(cell);
-    const uintptr_t addr = uintptr_t(cell);
-    const uintptr_t rt_addr = (addr & ~ChunkMask) | ChunkRuntimeOffset;
-    return *reinterpret_cast<JS::shadow::Runtime**>(rt_addr);
-}
-
 static MOZ_ALWAYS_INLINE bool
 CellIsMarkedGray(const Cell* cell)
 {
@@ -379,14 +363,15 @@ namespace js {
 namespace gc {

 static MOZ_ALWAYS_INLINE bool
-IsIncrementalBarrierNeededOnTenuredGCThing(JS::shadow::Runtime* rt, const JS::GCCellPtr thing)
+IsIncrementalBarrierNeededOnTenuredGCThing(const JS::GCCellPtr thing)
 {
     MOZ_ASSERT(thing);
     MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));

-    // TODO: I'd like to assert !isHeapBusy() here but this gets called while we
-    // are tracing the heap, e.g. during memory reporting (see bug 1313318).
-    MOZ_ASSERT(!rt->isHeapCollecting());
+    // TODO: I'd like to assert !CurrentThreadIsHeapBusy() here but this gets
+    // called while we are tracing the heap, e.g. during memory reporting
+    // (see bug 1313318).
+    MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());

     JS::Zone* zone = JS::GetTenuredGCThingZone(thing);
     return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
@@ -755,23 +755,17 @@ namespace JS {
 template <typename T>
 class MOZ_RAII Rooted : public js::RootedBase<T, Rooted<T>>
 {
-    inline void registerWithRootLists(js::RootedListHeads& roots) {
+    inline void registerWithRootLists(RootedListHeads& roots) {
         this->stack = &roots[JS::MapTypeToRootKind<T>::kind];
         this->prev = *stack;
         *stack = reinterpret_cast<Rooted<void*>*>(this);
     }

-    inline js::RootedListHeads& rootLists(JS::RootingContext* cx) {
-        return rootLists(static_cast<js::ContextFriendFields*>(cx));
+    inline RootedListHeads& rootLists(RootingContext* cx) {
+        return cx->stackRoots_;
     }
-    inline js::RootedListHeads& rootLists(js::ContextFriendFields* cx) {
-        if (JS::Zone* zone = cx->zone_)
-            return JS::shadow::Zone::asShadowZone(zone)->stackRoots_;
-        MOZ_ASSERT(cx->isJSContext);
-        return cx->roots.stackRoots_;
-    }
-    inline js::RootedListHeads& rootLists(JSContext* cx) {
-        return rootLists(js::ContextFriendFields::get(cx));
+    inline RootedListHeads& rootLists(JSContext* cx) {
+        return rootLists(RootingContext::get(cx));
     }

   public:
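With stackRoots_ moved off shadow::Zone, rootLists() now just returns the RootingContext's own per-kind list heads, and registration stays the simple LIFO push shown above. A self-contained model of that discipline (simplified stand-in types, not SpiderMonkey source):

    // Simplified model of per-kind stack-root lists; illustrative only.
    #include <cassert>

    enum RootKind { ObjectRoot, ValueRoot, RootKindCount };

    struct RootedBase {
        RootedBase** stack = nullptr;  // head pointer for this root's kind
        RootedBase* prev = nullptr;    // next-older root of the same kind

        RootedBase(RootedBase* heads[], RootKind kind) {
            stack = &heads[kind];      // mirrors &roots[MapTypeToRootKind<T>::kind]
            prev = *stack;
            *stack = this;
        }
        ~RootedBase() {                // strict LIFO: must be the current head
            assert(*stack == this);
            *stack = prev;
        }
    };

    int main() {
        RootedBase* heads[RootKindCount] = {};
        {
            RootedBase a(heads, ObjectRoot);
            RootedBase b(heads, ObjectRoot);
            assert(heads[ObjectRoot] == &b && b.prev == &a);
        }
        assert(heads[ObjectRoot] == nullptr);  // unwound on destruction
    }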
@@ -1041,6 +1035,9 @@ MutableHandle<T>::MutableHandle(PersistentRooted<T>* root)
     ptr = root->address();
 }

+JS_PUBLIC_API(void)
+AddPersistentRoot(RootingContext* cx, RootKind kind, PersistentRooted<void*>* root);
+
 /**
  * A copyable, assignable global GC root type with arbitrary lifetime, an
  * infallible constructor, and automatic unrooting on destruction.
@@ -1084,40 +1081,41 @@ class PersistentRooted : public js::RootedBase<T, PersistentRooted<T>>,
     friend class mozilla::LinkedList<PersistentRooted>;
     friend class mozilla::LinkedListElement<PersistentRooted>;

-    void registerWithRootLists(js::RootLists& roots) {
+    void registerWithRootLists(RootingContext* cx) {
         MOZ_ASSERT(!initialized());
         JS::RootKind kind = JS::MapTypeToRootKind<T>::kind;
-        roots.heapRoots_[kind].insertBack(reinterpret_cast<JS::PersistentRooted<void*>*>(this));
+        AddPersistentRoot(cx, kind, reinterpret_cast<JS::PersistentRooted<void*>*>(this));
     }

-    js::RootLists& rootLists(JSContext* cx) {
-        return rootLists(JS::RootingContext::get(cx));
-    }
-    js::RootLists& rootLists(JS::RootingContext* cx) {
-        MOZ_ASSERT(cx->isJSContext);
-        return cx->roots;
-    }
-
-    // Disallow ExclusiveContext*.
-    js::RootLists& rootLists(js::ContextFriendFields* cx) = delete;
-
   public:
     using ElementType = T;

     PersistentRooted() : ptr(GCPolicy<T>::initial()) {}

-    template <typename RootingContext>
-    explicit PersistentRooted(const RootingContext& cx)
+    explicit PersistentRooted(RootingContext* cx)
       : ptr(GCPolicy<T>::initial())
     {
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(cx);
     }

-    template <typename RootingContext, typename U>
-    PersistentRooted(const RootingContext& cx, U&& initial)
+    explicit PersistentRooted(JSContext* cx)
+      : ptr(GCPolicy<T>::initial())
+    {
+        registerWithRootLists(RootingContext::get(cx));
+    }
+
+    template <typename U>
+    PersistentRooted(RootingContext* cx, U&& initial)
       : ptr(mozilla::Forward<U>(initial))
     {
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(cx);
     }

+    template <typename U>
+    PersistentRooted(JSContext* cx, U&& initial)
+      : ptr(mozilla::Forward<U>(initial))
+    {
+        registerWithRootLists(RootingContext::get(cx));
+    }
+
     PersistentRooted(const PersistentRooted& rhs)
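The header no longer pokes RootLists directly; everything funnels through the out-of-line AddPersistentRoot() declared a few hunks up, so the public header needs no knowledge of the context's internals. A sketch of that indirection (a plain vector stands in for the real intrusive mozilla::LinkedList; illustrative only):

    // Model: persistent roots register through one out-of-line entry
    // point on a context-owned list. Not SpiderMonkey source.
    #include <cassert>
    #include <vector>

    struct PersistentRootBase;

    struct RootingContext {
        std::vector<PersistentRootBase*> heapRoots;  // stands in for RootLists
    };

    // The single registration hook (models JS::AddPersistentRoot).
    void AddPersistentRoot(RootingContext* cx, PersistentRootBase* root) {
        cx->heapRoots.push_back(root);
    }

    struct PersistentRootBase {
        explicit PersistentRootBase(RootingContext* cx) {
            AddPersistentRoot(cx, this);  // header only needs the declaration
        }
    };

    int main() {
        RootingContext cx;
        PersistentRootBase r1(&cx), r2(&cx);
        assert(cx.heapRoots.size() == 2);
    }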
@@ -1139,15 +1137,14 @@ class PersistentRooted : public js::RootedBase<T, PersistentRooted<T>>,
         return ListBase::isInList();
     }

-    template <typename RootingContext>
-    void init(const RootingContext& cx) {
+    void init(JSContext* cx) {
         init(cx, GCPolicy<T>::initial());
     }

-    template <typename RootingContext, typename U>
-    void init(const RootingContext& cx, U&& initial) {
+    template <typename U>
+    void init(JSContext* cx, U&& initial) {
         ptr = mozilla::Forward<U>(initial);
-        registerWithRootLists(rootLists(cx));
+        registerWithRootLists(RootingContext::get(cx));
     }

     void reset() {

@@ -699,7 +699,7 @@ namespace js {
 //
 // The type is declared opaque in SharedArrayObject.h. Instances of
 // js::FutexWaiter are stack-allocated and linked onto a list across a
-// call to FutexRuntime::wait().
+// call to FutexThread::wait().
 //
 // The 'waiters' field of the SharedArrayRawBuffer points to the highest
 // priority waiter in the list, and lower priority nodes are linked through
@@ -711,16 +711,16 @@ namespace js {
 class FutexWaiter
 {
   public:
-    FutexWaiter(uint32_t offset, JSRuntime* rt)
+    FutexWaiter(uint32_t offset, JSContext* cx)
       : offset(offset),
-        rt(rt),
+        cx(cx),
         lower_pri(nullptr),
         back(nullptr)
     {
     }

     uint32_t offset;         // int32 element index within the SharedArrayBuffer
-    JSRuntime* rt;           // The runtime of the waiter
+    JSContext* cx;           // The waiting thread
     FutexWaiter* lower_pri;  // Lower priority nodes in circular doubly-linked list of waiters
     FutexWaiter* back;       // Other direction
 };
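FutexWaiter nodes form a circular doubly-linked list headed at the highest-priority waiter; the splicing through lower_pri/back appears later in the atomics_wait hunks. A standalone model of that list operation (illustrative names, not SpiderMonkey source):

    // Standalone model of the circular doubly-linked waiter list, with
    // the head being the highest-priority waiter. Illustrative only.
    #include <cassert>

    struct Waiter {
        Waiter* lower_pri = nullptr;  // towards lower priority
        Waiter* back = nullptr;       // opposite direction
    };

    void append(Waiter*& head, Waiter* w) {
        if (head) {                   // splice w in just before the head
            w->lower_pri = head;
            w->back = head->back;
            head->back->lower_pri = w;
            head->back = w;
        } else {                      // first waiter points at itself both ways
            w->lower_pri = w->back = w;
            head = w;
        }
    }

    int main() {
        Waiter* head = nullptr;
        Waiter a, b;
        append(head, &a);
        append(head, &b);
        assert(head == &a && a.lower_pri == &b && b.lower_pri == &a);
    }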
@@ -733,7 +733,7 @@ class AutoLockFutexAPI

   public:
     AutoLockFutexAPI() {
-        js::Mutex* lock = FutexRuntime::lock_;
+        js::Mutex* lock = FutexThread::lock_;
         unique_.emplace(*lock);
     }

@@ -756,8 +756,6 @@ js::atomics_wait(JSContext* cx, unsigned argc, Value* vp)
     HandleValue timeoutv = args.get(3);
     MutableHandleValue r = args.rval();

-    JSRuntime* rt = cx->runtime();
-
     Rooted<TypedArrayObject*> view(cx, nullptr);
     if (!GetSharedTypedArray(cx, objv, &view))
         return false;
@@ -782,7 +780,7 @@ js::atomics_wait(JSContext* cx, unsigned argc, Value* vp)
         }
     }

-    if (!rt->fx.canWait())
+    if (!cx->fx.canWait())
         return ReportCannotWait(cx);

     // This lock also protects the "waiters" field on SharedArrayRawBuffer,
@@ -798,7 +796,7 @@ js::atomics_wait(JSContext* cx, unsigned argc, Value* vp)
     Rooted<SharedArrayBufferObject*> sab(cx, view->bufferShared());
     SharedArrayRawBuffer* sarb = sab->rawBufferObject();

-    FutexWaiter w(offset, rt);
+    FutexWaiter w(offset, cx);
     if (FutexWaiter* waiters = sarb->waiters()) {
         w.lower_pri = waiters;
         w.back = waiters->back;
@@ -809,14 +807,14 @@ js::atomics_wait(JSContext* cx, unsigned argc, Value* vp)
         sarb->setWaiters(&w);
     }

-    FutexRuntime::WaitResult result = FutexRuntime::FutexOK;
-    bool retval = rt->fx.wait(cx, lock.unique(), timeout, &result);
+    FutexThread::WaitResult result = FutexThread::FutexOK;
+    bool retval = cx->fx.wait(cx, lock.unique(), timeout, &result);
     if (retval) {
         switch (result) {
-          case FutexRuntime::FutexOK:
+          case FutexThread::FutexOK:
             r.setString(cx->names().futexOK);
             break;
-          case FutexRuntime::FutexTimedOut:
+          case FutexThread::FutexTimedOut:
             r.setString(cx->names().futexTimedOut);
             break;
         }
@@ -872,9 +870,9 @@ js::atomics_wake(JSContext* cx, unsigned argc, Value* vp)
     do {
         FutexWaiter* c = iter;
         iter = iter->lower_pri;
-        if (c->offset != offset || !c->rt->fx.isWaiting())
+        if (c->offset != offset || !c->cx->fx.isWaiting())
             continue;
-        c->rt->fx.wake(FutexRuntime::WakeExplicit);
+        c->cx->fx.wake(FutexThread::WakeExplicit);
         ++woken;
         --count;
     } while (count > 0 && iter != waiters);
@@ -885,15 +883,15 @@ js::atomics_wake(JSContext* cx, unsigned argc, Value* vp)
 }

 /* static */ bool
-js::FutexRuntime::initialize()
+js::FutexThread::initialize()
 {
     MOZ_ASSERT(!lock_);
-    lock_ = js_new<js::Mutex>(mutexid::FutexRuntime);
+    lock_ = js_new<js::Mutex>(mutexid::FutexThread);
     return lock_ != nullptr;
 }

 /* static */ void
-js::FutexRuntime::destroy()
+js::FutexThread::destroy()
 {
     if (lock_) {
         js::Mutex* lock = lock_;
@@ -903,7 +901,7 @@ js::FutexRuntime::destroy()
 }

 /* static */ void
-js::FutexRuntime::lock()
+js::FutexThread::lock()
 {
     // Load the atomic pointer.
     js::Mutex* lock = lock_;
@@ -911,10 +909,10 @@ js::FutexRuntime::lock()
     lock->lock();
 }

-/* static */ mozilla::Atomic<js::Mutex*> FutexRuntime::lock_;
+/* static */ mozilla::Atomic<js::Mutex*> FutexThread::lock_;

 /* static */ void
-js::FutexRuntime::unlock()
+js::FutexThread::unlock()
 {
     // Load the atomic pointer.
     js::Mutex* lock = lock_;
@@ -922,7 +920,7 @@ js::FutexRuntime::unlock()
     lock->unlock();
 }

-js::FutexRuntime::FutexRuntime()
+js::FutexThread::FutexThread()
   : cond_(nullptr),
     state_(Idle),
     canWait_(false)
@@ -930,7 +928,7 @@ js::FutexRuntime::FutexRuntime()
 }

 bool
-js::FutexRuntime::initInstance()
+js::FutexThread::initInstance()
 {
     MOZ_ASSERT(lock_);
     cond_ = js_new<js::ConditionVariable>();
@@ -938,30 +936,30 @@ js::FutexRuntime::initInstance()
 }

 void
-js::FutexRuntime::destroyInstance()
+js::FutexThread::destroyInstance()
 {
     if (cond_)
         js_delete(cond_);
 }

 bool
-js::FutexRuntime::isWaiting()
+js::FutexThread::isWaiting()
 {
     // When a worker is awoken for an interrupt it goes into state
     // WaitingNotifiedForInterrupt for a short time before it actually
     // wakes up and goes into WaitingInterrupted. In those states the
     // worker is still waiting, and if an explicit wake arrives the
     // worker transitions to Woken. See further comments in
-    // FutexRuntime::wait().
+    // FutexThread::wait().
     return state_ == Waiting || state_ == WaitingInterrupted || state_ == WaitingNotifiedForInterrupt;
 }

 bool
-js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
+js::FutexThread::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
                        mozilla::Maybe<mozilla::TimeDuration>& timeout, WaitResult* result)
 {
-    MOZ_ASSERT(&cx->runtime()->fx == this);
-    MOZ_ASSERT(cx->runtime()->fx.canWait());
+    MOZ_ASSERT(&cx->fx == this);
+    MOZ_ASSERT(cx->fx.canWait());
     MOZ_ASSERT(state_ == Idle || state_ == WaitingInterrupted);

     // Disallow waiting when a runtime is processing an interrupt.
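Despite the rename, FutexThread::wait() is still a condition-variable loop over a small state machine (Waiting / Woken / WaitingNotifiedForInterrupt). A self-contained model of the core wait/wake handshake, with standard-library primitives standing in for js::Mutex and js::ConditionVariable (illustrative only, interrupts omitted):

    // Minimal model of the wait/wake state machine. Not SpiderMonkey code.
    #include <cassert>
    #include <condition_variable>
    #include <mutex>
    #include <thread>

    enum class State { Idle, Waiting, Woken };

    struct FutexModel {
        std::mutex lock;
        std::condition_variable cond;
        State state = State::Idle;

        bool wait() {
            std::unique_lock<std::mutex> locked(lock);
            state = State::Waiting;
            cond.wait(locked, [&] { return state == State::Woken; });
            state = State::Idle;
            return true;
        }
        void wake() {                         // caller holds no lock yet
            std::lock_guard<std::mutex> locked(lock);
            if (state == State::Waiting) {
                state = State::Woken;
                cond.notify_all();
            }
        }
    };

    int main() {
        FutexModel fx;
        std::thread t([&] { fx.wait(); });
        for (;;) {                            // spin until the waiter parks
            std::lock_guard<std::mutex> g(fx.lock);
            if (fx.state == State::Waiting) break;
        }
        fx.wake();
        t.join();
    }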
@@ -1008,7 +1006,7 @@ js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
     }

     switch (state_) {
-      case FutexRuntime::Waiting:
+      case FutexThread::Waiting:
         // Timeout or spurious wakeup.
         if (isTimed) {
             auto now = mozilla::TimeStamp::Now();
@@ -1019,11 +1017,11 @@ js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
         }
         break;

-      case FutexRuntime::Woken:
+      case FutexThread::Woken:
         *result = FutexOK;
         return true;

-      case FutexRuntime::WaitingNotifiedForInterrupt:
+      case FutexThread::WaitingNotifiedForInterrupt:
         // The interrupt handler may reenter the engine. In that case
         // there are two complications:
         //
@@ -1056,7 +1054,7 @@ js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
         state_ = WaitingInterrupted;
         {
             UnlockGuard<Mutex> unlock(locked);
-            if (!cx->runtime()->handleInterrupt(cx))
+            if (!cx->handleInterrupt())
                 return false;
         }
         if (state_ == Woken) {
@@ -1072,7 +1070,7 @@ js::FutexRuntime::wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
 }

 void
-js::FutexRuntime::wake(WakeReason reason)
+js::FutexThread::wake(WakeReason reason)
 {
     MOZ_ASSERT(isWaiting());

@@ -1090,7 +1088,7 @@ js::FutexRuntime::wake(WakeReason reason)
         state_ = WaitingNotifiedForInterrupt;
         break;
       default:
-        MOZ_CRASH("bad WakeReason in FutexRuntime::wake()");
+        MOZ_CRASH("bad WakeReason in FutexThread::wake()");
     }
     cond_->notify_all();
 }

@@ -48,7 +48,7 @@ int32_t atomics_xor_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, i
 int32_t atomics_cmpxchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t oldval, int32_t newval);
 int32_t atomics_xchg_asm_callout(wasm::Instance* i, int32_t vt, int32_t offset, int32_t value);

-class FutexRuntime
+class FutexThread
 {
     friend class AutoLockFutexAPI;

@@ -59,7 +59,7 @@ public:
     static void lock();
     static void unlock();

-    FutexRuntime();
+    FutexThread();
     MOZ_MUST_USE bool initInstance();
     void destroyInstance();

@@ -89,7 +89,7 @@ public:
     MOZ_MUST_USE bool wait(JSContext* cx, js::UniqueLock<js::Mutex>& locked,
                            mozilla::Maybe<mozilla::TimeDuration>& timeout, WaitResult* result);

-    // Wake the thread represented by this Runtime.
+    // Wake the thread this is associated with.
     //
     // The futex lock must be held around this call. (The sleeping
     // thread will not wake up until the caller of Atomics.wake()
@@ -110,7 +110,7 @@ public:
     bool isWaiting();

     // If canWait() returns false (the default) then wait() is disabled
-    // on the runtime to which the FutexRuntime belongs.
+    // on the thread to which the FutexThread belongs.
     bool canWait() {
         return canWait_;
     }
@@ -145,7 +145,7 @@ public:
     static mozilla::Atomic<js::Mutex*> lock_;

     // A flag that controls whether waiting is allowed.
-    bool canWait_;
+    ThreadLocalData<bool> canWait_;
 };

 JSObject*
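canWait_ becomes ThreadLocalData<bool>, i.e. a value meaningful only on the owning thread. The diff does not show ThreadLocalData's definition, so the following is a hedged guess at one plausible shape for such a wrapper: store the owner's thread id and assert on cross-thread access (illustrative only, not the real type):

    // Hypothetical sketch of a ThreadLocalData<T>-style wrapper; the real
    // definition is not part of this diff.
    #include <cassert>
    #include <thread>

    template <typename T>
    class ThreadLocalData {
        std::thread::id owner_ = std::this_thread::get_id();
        T value_{};

      public:
        T& ref() {
            // Debug-only guard: only the owning thread may touch the value.
            assert(std::this_thread::get_id() == owner_);
            return value_;
        }
        explicit operator T() { return ref(); }
    };

    int main() {
        ThreadLocalData<bool> canWait;
        canWait.ref() = true;
        assert(bool(canWait));
    }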
@@ -114,7 +114,7 @@ DataViewObject::create(JSContext* cx, uint32_t byteOffset, uint32_t byteLength,

     // Include a barrier if the data view's data pointer is in the nursery, as
     // is done for typed arrays.
-    if (!IsInsideNursery(obj) && cx->runtime()->gc.nursery.isInside(ptr)) {
+    if (!IsInsideNursery(obj) && cx->nursery().isInside(ptr)) {
         // Shared buffer data should never be nursery-allocated, so we
         // need to fail here if isSharedMemory. However, mmap() can
         // place a SharedArrayRawBuffer up against the bottom end of a
@@ -124,7 +124,7 @@ DataViewObject::create(JSContext* cx, uint32_t byteOffset, uint32_t byteLength,
         MOZ_ASSERT(arrayBuffer->byteLength() == 0 &&
                    (uintptr_t(ptr.unwrapValue()) & gc::ChunkMask) == 0);
     } else {
-        cx->runtime()->gc.storeBuffer.putWholeCell(obj);
+        cx->zone()->group()->storeBuffer().putWholeCell(obj);
     }
 }

@@ -94,7 +94,7 @@ class EvalScriptGuard
             lookup_.str = lookupStr_;
             if (lookup_.str && IsEvalCacheCandidate(script_)) {
                 // Ignore failure to add cache entry.
-                if (!p_->add(cx_, cx_->caches.evalCache, lookup_, cacheEntry))
+                if (!p_->add(cx_, cx_->caches().evalCache, lookup_, cacheEntry))
                     cx_->recoverFromOutOfMemory();
             }
         }
@@ -107,10 +107,10 @@ class EvalScriptGuard
         lookup_.callerScript = callerScript;
         lookup_.version = cx_->findVersion();
         lookup_.pc = pc;
-        p_.emplace(cx_, cx_->caches.evalCache, lookup_);
+        p_.emplace(cx_, cx_->caches().evalCache, lookup_);
         if (*p_) {
             script_ = (*p_)->script;
-            p_->remove(cx_, cx_->caches.evalCache, lookup_);
+            p_->remove(cx_, cx_->caches().evalCache, lookup_);
             script_->uncacheForEval();
         }
     }

@@ -2821,7 +2821,7 @@ void
 js::SharedIntlData::trace(JSTracer* trc)
 {
     // Atoms are always tenured.
-    if (!trc->runtime()->isHeapMinorCollecting()) {
+    if (!JS::CurrentThreadIsHeapMinorCollecting()) {
         availableTimeZones.trace(trc);
         ianaZonesTreatedAsLinksByICU.trace(trc);
         ianaLinksCanonicalizedDifferentlyByICU.trace(trc);
@@ -2843,7 +2843,7 @@ js::intl_IsValidTimeZoneName(JSContext* cx, unsigned argc, Value* vp)
     MOZ_ASSERT(args.length() == 1);
     MOZ_ASSERT(args[0].isString());

-    SharedIntlData& sharedIntlData = cx->sharedIntlData;
+    SharedIntlData& sharedIntlData = cx->runtime()->sharedIntlData.ref();

     RootedString timeZone(cx, args[0].toString());
     RootedString validatedTimeZone(cx);
@@ -2867,7 +2867,7 @@ js::intl_canonicalizeTimeZone(JSContext* cx, unsigned argc, Value* vp)
     MOZ_ASSERT(args.length() == 1);
     MOZ_ASSERT(args[0].isString());

-    SharedIntlData& sharedIntlData = cx->sharedIntlData;
+    SharedIntlData& sharedIntlData = cx->runtime()->sharedIntlData.ref();

     // Some time zone names are canonicalized differently by ICU -- handle
     // those first:

@@ -102,9 +102,9 @@ HashableValue::operator==(const HashableValue& other) const

 #ifdef DEBUG
     bool same;
-    JS::RootingContext* rcx = GetJSContextFromMainThread();
-    RootedValue valueRoot(rcx, value);
-    RootedValue otherRoot(rcx, other.value);
+    JSContext* cx = TlsContext.get();
+    RootedValue valueRoot(cx, value);
+    RootedValue otherRoot(cx, other.value);
     MOZ_ASSERT(SameValue(nullptr, valueRoot, otherRoot, &same));
     MOZ_ASSERT(same == b);
 #endif
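GetJSContextFromMainThread() gives way to a thread-local TlsContext.get(), so the current context is recoverable on whichever thread is running. A toy version of that accessor pattern (illustrative names, not the engine's actual slot):

    // Toy thread-local "current context" slot like TlsContext; model only.
    #include <cassert>

    struct Context { int id; };

    struct TlsSlot {
        static thread_local Context* current;
        Context* get() const { return current; }
        void set(Context* cx) { current = cx; }
    };
    thread_local Context* TlsSlot::current = nullptr;

    TlsSlot TlsContextModel;

    int main() {
        Context cx{1};
        TlsContextModel.set(&cx);
        assert(TlsContextModel.get() == &cx);  // each thread sees its own slot
    }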
@@ -464,7 +464,7 @@ WriteBarrierPostImpl(JSRuntime* rt, ObjectT* obj, const Value& keyValue)
         if (!keys)
             return false;

-        rt->gc.storeBuffer.putGeneric(OrderedHashTableRef<ObjectT>(obj));
+        key->zone()->group()->storeBuffer().putGeneric(OrderedHashTableRef<ObjectT>(obj));
     }

     if (!keys->append(key))
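A recurring pattern in this commit: post-barriers now route through the cell's zone group's store buffer rather than a runtime-global one. The store buffer is just a remembered set of things the next minor GC must revisit; only its ownership moves. A generic model of that ownership chain (illustrative stand-in types):

    // Generic model of a group-owned post-barrier remembered set
    // ("store buffer"). Not SpiderMonkey source.
    #include <cassert>
    #include <vector>

    struct Cell;

    struct StoreBuffer {
        std::vector<Cell*> wholeCells;            // revisit at next minor GC
        void putWholeCell(Cell* c) { wholeCells.push_back(c); }
    };

    struct Group { StoreBuffer buf; StoreBuffer& storeBuffer() { return buf; } };
    struct Zone  { Group* group_; Group* group() { return group_; } };
    struct Cell  { Zone* zone_;   Zone* zone()   { return zone_; } };

    // Post-barrier: remember the edge in the owner's group, not globally.
    void postBarrier(Cell* owner) {
        owner->zone()->group()->storeBuffer().putWholeCell(owner);
    }

    int main() {
        Group g; Zone z{&g}; Cell c{&z};
        postBarrier(&c);
        assert(g.buf.wholeCells.size() == 1);
    }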
@@ -115,7 +115,7 @@ GlobalObject::initImportEntryProto(JSContext* cx, Handle<GlobalObject*> global)
 }

 /* static */ ImportEntryObject*
-ImportEntryObject::create(ExclusiveContext* cx,
+ImportEntryObject::create(JSContext* cx,
                           HandleAtom moduleRequest,
                           HandleAtom importName,
                           HandleAtom localName)
@@ -187,7 +187,7 @@ StringOrNullValue(JSString* maybeString)
 }

 /* static */ ExportEntryObject*
-ExportEntryObject::create(ExclusiveContext* cx,
+ExportEntryObject::create(JSContext* cx,
                           HandleAtom maybeExportName,
                           HandleAtom maybeModuleRequest,
                           HandleAtom maybeImportName,
@@ -576,7 +576,7 @@ ModuleObject::isInstance(HandleValue value)
 }

 /* static */ ModuleObject*
-ModuleObject::create(ExclusiveContext* cx)
+ModuleObject::create(JSContext* cx)
 {
     RootedObject proto(cx, cx->global()->getModulePrototype());
     RootedObject obj(cx, NewObjectWithGivenProto(cx, &class_, proto));
@@ -853,7 +853,7 @@ ModuleObject::createEnvironment()
 }

 bool
-ModuleObject::noteFunctionDeclaration(ExclusiveContext* cx, HandleAtom name, HandleFunction fun)
+ModuleObject::noteFunctionDeclaration(JSContext* cx, HandleAtom name, HandleFunction fun)
 {
     FunctionDeclarationVector* funDecls = functionDeclarations();
     if (!funDecls->emplaceBack(name, fun)) {
@@ -1014,7 +1014,7 @@ GlobalObject::initModuleProto(JSContext* cx, Handle<GlobalObject*> global)
 ///////////////////////////////////////////////////////////////////////////
 // ModuleBuilder

-ModuleBuilder::ModuleBuilder(ExclusiveContext* cx, HandleModuleObject module)
+ModuleBuilder::ModuleBuilder(JSContext* cx, HandleModuleObject module)
   : cx_(cx),
     module_(cx, module),
     requestedModules_(cx, AtomVector(cx)),

@@ -43,9 +43,9 @@ class ImportEntryObject : public NativeObject
     };

     static const Class class_;
-    static JSObject* initClass(ExclusiveContext* cx, HandleObject obj);
+    static JSObject* initClass(JSContext* cx, HandleObject obj);
     static bool isInstance(HandleValue value);
-    static ImportEntryObject* create(ExclusiveContext* cx,
+    static ImportEntryObject* create(JSContext* cx,
                                      HandleAtom moduleRequest,
                                      HandleAtom importName,
                                      HandleAtom localName);
@@ -70,9 +70,9 @@ class ExportEntryObject : public NativeObject
     };

     static const Class class_;
-    static JSObject* initClass(ExclusiveContext* cx, HandleObject obj);
+    static JSObject* initClass(JSContext* cx, HandleObject obj);
     static bool isInstance(HandleValue value);
-    static ExportEntryObject* create(ExclusiveContext* cx,
+    static ExportEntryObject* create(JSContext* cx,
                                      HandleAtom maybeExportName,
                                      HandleAtom maybeModuleRequest,
                                      HandleAtom maybeImportName,
@@ -225,7 +225,7 @@ class ModuleObject : public NativeObject

     static bool isInstance(HandleValue value);

-    static ModuleObject* create(ExclusiveContext* cx);
+    static ModuleObject* create(JSContext* cx);
     void init(HandleScript script);
     void setInitialEnvironment(Handle<ModuleEnvironmentObject*> initialEnvironment);
     void initImportExportData(HandleArrayObject requestedModules,
@@ -264,7 +264,7 @@ class ModuleObject : public NativeObject
     void createEnvironment();

     // For BytecodeEmitter.
-    bool noteFunctionDeclaration(ExclusiveContext* cx, HandleAtom name, HandleFunction fun);
+    bool noteFunctionDeclaration(JSContext* cx, HandleAtom name, HandleFunction fun);

     // For intrinsic_InstantiateModuleFunctionDeclarations.
     static bool instantiateFunctionDeclarations(JSContext* cx, HandleModuleObject self);
@@ -294,7 +294,7 @@ class ModuleObject : public NativeObject
 class MOZ_STACK_CLASS ModuleBuilder
 {
   public:
-    explicit ModuleBuilder(ExclusiveContext* cx, HandleModuleObject module);
+    explicit ModuleBuilder(JSContext* cx, HandleModuleObject module);

     bool processImport(frontend::ParseNode* pn);
     bool processExport(frontend::ParseNode* pn);
@@ -317,7 +317,7 @@ class MOZ_STACK_CLASS ModuleBuilder
     using RootedImportEntryVector = JS::Rooted<ImportEntryVector>;
     using RootedExportEntryVector = JS::Rooted<ExportEntryVector>;

-    ExclusiveContext* cx_;
+    JSContext* cx_;
     RootedModuleObject module_;
     RootedAtomVector requestedModules_;
     RootedAtomVector importedBoundNames_;

@@ -172,7 +172,7 @@ JSString*
 js::ObjectToSource(JSContext* cx, HandleObject obj)
 {
     /* If outermost, we need parentheses to be an expression, not a block. */
-    bool outermost = (cx->cycleDetectorSet.count() == 0);
+    bool outermost = (cx->cycleDetectorSet().count() == 0);

     AutoCycleDetector detector(cx, obj);
     if (!detector.init())

@@ -316,7 +316,7 @@ static bool
 GetMaxGCPauseSinceClear(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
-    args.rval().setNumber(cx->runtime()->gc.stats.getMaxGCPauseSinceClear().ToMicroseconds());
+    args.rval().setNumber(cx->runtime()->gc.stats().getMaxGCPauseSinceClear().ToMicroseconds());
     return true;
 }

@@ -324,7 +324,7 @@ static bool
 ClearMaxGCPauseAccumulator(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
-    args.rval().setNumber(cx->runtime()->gc.stats.clearMaxGCPauseAccumulator().ToMicroseconds());
+    args.rval().setNumber(cx->runtime()->gc.stats().clearMaxGCPauseAccumulator().ToMicroseconds());
     return true;
 }

@@ -2648,7 +2648,7 @@ js::EnqueuePromiseReactions(JSContext* cx, Handle<PromiseObject*> promise,
 }

 PromiseTask::PromiseTask(JSContext* cx, Handle<PromiseObject*> promise)
-  : runtime_(cx),
+  : runtime_(cx->runtime()),
     promise_(cx, promise)
 {}

@@ -2660,7 +2660,7 @@ PromiseTask::~PromiseTask()
 void
 PromiseTask::finish(JSContext* cx)
 {
-    MOZ_ASSERT(cx == runtime_);
+    MOZ_ASSERT(cx->runtime() == runtime_);
     {
         // We can't leave a pending exception when returning to the caller so do
         // the same thing as Gecko, which is to ignore the error. This should
@@ -2675,7 +2675,7 @@ PromiseTask::finish(JSContext* cx)
 void
 PromiseTask::cancel(JSContext* cx)
 {
-    MOZ_ASSERT(cx == runtime_);
+    MOZ_ASSERT(cx->runtime() == runtime_);
     js_delete(this);
 }

@@ -337,7 +337,7 @@ MinorGC(JSContext* cx, unsigned argc, Value* vp)
 {
     CallArgs args = CallArgsFromVp(argc, vp);
     if (args.get(0) == BooleanValue(true))
-        cx->runtime()->gc.storeBuffer.setAboutToOverflow();
+        cx->zone()->group()->storeBuffer().setAboutToOverflow();

     cx->minorGC(JS::gcreason::API);
     args.rval().setUndefined();
@@ -817,12 +817,11 @@ SelectForGC(JSContext* cx, unsigned argc, Value* vp)
      * start to detect missing pre-barriers. It is invalid for nursery things
      * to be in the set, so evict the nursery before adding items.
      */
-    JSRuntime* rt = cx->runtime();
-    rt->gc.evictNursery();
+    cx->zone()->group()->evictNursery();

     for (unsigned i = 0; i < args.length(); i++) {
         if (args[i].isObject()) {
-            if (!rt->gc.selectForMarking(&args[i].toObject()))
+            if (!cx->runtime()->gc.selectForMarking(&args[i].toObject()))
                 return false;
         }
     }
@@ -1425,15 +1424,14 @@ OOMTest(JSContext* cx, unsigned argc, Value* vp)
         threadEnd = threadOption + 1;
     }

-    JSRuntime* rt = cx->runtime();
-    if (rt->runningOOMTest) {
+    if (cx->runningOOMTest) {
         JS_ReportErrorASCII(cx, "Nested call to oomTest() is not allowed.");
         return false;
     }
-    rt->runningOOMTest = true;
+    cx->runningOOMTest = true;

     MOZ_ASSERT(!cx->isExceptionPending());
-    rt->hadOutOfMemory = false;
+    cx->runtime()->hadOutOfMemory = false;

     JS_SetGCZeal(cx, 0, JS_DEFAULT_ZEAL_FREQ);

@@ -1500,7 +1498,7 @@ OOMTest(JSContext* cx, unsigned argc, Value* vp)
         }
     }

-    rt->runningOOMTest = false;
+    cx->runningOOMTest = false;
     args.rval().setUndefined();
     return true;
 }
@@ -1751,7 +1749,7 @@ ReadGeckoProfilingStack(JSContext* cx, unsigned argc, Value* vp)
     args.rval().setUndefined();

     // Return boolean 'false' if profiler is not enabled.
-    if (!cx->runtime()->geckoProfiler.enabled()) {
+    if (!cx->runtime()->geckoProfiler().enabled()) {
         args.rval().setBoolean(false);
         return true;
     }
@@ -1763,7 +1761,7 @@ ReadGeckoProfilingStack(JSContext* cx, unsigned argc, Value* vp)

     // If profiler sampling has been suppressed, return an empty
     // stack.
-    if (!cx->runtime()->isProfilerSamplingEnabled()) {
+    if (!cx->isProfilerSamplingEnabled()) {
         args.rval().setObject(*stack);
         return true;
     }
@@ -1893,7 +1891,7 @@ DisplayName(JSContext* cx, unsigned argc, Value* vp)

     JSFunction* fun = &args[0].toObject().as<JSFunction>();
     JSString* str = fun->displayAtom();
-    args.rval().setString(str ? str : cx->runtime()->emptyString);
+    args.rval().setString(str ? str : cx->runtime()->emptyString.ref());
     return true;
 }

@@ -1999,7 +1997,7 @@ testingFunc_bailAfter(JSContext* cx, unsigned argc, Value* vp)
     }

 #ifdef DEBUG
-    cx->runtime()->setIonBailAfter(args[0].toInt32());
+    cx->zone()->group()->setIonBailAfter(args[0].toInt32());
 #endif

     args.rval().setUndefined();
@@ -3536,7 +3534,8 @@ minorGC(JSContext* cx, JSGCStatus status, void* data)

     if (info->active) {
         info->active = false;
-        cx->gc.evictNursery(JS::gcreason::DEBUG_GC);
+        if (cx->zone() && !cx->zone()->isAtomsZone())
+            cx->zone()->group()->evictNursery(JS::gcreason::DEBUG_GC);
         info->active = true;
     }
 }

@@ -1439,7 +1439,7 @@ OutlineTypedObject::setOwnerAndData(JSObject* owner, uint8_t* data)
     // Trigger a post barrier when attaching an object outside the nursery to
     // one that is inside it.
     if (owner && !IsInsideNursery(this) && IsInsideNursery(owner))
-        runtimeFromMainThread()->gc.storeBuffer.putWholeCell(this);
+        zone()->group()->storeBuffer().putWholeCell(this);
 }

 /*static*/ OutlineTypedObject*
@@ -1636,7 +1636,8 @@ OutlineTypedObject::obj_trace(JSTracer* trc, JSObject* object)
         newData += reinterpret_cast<uint8_t*>(owner) - reinterpret_cast<uint8_t*>(oldOwner);
         typedObj.setData(newData);

-        trc->runtime()->gc.nursery.maybeSetForwardingPointer(trc, oldData, newData, /* direct = */ false);
+        Nursery& nursery = typedObj.zoneFromAnyThread()->group()->nursery();
+        nursery.maybeSetForwardingPointer(trc, oldData, newData, /* direct = */ false);
     }

     if (!descr.opaque() || !typedObj.isAttached())
@@ -2141,8 +2142,8 @@ InlineTypedObject::objectMovedDuringMinorGC(JSTracer* trc, JSObject* dst, JSObje
         // but they will not set any direct forwarding pointers.
         uint8_t* oldData = reinterpret_cast<uint8_t*>(src) + offsetOfDataStart();
         uint8_t* newData = dst->as<InlineTypedObject>().inlineTypedMem();
-        trc->runtime()->gc.nursery.maybeSetForwardingPointer(trc, oldData, newData,
-                                                             descr.size() >= sizeof(uintptr_t));
+        dst->zone()->group()->nursery().maybeSetForwardingPointer(trc, oldData, newData,
+                                                                  descr.size() >= sizeof(uintptr_t));
     }
 }

@@ -2189,7 +2190,7 @@ InlineTransparentTypedObject::getOrCreateBuffer(JSContext* cx)
     if (IsInsideNursery(this)) {
         // Make sure the buffer is traced by the next generational collection,
         // so that its data pointer is updated after this typed object moves.
-        cx->runtime()->gc.storeBuffer.putWholeCell(buffer);
+        zone()->group()->storeBuffer().putWholeCell(buffer);
     }

     return buffer;
@@ -2679,8 +2680,8 @@ StoreReferenceAny::store(JSContext* cx, GCPtrValue* heap, const Value& v,
     // value properties of typed objects, as these properties are always
     // considered to contain undefined.
     if (!v.isUndefined()) {
-        if (cx->isJSContext())
-            AddTypePropertyId(cx->asJSContext(), obj, id, v);
+        if (!cx->helperThread())
+            AddTypePropertyId(cx, obj, id, v);
         else if (!HasTypePropertyId(obj, id, v))
            return false;
    }
@@ -2699,8 +2700,8 @@ StoreReferenceObject::store(JSContext* cx, GCPtrObject* heap, const Value& v,
     // object properties of typed objects, as these properties are always
     // considered to contain null.
     if (v.isObject()) {
-        if (cx->isJSContext())
-            AddTypePropertyId(cx->asJSContext(), obj, id, v);
+        if (!cx->helperThread())
+            AddTypePropertyId(cx, obj, id, v);
         else if (!HasTypePropertyId(obj, id, v))
            return false;
    }

@@ -34,7 +34,7 @@ using mozilla::Nothing;
 class MOZ_STACK_CLASS AutoCompilationTraceLogger
 {
   public:
-    AutoCompilationTraceLogger(ExclusiveContext* cx, const TraceLoggerTextId id,
+    AutoCompilationTraceLogger(JSContext* cx, const TraceLoggerTextId id,
                                const ReadOnlyCompileOptions& options);

   private:
@@ -50,7 +50,7 @@ class MOZ_STACK_CLASS BytecodeCompiler
 {
   public:
     // Construct an object passing mandatory arguments.
-    BytecodeCompiler(ExclusiveContext* cx,
+    BytecodeCompiler(JSContext* cx,
                      LifoAlloc& alloc,
                      const ReadOnlyCompileOptions& options,
                      SourceBufferHolder& sourceBuffer,
@@ -86,7 +86,7 @@ class MOZ_STACK_CLASS BytecodeCompiler
     AutoCompilationTraceLogger traceLogger;
     AutoKeepAtoms keepAtoms;

-    ExclusiveContext* cx;
+    JSContext* cx;
     LifoAlloc& alloc;
     const ReadOnlyCompileOptions& options;
     SourceBufferHolder& sourceBuffer;
@@ -109,23 +109,23 @@ class MOZ_STACK_CLASS BytecodeCompiler
     RootedScript script;
 };

-AutoCompilationTraceLogger::AutoCompilationTraceLogger(ExclusiveContext* cx,
+AutoCompilationTraceLogger::AutoCompilationTraceLogger(JSContext* cx,
         const TraceLoggerTextId id, const ReadOnlyCompileOptions& options)
-  : logger(cx->isJSContext() ? TraceLoggerForMainThread(cx->asJSContext()->runtime())
-                             : TraceLoggerForCurrentThread()),
+  : logger(!cx->helperThread() ? TraceLoggerForMainThread(cx->runtime())
+                               : TraceLoggerForCurrentThread()),
     event(logger, TraceLogger_AnnotateScripts, options),
     scriptLogger(logger, event),
     typeLogger(logger, id)
 {}

-BytecodeCompiler::BytecodeCompiler(ExclusiveContext* cx,
+BytecodeCompiler::BytecodeCompiler(JSContext* cx,
                                    LifoAlloc& alloc,
                                    const ReadOnlyCompileOptions& options,
                                    SourceBufferHolder& sourceBuffer,
                                    HandleScope enclosingScope,
                                    TraceLoggerTextId logId)
   : traceLogger(cx, logId, options),
-    keepAtoms(cx->perThreadData),
+    keepAtoms(cx),
     cx(cx),
     alloc(alloc),
     options(options),
@@ -154,8 +154,8 @@ BytecodeCompiler::checkLength()
     // JSScript as 32-bits. It could be lifted fairly easily, since the compiler
     // is using size_t internally already.
     if (sourceBuffer.length() > UINT32_MAX) {
-        if (cx->isJSContext())
-            JS_ReportErrorNumberASCII(cx->asJSContext(), GetErrorMessage, nullptr,
+        if (!cx->helperThread())
+            JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr,
                                       JSMSG_SOURCE_TOO_LONG);
         return false;
     }
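With ExclusiveContext gone, "am I on a helper thread?" becomes a runtime predicate on JSContext, replacing the old isJSContext()/asJSContext() dance; error reporting is simply skipped off the main thread. A toy model of that dispatch (illustrative only, not the engine's types):

    // Toy model of "report errors only on the main thread".
    #include <cstddef>
    #include <iostream>

    struct Context {
        bool helper;
        bool helperThread() const { return helper; }
    };

    bool checkLength(Context* cx, std::size_t length, std::size_t limit) {
        if (length > limit) {
            if (!cx->helperThread())        // helper threads stay silent
                std::cerr << "error: source too long\n";
            return false;
        }
        return true;
    }

    int main() {
        Context mainCx{false}, helperCx{true};
        checkLength(&mainCx, 10, 5);    // reports and fails
        checkLength(&helperCx, 10, 5);  // fails silently
    }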
@@ -329,12 +329,12 @@ BytecodeCompiler::compileScript(HandleObject environment, SharedContext* sc)

     // Successfully parsed. Emit the script.
     if (pn) {
-        if (sc->isEvalContext() && sc->hasDebuggerStatement() && cx->isJSContext()) {
+        if (sc->isEvalContext() && sc->hasDebuggerStatement() && !cx->helperThread()) {
             // If the eval'ed script contains any debugger statement, force construction
             // of arguments objects for the caller script and any other scripts it is
             // transitively nested inside. The debugger can access any variable on the
             // scope chain.
-            if (!deoptimizeArgumentsInEnclosingScripts(cx->asJSContext(), environment))
+            if (!deoptimizeArgumentsInEnclosingScripts(cx, environment))
                 return nullptr;
         }
         if (!emitter->emitScript(pn))
@@ -357,7 +357,7 @@ BytecodeCompiler::compileScript(HandleObject environment, SharedContext* sc)
     if (!maybeCompleteCompressSource())
         return nullptr;

-    MOZ_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->isExceptionPending());
+    MOZ_ASSERT_IF(!cx->helperThread(), !cx->isExceptionPending());

     return script;
 }
@@ -421,7 +421,7 @@ BytecodeCompiler::compileModule()
     if (!maybeCompleteCompressSource())
         return nullptr;

-    MOZ_ASSERT_IF(cx->isJSContext(), !cx->asJSContext()->isExceptionPending());
+    MOZ_ASSERT_IF(!cx->helperThread(), !cx->isExceptionPending());
     return module;
 }

@@ -486,7 +486,7 @@ BytecodeCompiler::sourceObjectPtr() const
 }

 ScriptSourceObject*
-frontend::CreateScriptSourceObject(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+frontend::CreateScriptSourceObject(JSContext* cx, const ReadOnlyCompileOptions& options,
                                    Maybe<uint32_t> parameterListEnd /* = Nothing() */)
 {
     ScriptSource* ss = cx->new_<ScriptSource>();
@@ -510,8 +510,8 @@ frontend::CreateScriptSourceObject(ExclusiveContext* cx, const ReadOnlyCompileOp
     //
     // Instead, we put off populating those SSO slots in off-thread compilations
     // until after we've merged compartments.
-    if (cx->isJSContext()) {
-        if (!ScriptSourceObject::initFromOptions(cx->asJSContext(), sso, options))
+    if (!cx->helperThread()) {
+        if (!ScriptSourceObject::initFromOptions(cx, sso, options))
             return nullptr;
     }

@@ -550,7 +550,7 @@ class MOZ_STACK_CLASS AutoInitializeSourceObject
 };

 JSScript*
-frontend::CompileGlobalScript(ExclusiveContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
+frontend::CompileGlobalScript(JSContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
                               const ReadOnlyCompileOptions& options,
                               SourceBufferHolder& srcBuf,
                               SourceCompressionTask* extraSct,
@@ -565,7 +565,7 @@ frontend::CompileGlobalScript(ExclusiveContext* cx, LifoAlloc& alloc, ScopeKind
 }

 JSScript*
-frontend::CompileEvalScript(ExclusiveContext* cx, LifoAlloc& alloc,
+frontend::CompileEvalScript(JSContext* cx, LifoAlloc& alloc,
                             HandleObject environment, HandleScope enclosingScope,
                             const ReadOnlyCompileOptions& options,
                             SourceBufferHolder& srcBuf,
@@ -580,7 +580,7 @@ frontend::CompileEvalScript(ExclusiveContext* cx, LifoAlloc& alloc,
 }

 ModuleObject*
-frontend::CompileModule(ExclusiveContext* cx, const ReadOnlyCompileOptions& optionsInput,
+frontend::CompileModule(JSContext* cx, const ReadOnlyCompileOptions& optionsInput,
                         SourceBufferHolder& srcBuf, LifoAlloc& alloc,
                         ScriptSourceObject** sourceObjectOut /* = nullptr */)
 {
@@ -605,14 +605,14 @@ frontend::CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
     if (!GlobalObject::ensureModulePrototypesCreated(cx, cx->global()))
         return nullptr;

-    LifoAlloc& alloc = cx->asJSContext()->tempLifoAlloc();
+    LifoAlloc& alloc = cx->tempLifoAlloc();
     RootedModuleObject module(cx, CompileModule(cx, options, srcBuf, alloc));
     if (!module)
         return nullptr;

     // This happens in GlobalHelperThreadState::finishModuleParseTask() when a
     // module is compiled off main thread.
-    if (!ModuleObject::Freeze(cx->asJSContext(), module))
+    if (!ModuleObject::Freeze(cx, module))
         return nullptr;

     return module;

@@ -27,14 +27,14 @@ struct SourceCompressionTask;
 namespace frontend {

 JSScript*
-CompileGlobalScript(ExclusiveContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
+CompileGlobalScript(JSContext* cx, LifoAlloc& alloc, ScopeKind scopeKind,
                     const ReadOnlyCompileOptions& options,
                     SourceBufferHolder& srcBuf,
                     SourceCompressionTask* extraSct = nullptr,
                     ScriptSourceObject** sourceObjectOut = nullptr);

 JSScript*
-CompileEvalScript(ExclusiveContext* cx, LifoAlloc& alloc,
+CompileEvalScript(JSContext* cx, LifoAlloc& alloc,
                   HandleObject scopeChain, HandleScope enclosingScope,
                   const ReadOnlyCompileOptions& options,
                   SourceBufferHolder& srcBuf,
@@ -46,7 +46,7 @@ CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
               SourceBufferHolder& srcBuf);

 ModuleObject*
-CompileModule(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+CompileModule(JSContext* cx, const ReadOnlyCompileOptions& options,
               SourceBufferHolder& srcBuf, LifoAlloc& alloc,
               ScriptSourceObject** sourceObjectOut = nullptr);

@@ -90,7 +90,7 @@ CompileAsyncFunctionBody(JSContext* cx, MutableHandleFunction fun,
                          Handle<PropertyNameVector> formals, JS::SourceBufferHolder& srcBuf);

 ScriptSourceObject*
-CreateScriptSourceObject(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
+CreateScriptSourceObject(JSContext* cx, const ReadOnlyCompileOptions& options,
                          mozilla::Maybe<uint32_t> parameterListEnd = mozilla::Nothing());

 /*

@@ -961,7 +961,7 @@ BytecodeEmitter::EmitterScope::enterLexical(BytecodeEmitter* bce, ScopeKind kind
     updateFrameFixedSlots(bce, bi);

     // Create and intern the VM scope.
-    auto createScope = [kind, bindings, firstFrameSlot](ExclusiveContext* cx,
+    auto createScope = [kind, bindings, firstFrameSlot](JSContext* cx,
                                                         HandleScope enclosing)
     {
         return LexicalScope::create(cx, kind, bindings, firstFrameSlot, enclosing);
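The emitter's createScope callbacks (here and in the following hunks) only change parameter type; the pattern itself is unchanged: capture the scope parameters in a lambda now, and let the interning machinery invoke it later with a context. A generic model of that defer-via-lambda idea (illustrative stand-ins, not the emitter's real API):

    // Generic model of deferring object creation behind a lambda that a
    // later caller invokes with the context. Illustrative only.
    #include <cassert>

    struct Context { int allocations = 0; };
    struct Scope { int kind; };

    // Stand-in for LexicalScope::create: allocates using the context.
    Scope createLexical(Context* cx, int kind) {
        ++cx->allocations;
        return Scope{kind};
    }

    int main() {
        int kind = 7;
        // Capture the parameters now; create only when interning runs.
        auto createScope = [kind](Context* cx) { return createLexical(cx, kind); };
        Context cx;
        assert(cx.allocations == 0);       // nothing created yet
        Scope s = createScope(&cx);
        assert(cx.allocations == 1 && s.kind == 7);
    }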
@ -1015,7 +1015,7 @@ BytecodeEmitter::EmitterScope::enterNamedLambda(BytecodeEmitter* bce, FunctionBo
|
||||
bi++;
|
||||
MOZ_ASSERT(!bi, "There should be exactly one binding in a NamedLambda scope");
|
||||
|
||||
auto createScope = [funbox](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [funbox](JSContext* cx, HandleScope enclosing) {
|
||||
ScopeKind scopeKind =
|
||||
funbox->strict() ? ScopeKind::StrictNamedLambda : ScopeKind::NamedLambda;
|
||||
return LexicalScope::create(cx, scopeKind, funbox->namedLambdaBindings(),
|
||||
@ -1071,7 +1071,7 @@ BytecodeEmitter::EmitterScope::enterParameterExpressionVar(BytecodeEmitter* bce)
|
||||
|
||||
// Create and intern the VM scope.
|
||||
uint32_t firstFrameSlot = frameSlotStart();
|
||||
auto createScope = [firstFrameSlot](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [firstFrameSlot](JSContext* cx, HandleScope enclosing) {
|
||||
return VarScope::create(cx, ScopeKind::ParameterExpressionVar,
|
||||
/* data = */ nullptr, firstFrameSlot,
|
||||
/* needsEnvironment = */ true, enclosing);
|
||||
@ -1165,7 +1165,7 @@ BytecodeEmitter::EmitterScope::enterFunction(BytecodeEmitter* bce, FunctionBox*
|
||||
}
|
||||
|
||||
// Create and intern the VM scope.
|
||||
auto createScope = [funbox](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [funbox](JSContext* cx, HandleScope enclosing) {
|
||||
RootedFunction fun(cx, funbox->function());
|
||||
return FunctionScope::create(cx, funbox->functionScopeBindings(),
|
||||
funbox->hasParameterExprs,
|
||||
@ -1219,7 +1219,7 @@ BytecodeEmitter::EmitterScope::enterFunctionExtraBodyVar(BytecodeEmitter* bce, F
|
||||
fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
|
||||
|
||||
// Create and intern the VM scope.
|
||||
auto createScope = [funbox, firstFrameSlot](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [funbox, firstFrameSlot](JSContext* cx, HandleScope enclosing) {
|
||||
return VarScope::create(cx, ScopeKind::FunctionBodyVar,
|
||||
funbox->extraVarScopeBindings(), firstFrameSlot,
|
||||
funbox->needsExtraBodyVarEnvironmentRegardlessOfBindings(),
|
||||
@ -1287,7 +1287,7 @@ BytecodeEmitter::EmitterScope::enterGlobal(BytecodeEmitter* bce, GlobalSharedCon
|
||||
// lazily upon first access.
|
||||
fallbackFreeNameLocation_ = Some(NameLocation::Intrinsic());
|
||||
|
||||
auto createScope = [](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [](JSContext* cx, HandleScope enclosing) {
|
||||
MOZ_ASSERT(!enclosing);
|
||||
return &cx->global()->emptyGlobalScope();
|
||||
};
|
||||
@ -1320,7 +1320,7 @@ BytecodeEmitter::EmitterScope::enterGlobal(BytecodeEmitter* bce, GlobalSharedCon
|
||||
else
|
||||
fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
|
||||
|
||||
auto createScope = [globalsc](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [globalsc](JSContext* cx, HandleScope enclosing) {
|
||||
MOZ_ASSERT(!enclosing);
|
||||
return GlobalScope::create(cx, globalsc->scopeKind(), globalsc->bindings);
|
||||
};
|
||||
@ -1342,7 +1342,7 @@ BytecodeEmitter::EmitterScope::enterEval(BytecodeEmitter* bce, EvalSharedContext
|
||||
|
||||
// Create the `var` scope. Note that there is also a lexical scope, created
|
||||
// separately in emitScript().
|
||||
auto createScope = [evalsc](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [evalsc](JSContext* cx, HandleScope enclosing) {
|
||||
ScopeKind scopeKind = evalsc->strict() ? ScopeKind::StrictEval : ScopeKind::Eval;
|
||||
return EvalScope::create(cx, scopeKind, evalsc->bindings, enclosing);
|
||||
};
|
||||
@ -1430,7 +1430,7 @@ BytecodeEmitter::EmitterScope::enterModule(BytecodeEmitter* bce, ModuleSharedCon
|
||||
}
|
||||
|
||||
// Create and intern the VM scope.
|
||||
auto createScope = [modulesc](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [modulesc](JSContext* cx, HandleScope enclosing) {
|
||||
return ModuleScope::create(cx, modulesc->bindings, modulesc->module(), enclosing);
|
||||
};
|
||||
if (!internBodyScope(bce, createScope))
|
||||
@ -1450,7 +1450,7 @@ BytecodeEmitter::EmitterScope::enterWith(BytecodeEmitter* bce)
|
||||
// 'with' make all accesses dynamic and unanalyzable.
|
||||
fallbackFreeNameLocation_ = Some(NameLocation::Dynamic());
|
||||
|
||||
auto createScope = [](ExclusiveContext* cx, HandleScope enclosing) {
|
||||
auto createScope = [](JSContext* cx, HandleScope enclosing) {
|
||||
return WithScope::create(cx, enclosing);
|
||||
};
|
||||
if (!internScope(bce, createScope))
|
||||
@ -3486,18 +3486,17 @@ BytecodeEmitter::maybeSetSourceMap()
|
||||
}
|
||||
|
||||
void
|
||||
BytecodeEmitter::tellDebuggerAboutCompiledScript(ExclusiveContext* cx)
|
||||
BytecodeEmitter::tellDebuggerAboutCompiledScript(JSContext* cx)
|
||||
{
|
||||
// Note: when parsing off thread the resulting scripts need to be handed to
|
||||
// the debugger after rejoining to the main thread.
|
||||
if (!cx->isJSContext())
|
||||
if (cx->helperThread())
|
||||
return;
|
||||
|
||||
// Lazy scripts are never top level (despite always being invoked with a
|
||||
// nullptr parent), and so the hook should never be fired.
|
||||
if (emitterMode != LazyFunction && !parent) {
|
||||
Debugger::onNewScript(cx->asJSContext(), script);
|
||||
}
|
||||
if (emitterMode != LazyFunction && !parent)
|
||||
Debugger::onNewScript(cx, script);
|
||||
}

inline TokenStream*
@ -6121,7 +6120,7 @@ BytecodeEmitter::emitAssignment(ParseNode* lhs, JSOp op, ParseNode* rhs)
}

bool
ParseNode::getConstantValue(ExclusiveContext* cx, AllowConstantObjects allowObjects,
ParseNode::getConstantValue(JSContext* cx, AllowConstantObjects allowObjects,
MutableHandleValue vp, Value* compare, size_t ncompare,
NewObjectKind newKind)
{
@ -8568,7 +8567,7 @@ BytecodeEmitter::emitDeleteExpression(ParseNode* node)
}

static const char *
SelfHostedCallFunctionName(JSAtom* name, ExclusiveContext* cx)
SelfHostedCallFunctionName(JSAtom* name, JSContext* cx)
{
if (name == cx->names().callFunction)
return "callFunction";
@ -10453,7 +10452,7 @@ BytecodeEmitter::emitTreeInBranch(ParseNode* pn)
}

static bool
AllocSrcNote(ExclusiveContext* cx, SrcNotesVector& notes, unsigned* index)
AllocSrcNote(JSContext* cx, SrcNotesVector& notes, unsigned* index)
{
// Start it off moderately large to avoid repeated resizings early on.
// ~99% of cases fit within 256 bytes.

@ -32,7 +32,7 @@ class TokenStream;
class CGConstList {
Vector<Value> list;
public:
explicit CGConstList(ExclusiveContext* cx) : list(cx) {}
explicit CGConstList(JSContext* cx) : list(cx) {}
MOZ_MUST_USE bool append(const Value& v) {
MOZ_ASSERT_IF(v.isString(), v.toString()->isAtom());
return list.append(v);
@ -56,7 +56,7 @@ struct CGObjectList {
struct MOZ_STACK_CLASS CGScopeList {
Rooted<GCVector<Scope*>> vector;

explicit CGScopeList(ExclusiveContext* cx)
explicit CGScopeList(JSContext* cx)
: vector(cx, GCVector<Scope*>(cx))
{ }

@ -67,7 +67,7 @@ struct MOZ_STACK_CLASS CGScopeList {

struct CGTryNoteList {
Vector<JSTryNote> list;
explicit CGTryNoteList(ExclusiveContext* cx) : list(cx) {}
explicit CGTryNoteList(JSContext* cx) : list(cx) {}

MOZ_MUST_USE bool append(JSTryNoteKind kind, uint32_t stackDepth, size_t start, size_t end);
size_t length() const { return list.length(); }
@ -89,7 +89,7 @@ struct CGScopeNote : public ScopeNote

struct CGScopeNoteList {
Vector<CGScopeNote> list;
explicit CGScopeNoteList(ExclusiveContext* cx) : list(cx) {}
explicit CGScopeNoteList(JSContext* cx) : list(cx) {}

MOZ_MUST_USE bool append(uint32_t scopeIndex, uint32_t offset, bool inPrologue,
uint32_t parent);
@ -100,7 +100,7 @@ struct CGScopeNoteList {

struct CGYieldOffsetList {
Vector<uint32_t> list;
explicit CGYieldOffsetList(ExclusiveContext* cx) : list(cx) {}
explicit CGYieldOffsetList(JSContext* cx) : list(cx) {}

MOZ_MUST_USE bool append(uint32_t offset) { return list.append(offset); }
size_t length() const { return list.length(); }
@ -175,7 +175,7 @@ struct MOZ_STACK_CLASS BytecodeEmitter

SharedContext* const sc; /* context shared between parsing and bytecode generation */

ExclusiveContext* const cx;
JSContext* const cx;

BytecodeEmitter* const parent; /* enclosing function or global context */

@ -193,7 +193,7 @@ struct MOZ_STACK_CLASS BytecodeEmitter
last SRC_COLSPAN-annotated opcode */
JumpTarget lastTarget; // Last jump target emitted.

EmitSection(ExclusiveContext* cx, uint32_t lineNum)
EmitSection(JSContext* cx, uint32_t lineNum)
: code(cx), notes(cx), lastNoteOffset(0), currentLine(lineNum), lastColumn(0),
lastTarget{ -1 - ptrdiff_t(JSOP_JUMPTARGET_LENGTH) }
{}
@ -350,7 +350,7 @@ struct MOZ_STACK_CLASS BytecodeEmitter

MOZ_MUST_USE bool maybeSetDisplayURL();
MOZ_MUST_USE bool maybeSetSourceMap();
void tellDebuggerAboutCompiledScript(ExclusiveContext* cx);
void tellDebuggerAboutCompiledScript(JSContext* cx);

inline TokenStream* tokenStream();

@ -29,10 +29,10 @@ using JS::ToInt32;
using JS::ToUint32;

static bool
ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result);
ContainsHoistedDeclaration(JSContext* cx, ParseNode* node, bool* result);

static bool
ListContainsHoistedDeclaration(ExclusiveContext* cx, ListNode* list, bool* result)
ListContainsHoistedDeclaration(JSContext* cx, ListNode* list, bool* result)
{
for (ParseNode* node = list->pn_head; node; node = node->pn_next) {
if (!ContainsHoistedDeclaration(cx, node, result))
@ -54,7 +54,7 @@ ListContainsHoistedDeclaration(ExclusiveContext* cx, ListNode* list, bool* resul
// by a constant condition, contains a declaration that forbids |node| being
// completely eliminated as dead.
static bool
ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result)
ContainsHoistedDeclaration(JSContext* cx, ParseNode* node, bool* result)
{
JS_CHECK_RECURSION(cx, return false);

@ -415,7 +415,7 @@ ContainsHoistedDeclaration(ExclusiveContext* cx, ParseNode* node, bool* result)
* XXX handles only strings and numbers for now
*/
static bool
FoldType(ExclusiveContext* cx, ParseNode* pn, ParseNodeKind kind)
FoldType(JSContext* cx, ParseNode* pn, ParseNodeKind kind)
{
if (!pn->isKind(kind)) {
switch (kind) {
@ -517,10 +517,10 @@ Boolish(ParseNode* pn)
}

static bool
Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda);
Fold(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda);

static bool
FoldCondition(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldCondition(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
// Conditions fold like any other expression...
@ -551,7 +551,7 @@ FoldCondition(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler
}

static bool
FoldTypeOfExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldTypeOfExpr(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_TYPEOFEXPR));
@ -587,7 +587,7 @@ FoldTypeOfExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>&
}

static bool
FoldDeleteExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldDeleteExpr(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DELETEEXPR));
@ -610,7 +610,7 @@ FoldDeleteExpr(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>&
}

static bool
FoldDeleteElement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldDeleteElement(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DELETEELEM));
@ -635,7 +635,7 @@ FoldDeleteElement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler
}

static bool
FoldDeleteProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldDeleteProperty(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DELETEPROP));
@ -657,7 +657,7 @@ FoldDeleteProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandle
}

static bool
FoldNot(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldNot(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_NOT));
@ -692,7 +692,7 @@ FoldNot(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
}

static bool
FoldUnaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldUnaryArithmetic(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_BITNOT) || node->isKind(PNK_POS) || node->isKind(PNK_NEG),
@ -726,7 +726,7 @@ FoldUnaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandl
}

static bool
FoldIncrementDecrement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldIncrementDecrement(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_PREINCREMENT) ||
@ -747,7 +747,7 @@ FoldIncrementDecrement(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHa
}

static bool
FoldAndOr(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldAndOr(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
ParseNode* node = *nodePtr;
@ -830,7 +830,7 @@ FoldAndOr(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& p
}

static bool
FoldConditional(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldConditional(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
ParseNode** nextNode = nodePtr;
@ -900,7 +900,7 @@ FoldConditional(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandl
}

static bool
FoldIf(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldIf(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
ParseNode** nextNode = nodePtr;
@ -1004,7 +1004,7 @@ FoldIf(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& pars
}

static bool
FoldFunction(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldFunction(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_FUNCTION));
@ -1067,7 +1067,7 @@ ComputeBinary(ParseNodeKind kind, double left, double right)
}

static bool
FoldModule(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser)
FoldModule(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser)
{
MOZ_ASSERT(node->isKind(PNK_MODULE));
MOZ_ASSERT(node->isArity(PN_CODE));
@ -1078,7 +1078,7 @@ FoldModule(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& pars
}

static bool
FoldBinaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldBinaryArithmetic(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_SUB) ||
@ -1150,7 +1150,7 @@ FoldBinaryArithmetic(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHand
}

static bool
FoldExponentiation(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldExponentiation(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_POW));
@ -1194,7 +1194,7 @@ FoldExponentiation(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandle
}

static bool
FoldList(ExclusiveContext* cx, ParseNode* list, Parser<FullParseHandler>& parser,
FoldList(JSContext* cx, ParseNode* list, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(list->isArity(PN_LIST));
@ -1214,7 +1214,7 @@ FoldList(ExclusiveContext* cx, ParseNode* list, Parser<FullParseHandler>& parser
}

static bool
FoldReturn(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldReturn(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_RETURN));
@ -1229,7 +1229,7 @@ FoldReturn(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& pars
}

static bool
FoldTry(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldTry(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_TRY));
@ -1253,7 +1253,7 @@ FoldTry(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
}

static bool
FoldCatch(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldCatch(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_CATCH));
@ -1277,7 +1277,7 @@ FoldCatch(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parse
}

static bool
FoldClass(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldClass(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_CLASS));
@ -1298,7 +1298,7 @@ FoldClass(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parse
}

static bool
FoldElement(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldElement(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
ParseNode* node = *nodePtr;
@ -1374,7 +1374,7 @@ FoldElement(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>&
}

static bool
FoldAdd(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
FoldAdd(JSContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
ParseNode* node = *nodePtr;
@ -1525,7 +1525,7 @@ FoldAdd(ExclusiveContext* cx, ParseNode** nodePtr, Parser<FullParseHandler>& par
}

static bool
FoldCall(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldCall(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_CALL) || node->isKind(PNK_SUPERCALL) ||
@ -1560,7 +1560,7 @@ FoldCall(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser
}

static bool
FoldForInOrOf(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldForInOrOf(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_FORIN) || node->isKind(PNK_FOROF));
@ -1572,7 +1572,7 @@ FoldForInOrOf(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& p
}

static bool
FoldForHead(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldForHead(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_FORHEAD));
@ -1602,7 +1602,7 @@ FoldForHead(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& par
}

static bool
FoldDottedProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldDottedProperty(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_DOT));
@ -1620,7 +1620,7 @@ FoldDottedProperty(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandle
}

static bool
FoldName(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
FoldName(JSContext* cx, ParseNode* node, Parser<FullParseHandler>& parser,
bool inGenexpLambda)
{
MOZ_ASSERT(node->isKind(PNK_NAME));
@ -1633,7 +1633,7 @@ FoldName(ExclusiveContext* cx, ParseNode* node, Parser<FullParseHandler>& parser
}

bool
Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda)
Fold(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bool inGenexpLambda)
{
JS_CHECK_RECURSION(cx, return false);

@ -1921,7 +1921,7 @@ Fold(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>& parser, bo
}

bool
frontend::FoldConstants(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser)
frontend::FoldConstants(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser)
{
// Don't constant-fold inside "use asm" code, as this could create a parse
// tree that doesn't type-check as asm.js.

@ -26,10 +26,10 @@ namespace frontend {
// if (!FoldConstants(cx, &pn, parser))
// return false;
MOZ_MUST_USE bool
FoldConstants(ExclusiveContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser);
FoldConstants(JSContext* cx, ParseNode** pnp, Parser<FullParseHandler>* parser);

inline MOZ_MUST_USE bool
FoldConstants(ExclusiveContext* cx, SyntaxParseHandler::Node* pnp,
FoldConstants(JSContext* cx, SyntaxParseHandler::Node* pnp,
Parser<SyntaxParseHandler>* parser)
{
return true;
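
The SyntaxParseHandler overload above returning true is deliberate: a syntax-only parse builds no full tree, so constant folding has nothing to do. A hedged sketch of a caller that stays generic over both handlers (the wrapper is hypothetical; the FoldConstants overloads are the ones declared above):

template <typename ParseHandler>
static bool
foldAfterParse(JSContext* cx, typename ParseHandler::Node* pnp, Parser<ParseHandler>* parser)
{
    // Resolves to the real folder for FullParseHandler and to the
    // trivial inline overload above for SyntaxParseHandler.
    return FoldConstants(cx, pnp, parser);
}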

@ -97,7 +97,7 @@ class FullParseHandler
isParenthesizedDestructuringPattern(node);
}

FullParseHandler(ExclusiveContext* cx, LifoAlloc& alloc,
FullParseHandler(JSContext* cx, LifoAlloc& alloc,
TokenStream& tokenStream, Parser<SyntaxParseHandler>* syntaxParser,
LazyScript* lazyOuterFunction)
: allocator(cx, alloc),
@ -114,7 +114,7 @@ class FullParseHandler
void prepareNodeForMutation(ParseNode* pn) { return allocator.prepareNodeForMutation(pn); }
const Token& currentToken() { return tokenStream.currentToken(); }

ParseNode* newName(PropertyName* name, const TokenPos& pos, ExclusiveContext* cx)
ParseNode* newName(PropertyName* name, const TokenPos& pos, JSContext* cx)
{
return new_<NameNode>(PNK_NAME, JSOP_GETNAME, name, pos);
}
@ -876,11 +876,11 @@ class FullParseHandler
return node->isKind(PNK_NAME);
}

bool isEvalAnyParentheses(ParseNode* node, ExclusiveContext* cx) {
bool isEvalAnyParentheses(ParseNode* node, JSContext* cx) {
return node->isKind(PNK_NAME) && node->pn_atom == cx->names().eval;
}

const char* nameIsArgumentsEvalAnyParentheses(ParseNode* node, ExclusiveContext* cx) {
const char* nameIsArgumentsEvalAnyParentheses(ParseNode* node, JSContext* cx) {
MOZ_ASSERT(isNameAnyParentheses(node),
"must only call this function on known names");

@ -891,7 +891,7 @@ class FullParseHandler
return nullptr;
}

bool isAsyncKeyword(ParseNode* node, ExclusiveContext* cx) {
bool isAsyncKeyword(ParseNode* node, JSContext* cx) {
return node->isKind(PNK_NAME) &&
node->pn_pos.begin + strlen("async") == node->pn_pos.end &&
node->pn_atom == cx->names().async;

@ -62,7 +62,7 @@ class CollectionPool

// Fallibly acquire one of the supported collection types from the pool.
template <typename Collection>
Collection* acquire(ExclusiveContext* cx) {
Collection* acquire(JSContext* cx) {
ConcreteCollectionPool::template assertInvariants<Collection>();

RepresentativeCollection* collection;
@ -215,7 +215,7 @@ class NameCollectionPool
}

template <typename Map>
Map* acquireMap(ExclusiveContext* cx) {
Map* acquireMap(JSContext* cx) {
MOZ_ASSERT(hasActiveCompilation());
return mapPool_.acquire<Map>(cx);
}
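
acquireMap and acquireVector hand out recycled collections scoped to an active compilation, so parser hot paths avoid reallocating map and vector storage. A rough usage sketch; the releaseMap counterpart is an assumption inferred from the release##T macro further down, not a verified API:

template <typename Map>
static bool
withPooledMap(JSContext* cx, NameCollectionPool& pool)
{
    Map* map = pool.acquireMap<Map>(cx);   // fallible: may return nullptr
    if (!map)
        return false;
    // ... use *map for the duration of one compilation step ...
    pool.releaseMap(&map);                 // assumed counterpart; returns storage to the pool
    return true;
}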
@ -229,7 +229,7 @@ class NameCollectionPool
}

template <typename Vector>
Vector* acquireVector(ExclusiveContext* cx) {
Vector* acquireVector(JSContext* cx) {
MOZ_ASSERT(hasActiveCompilation());
return vectorPool_.acquire<Vector>(cx);
}
@ -274,7 +274,7 @@ class NameCollectionPool
pool_.release##T(&collection_); \
} \
\
bool acquire(ExclusiveContext* cx) { \
bool acquire(JSContext* cx) { \
MOZ_ASSERT(!collection_); \
collection_ = pool_.acquire##T<T>(cx); \
return !!collection_; \

@ -25,7 +25,7 @@ class NameResolver
{
static const size_t MaxParents = 100;

ExclusiveContext* cx;
JSContext* cx;
size_t nparents; /* number of parents in the parents array */
ParseNode* parents[MaxParents]; /* history of ParseNodes we've been looking at */
StringBuffer* buf; /* when resolving, buffer to append to */
@ -342,7 +342,7 @@ class NameResolver
}

public:
explicit NameResolver(ExclusiveContext* cx) : cx(cx), nparents(0), buf(nullptr) {}
explicit NameResolver(JSContext* cx) : cx(cx), nparents(0), buf(nullptr) {}

/*
* Resolve all names for anonymous functions recursively within the
@ -833,7 +833,7 @@ class NameResolver
} /* anonymous namespace */

bool
frontend::NameFunctions(ExclusiveContext* cx, ParseNode* pn)
frontend::NameFunctions(JSContext* cx, ParseNode* pn)
{
NameResolver nr(cx);
return nr.resolve(pn);

@ -12,15 +12,12 @@
#include "js/TypeDecls.h"

namespace js {

class ExclusiveContext;

namespace frontend {

class ParseNode;

MOZ_MUST_USE bool
NameFunctions(ExclusiveContext* cx, ParseNode* pn);
NameFunctions(JSContext* cx, ParseNode* pn);

} /* namespace frontend */
} /* namespace js */

@ -788,7 +788,7 @@ class ParseNode
ForCopyOnWriteArray
};

MOZ_MUST_USE bool getConstantValue(ExclusiveContext* cx, AllowConstantObjects allowObjects,
MOZ_MUST_USE bool getConstantValue(JSContext* cx, AllowConstantObjects allowObjects,
MutableHandleValue vp, Value* compare = nullptr,
size_t ncompare = 0, NewObjectKind newKind = TenuredObject);
inline bool isConstant();
@ -1244,7 +1244,7 @@ struct CallSiteNode : public ListNode {
return node.isKind(PNK_CALLSITEOBJ);
}

MOZ_MUST_USE bool getRawArrayValue(ExclusiveContext* cx, MutableHandleValue vp) {
MOZ_MUST_USE bool getRawArrayValue(JSContext* cx, MutableHandleValue vp) {
return pn_head->getConstantValue(cx, AllowObjects, vp);
}
};
@ -1351,7 +1351,7 @@ void DumpParseTree(ParseNode* pn, int indent = 0);
class ParseNodeAllocator
{
public:
explicit ParseNodeAllocator(ExclusiveContext* cx, LifoAlloc& alloc)
explicit ParseNodeAllocator(JSContext* cx, LifoAlloc& alloc)
: cx(cx), alloc(alloc), freelist(nullptr)
{}

@ -1361,7 +1361,7 @@ class ParseNodeAllocator
void prepareNodeForMutation(ParseNode* pn);

private:
ExclusiveContext* cx;
JSContext* cx;
LifoAlloc& alloc;
ParseNode* freelist;
};

@ -131,7 +131,7 @@ StatementKindIsBraced(StatementKind kind)
void
ParseContext::Scope::dump(ParseContext* pc)
{
ExclusiveContext* cx = pc->sc()->context;
JSContext* cx = pc->sc()->context;

fprintf(stdout, "ParseScope %p", this);

@ -272,7 +272,7 @@ SharedContext::computeInWith(Scope* scope)
}
}

EvalSharedContext::EvalSharedContext(ExclusiveContext* cx, JSObject* enclosingEnv,
EvalSharedContext::EvalSharedContext(JSContext* cx, JSObject* enclosingEnv,
Scope* enclosingScope, Directives directives,
bool extraWarnings)
: SharedContext(cx, Kind::Eval, directives, extraWarnings),
@ -315,7 +315,7 @@ ParseContext::init()
return false;
}

ExclusiveContext* cx = sc()->context;
JSContext* cx = sc()->context;

if (isFunctionBox()) {
// Named lambdas always need a binding for their own name. If this
@ -400,7 +400,7 @@ ParseContext::~ParseContext()
}

bool
UsedNameTracker::noteUse(ExclusiveContext* cx, JSAtom* name, uint32_t scriptId, uint32_t scopeId)
UsedNameTracker::noteUse(JSContext* cx, JSAtom* name, uint32_t scriptId, uint32_t scopeId)
{
if (UsedNameMap::AddPtr p = map_.lookupForAdd(name)) {
if (!p->value().noteUsedInScope(scriptId, scopeId))
@ -438,7 +438,7 @@ UsedNameTracker::rewind(RewindToken token)
r.front().value().resetToScope(token.scriptId, token.scopeId);
}

FunctionBox::FunctionBox(ExclusiveContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead,
FunctionBox::FunctionBox(JSContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead,
JSFunction* fun, Directives directives, bool extraWarnings,
GeneratorKind generatorKind, FunctionAsyncKind asyncKind)
: ObjectBox(fun, traceListHead),
@ -690,7 +690,7 @@ Parser<SyntaxParseHandler>::abortIfSyntaxParser()
return false;
}

ParserBase::ParserBase(ExclusiveContext* cx, LifoAlloc& alloc,
ParserBase::ParserBase(JSContext* cx, LifoAlloc& alloc,
const ReadOnlyCompileOptions& options,
const char16_t* chars, size_t length,
bool foldConstants,
@ -705,7 +705,7 @@ ParserBase::ParserBase(ExclusiveContext* cx, LifoAlloc& alloc,
usedNames(usedNames),
sct(nullptr),
ss(nullptr),
keepAtoms(cx->perThreadData),
keepAtoms(cx),
foldConstants(foldConstants),
#ifdef DEBUG
checkOptionsCalled(false),
@ -713,7 +713,7 @@ ParserBase::ParserBase(ExclusiveContext* cx, LifoAlloc& alloc,
abortedSyntaxParse(false),
isUnexpectedEOF_(false)
{
cx->perThreadData->frontendCollectionPool.addActiveCompilation();
cx->frontendCollectionPool().addActiveCompilation();
tempPoolMark = alloc.mark();
}

@ -728,11 +728,11 @@ ParserBase::~ParserBase()
*/
alloc.freeAllIfHugeAndUnused();

context->perThreadData->frontendCollectionPool.removeActiveCompilation();
context->frontendCollectionPool().removeActiveCompilation();
}

template <typename ParseHandler>
Parser<ParseHandler>::Parser(ExclusiveContext* cx, LifoAlloc& alloc,
Parser<ParseHandler>::Parser(JSContext* cx, LifoAlloc& alloc,
const ReadOnlyCompileOptions& options,
const char16_t* chars, size_t length,
bool foldConstants,
@ -827,7 +827,7 @@ Parser<ParseHandler>::newFunctionBox(Node fn, JSFunction* fun, Directives inheri
return funbox;
}

ModuleSharedContext::ModuleSharedContext(ExclusiveContext* cx, ModuleObject* module,
ModuleSharedContext::ModuleSharedContext(JSContext* cx, ModuleObject* module,
Scope* enclosingScope, ModuleBuilder& builder)
: SharedContext(cx, Kind::Module, Directives(true), false),
module_(cx, module),
@ -1491,7 +1491,7 @@ Parser<FullParseHandler>::checkStatementsEOF()

template <typename Scope>
static typename Scope::Data*
NewEmptyBindingData(ExclusiveContext* cx, LifoAlloc& alloc, uint32_t numBindings)
NewEmptyBindingData(JSContext* cx, LifoAlloc& alloc, uint32_t numBindings)
{
size_t allocSize = Scope::sizeOfData(numBindings);
typename Scope::Data* bindings = static_cast<typename Scope::Data*>(alloc.alloc(allocSize));
@ -1886,7 +1886,7 @@ Parser<FullParseHandler>::finishLexicalScope(ParseContext::Scope& scope, ParseNo
}

static bool
IsArgumentsUsedInLegacyGenerator(ExclusiveContext* cx, Scope* scope)
IsArgumentsUsedInLegacyGenerator(JSContext* cx, Scope* scope)
{
JSAtom* argumentsName = cx->names().arguments;
for (ScopeIter si(scope); si; si++) {
@ -2058,7 +2058,7 @@ Parser<FullParseHandler>::moduleBody(ModuleSharedContext* modulesc)
if (!str.encodeLatin1(context, name))
return null();

JS_ReportErrorNumberLatin1(context->asJSContext(), GetErrorMessage, nullptr,
JS_ReportErrorNumberLatin1(context, GetErrorMessage, nullptr,
JSMSG_MISSING_EXPORT, str.ptr());
return null();
}
@ -3132,7 +3132,7 @@ Parser<ParseHandler>::functionDefinition(Node pn, InHandling inHandling,
// If we are off the main thread, the generator meta-objects have
// already been created by js::StartOffThreadParseTask, so cx will not
// be necessary.
JSContext* cx = context->maybeJSContext();
JSContext* cx = context->helperThread() ? nullptr : context;
proto = GlobalObject::getOrCreateStarGeneratorFunctionPrototype(cx, context->global());
if (!proto)
return null();
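
The ternary above is the new convention for callees that only need a context on the main thread: a helper thread passes nullptr, which is safe here because js::StartOffThreadParseTask pre-creates the generator meta-objects. Isolated, the pattern reads:

// Sketch of the convention; the getOrCreate call is the one from the patch.
JSContext* maybeCx = context->helperThread() ? nullptr : context;
JSObject* proto =
    GlobalObject::getOrCreateStarGeneratorFunctionPrototype(maybeCx, context->global());
// Off thread, the prototype must already exist, so no context is needed to create it.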
@ -3231,8 +3231,8 @@ Parser<FullParseHandler>::trySyntaxParseInnerFunction(ParseNode* pn, HandleFunct
// correctness.
parser->clearAbortedSyntaxParse();
usedNames.rewind(token);
MOZ_ASSERT_IF(parser->context->isJSContext(),
!parser->context->asJSContext()->isExceptionPending());
MOZ_ASSERT_IF(!parser->context->helperThread(),
!parser->context->isExceptionPending());
break;
}
return false;
@ -7982,7 +7982,7 @@ Parser<ParseHandler>::generatorComprehensionLambda(unsigned begin)
// already been created by js::StartOffThreadParseScript, so cx will not
// be necessary.
RootedObject proto(context);
JSContext* cx = context->maybeJSContext();
JSContext* cx = context->helperThread() ? nullptr : context;
proto = GlobalObject::getOrCreateStarGeneratorFunctionPrototype(cx, context->global());
if (!proto)
return null();
@ -8848,7 +8848,7 @@ Parser<ParseHandler>::arrayInitializer(YieldHandling yieldHandling, PossibleErro
}

static JSAtom*
DoubleToAtom(ExclusiveContext* cx, double value)
DoubleToAtom(JSContext* cx, double value)
{
// This is safe because doubles can not be moved.
Value tmp = DoubleValue(value);
@ -9567,24 +9567,22 @@ Parser<ParseHandler>::exprInParens(InHandling inHandling, YieldHandling yieldHan
void
ParserBase::addTelemetry(JSCompartment::DeprecatedLanguageExtension e)
{
JSContext* cx = context->maybeJSContext();
if (!cx)
if (context->helperThread())
return;
cx->compartment()->addTelemetry(getFilename(), e);
context->compartment()->addTelemetry(getFilename(), e);
}

bool
ParserBase::warnOnceAboutExprClosure()
{
#ifndef RELEASE_OR_BETA
JSContext* cx = context->maybeJSContext();
if (!cx)
if (context->helperThread())
return true;

if (!cx->compartment()->warnedAboutExprClosure) {
if (!context->compartment()->warnedAboutExprClosure) {
if (!warning(JSMSG_DEPRECATED_EXPR_CLOSURE))
return false;
cx->compartment()->warnedAboutExprClosure = true;
context->compartment()->warnedAboutExprClosure = true;
}
#endif
return true;
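
warnOnceAboutExprClosure is the once-per-compartment warning idiom, now written directly against context instead of the old maybeJSContext() detour: skip on helper threads, then test-and-set a compartment flag around the warning. Condensed, with the flag and message names as in the patch:

// Condensed restatement of the function above, showing the shape of the idiom.
if (context->helperThread())
    return true;                                        // warnings are main-thread only
if (!context->compartment()->warnedAboutExprClosure) {
    if (!warning(JSMSG_DEPRECATED_EXPR_CLOSURE))
        return false;                                   // the warning itself can fail
    context->compartment()->warnedAboutExprClosure = true;
}
return true;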
@ -9593,14 +9591,13 @@ ParserBase::warnOnceAboutExprClosure()
bool
ParserBase::warnOnceAboutForEach()
{
JSContext* cx = context->maybeJSContext();
if (!cx)
if (context->helperThread())
return true;

if (!cx->compartment()->warnedAboutForEach) {
if (!context->compartment()->warnedAboutForEach) {
if (!warning(JSMSG_DEPRECATED_FOR_EACH))
return false;
cx->compartment()->warnedAboutForEach = true;
context->compartment()->warnedAboutForEach = true;
}
return true;
}

@ -631,7 +631,7 @@ class UsedNameTracker
void resetToScope(uint32_t scriptId, uint32_t scopeId);

public:
explicit UsedNameInfo(ExclusiveContext* cx)
explicit UsedNameInfo(JSContext* cx)
: uses_(cx)
{ }

@ -677,7 +677,7 @@ class UsedNameTracker
uint32_t scopeCounter_;

public:
explicit UsedNameTracker(ExclusiveContext* cx)
explicit UsedNameTracker(JSContext* cx)
: map_(cx),
scriptCounter_(0),
scopeCounter_(0)
@ -702,7 +702,7 @@ class UsedNameTracker
return map_.lookup(name);
}

MOZ_MUST_USE bool noteUse(ExclusiveContext* cx, JSAtom* name,
MOZ_MUST_USE bool noteUse(JSContext* cx, JSAtom* name,
uint32_t scriptId, uint32_t scopeId);

struct RewindToken
@ -740,7 +740,7 @@ class ParserBase : public StrictModeGetter
ParserBase* thisForCtor() { return this; }

public:
ExclusiveContext* const context;
JSContext* const context;

LifoAlloc& alloc;

@ -784,7 +784,7 @@ class ParserBase : public StrictModeGetter
bool isUnexpectedEOF_:1;

public:
ParserBase(ExclusiveContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
ParserBase(JSContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
const char16_t* chars, size_t length, bool foldConstants,
UsedNameTracker& usedNames, Parser<SyntaxParseHandler>* syntaxParser,
LazyScript* lazyOuterFunction);
@ -992,7 +992,7 @@ class Parser final : public ParserBase, private JS::AutoGCRooter
void freeTree(Node node) { handler.freeTree(node); }

public:
Parser(ExclusiveContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
Parser(JSContext* cx, LifoAlloc& alloc, const ReadOnlyCompileOptions& options,
const char16_t* chars, size_t length, bool foldConstants, UsedNameTracker& usedNames,
Parser<SyntaxParseHandler>* syntaxParser, LazyScript* lazyOuterFunction);
~Parser();

@ -272,7 +272,7 @@ class ModuleSharedContext;
class SharedContext
{
public:
ExclusiveContext* const context;
JSContext* const context;
AnyContextFlags anyCxFlags;
bool strictScript;
bool localStrict;
@ -301,7 +301,7 @@ class SharedContext
void computeThisBinding(Scope* scope);

public:
SharedContext(ExclusiveContext* cx, Kind kind, Directives directives, bool extraWarnings)
SharedContext(JSContext* cx, Kind kind, Directives directives, bool extraWarnings)
: context(cx),
anyCxFlags(),
strictScript(directives.strict()),
@ -377,7 +377,7 @@ class MOZ_STACK_CLASS GlobalSharedContext : public SharedContext
public:
Rooted<GlobalScope::Data*> bindings;

GlobalSharedContext(ExclusiveContext* cx, ScopeKind scopeKind, Directives directives,
GlobalSharedContext(JSContext* cx, ScopeKind scopeKind, Directives directives,
bool extraWarnings)
: SharedContext(cx, Kind::Global, directives, extraWarnings),
scopeKind_(scopeKind),
@ -410,7 +410,7 @@ class MOZ_STACK_CLASS EvalSharedContext : public SharedContext
public:
Rooted<EvalScope::Data*> bindings;

EvalSharedContext(ExclusiveContext* cx, JSObject* enclosingEnv, Scope* enclosingScope,
EvalSharedContext(JSContext* cx, JSObject* enclosingEnv, Scope* enclosingScope,
Directives directives, bool extraWarnings);

Scope* compilationEnclosingScope() const override {
@ -478,22 +478,22 @@ class FunctionBox : public ObjectBox, public SharedContext

FunctionContextFlags funCxFlags;

FunctionBox(ExclusiveContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead, JSFunction* fun,
FunctionBox(JSContext* cx, LifoAlloc& alloc, ObjectBox* traceListHead, JSFunction* fun,
Directives directives, bool extraWarnings, GeneratorKind generatorKind,
FunctionAsyncKind asyncKind);

MutableHandle<LexicalScope::Data*> namedLambdaBindings() {
MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
MOZ_ASSERT(context->keepAtoms);
return MutableHandle<LexicalScope::Data*>::fromMarkedLocation(&namedLambdaBindings_);
}

MutableHandle<FunctionScope::Data*> functionScopeBindings() {
MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
MOZ_ASSERT(context->keepAtoms);
return MutableHandle<FunctionScope::Data*>::fromMarkedLocation(&functionScopeBindings_);
}

MutableHandle<VarScope::Data*> extraVarScopeBindings() {
MOZ_ASSERT(context->compartment()->runtimeFromAnyThread()->keepAtoms());
MOZ_ASSERT(context->keepAtoms);
return MutableHandle<VarScope::Data*>::fromMarkedLocation(&extraVarScopeBindings_);
}

@ -621,7 +621,7 @@ class MOZ_STACK_CLASS ModuleSharedContext : public SharedContext
Rooted<ModuleScope::Data*> bindings;
ModuleBuilder& builder;

ModuleSharedContext(ExclusiveContext* cx, ModuleObject* module, Scope* enclosingScope,
ModuleSharedContext(JSContext* cx, ModuleObject* module, Scope* enclosingScope,
ModuleBuilder& builder);

HandleModuleObject module() const { return module_; }

@ -170,7 +170,7 @@ class SyntaxParseHandler
}

public:
SyntaxParseHandler(ExclusiveContext* cx, LifoAlloc& alloc,
SyntaxParseHandler(JSContext* cx, LifoAlloc& alloc,
TokenStream& tokenStream, Parser<SyntaxParseHandler>* syntaxParser,
LazyScript* lazyOuterFunction)
: lastAtom(nullptr),
@ -184,7 +184,7 @@ class SyntaxParseHandler

void trace(JSTracer* trc) {}

Node newName(PropertyName* name, const TokenPos& pos, ExclusiveContext* cx) {
Node newName(PropertyName* name, const TokenPos& pos, JSContext* cx) {
lastAtom = name;
if (name == cx->names().arguments)
return NodeUnparenthesizedArgumentsName;
@ -547,11 +547,11 @@ class SyntaxParseHandler
node == NodeParenthesizedName;
}

bool isEvalAnyParentheses(Node node, ExclusiveContext* cx) {
bool isEvalAnyParentheses(Node node, JSContext* cx) {
return node == NodeUnparenthesizedEvalName || node == NodeParenthesizedEvalName;
}

const char* nameIsArgumentsEvalAnyParentheses(Node node, ExclusiveContext* cx) {
const char* nameIsArgumentsEvalAnyParentheses(Node node, JSContext* cx) {
MOZ_ASSERT(isNameAnyParentheses(node),
"must only call this method on known names");

@ -562,7 +562,7 @@ class SyntaxParseHandler
return nullptr;
}

bool isAsyncKeyword(Node node, ExclusiveContext* cx) {
bool isAsyncKeyword(Node node, JSContext* cx) {
return node == NodePotentialAsyncKeyword;
}

@ -185,7 +185,7 @@ frontend::IsKeyword(JSLinearString* str)
return FindKeyword(str) != nullptr;
}

TokenStream::SourceCoords::SourceCoords(ExclusiveContext* cx, uint32_t ln)
TokenStream::SourceCoords::SourceCoords(JSContext* cx, uint32_t ln)
: lineStartOffsets_(cx), initialLineNum_(ln), lastLineIndex_(0)
{
// This is actually necessary! Removing it causes compile errors on
@ -339,7 +339,7 @@ TokenStream::SourceCoords::lineNumAndColumnIndex(uint32_t offset, uint32_t* line
#pragma warning(disable:4351)
#endif

TokenStream::TokenStream(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
TokenStream::TokenStream(JSContext* cx, const ReadOnlyCompileOptions& options,
const char16_t* base, size_t length, StrictModeGetter* smg)
: srcCoords(cx, options.lineno),
options_(options),
@ -652,7 +652,7 @@ TokenStream::reportCompileErrorNumberVA(uint32_t offset, unsigned flags, unsigne
// thread, save the error so that the main thread can report it later.
CompileError tempErr;
CompileError* tempErrPtr = &tempErr;
if (!cx->isJSContext() && !cx->addPendingCompileError(&tempErrPtr))
if (cx->helperThread() && !cx->addPendingCompileError(&tempErrPtr))
return false;
CompileError& err = *tempErrPtr;

@ -670,8 +670,8 @@ TokenStream::reportCompileErrorNumberVA(uint32_t offset, unsigned flags, unsigne

// If we have no location information, try to get one from the caller.
bool callerFilename = false;
if (offset != NoOffset && !err.filename && cx->isJSContext()) {
NonBuiltinFrameIter iter(cx->asJSContext(),
if (offset != NoOffset && !err.filename && !cx->helperThread()) {
NonBuiltinFrameIter iter(cx,
FrameIter::FOLLOW_DEBUGGER_EVAL_PREV_LINK,
cx->compartment()->principals());
if (!iter.done() && iter.filename()) {
@ -736,8 +736,8 @@ TokenStream::reportCompileErrorNumberVA(uint32_t offset, unsigned flags, unsigne
err.initOwnedLinebuf(linebuf.release(), windowLength, offset - windowStart);
}

if (cx->isJSContext())
err.throwError(cx->asJSContext());
if (!cx->helperThread())
err.throwError(cx);

return warning;
}
@ -1069,7 +1069,7 @@ TokenStream::newToken(ptrdiff_t adjust)
}

MOZ_ALWAYS_INLINE JSAtom*
TokenStream::atomize(ExclusiveContext* cx, CharBuffer& cb)
TokenStream::atomize(JSContext* cx, CharBuffer& cb)
{
return AtomizeChars(cx, cb.begin(), cb.length());
}

@ -326,7 +326,7 @@ class MOZ_STACK_CLASS TokenStream
public:
typedef Vector<char16_t, 32> CharBuffer;

TokenStream(ExclusiveContext* cx, const ReadOnlyCompileOptions& options,
TokenStream(JSContext* cx, const ReadOnlyCompileOptions& options,
const char16_t* base, size_t length, StrictModeGetter* smg);

~TokenStream();
@ -480,7 +480,7 @@ class MOZ_STACK_CLASS TokenStream
}
}

static JSAtom* atomize(ExclusiveContext* cx, CharBuffer& cb);
static JSAtom* atomize(JSContext* cx, CharBuffer& cb);
MOZ_MUST_USE bool putIdentInTokenbuf(const char16_t* identStart);

struct Flags
@ -872,7 +872,7 @@ class MOZ_STACK_CLASS TokenStream
uint32_t lineNumToIndex(uint32_t lineNum) const { return lineNum - initialLineNum_; }

public:
SourceCoords(ExclusiveContext* cx, uint32_t ln);
SourceCoords(JSContext* cx, uint32_t ln);

MOZ_MUST_USE bool add(uint32_t lineNum, uint32_t lineStartOffset);
MOZ_MUST_USE bool fill(const SourceCoords& other);
@ -897,7 +897,7 @@ class MOZ_STACK_CLASS TokenStream
return cx->names();
}

ExclusiveContext* context() const {
JSContext* context() const {
return cx;
}

@ -918,7 +918,7 @@ class MOZ_STACK_CLASS TokenStream
// begins, the offset of |buf[0]|.
class TokenBuf {
public:
TokenBuf(ExclusiveContext* cx, const char16_t* buf, size_t length, size_t startOffset)
TokenBuf(JSContext* cx, const char16_t* buf, size_t length, size_t startOffset)
: base_(buf),
startOffset_(startOffset),
limit_(buf + length),
@ -1097,7 +1097,7 @@ class MOZ_STACK_CLASS TokenStream
UniqueTwoByteChars sourceMapURL_; // source map's filename or null
CharBuffer tokenbuf; // current token string buffer
uint8_t isExprEnding[TOK_LIMIT];// which tokens definitely terminate exprs?
ExclusiveContext* const cx;
JSContext* const cx;
bool mutedErrors;
StrictModeGetter* strictModeGetter; // used to test for strict mode
};

@ -25,7 +25,7 @@ using namespace gc;

template <typename T, AllowGC allowGC /* = CanGC */>
JSObject*
js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
js::Allocate(JSContext* cx, AllocKind kind, size_t nDynamicSlots, InitialHeap heap,
const Class* clasp)
{
static_assert(mozilla::IsConvertible<T*, JSObject*>::value, "must be JSObject derived");
@ -40,20 +40,19 @@ js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, Initial
MOZ_ASSERT_IF(nDynamicSlots != 0, clasp->isNative() || clasp->isProxy());

// Off-main-thread alloc cannot trigger GC or make runtime assertions.
if (!cx->isJSContext()) {
if (cx->helperThread()) {
JSObject* obj = GCRuntime::tryNewTenuredObject<NoGC>(cx, kind, thingSize, nDynamicSlots);
if (MOZ_UNLIKELY(allowGC && !obj))
ReportOutOfMemory(cx);
return obj;
}

JSContext* ncx = cx->asJSContext();
JSRuntime* rt = ncx->runtime();
if (!rt->gc.checkAllocatorState<allowGC>(ncx, kind))
JSRuntime* rt = cx->runtime();
if (!rt->gc.checkAllocatorState<allowGC>(cx, kind))
return nullptr;

if (ncx->nursery().isEnabled() && heap != TenuredHeap) {
JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(ncx, thingSize, nDynamicSlots, clasp);
if (cx->nursery().isEnabled() && heap != TenuredHeap) {
JSObject* obj = rt->gc.tryNewNurseryObject<allowGC>(cx, thingSize, nDynamicSlots, clasp);
if (obj)
return obj;

@ -68,10 +67,10 @@ js::Allocate(ExclusiveContext* cx, AllocKind kind, size_t nDynamicSlots, Initial

return GCRuntime::tryNewTenuredObject<allowGC>(cx, kind, thingSize, nDynamicSlots);
}
template JSObject* js::Allocate<JSObject, NoGC>(ExclusiveContext* cx, gc::AllocKind kind,
template JSObject* js::Allocate<JSObject, NoGC>(JSContext* cx, gc::AllocKind kind,
size_t nDynamicSlots, gc::InitialHeap heap,
const Class* clasp);
template JSObject* js::Allocate<JSObject, CanGC>(ExclusiveContext* cx, gc::AllocKind kind,
template JSObject* js::Allocate<JSObject, CanGC>(JSContext* cx, gc::AllocKind kind,
size_t nDynamicSlots, gc::InitialHeap heap,
const Class* clasp);
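
js::Allocate now branches once on cx->helperThread(): helper threads may only take the tenured, never-GC path, while the main thread checks allocator state, prefers the nursery, and falls back to the tenured heap. The control flow above, compressed into comments rather than a compilable unit:

// if (cx->helperThread())
//     return tryNewTenuredObject<NoGC>(...);     // cannot GC; report OOM on failure
// if (!checkAllocatorState<allowGC>(...))
//     return nullptr;                            // main-thread-only bookkeeping
// if (cx->nursery().isEnabled() && heap != TenuredHeap)
//     try the nursery first;                     // fast bump allocation
// return tryNewTenuredObject<allowGC>(...);      // shared fallback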

@ -81,19 +80,19 @@ template <AllowGC allowGC>
JSObject*
GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots, const Class* clasp)
{
MOZ_ASSERT(isNurseryAllocAllowed());
MOZ_ASSERT(!cx->zone()->usedByExclusiveThread);
MOZ_ASSERT(cx->isNurseryAllocAllowed());
MOZ_ASSERT(!cx->helperThread());
MOZ_ASSERT(!IsAtomsCompartment(cx->compartment()));
JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
if (obj)
return obj;

if (allowGC && !rt->mainThread.suppressGC) {
minorGC(JS::gcreason::OUT_OF_NURSERY);
if (allowGC && !cx->suppressGC) {
cx->zone()->group()->minorGC(JS::gcreason::OUT_OF_NURSERY);

// Exceeding gcMaxBytes while tenuring can disable the Nursery.
if (nursery.isEnabled()) {
JSObject* obj = nursery.allocateObject(cx, thingSize, nDynamicSlots, clasp);
if (cx->nursery().isEnabled()) {
JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
MOZ_ASSERT(obj);
return obj;
}
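
tryNewNurseryObject retries exactly once: on nursery exhaustion it runs a minor GC, now requested through the context's zone group, and retries only if tenuring did not disable the nursery. The retry skeleton, using just the calls visible in the hunk (surrounding declarations elided):

JSObject* obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
if (!obj && allowGC && !cx->suppressGC) {
    cx->zone()->group()->minorGC(JS::gcreason::OUT_OF_NURSERY);  // evacuate the nursery
    if (cx->nursery().isEnabled())            // tenuring past gcMaxBytes can disable it
        obj = cx->nursery().allocateObject(cx, thingSize, nDynamicSlots, clasp);
}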
@ -103,7 +102,7 @@ GCRuntime::tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicS

template <AllowGC allowGC>
JSObject*
GCRuntime::tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
GCRuntime::tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
size_t nDynamicSlots)
{
HeapSlot* slots = nullptr;
@ -129,7 +128,7 @@ GCRuntime::tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thin

template <typename T, AllowGC allowGC /* = CanGC */>
T*
js::Allocate(ExclusiveContext* cx)
js::Allocate(JSContext* cx)
{
static_assert(!mozilla::IsConvertible<T*, JSObject*>::value, "must not be JSObject derived");
static_assert(sizeof(T) >= CellSize,
@ -139,9 +138,8 @@ js::Allocate(ExclusiveContext* cx)
size_t thingSize = sizeof(T);
MOZ_ASSERT(thingSize == Arena::thingSize(kind));

if (cx->isJSContext()) {
JSContext* ncx = cx->asJSContext();
if (!ncx->runtime()->gc.checkAllocatorState<allowGC>(ncx, kind))
if (!cx->helperThread()) {
if (!cx->runtime()->gc.checkAllocatorState<allowGC>(cx, kind))
return nullptr;
}

@ -149,14 +147,14 @@ js::Allocate(ExclusiveContext* cx)
}

#define DECL_ALLOCATOR_INSTANCES(allocKind, traceKind, type, sizedType) \
template type* js::Allocate<type, NoGC>(ExclusiveContext* cx);\
template type* js::Allocate<type, CanGC>(ExclusiveContext* cx);
template type* js::Allocate<type, NoGC>(JSContext* cx);\
template type* js::Allocate<type, CanGC>(JSContext* cx);
FOR_EACH_NONOBJECT_ALLOCKIND(DECL_ALLOCATOR_INSTANCES)
#undef DECL_ALLOCATOR_INSTANCES
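
DECL_ALLOCATOR_INSTANCES is the usual X-macro instantiation trick: FOR_EACH_NONOBJECT_ALLOCKIND expands the macro once per allocation kind, emitting the explicit template instantiations that let js::Allocate<T> stay defined in this file. A generic miniature of the same technique, with made-up names:

// Miniature of the X-macro pattern; names here are illustrative, not from the tree.
template <typename T> void print(T) { /* ... */ }
#define FOR_EACH_PRINTED_TYPE(D) D(int) D(double)
#define DECL_PRINT(type) template void print<type>(type);
FOR_EACH_PRINTED_TYPE(DECL_PRINT)   // expands to one explicit instantiation per type
#undef DECL_PRINT
#undef FOR_EACH_PRINTED_TYPE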

template <typename T, AllowGC allowGC>
/* static */ T*
GCRuntime::tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize)
GCRuntime::tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize)
{
// Bump allocate in the arena's current free-list span.
T* t = reinterpret_cast<T*>(cx->arenas()->allocateFromFreeList(kind, thingSize));
@ -166,13 +164,13 @@ GCRuntime::tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thing
// chunks available it may also allocate new memory directly.
t = reinterpret_cast<T*>(refillFreeListFromAnyThread(cx, kind, thingSize));

if (MOZ_UNLIKELY(!t && allowGC && cx->isJSContext())) {
if (MOZ_UNLIKELY(!t && allowGC && !cx->helperThread())) {
// We have no memory available for a new chunk; perform an
// all-compartments, non-incremental, shrinking GC and wait for
// sweeping to finish.
JS::PrepareForFullGC(cx->asJSContext());
cx->asJSContext()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
cx->asJSContext()->gc.waitBackgroundSweepOrAllocEnd();
JS::PrepareForFullGC(cx);
cx->runtime()->gc.gc(GC_SHRINK, JS::gcreason::LAST_DITCH);
cx->runtime()->gc.waitBackgroundSweepOrAllocEnd();

t = tryNewTenuredThing<T, NoGC>(cx, kind, thingSize);
if (!t)
@ -204,13 +202,13 @@ GCRuntime::checkAllocatorState(JSContext* cx, AllocKind kind)
MOZ_ASSERT_IF(!cx->compartment()->isAtomsCompartment(),
kind != AllocKind::ATOM &&
kind != AllocKind::FAT_INLINE_ATOM);
MOZ_ASSERT(!rt->isHeapBusy());
MOZ_ASSERT(isAllocAllowed());
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
MOZ_ASSERT(cx->isAllocAllowed());
#endif

// Crash if we perform a GC action when it is not safe.
if (allowGC && !rt->mainThread.suppressGC)
rt->gc.verifyIsSafeToGC();
if (allowGC && !cx->suppressGC)
cx->verifyIsSafeToGC();

// For testing out of memory conditions
if (js::oom::ShouldFailWithOOM()) {
@ -234,7 +232,7 @@ GCRuntime::gcIfNeededPerAllocation(JSContext* cx)

// Invoking the interrupt callback can fail and we can't usefully
// handle that here. Just check in case we need to collect instead.
if (rt->hasPendingInterrupt())
if (cx->hasPendingInterrupt())
gcIfRequested();

// If we have grown past our GC heap threshold while in the middle of
@ -252,13 +250,13 @@ GCRuntime::gcIfNeededPerAllocation(JSContext* cx)

template <typename T>
/* static */ void
GCRuntime::checkIncrementalZoneState(ExclusiveContext* cx, T* t)
GCRuntime::checkIncrementalZoneState(JSContext* cx, T* t)
{
#ifdef DEBUG
if (!cx->isJSContext())
if (cx->helperThread())
return;

Zone* zone = cx->asJSContext()->zone();
Zone* zone = cx->zone();
MOZ_ASSERT_IF(t && zone->wasGCStarted() && (zone->isGCMarking() || zone->isGCSweeping()),
t->asTenured().arena()->allocatedDuringIncremental);
#endif
@ -281,12 +279,12 @@ GCRuntime::startBackgroundAllocTaskIfIdle()
}

/* static */ TenuredCell*
GCRuntime::refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind, size_t thingSize)
GCRuntime::refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind, size_t thingSize)
{
cx->arenas()->checkEmptyFreeList(thingKind);

if (cx->isJSContext())
return refillFreeListFromMainThread(cx->asJSContext(), thingKind, thingSize);
if (!cx->helperThread())
return refillFreeListFromMainThread(cx, thingKind, thingSize);

return refillFreeListOffMainThread(cx, thingKind);
}
@ -297,17 +295,17 @@ GCRuntime::refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind, size
// It should not be possible to allocate on the main thread while we are
// inside a GC.
Zone *zone = cx->zone();
MOZ_ASSERT(!cx->runtime()->isHeapBusy(), "allocating while under GC");
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy(), "allocating while under GC");

AutoMaybeStartBackgroundAllocation maybeStartBGAlloc;
return cx->arenas()->allocateFromArena(zone, thingKind, CheckThresholds, maybeStartBGAlloc);
}

/* static */ TenuredCell*
GCRuntime::refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind)
GCRuntime::refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind)
{
// A GC may be happening on the main thread, but zones used by exclusive
// contexts are never collected.
// A GC may be happening on the main thread, but zones used by off thread
// tasks are never collected.
Zone* zone = cx->zone();
MOZ_ASSERT(!zone->wasGCStarted());

@ -324,8 +322,8 @@ GCRuntime::refillFreeListInGC(Zone* zone, AllocKind thingKind)

zone->arenas.checkEmptyFreeList(thingKind);
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
MOZ_ASSERT(rt->isHeapCollecting());
MOZ_ASSERT_IF(!rt->isHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());
MOZ_ASSERT(JS::CurrentThreadIsHeapCollecting());
MOZ_ASSERT_IF(!JS::CurrentThreadIsHeapMinorCollecting(), !rt->gc.isBackgroundSweeping());

AutoMaybeStartBackgroundAllocation maybeStartBackgroundAllocation;
return zone->arenas.allocateFromArena(zone, thingKind, DontCheckThresholds,
@ -342,10 +340,10 @@ ArenaLists::allocateFromArena(JS::Zone* zone, AllocKind thingKind,
mozilla::Maybe<AutoLockGC> maybeLock;

// See if we can proceed without taking the GC lock.
if (backgroundFinalizeState[thingKind] != BFS_DONE)
if (backgroundFinalizeState(thingKind) != BFS_DONE)
maybeLock.emplace(rt);

ArenaList& al = arenaLists[thingKind];
ArenaList& al = arenaLists(thingKind);
Arena* arena = al.takeNextArena();
if (arena) {
// Empty arenas should be immediately freed.
@ -380,11 +378,11 @@ ArenaLists::allocateFromArenaInner(JS::Zone* zone, Arena* arena, AllocKind kind)
{
size_t thingSize = Arena::thingSize(kind);

freeLists[kind] = arena->getFirstFreeSpan();
freeLists(kind) = arena->getFirstFreeSpan();

if (MOZ_UNLIKELY(zone->wasGCStarted()))
zone->runtimeFromAnyThread()->gc.arenaAllocatedDuringGC(zone, arena);
TenuredCell* thing = freeLists[kind]->allocate(thingSize);
TenuredCell* thing = freeLists(kind)->allocate(thingSize);
MOZ_ASSERT(thing); // This allocation is infallible.
return thing;
}
@ -559,7 +557,7 @@ GCRuntime::pickChunk(const AutoLockGC& lock,
}

BackgroundAllocTask::BackgroundAllocTask(JSRuntime* rt, ChunkPool& pool)
: runtime(rt),
: GCParallelTask(rt),
chunkPool_(pool),
enabled_(CanUseExtraThreads() && GetCPUCount() >= 2)
{
@ -571,17 +569,17 @@ BackgroundAllocTask::run()
TraceLoggerThread* logger = TraceLoggerForCurrentThread();
AutoTraceLog logAllocation(logger, TraceLogger_GCAllocation);

AutoLockGC lock(runtime);
while (!cancel_ && runtime->gc.wantBackgroundAllocation(lock)) {
AutoLockGC lock(runtime());
while (!cancel_ && runtime()->gc.wantBackgroundAllocation(lock)) {
Chunk* chunk;
{
AutoUnlockGC unlock(lock);
chunk = Chunk::allocate(runtime);
chunk = Chunk::allocate(runtime());
if (!chunk)
break;
chunk->init(runtime);
chunk->init(runtime());
}
chunkPool_.push(chunk);
chunkPool_.ref().push(chunk);
}
}
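
BackgroundAllocTask now inherits its runtime from GCParallelTask, so the raw member becomes a runtime() accessor, and the chunk pool, shared across threads, goes through .ref(), the protected-data wrapper this reorganization introduces. Reduced to its accesses, the loop in run() is:

// Reduced copy of run() above; only the accessor spellings changed in the patch.
AutoLockGC lock(runtime());                          // accessor, not a raw member
while (!cancel_ && runtime()->gc.wantBackgroundAllocation(lock)) {
    Chunk* chunk;
    {
        AutoUnlockGC unlock(lock);                   // allocate outside the GC lock
        chunk = Chunk::allocate(runtime());
        if (!chunk)
            break;
        chunk->init(runtime());
    }
    chunkPool_.ref().push(chunk);                    // shared state via the ref() wrapper
}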

@ -591,7 +589,7 @@ Chunk::allocate(JSRuntime* rt)
Chunk* chunk = static_cast<Chunk*>(MapAlignedPages(ChunkSize, ChunkSize));
if (!chunk)
return nullptr;
rt->gc.stats.count(gcstats::STAT_NEW_CHUNK);
rt->gc.stats().count(gcstats::STAT_NEW_CHUNK);
return chunk;
}

@ -23,11 +23,11 @@ struct Class;
// object-specific optimizations.
template <typename T, AllowGC allowGC = CanGC>
T*
Allocate(ExclusiveContext* cx);
Allocate(JSContext* cx);

template <typename, AllowGC allowGC = CanGC>
JSObject*
Allocate(ExclusiveContext* cx, gc::AllocKind kind, size_t nDynamicSlots, gc::InitialHeap heap,
Allocate(JSContext* cx, gc::AllocKind kind, size_t nDynamicSlots, gc::InitialHeap heap,
const Class* clasp);

} // namespace js

@ -78,8 +78,8 @@ AtomMarkingRuntime::registerArena(Arena* arena)
// We need to find a range of bits from the atoms bitmap for this arena.

// Look for a free range of bits compatible with this arena.
if (freeArenaIndexes.length()) {
arena->atomBitmapStart() = freeArenaIndexes.popCopy();
if (freeArenaIndexes.ref().length()) {
arena->atomBitmapStart() = freeArenaIndexes.ref().popCopy();
return;
}

@ -94,7 +94,7 @@ AtomMarkingRuntime::unregisterArena(Arena* arena)
MOZ_ASSERT(arena->zone->isAtomsZone());

// Leak these atom bits if we run out of memory.
mozilla::Unused << freeArenaIndexes.emplaceBack(arena->atomBitmapStart());
mozilla::Unused << freeArenaIndexes.ref().emplaceBack(arena->atomBitmapStart());
}

bool
@ -127,13 +127,13 @@ AtomMarkingRuntime::updateZoneBitmap(Zone* zone, const Bitmap& bitmap)

// |bitmap| was produced by computeBitmapFromChunkMarkBits, so it should
// have the maximum possible size.
MOZ_ASSERT(zone->markedAtoms.length() <= bitmap.length());
MOZ_ASSERT(zone->markedAtoms().length() <= bitmap.length());

// Take the bitwise and between the two mark bitmaps to get the best new
// overapproximation we can. |bitmap| might include bits that are not in
// the zone's mark bitmap, if additional zones were collected by the GC.
for (size_t i = 0; i < zone->markedAtoms.length(); i++)
zone->markedAtoms[i] &= bitmap[i];
for (size_t i = 0; i < zone->markedAtoms().length(); i++)
zone->markedAtoms()[i] &= bitmap[i];
}
|
||||
|
||||
// Set any bits in the chunk mark bitmaps for atoms which are marked in bitmap.
|
||||
@ -179,16 +179,16 @@ AtomMarkingRuntime::updateChunkMarkBits(JSRuntime* runtime)
|
||||
// not collected in the current GC. Atoms which are referenced by
|
||||
// collected zones have already been marked.
|
||||
if (!zone->isCollectingFromAnyThread()) {
|
||||
MOZ_ASSERT(zone->markedAtoms.length() <= allocatedWords);
|
||||
for (size_t i = 0; i < zone->markedAtoms.length(); i++)
|
||||
markedUnion[i] |= zone->markedAtoms[i];
|
||||
MOZ_ASSERT(zone->markedAtoms().length() <= allocatedWords);
|
||||
for (size_t i = 0; i < zone->markedAtoms().length(); i++)
|
||||
markedUnion[i] |= zone->markedAtoms()[i];
|
||||
}
|
||||
}
|
||||
AddBitmapToChunkMarkBits(runtime, markedUnion);
|
||||
} else {
|
||||
for (ZonesIter zone(runtime, SkipAtoms); !zone.done(); zone.next()) {
|
||||
if (!zone->isCollectingFromAnyThread())
|
||||
AddBitmapToChunkMarkBits(runtime, zone->markedAtoms);
|
||||
AddBitmapToChunkMarkBits(runtime, zone->markedAtoms());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -214,7 +214,7 @@ ThingIsPermanent(TenuredCell* thing)
|
||||
}
|
||||
|
||||
void
|
||||
AtomMarkingRuntime::markAtom(ExclusiveContext* cx, TenuredCell* thing)
|
||||
AtomMarkingRuntime::markAtom(JSContext* cx, TenuredCell* thing)
|
||||
{
|
||||
// The context's zone will be null during initialization of the runtime.
|
||||
if (!thing || !cx->zone())
|
||||
@ -228,13 +228,13 @@ AtomMarkingRuntime::markAtom(ExclusiveContext* cx, TenuredCell* thing)
|
||||
|
||||
{
|
||||
AutoEnterOOMUnsafeRegion oomUnsafe;
|
||||
if (!EnsureBitmapLength(cx->zone()->markedAtoms, allocatedWords))
|
||||
if (!EnsureBitmapLength(cx->zone()->markedAtoms(), allocatedWords))
|
||||
oomUnsafe.crash("Atom bitmap OOM");
|
||||
}
|
||||
|
||||
SetBit(cx->zone()->markedAtoms.begin(), bit);
|
||||
SetBit(cx->zone()->markedAtoms().begin(), bit);
|
||||
|
||||
if (cx->isJSContext()) {
|
||||
if (!cx->helperThread()) {
|
||||
// Trigger a read barrier on the atom, in case there is an incremental
|
||||
// GC in progress. This is necessary if the atom is being marked
|
||||
// because a reference to it was obtained from another zone which is
|
||||
@ -244,14 +244,14 @@ AtomMarkingRuntime::markAtom(ExclusiveContext* cx, TenuredCell* thing)
|
||||
}
|
||||
|
||||
void
|
||||
AtomMarkingRuntime::markId(ExclusiveContext* cx, jsid id)
|
||||
AtomMarkingRuntime::markId(JSContext* cx, jsid id)
|
||||
{
|
||||
if (JSID_IS_GCTHING(id))
|
||||
markAtom(cx, &JSID_TO_GCTHING(id).asCell()->asTenured());
|
||||
}
|
||||
|
||||
void
|
||||
AtomMarkingRuntime::markAtomValue(ExclusiveContext* cx, const Value& value)
|
||||
AtomMarkingRuntime::markAtomValue(JSContext* cx, const Value& value)
|
||||
{
|
||||
if (value.isGCThing()) {
|
||||
Cell* thing = value.toGCThing();
|
||||
@ -265,17 +265,17 @@ AtomMarkingRuntime::adoptMarkedAtoms(Zone* target, Zone* source)
|
||||
{
|
||||
MOZ_ASSERT(target->runtimeFromAnyThread()->currentThreadHasExclusiveAccess());
|
||||
|
||||
Bitmap* targetBitmap = &target->markedAtoms;
|
||||
Bitmap* sourceBitmap = &source->markedAtoms;
|
||||
Bitmap* targetBitmap = &target->markedAtoms();
|
||||
Bitmap* sourceBitmap = &source->markedAtoms();
|
||||
if (targetBitmap->length() < sourceBitmap->length())
|
||||
std::swap(targetBitmap, sourceBitmap);
|
||||
for (size_t i = 0; i < sourceBitmap->length(); i++)
|
||||
(*targetBitmap)[i] |= (*sourceBitmap)[i];
|
||||
|
||||
if (targetBitmap != &target->markedAtoms)
|
||||
target->markedAtoms = Move(source->markedAtoms);
|
||||
if (targetBitmap != &target->markedAtoms())
|
||||
target->markedAtoms() = Move(source->markedAtoms());
|
||||
else
|
||||
source->markedAtoms.clear();
|
||||
source->markedAtoms().clear();
|
||||
}
|
||||
|
||||
#ifdef DEBUG
|
||||
@ -301,9 +301,9 @@ AtomMarkingRuntime::atomIsMarked(Zone* zone, Cell* thingArg)
|
||||
}
|
||||
|
||||
size_t bit = GetAtomBit(thing);
|
||||
if (bit >= zone->markedAtoms.length() * JS_BITS_PER_WORD)
|
||||
if (bit >= zone->markedAtoms().length() * JS_BITS_PER_WORD)
|
||||
return false;
|
||||
return GetBit(zone->markedAtoms.begin(), bit);
|
||||
return GetBit(zone->markedAtoms().begin(), bit);
|
||||
}
|
||||
|
||||
bool
|
||||
|
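The adoptMarkedAtoms hunk above ORs the shorter mark bitmap into the longer one and then moves the result into the target zone. A self-contained sketch of that merge, using std::vector as a stand-in for the real Bitmap type:

#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

using Bitmap = std::vector<uintptr_t>;

void adoptMarkedAtomsSketch(Bitmap& target, Bitmap& source)
{
    Bitmap* longer = &target;
    Bitmap* shorter = &source;
    if (longer->size() < shorter->size())
        std::swap(longer, shorter);

    // OR every word of the shorter bitmap into the longer one.
    for (size_t i = 0; i < shorter->size(); i++)
        (*longer)[i] |= (*shorter)[i];

    if (longer != &target)
        target = std::move(source);   // the merged bitmap lives in |source|
    else
        source.clear();
}

int main()
{
    Bitmap target{0x1}, source{0x2, 0x4};
    adoptMarkedAtomsSketch(target, source);
    return target.size() == 2 ? 0 : 1;   // target now holds the union
}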
@ -9,6 +9,7 @@

#include "NamespaceImports.h"
#include "gc/Heap.h"
#include "threading/ProtectedData.h"

namespace js {
namespace gc {
@ -18,7 +19,7 @@ namespace gc {
class AtomMarkingRuntime
{
// Unused arena atom bitmap indexes. Protected by the GC lock.
Vector<size_t, 0, SystemAllocPolicy> freeArenaIndexes;
js::ExclusiveAccessLockOrGCTaskData<Vector<size_t, 0, SystemAllocPolicy>> freeArenaIndexes;

// The extent of all allocated and free words in atom mark bitmaps.
// This monotonically increases and may be read from without locking.
@ -51,9 +52,9 @@ class AtomMarkingRuntime
void updateChunkMarkBits(JSRuntime* runtime);

// Mark an atom or id as being newly reachable by the context's zone.
void markAtom(ExclusiveContext* cx, TenuredCell* thing);
void markId(ExclusiveContext* cx, jsid id);
void markAtomValue(ExclusiveContext* cx, const Value& value);
void markAtom(JSContext* cx, TenuredCell* thing);
void markId(JSContext* cx, jsid id);
void markAtomValue(JSContext* cx, const Value& value);

// Mark all atoms in |source| as being reachable within |target|.
void adoptMarkedAtoms(Zone* target, Zone* source);
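registerArena/unregisterArena, shown earlier, recycle released bitmap ranges through freeArenaIndexes before growing the bitmap. A compilable sketch of that free-list discipline (all names and the words-per-arena constant are invented):

#include <cstddef>
#include <vector>

struct AtomBitmapAllocatorSketch
{
    std::vector<size_t> freeArenaIndexes;
    size_t allocatedWords = 0;
    static const size_t WordsPerArena = 8;   // arbitrary for the sketch

    size_t registerArena() {
        if (!freeArenaIndexes.empty()) {      // reuse a released range first
            size_t start = freeArenaIndexes.back();
            freeArenaIndexes.pop_back();
            return start;
        }
        size_t start = allocatedWords;        // otherwise extend the bitmap
        allocatedWords += WordsPerArena;
        return start;
    }

    void unregisterArena(size_t start) {
        // The real code tolerates OOM here by leaking the range instead.
        freeArenaIndexes.push_back(start);
    }
};

int main()
{
    AtomBitmapAllocatorSketch a;
    size_t first = a.registerArena();        // fresh range at word 0
    a.unregisterArena(first);
    return a.registerArena() == first ? 0 : 1;   // range is recycled
}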
@ -24,7 +24,8 @@ namespace js {
bool
RuntimeFromMainThreadIsHeapMajorCollecting(JS::shadow::Zone* shadowZone)
{
return shadowZone->runtimeFromMainThread()->isHeapMajorCollecting();
MOZ_ASSERT(CurrentThreadCanAccessRuntime(shadowZone->runtimeFromMainThread()));
return JS::CurrentThreadIsHeapMajorCollecting();
}

#ifdef DEBUG
@ -67,19 +68,19 @@ HeapSlot::assertPreconditionForWriteBarrierPost(NativeObject* obj, Kind kind, ui
bool
CurrentThreadIsIonCompiling()
{
return TlsPerThreadData.get()->ionCompiling;
return TlsContext.get()->ionCompiling;
}

bool
CurrentThreadIsIonCompilingSafeForMinorGC()
{
return TlsPerThreadData.get()->ionCompilingSafeForMinorGC;
return TlsContext.get()->ionCompilingSafeForMinorGC;
}

bool
CurrentThreadIsGCSweeping()
{
return TlsPerThreadData.get()->gcSweeping;
return TlsContext.get()->gcSweeping;
}

#endif // DEBUG
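The TlsPerThreadData → TlsContext switch above hangs per-thread flags off a single thread-local context pointer. A minimal sketch of the pattern, with invented names (ContextSketch, TlsContextSketch):

#include <cassert>

struct ContextSketch
{
    bool ionCompiling = false;
    bool gcSweeping = false;
};

// One context pointer per thread; the real TlsContext wraps this in a class.
thread_local ContextSketch* TlsContextSketch = nullptr;

bool CurrentThreadIsIonCompilingSketch()
{
    assert(TlsContextSketch);
    return TlsContextSketch->ionCompiling;
}

int main()
{
    ContextSketch cx;
    TlsContextSketch = &cx;               // bind this thread to a context
    cx.ionCompiling = true;
    return CurrentThreadIsIonCompilingSketch() ? 0 : 1;
}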
@ -85,16 +85,16 @@ class MOZ_RAII AutoStopVerifyingBarriers
// inside of an outer minor GC. This is not allowed by the
// gc::Statistics phase tree. So we pause the "real" GC, if in fact one
// is in progress.
gcstats::Phase outer = gc->stats.currentPhase();
gcstats::Phase outer = gc->stats().currentPhase();
if (outer != gcstats::PHASE_NONE)
gc->stats.endPhase(outer);
MOZ_ASSERT(gc->stats.currentPhase() == gcstats::PHASE_NONE);
gc->stats().endPhase(outer);
MOZ_ASSERT(gc->stats().currentPhase() == gcstats::PHASE_NONE);

if (restartPreVerifier)
gc->startVerifyPreBarriers();

if (outer != gcstats::PHASE_NONE)
gc->stats.beginPhase(outer);
gc->stats().beginPhase(outer);
}
};
#else
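AutoStopVerifyingBarriers ends the current stats phase before the nested verifier GC and re-begins it afterwards, so the phase tree never sees one GC inside another. A sketch of that pause/resume dance against an invented StatsSketch:

#include <cassert>
#include <vector>

enum Phase { PHASE_NONE, PHASE_MARK };

struct StatsSketch
{
    std::vector<Phase> stack;
    Phase currentPhase() const { return stack.empty() ? PHASE_NONE : stack.back(); }
    void beginPhase(Phase p) { stack.push_back(p); }
    void endPhase(Phase p) { assert(currentPhase() == p); stack.pop_back(); }
};

void runNestedGC(StatsSketch& stats)
{
    Phase outer = stats.currentPhase();
    if (outer != PHASE_NONE)
        stats.endPhase(outer);          // pause the "real" GC's phase
    assert(stats.currentPhase() == PHASE_NONE);

    // ... the nested verifier GC would run here ...

    if (outer != PHASE_NONE)
        stats.beginPhase(outer);        // resume where we left off
}

int main()
{
    StatsSketch stats;
    stats.beginPhase(PHASE_MARK);
    runNestedGC(stats);
    return stats.currentPhase() == PHASE_MARK ? 0 : 1;
}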
@ -29,7 +29,7 @@ class VerifyPreTracer;

namespace gc {

typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;
typedef Vector<ZoneGroup*, 4, SystemAllocPolicy> ZoneGroupVector;
using BlackGrayEdgeVector = Vector<TenuredCell*, 0, SystemAllocPolicy>;

class AutoMaybeStartBackgroundAllocation;
@ -77,8 +77,7 @@ class ChunkPool
class BackgroundAllocTask : public GCParallelTask
{
// Guarded by the GC lock.
JSRuntime* runtime;
ChunkPool& chunkPool_;
GCLockData<ChunkPool&> chunkPool_;

const bool enabled_;

@ -96,15 +95,14 @@ class BackgroundDecommitTask : public GCParallelTask
public:
using ChunkVector = mozilla::Vector<Chunk*>;

explicit BackgroundDecommitTask(JSRuntime *rt) : runtime(rt) {}
explicit BackgroundDecommitTask(JSRuntime *rt) : GCParallelTask(rt) {}
void setChunksToScan(ChunkVector &chunks);

protected:
void run() override;

private:
JSRuntime* runtime;
ChunkVector toDecommit;
UnprotectedData<ChunkVector> toDecommit;
};

/*
@ -119,70 +117,74 @@ class GCSchedulingTunables
* subsequently invoke the standard OOM machinery, independent of available
* physical memory.
*/
size_t gcMaxBytes_;
UnprotectedData<size_t> gcMaxBytes_;

/* Maximum nursery size for each zone group. */
UnprotectedData<size_t> gcMaxNurseryBytes_;

/*
* The base value used to compute zone->trigger.gcBytes(). When
* usage.gcBytes() surpasses threshold.gcBytes() for a zone, the zone may
* be scheduled for a GC, depending on the exact circumstances.
*/
size_t gcZoneAllocThresholdBase_;
UnprotectedData<size_t> gcZoneAllocThresholdBase_;

/* Fraction of threshold.gcBytes() which triggers an incremental GC. */
double zoneAllocThresholdFactor_;
UnprotectedData<double> zoneAllocThresholdFactor_;

/*
* Number of bytes to allocate between incremental slices in GCs triggered
* by the zone allocation threshold.
*/
size_t zoneAllocDelayBytes_;
UnprotectedData<size_t> zoneAllocDelayBytes_;

/*
* Totally disables |highFrequencyGC|, the HeapGrowthFactor, and other
* tunables that make GC non-deterministic.
*/
bool dynamicHeapGrowthEnabled_;
UnprotectedData<bool> dynamicHeapGrowthEnabled_;

/*
* We enter high-frequency mode if we GC twice within this many
* microseconds. This value is stored directly in microseconds.
*/
uint64_t highFrequencyThresholdUsec_;
UnprotectedData<uint64_t> highFrequencyThresholdUsec_;

/*
* When in the |highFrequencyGC| mode, these parameterize the per-zone
* "HeapGrowthFactor" computation.
*/
uint64_t highFrequencyLowLimitBytes_;
uint64_t highFrequencyHighLimitBytes_;
double highFrequencyHeapGrowthMax_;
double highFrequencyHeapGrowthMin_;
UnprotectedData<uint64_t> highFrequencyLowLimitBytes_;
UnprotectedData<uint64_t> highFrequencyHighLimitBytes_;
UnprotectedData<double> highFrequencyHeapGrowthMax_;
UnprotectedData<double> highFrequencyHeapGrowthMin_;

/*
* When not in |highFrequencyGC| mode, this is the global (stored per-zone)
* "HeapGrowthFactor".
*/
double lowFrequencyHeapGrowth_;
UnprotectedData<double> lowFrequencyHeapGrowth_;

/*
* Doubles the length of IGC slices when in the |highFrequencyGC| mode.
*/
bool dynamicMarkSliceEnabled_;
UnprotectedData<bool> dynamicMarkSliceEnabled_;

/*
* Controls whether painting can trigger IGC slices.
*/
bool refreshFrameSlicesEnabled_;
UnprotectedData<bool> refreshFrameSlicesEnabled_;

/*
* Controls the number of empty chunks reserved for future allocation.
*/
uint32_t minEmptyChunkCount_;
uint32_t maxEmptyChunkCount_;
UnprotectedData<uint32_t> minEmptyChunkCount_;
UnprotectedData<uint32_t> maxEmptyChunkCount_;

public:
GCSchedulingTunables()
: gcMaxBytes_(0),
gcMaxNurseryBytes_(0),
gcZoneAllocThresholdBase_(30 * 1024 * 1024),
zoneAllocThresholdFactor_(0.9),
zoneAllocDelayBytes_(1024 * 1024),
@ -200,6 +202,7 @@ class GCSchedulingTunables
{}

size_t gcMaxBytes() const { return gcMaxBytes_; }
size_t gcMaxNurseryBytes() const { return gcMaxNurseryBytes_; }
size_t gcZoneAllocThresholdBase() const { return gcZoneAllocThresholdBase_; }
double zoneAllocThresholdFactor() const { return zoneAllocThresholdFactor_; }
size_t zoneAllocDelayBytes() const { return zoneAllocDelayBytes_; }
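Most tunables above keep their one-line accessors even after the fields gain UnprotectedData wrappers; an implicit conversion operator on the wrapper is enough for `return gcMaxBytes_;` to keep compiling. A sketch under that assumption (UnprotectedDataSketch is invented and skips the real thread checks):

#include <cstddef>

template <typename T>
class UnprotectedDataSketch
{
    T value;

  public:
    explicit UnprotectedDataSketch(const T& v) : value(v) {}

    operator const T&() const { return value; }   // reads convert implicitly
    UnprotectedDataSketch& operator=(const T& v) { value = v; return *this; }
};

class TunablesSketch
{
    UnprotectedDataSketch<size_t> gcMaxBytes_;

  public:
    TunablesSketch() : gcMaxBytes_(0) {}
    size_t gcMaxBytes() const { return gcMaxBytes_; }   // unchanged accessor
    void setGCMaxBytes(size_t v) { gcMaxBytes_ = v; }
};

int main()
{
    TunablesSketch t;
    t.setGCMaxBytes(64 * 1024);
    return t.gcMaxBytes() == 64 * 1024 ? 0 : 1;
}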
@ -523,7 +526,7 @@ class GCSchedulingState
* growth factor is a measure of how large (as a percentage of the last GC)
* the heap is allowed to grow before we try to schedule another GC.
*/
bool inHighFrequencyGCMode_;
UnprotectedData<bool> inHighFrequencyGCMode_;

public:
GCSchedulingState()
@ -542,8 +545,8 @@ class GCSchedulingState

template<typename F>
struct Callback {
F op;
void* data;
UnprotectedData<F> op;
UnprotectedData<void*> data;

Callback()
: op(nullptr), data(nullptr)
@ -554,7 +557,7 @@ struct Callback {
};

template<typename F>
using CallbackVector = Vector<Callback<F>, 4, SystemAllocPolicy>;
using CallbackVector = UnprotectedData<Vector<Callback<F>, 4, SystemAllocPolicy>>;

template <typename T, typename Iter0, typename Iter1>
class ChainedIter
@ -618,11 +621,6 @@ class GCRuntime
// The return value indicates if we were able to do the GC.
bool triggerZoneGC(Zone* zone, JS::gcreason::Reason reason);
void maybeGC(Zone* zone);
void minorGC(JS::gcreason::Reason reason,
gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
}
// The return value indicates whether a major GC was performed.
bool gcIfRequested();
void gc(JSGCInvocationKind gckind, JS::gcreason::Reason reason);
@ -708,45 +706,11 @@ class GCRuntime
}

#ifdef DEBUG
bool isAllocAllowed() { return noGCOrAllocationCheck == 0; }
void disallowAlloc() { ++noGCOrAllocationCheck; }
void allowAlloc() {
MOZ_ASSERT(!isAllocAllowed());
--noGCOrAllocationCheck;
}

bool isNurseryAllocAllowed() { return noNurseryAllocationCheck == 0; }
void disallowNurseryAlloc() { ++noNurseryAllocationCheck; }
void allowNurseryAlloc() {
MOZ_ASSERT(!isNurseryAllocAllowed());
--noNurseryAllocationCheck;
}

bool isStrictProxyCheckingEnabled() { return disableStrictProxyCheckingCount == 0; }
void disableStrictProxyChecking() { ++disableStrictProxyCheckingCount; }
void enableStrictProxyChecking() {
MOZ_ASSERT(disableStrictProxyCheckingCount > 0);
--disableStrictProxyCheckingCount;
bool currentThreadHasLockedGC() const {
return lock.ownedByCurrentThread();
}
#endif // DEBUG

bool isInsideUnsafeRegion() { return inUnsafeRegion != 0; }
void enterUnsafeRegion() {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
++inUnsafeRegion;
}
void leaveUnsafeRegion() {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
MOZ_ASSERT(inUnsafeRegion > 0);
--inUnsafeRegion;
}

void verifyIsSafeToGC() {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
MOZ_DIAGNOSTIC_ASSERT(!isInsideUnsafeRegion(),
"[AutoAssertNoGC] possible GC in GC-unsafe region");
}

void setAlwaysPreserveCode() { alwaysPreserveCode = true; }

bool isIncrementalGCAllowed() const { return incrementalAllowed; }
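The inUnsafeRegion counter surviving in this hunk backs the AutoAssertNoGC machinery: guards increment it on entry, drop it on exit, and any GC attempt asserts that it is zero. A hedged sketch (GCSketch/AutoNoGCSketch are invented, and the real code also checks thread affinity):

#include <cassert>

struct GCSketch
{
    int inUnsafeRegion = 0;

    void verifyIsSafeToGC() const {
        assert(inUnsafeRegion == 0 && "possible GC in GC-unsafe region");
    }
};

class AutoNoGCSketch
{
    GCSketch& gc;

  public:
    explicit AutoNoGCSketch(GCSketch& gc) : gc(gc) { ++gc.inUnsafeRegion; }
    ~AutoNoGCSketch() {
        assert(gc.inUnsafeRegion > 0);
        --gc.inUnsafeRegion;
    }
};

int main()
{
    GCSketch gc;
    {
        AutoNoGCSketch guard(gc);
        // gc.verifyIsSafeToGC() would assert inside this scope
    }
    gc.verifyIsSafeToGC();   // fine again once the guard is gone
    return 0;
}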
@ -755,12 +719,6 @@ class GCRuntime
bool isIncrementalGCEnabled() const { return mode == JSGC_MODE_INCREMENTAL && incrementalAllowed; }
bool isIncrementalGCInProgress() const { return state() != State::NotActive; }

bool isGenerationalGCEnabled() const { return generationalDisabled == 0; }
void disableGenerationalGC();
void enableGenerationalGC();

void disableCompactingGC();
void enableCompactingGC();
bool isCompactingGCEnabled() const;

void setGrayRootsTracer(JSTraceDataOp traceOp, void* data);
@ -798,7 +756,7 @@ class GCRuntime
JS::Zone* getCurrentZoneGroup() { return currentZoneGroup; }
void setFoundBlackGrayEdges(TenuredCell& target) {
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!foundBlackGrayEdges.append(&target))
if (!foundBlackGrayEdges.ref().append(&target))
oomUnsafe.crash("OOM|small: failed to insert into foundBlackGrayEdges");
}

@ -816,6 +774,9 @@ class GCRuntime
bool isFullGc() const { return isFull; }
bool isCompactingGc() const { return isCompacting; }

bool areGrayBitsValid() const { return grayBitsValid; }
void setGrayBitsInvalid() { grayBitsValid = false; }

bool minorGCRequested() const { return minorGCTriggerReason != JS::gcreason::NO_REASON; }
bool majorGCRequested() const { return majorGCTriggerReason != JS::gcreason::NO_REASON; }
bool isGcNeeded() { return minorGCRequested() || majorGCRequested(); }
@ -834,15 +795,15 @@ class GCRuntime
inline void updateOnFreeArenaAlloc(const ChunkInfo& info);
inline void updateOnArenaFree(const ChunkInfo& info);

ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_; }
ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_; }
ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_; }
const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_; }
const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_; }
const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_; }
ChunkPool& fullChunks(const AutoLockGC& lock) { return fullChunks_.ref(); }
ChunkPool& availableChunks(const AutoLockGC& lock) { return availableChunks_.ref(); }
ChunkPool& emptyChunks(const AutoLockGC& lock) { return emptyChunks_.ref(); }
const ChunkPool& fullChunks(const AutoLockGC& lock) const { return fullChunks_.ref(); }
const ChunkPool& availableChunks(const AutoLockGC& lock) const { return availableChunks_.ref(); }
const ChunkPool& emptyChunks(const AutoLockGC& lock) const { return emptyChunks_.ref(); }
typedef ChainedIter<Chunk*, ChunkPool::Iter, ChunkPool::Iter> NonEmptyChunksIter;
NonEmptyChunksIter allNonEmptyChunks() {
return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_), ChunkPool::Iter(fullChunks_));
return NonEmptyChunksIter(ChunkPool::Iter(availableChunks_.ref()), ChunkPool::Iter(fullChunks_.ref()));
}

Chunk* getOrAllocChunk(const AutoLockGC& lock,
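The chunk-pool accessors above take an AutoLockGC reference purely as a capability token: the caller cannot name the pool without proving it holds the GC lock. A stand-alone sketch of the idiom with simplified types:

#include <mutex>
#include <vector>

struct AutoLockGCSketch
{
    explicit AutoLockGCSketch(std::mutex& m) : guard(m) {}
    std::lock_guard<std::mutex> guard;
};

class GCRuntimeSketch
{
    std::mutex lock_;
    std::vector<int> emptyChunks_;

  public:
    // Unreachable without constructing an AutoLockGCSketch first; the
    // parameter is never used, it only proves the lock is held.
    std::vector<int>& emptyChunks(const AutoLockGCSketch&) { return emptyChunks_; }
    std::mutex& lock() { return lock_; }
};

int main()
{
    GCRuntimeSketch gc;
    AutoLockGCSketch lock(gc.lock());
    gc.emptyChunks(lock).push_back(1);
    return 0;
}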
@ -861,12 +822,6 @@ class GCRuntime
// Free certain LifoAlloc blocks when it is safe to do so.
void freeUnusedLifoBlocksAfterSweeping(LifoAlloc* lifo);
void freeAllLifoBlocksAfterSweeping(LifoAlloc* lifo);
void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);

// Queue a thunk to run after the next minor GC.
void callAfterMinorGC(void (*thunk)(void* data), void* data) {
nursery.queueSweepAction(thunk, data);
}

// Public here for ReleaseArenaLists and FinalizeTypedArenas.
void releaseArena(Arena* arena, const AutoLockGC& lock);
@ -881,10 +836,10 @@ class GCRuntime
JSObject* tryNewNurseryObject(JSContext* cx, size_t thingSize, size_t nDynamicSlots,
const Class* clasp);
template <AllowGC allowGC>
static JSObject* tryNewTenuredObject(ExclusiveContext* cx, AllocKind kind, size_t thingSize,
static JSObject* tryNewTenuredObject(JSContext* cx, AllocKind kind, size_t thingSize,
size_t nDynamicSlots);
template <typename T, AllowGC allowGC>
static T* tryNewTenuredThing(ExclusiveContext* cx, AllocKind kind, size_t thingSize);
static T* tryNewTenuredThing(JSContext* cx, AllocKind kind, size_t thingSize);
static TenuredCell* refillFreeListInGC(Zone* zone, AllocKind thingKind);

private:
@ -905,12 +860,12 @@ class GCRuntime
// Allocator internals
MOZ_MUST_USE bool gcIfNeededPerAllocation(JSContext* cx);
template <typename T>
static void checkIncrementalZoneState(ExclusiveContext* cx, T* t);
static TenuredCell* refillFreeListFromAnyThread(ExclusiveContext* cx, AllocKind thingKind,
static void checkIncrementalZoneState(JSContext* cx, T* t);
static TenuredCell* refillFreeListFromAnyThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListFromMainThread(JSContext* cx, AllocKind thingKind,
size_t thingSize);
static TenuredCell* refillFreeListOffMainThread(ExclusiveContext* cx, AllocKind thingKind);
static TenuredCell* refillFreeListOffMainThread(JSContext* cx, AllocKind thingKind);

/*
* Return the list of chunks that can be released outside the GC lock.
@ -1018,18 +973,21 @@ class GCRuntime
void callWeakPointerCompartmentCallbacks(JSCompartment* comp) const;

public:
JSRuntime* rt;
JSRuntime* const rt;

/* Embedders can use this zone however they wish. */
JS::Zone* systemZone;
UnprotectedData<JS::Zone*> systemZone;

/* List of compartments and zones (protected by the GC lock). */
ZoneVector zones;
// List of all zone groups (protected by the GC lock).
UnprotectedData<ZoneGroupVector> groups;

Nursery nursery;
StoreBuffer storeBuffer;
// The unique atoms zone, which has no zone group.
WriteOnceData<Zone*> atomsZone;

gcstats::Statistics stats;
private:
UnprotectedData<gcstats::Statistics> stats_;
public:
gcstats::Statistics& stats() { return stats_.ref(); }

GCMarker marker;
@ -1051,7 +1009,7 @@ class GCRuntime
// needed or eventually expired if not re-used. The emptyChunks pool gets
// refilled from the background allocation task heuristically so that empty
// chunks should always be available for immediate allocation without syscalls.
ChunkPool emptyChunks_;
GCLockData<ChunkPool> emptyChunks_;

// Chunks which have had some, but not all, of their arenas allocated live
// in the available chunk lists. When all available arenas in a chunk have
@ -1059,15 +1017,15 @@ class GCRuntime
// to the fullChunks pool. During a GC, if all arenas are free, the chunk
// is moved back to the emptyChunks pool and scheduled for eventual
// release.
ChunkPool availableChunks_;
UnprotectedData<ChunkPool> availableChunks_;

// When all arenas in a chunk are used, it is moved to the fullChunks pool
// so as to reduce the cost of operations on the available lists.
ChunkPool fullChunks_;
UnprotectedData<ChunkPool> fullChunks_;

RootedValueMap rootsHash;
UnprotectedData<RootedValueMap> rootsHash;

size_t maxMallocBytes;
UnprotectedData<size_t> maxMallocBytes;

// An incrementing id used to assign unique ids to cells that require one.
mozilla::Atomic<uint64_t, mozilla::ReleaseAcquire> nextCellUniqueId_;
@ -1076,18 +1034,18 @@ class GCRuntime
* Number of the committed arenas in all GC chunks including empty chunks.
*/
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> numArenasFreeCommitted;
VerifyPreTracer* verifyPreData;
UnprotectedData<VerifyPreTracer*> verifyPreData;

private:
bool chunkAllocationSinceLastGC;
int64_t lastGCTime;
UnprotectedData<bool> chunkAllocationSinceLastGC;
UnprotectedData<int64_t> lastGCTime;

JSGCMode mode;
UnprotectedData<JSGCMode> mode;

mozilla::Atomic<size_t, mozilla::ReleaseAcquire> numActiveZoneIters;

/* During shutdown, the GC needs to clean up every possible object. */
bool cleanUpEverything;
UnprotectedData<bool> cleanUpEverything;

// Gray marking must be done after all black marking is complete. However,
// we do not have write barriers on XPConnect roots. Therefore, XPConnect
@ -1100,7 +1058,7 @@ class GCRuntime
Okay,
Failed
};
GrayBufferState grayBufferState;
UnprotectedData<GrayBufferState> grayBufferState;
bool hasBufferedGrayRoots() const { return grayBufferState == GrayBufferState::Okay; }

// Clear each zone's gray buffers, but do not change the current state.
@ -1112,99 +1070,92 @@ class GCRuntime
resetBufferedGrayRoots();
}

/*
* The gray bits can become invalid if UnmarkGray overflows the stack. A
* full GC will reset this bit, since it fills in all the gray bits.
*/
UnprotectedData<bool> grayBitsValid;

mozilla::Atomic<JS::gcreason::Reason, mozilla::Relaxed> majorGCTriggerReason;

JS::gcreason::Reason minorGCTriggerReason;
public:
UnprotectedData<JS::gcreason::Reason> minorGCTriggerReason;

private:
/* Perform full GC if rt->keepAtoms() becomes false. */
bool fullGCForAtomsRequested_;
UnprotectedData<bool> fullGCForAtomsRequested_;

/* Incremented at the start of every minor GC. */
uint64_t minorGCNumber;
UnprotectedData<uint64_t> minorGCNumber;

/* Incremented at the start of every major GC. */
uint64_t majorGCNumber;
UnprotectedData<uint64_t> majorGCNumber;

/* The major GC number at which to release observed type information. */
uint64_t jitReleaseNumber;
UnprotectedData<uint64_t> jitReleaseNumber;

/* Incremented on every GC slice. */
uint64_t number;
UnprotectedData<uint64_t> number;

/* The number at the time of the most recent GC's first slice. */
uint64_t startNumber;
UnprotectedData<uint64_t> startNumber;

/* Whether the currently running GC can finish in multiple slices. */
bool isIncremental;
UnprotectedData<bool> isIncremental;

/* Whether all zones are being collected in first GC slice. */
bool isFull;
UnprotectedData<bool> isFull;

/* Whether the heap will be compacted at the end of GC. */
bool isCompacting;
UnprotectedData<bool> isCompacting;

/* The invocation kind of the current GC, taken from the first slice. */
JSGCInvocationKind invocationKind;
UnprotectedData<JSGCInvocationKind> invocationKind;

/* The initial GC reason, taken from the first slice. */
JS::gcreason::Reason initialReason;

#ifdef DEBUG
/*
* If this is 0, all cross-compartment proxies must be registered in the
* wrapper map. This checking must be disabled temporarily while creating
* new wrappers. When non-zero, this records the recursion depth of wrapper
* creation.
*/
uintptr_t disableStrictProxyCheckingCount;
#endif
UnprotectedData<JS::gcreason::Reason> initialReason;

/*
* The current incremental GC phase. This is also used internally in
* non-incremental GC.
*/
State incrementalState;
UnprotectedData<State> incrementalState;

/* Indicates that the last incremental slice exhausted the mark stack. */
bool lastMarkSlice;
UnprotectedData<bool> lastMarkSlice;

/* Whether any sweeping will take place in the separate GC helper thread. */
bool sweepOnBackgroundThread;
UnprotectedData<bool> sweepOnBackgroundThread;

/* Whether observed type information is being released in the current GC. */
bool releaseObservedTypes;
UnprotectedData<bool> releaseObservedTypes;

/* Whether any black->gray edges were found during marking. */
BlackGrayEdgeVector foundBlackGrayEdges;
UnprotectedData<BlackGrayEdgeVector> foundBlackGrayEdges;

/* Singly linekd list of zones to be swept in the background. */
ZoneList backgroundSweepZones;
/* Singly linked list of zones to be swept in the background. */
UnprotectedData<ZoneList> backgroundSweepZones;

/*
* Free LIFO blocks are transferred to this allocator before being freed on
* the background GC thread after sweeping.
*/
LifoAlloc blocksToFreeAfterSweeping;

/*
* Free LIFO blocks are transferred to this allocator before being freed
* after minor GC.
*/
LifoAlloc blocksToFreeAfterMinorGC;
UnprotectedData<LifoAlloc> blocksToFreeAfterSweeping;

private:
/* Index of current zone group (for stats). */
unsigned zoneGroupIndex;
UnprotectedData<unsigned> zoneGroupIndex;

/*
* Incremental sweep state.
*/
JS::Zone* zoneGroups;
JS::Zone* currentZoneGroup;
bool sweepingTypes;
unsigned finalizePhase;
JS::Zone* sweepZone;
AllocKind sweepKind;
bool abortSweepAfterCurrentGroup;
UnprotectedData<JS::Zone*> zoneGroups;
UnprotectedData<JS::Zone*> currentZoneGroup;
UnprotectedData<bool> sweepingTypes;
UnprotectedData<unsigned> finalizePhase;
UnprotectedData<JS::Zone*> sweepZone;
UnprotectedData<AllocKind> sweepKind;
UnprotectedData<bool> abortSweepAfterCurrentGroup;

/*
* Concurrent sweep infrastructure.
@ -1215,17 +1166,17 @@ class GCRuntime
/*
* List head of arenas allocated during the sweep phase.
*/
Arena* arenasAllocatedDuringSweep;
UnprotectedData<Arena*> arenasAllocatedDuringSweep;

/*
* Incremental compacting state.
*/
bool startedCompacting;
ZoneList zonesToMaybeCompact;
Arena* relocatedArenasToRelease;
UnprotectedData<bool> startedCompacting;
UnprotectedData<ZoneList> zonesToMaybeCompact;
UnprotectedData<Arena*> relocatedArenasToRelease;

#ifdef JS_GC_ZEAL
MarkingValidator* markingValidator;
UnprotectedData<MarkingValidator*> markingValidator;
#endif

/*
@ -1233,34 +1184,23 @@ class GCRuntime
* frame, rather than at the beginning. In this case, the next slice will be
* delayed so that we don't get back-to-back slices.
*/
bool interFrameGC;
UnprotectedData<bool> interFrameGC;

/* Default budget for incremental GC slice. See js/SliceBudget.h. */
int64_t defaultTimeBudget_;
UnprotectedData<int64_t> defaultTimeBudget_;

/*
* We disable incremental GC if we encounter a Class with a trace hook
* that does not implement write barriers.
*/
bool incrementalAllowed;

/*
* GGC can be enabled from the command line while testing.
*/
unsigned generationalDisabled;
UnprotectedData<bool> incrementalAllowed;

/*
* Whether compacting GC is enabled globally.
*/
bool compactingEnabled;
UnprotectedData<bool> compactingEnabled;

/*
* Some code cannot tolerate compacting GC so it can be disabled temporarily
* with AutoDisableCompactingGC which uses this counter.
*/
unsigned compactingDisabledCount;

bool poked;
UnprotectedData<bool> poked;

/*
* These options control the zealousness of the GC. At every allocation,
@ -1287,16 +1227,16 @@ class GCRuntime
* zeal_ value 14 performs periodic shrinking collections.
*/
#ifdef JS_GC_ZEAL
uint32_t zealModeBits;
int zealFrequency;
int nextScheduled;
bool deterministicOnly;
int incrementalLimit;
UnprotectedData<uint32_t> zealModeBits;
UnprotectedData<int> zealFrequency;
UnprotectedData<int> nextScheduled;
UnprotectedData<bool> deterministicOnly;
UnprotectedData<int> incrementalLimit;

Vector<JSObject*, 0, SystemAllocPolicy> selectedForMarking;
UnprotectedData<Vector<JSObject*, 0, SystemAllocPolicy>> selectedForMarking;
#endif

bool fullCompartmentChecks;
UnprotectedData<bool> fullCompartmentChecks;

Callback<JSGCCallback> gcCallback;
Callback<JS::DoCycleCollectionCallback> gcDoCycleCollectionCallback;
@ -1327,36 +1267,26 @@ class GCRuntime
Callback<JSTraceDataOp> grayRootTracer;

/* Always preserve JIT code during GCs, for testing. */
bool alwaysPreserveCode;

/*
* Some regions of code are hard for the static rooting hazard analysis to
* understand. In those cases, we trade the static analysis for a dynamic
* analysis. When this is non-zero, we should assert if we trigger, or
* might trigger, a GC.
*/
int inUnsafeRegion;
UnprotectedData<bool> alwaysPreserveCode;

#ifdef DEBUG
size_t noGCOrAllocationCheck;
size_t noNurseryAllocationCheck;

bool arenasEmptyAtShutdown;
UnprotectedData<bool> arenasEmptyAtShutdown;
#endif

/* Synchronize GC heap access between main thread and GCHelperState. */
/* Synchronize GC heap access among GC helper threads and main threads. */
friend class js::AutoLockGC;
js::Mutex lock;

BackgroundAllocTask allocTask;
BackgroundDecommitTask decommitTask;

GCHelperState helperState;

/*
* During incremental sweeping, this field temporarily holds the arenas of
* the current AllocKind being swept in order of increasing free space.
*/
SortedArenaList incrementalSweepList;
UnprotectedData<SortedArenaList> incrementalSweepList;

friend class js::GCHelperState;
friend class MarkingValidator;
@ -1064,7 +1064,7 @@ class HeapUsage
* A heap usage that contains our parent's heap usage, or null if this is
* the top-level usage container.
*/
HeapUsage* parent_;
HeapUsage* const parent_;

/*
* The approximate number of bytes in use on the GC heap, to the nearest
@ -34,9 +34,9 @@ IterateCompartmentsArenasCells(JSContext* cx, Zone* zone, void* data,

for (ArenaIter aiter(zone, thingKind); !aiter.done(); aiter.next()) {
Arena* arena = aiter.get();
(*arenaCallback)(cx, data, arena, traceKind, thingSize);
(*arenaCallback)(cx->runtime(), data, arena, traceKind, thingSize);
for (ArenaCellIter iter(arena); !iter.done(); iter.next())
(*cellCallback)(cx, data, iter.getCell(), traceKind, thingSize);
(*cellCallback)(cx->runtime(), data, iter.getCell(), traceKind, thingSize);
}
}
}
@ -50,8 +50,8 @@ js::IterateZonesCompartmentsArenasCells(JSContext* cx, void* data,
{
AutoPrepareForTracing prop(cx, WithAtoms);

for (ZonesIter zone(cx, WithAtoms); !zone.done(); zone.next()) {
(*zoneCallback)(cx, data, zone);
for (ZonesIter zone(cx->runtime(), WithAtoms); !zone.done(); zone.next()) {
(*zoneCallback)(cx->runtime(), data, zone);
IterateCompartmentsArenasCells(cx, zone, data,
compartmentCallback, arenaCallback, cellCallback);
}
@ -66,7 +66,7 @@ js::IterateZoneCompartmentsArenasCells(JSContext* cx, Zone* zone, void* data,
{
AutoPrepareForTracing prop(cx, WithAtoms);

(*zoneCallback)(cx, data, zone);
(*zoneCallback)(cx->runtime(), data, zone);
IterateCompartmentsArenasCells(cx, zone, data,
compartmentCallback, arenaCallback, cellCallback);
}
@ -76,15 +76,15 @@ js::IterateChunks(JSContext* cx, void* data, IterateChunkCallback chunkCallback)
{
AutoPrepareForTracing prep(cx, SkipAtoms);

for (auto chunk = cx->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
chunkCallback(cx, data, chunk);
for (auto chunk = cx->runtime()->gc.allNonEmptyChunks(); !chunk.done(); chunk.next())
chunkCallback(cx->runtime(), data, chunk);
}

void
js::IterateScripts(JSContext* cx, JSCompartment* compartment,
void* data, IterateScriptCallback scriptCallback)
{
MOZ_ASSERT(!cx->mainThread().suppressGC);
MOZ_ASSERT(!cx->suppressGC);
AutoEmptyNursery empty(cx);
AutoPrepareForTracing prep(cx, SkipAtoms);

@ -92,12 +92,12 @@ js::IterateScripts(JSContext* cx, JSCompartment* compartment,
Zone* zone = compartment->zone();
for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next()) {
if (script->compartment() == compartment)
scriptCallback(cx, data, script);
scriptCallback(cx->runtime(), data, script);
}
} else {
for (ZonesIter zone(cx, SkipAtoms); !zone.done(); zone.next()) {
for (ZonesIter zone(cx->runtime(), SkipAtoms); !zone.done(); zone.next()) {
for (auto script = zone->cellIter<JSScript>(empty); !script.done(); script.next())
scriptCallback(cx, data, script);
scriptCallback(cx->runtime(), data, script);
}
}
}
@ -116,9 +116,8 @@ IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
void
js::IterateGrayObjects(Zone* zone, GCThingCallback cellCallback, void* data)
{
JSRuntime* rt = zone->runtimeFromMainThread();
MOZ_ASSERT(!rt->isHeapBusy());
AutoPrepareForTracing prep(rt->contextFromMainThread(), SkipAtoms);
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
AutoPrepareForTracing prep(TlsContext.get(), SkipAtoms);
::IterateGrayObjects(zone, cellCallback, data);
}

@ -126,7 +125,7 @@ void
js::IterateGrayObjectsUnderCC(Zone* zone, GCThingCallback cellCallback, void* data)
{
mozilla::DebugOnly<JSRuntime*> rt = zone->runtimeFromMainThread();
MOZ_ASSERT(rt->isCycleCollecting());
MOZ_ASSERT(JS::CurrentThreadIsHeapCycleCollecting());
MOZ_ASSERT(!rt->gc.isIncrementalGCInProgress());
::IterateGrayObjects(zone, cellCallback, data);
}
@ -135,8 +134,8 @@ JS_PUBLIC_API(void)
JS_IterateCompartments(JSContext* cx, void* data,
JSIterateCompartmentCallback compartmentCallback)
{
AutoTraceSession session(cx);
AutoTraceSession session(cx->runtime());

for (CompartmentsIter c(cx, WithAtoms); !c.done(); c.next())
for (CompartmentsIter c(cx->runtime(), WithAtoms); !c.done(); c.next())
(*compartmentCallback)(cx, data, c);
}
@ -257,7 +257,9 @@ js::CheckTracedThing(JSTracer* trc, T* thing)
* thread during compacting GC and reading the contents of the thing by
* IsThingPoisoned would be racy in this case.
*/
MOZ_ASSERT_IF(rt->isHeapBusy() && !zone->isGCCompacting() && !rt->gc.isBackgroundSweeping(),
MOZ_ASSERT_IF(JS::CurrentThreadIsHeapBusy() &&
!zone->isGCCompacting() &&
!rt->gc.isBackgroundSweeping(),
!IsThingPoisoned(thing) || !InFreeList(thing->asTenured().arena(), thing));
#endif
}
@ -725,7 +727,7 @@ GCMarker::markImplicitEdgesHelper(T markedThing)
MOZ_ASSERT(zone->isGCMarking());
MOZ_ASSERT(!zone->isGCSweeping());

auto p = zone->gcWeakKeys.get(JS::GCCellPtr(markedThing));
auto p = zone->gcWeakKeys().get(JS::GCCellPtr(markedThing));
if (!p)
return;
WeakEntryVector& markables = p->value;
@ -844,7 +846,7 @@ js::GCMarker::noteWeakEdge(T* edge)
// Note: we really want the *source* Zone here. The edge may start in a
// non-gc heap location, however, so we use the fact that cross-zone weak
// references are not allowed and use the *target's* zone.
JS::Zone::WeakEdges &weakRefs = (*edge)->asTenured().zone()->gcWeakRefs;
JS::Zone::WeakEdges &weakRefs = (*edge)->asTenured().zone()->gcWeakRefs();
AutoEnterOOMUnsafeRegion oomUnsafe;
if (!weakRefs.append(reinterpret_cast<TenuredCell**>(edge)))
oomUnsafe.crash("Failed to record a weak edge for sweeping.");
@ -1970,7 +1972,7 @@ MarkStack::reset()
bool
MarkStack::enlarge(unsigned count)
{
size_t newCapacity = Min(maxCapacity_, capacity() * 2);
size_t newCapacity = Min(maxCapacity_.ref(), capacity() * 2);
if (newCapacity < capacity() + count)
return false;

@ -2058,7 +2060,7 @@ GCMarker::stop()
stack.reset();
AutoEnterOOMUnsafeRegion oomUnsafe;
for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
if (!zone->gcWeakKeys.clear())
if (!zone->gcWeakKeys().clear())
oomUnsafe.crash("clearing weak keys in GCMarker::stop()");
}
}
@ -2104,7 +2106,7 @@ GCMarker::enterWeakMarkingMode()
tag_ = TracerKindTag::WeakMarking;

for (GCZoneGroupIter zone(runtime()); !zone.done(); zone.next()) {
for (WeakMapBase* m : zone->gcWeakMapList) {
for (WeakMapBase* m : zone->gcWeakMapList()) {
if (m->marked)
(void) m->markIteratively(this);
}
@ -2123,7 +2125,7 @@ GCMarker::leaveWeakMarkingMode()
// rebuild it upon entry rather than allow it to contain stale data.
AutoEnterOOMUnsafeRegion oomUnsafe;
for (GCZonesIter zone(runtime()); !zone.done(); zone.next()) {
if (!zone->gcWeakKeys.clear())
if (!zone->gcWeakKeys().clear())
oomUnsafe.crash("clearing weak keys in GCMarker::leaveWeakMarkingMode()");
}
}
@ -2158,7 +2160,7 @@ bool
GCMarker::markDelayedChildren(SliceBudget& budget)
{
GCRuntime& gc = runtime()->gc;
gcstats::AutoPhase ap(gc.stats, gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);
gcstats::AutoPhase ap(gc.stats(), gc.state() == State::Mark, gcstats::PHASE_MARK_DELAYED);

MOZ_ASSERT(unmarkedArenaStackTop);
do {
@ -2222,7 +2224,7 @@ GCMarker::sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
{
size_t size = stack.sizeOfExcludingThis(mallocSizeOf);
for (ZonesIter zone(runtime(), WithAtoms); !zone.done(); zone.next())
size += zone->gcGrayRoots.sizeOfExcludingThis(mallocSizeOf);
size += zone->gcGrayRoots().sizeOfExcludingThis(mallocSizeOf);
return size;
}

@ -2681,7 +2683,7 @@ CheckIsMarkedThing(T* thingp)
JSRuntime* rt = (*thingp)->runtimeFromAnyThread();
MOZ_ASSERT_IF(!ThingIsPermanentAtomOrWellKnownSymbol(*thingp),
CurrentThreadCanAccessRuntime(rt) ||
(rt->isHeapCollecting() && rt->gc.state() == State::Sweep));
(JS::CurrentThreadIsHeapCollecting() && rt->gc.state() == State::Sweep));
#endif
}

@ -2719,7 +2721,7 @@ IsMarkedInternal(JSRuntime* rt, JSObject** thingp)

if (IsInsideNursery(*thingp)) {
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
return rt->gc.nursery.getForwardedPointer(thingp);
return rt->zoneGroupFromMainThread()->nursery().getForwardedPointer(thingp);
}
return IsMarkedInternalCommon(thingp);
}
@ -2760,13 +2762,12 @@ IsAboutToBeFinalizedInternal(T** thingp)
JSRuntime* rt = thing->runtimeFromAnyThread();

/* Permanent atoms are never finalized by non-owning runtimes. */
if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && !TlsPerThreadData.get()->associatedWith(rt))
if (ThingIsPermanentAtomOrWellKnownSymbol(thing) && TlsContext.get()->runtime() != rt)
return false;

Nursery& nursery = rt->gc.nursery;
if (IsInsideNursery(thing)) {
MOZ_ASSERT(rt->isHeapMinorCollecting());
return !nursery.getForwardedPointer(reinterpret_cast<JSObject**>(thingp));
MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
return !Nursery::getForwardedPointer(reinterpret_cast<JSObject**>(thingp));
}

Zone* zone = thing->asTenured().zoneFromAnyThread();
@ -2933,13 +2934,13 @@ void
UnmarkGrayTracer::onChild(const JS::GCCellPtr& thing)
{
int stackDummy;
JSContext* cx = runtime()->contextFromMainThread();
if (!JS_CHECK_STACK_SIZE(cx->nativeStackLimit[StackForSystemCode], &stackDummy)) {
JSContext* cx = TlsContext.get();
if (!JS_CHECK_STACK_SIZE(cx->nativeStackLimit[JS::StackForSystemCode], &stackDummy)) {
/*
* If we run out of stack, we take a more drastic measure: require that
* we GC again before the next CC.
*/
runtime()->setGCGrayBitsValid(false);
runtime()->gc.setGrayBitsInvalid();
return;
}

@ -3000,8 +3001,8 @@ TypedUnmarkGrayCellRecursively(T* t)
MOZ_ASSERT(t);

JSRuntime* rt = t->runtimeFromMainThread();
MOZ_ASSERT(!rt->isHeapCollecting());
MOZ_ASSERT(!rt->isCycleCollecting());
MOZ_ASSERT(!JS::CurrentThreadIsHeapCollecting());
MOZ_ASSERT(!JS::CurrentThreadIsHeapCycleCollecting());

bool unmarkedArg = false;
if (t->isTenured()) {
@ -3013,8 +3014,8 @@ TypedUnmarkGrayCellRecursively(T* t)
}

UnmarkGrayTracer trc(rt);
gcstats::AutoPhase outerPhase(rt->gc.stats, gcstats::PHASE_BARRIER);
gcstats::AutoPhase innerPhase(rt->gc.stats, gcstats::PHASE_UNMARK_GRAY);
gcstats::AutoPhase outerPhase(rt->gc.stats(), gcstats::PHASE_BARRIER);
gcstats::AutoPhase innerPhase(rt->gc.stats(), gcstats::PHASE_UNMARK_GRAY);
t->traceChildren(&trc);

return unmarkedArg || trc.unmarkedAny;
@ -19,6 +19,7 @@
#include "js/HeapAPI.h"
#include "js/SliceBudget.h"
#include "js/TracingAPI.h"
#include "threading/ProtectedData.h"
#include "vm/TaggedProto.h"

class JSLinearString;
@ -56,13 +57,13 @@ class MarkStack
{
friend class GCMarker;

uintptr_t* stack_;
uintptr_t* tos_;
uintptr_t* end_;
UnprotectedData<uintptr_t*> stack_;
UnprotectedData<uintptr_t*> tos_;
UnprotectedData<uintptr_t*> end_;

// The capacity we start with and reset() to.
size_t baseCapacity_;
size_t maxCapacity_;
UnprotectedData<size_t> baseCapacity_;
UnprotectedData<size_t> maxCapacity_;

public:
explicit MarkStack(size_t maxCapacity)
@ -335,29 +336,29 @@ class GCMarker : public JSTracer
MarkStack stack;

/* The color is only applied to objects and functions. */
uint32_t color;
UnprotectedData<uint32_t> color;

/* Pointer to the top of the stack of arenas we are delaying marking on. */
js::gc::Arena* unmarkedArenaStackTop;
UnprotectedData<js::gc::Arena*> unmarkedArenaStackTop;

/*
* If the weakKeys table OOMs, disable the linear algorithm and fall back
* to iterating until the next GC.
*/
bool linearWeakMarkingDisabled_;
UnprotectedData<bool> linearWeakMarkingDisabled_;

#ifdef DEBUG
/* Count of arenas that are currently in the stack. */
size_t markLaterArenas;
UnprotectedData<size_t> markLaterArenas;

/* Assert that start and stop are called with correct ordering. */
bool started;
UnprotectedData<bool> started;

/*
* If this is true, all marked objects must belong to a compartment being
* GCed. This is used to look for compartment bugs.
*/
bool strictCompartmentChecking;
UnprotectedData<bool> strictCompartmentChecking;
#endif // DEBUG
};
@ -30,7 +30,7 @@ MemProfiler::GetGCHeapProfiler(JSRuntime* runtime)
MemProfiler*
MemProfiler::GetMemProfiler(JSContext* context)
{
return &context->gc.mMemProfiler;
return &context->runtime()->gc.mMemProfiler;
}

void

@ -17,11 +17,11 @@
#include "js/TracingAPI.h"
#include "vm/Runtime.h"

MOZ_ALWAYS_INLINE bool
js::Nursery::getForwardedPointer(JSObject** ref) const
MOZ_ALWAYS_INLINE /* static */ bool
js::Nursery::getForwardedPointer(JSObject** ref)
{
MOZ_ASSERT(ref);
MOZ_ASSERT(isInside((void*)*ref));
MOZ_ASSERT(IsInsideNursery(*ref));
const gc::RelocationOverlay* overlay = reinterpret_cast<const gc::RelocationOverlay*>(*ref);
if (!overlay->isForwarded())
return false;
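getForwardedPointer, just above, tests whether a nursery cell was overwritten with a relocation overlay and, if so, rewrites the edge to point at the moved copy. A sketch of that check with an invented overlay layout — the real overlay shares storage with the moved cell and uses different tagging:

#include <cstdint>

struct RelocationOverlaySketch
{
    static const uintptr_t ForwardedFlag = 1;
    uintptr_t forward = 0;                    // tagged new location

    bool isForwarded() const { return (forward & ForwardedFlag) != 0; }
    void* forwardingAddress() const {
        return reinterpret_cast<void*>(forward & ~ForwardedFlag);
    }
};

bool getForwardedPointerSketch(void** ref)
{
    auto* overlay = static_cast<RelocationOverlaySketch*>(*ref);
    if (!overlay->isForwarded())
        return false;                         // object died in the minor GC
    *ref = overlay->forwardingAddress();      // update the edge in place
    return true;
}

int main()
{
    int newLocation = 0;
    RelocationOverlaySketch overlay;
    overlay.forward = reinterpret_cast<uintptr_t>(&newLocation) |
                      RelocationOverlaySketch::ForwardedFlag;

    void* edge = &overlay;
    return (getForwardedPointerSketch(&edge) && edge == &newLocation) ? 0 : 1;
}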
@ -37,12 +37,11 @@ namespace js {

template <typename T>
static inline T*
AllocateObjectBuffer(ExclusiveContext* cx, uint32_t count)
AllocateObjectBuffer(JSContext* cx, uint32_t count)
{
if (cx->isJSContext()) {
Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
if (!cx->helperThread()) {
size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
T* buffer = static_cast<T*>(nursery.allocateBuffer(cx->zone(), nbytes));
T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(cx->zone(), nbytes));
if (!buffer)
ReportOutOfMemory(cx);
return buffer;
@ -52,12 +51,11 @@ AllocateObjectBuffer(ExclusiveContext* cx, uint32_t count)

template <typename T>
static inline T*
AllocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, uint32_t count)
AllocateObjectBuffer(JSContext* cx, JSObject* obj, uint32_t count)
{
if (cx->isJSContext()) {
Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
if (!cx->helperThread()) {
size_t nbytes = JS_ROUNDUP(count * sizeof(T), sizeof(Value));
T* buffer = static_cast<T*>(nursery.allocateBuffer(obj, nbytes));
T* buffer = static_cast<T*>(cx->nursery().allocateBuffer(obj, nbytes));
if (!buffer)
ReportOutOfMemory(cx);
return buffer;
@ -68,14 +66,13 @@ AllocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, uint32_t count)
// If this returns null then the old buffer will be left alone.
template <typename T>
static inline T*
ReallocateObjectBuffer(ExclusiveContext* cx, JSObject* obj, T* oldBuffer,
ReallocateObjectBuffer(JSContext* cx, JSObject* obj, T* oldBuffer,
uint32_t oldCount, uint32_t newCount)
{
if (cx->isJSContext()) {
Nursery& nursery = cx->asJSContext()->runtime()->gc.nursery;
T* buffer = static_cast<T*>(nursery.reallocateBuffer(obj, oldBuffer,
oldCount * sizeof(T),
newCount * sizeof(T)));
if (!cx->helperThread()) {
T* buffer = static_cast<T*>(cx->nursery().reallocateBuffer(obj, oldBuffer,
oldCount * sizeof(T),
newCount * sizeof(T)));
if (!buffer)
ReportOutOfMemory(cx);
return buffer;
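The cx->isJSContext() tests above become !cx->helperThread(): only the thread that owns a nursery may bump-allocate from it, while helper threads take the malloc path. A simplified sketch of that split (all types invented; the stand-in nursery mallocs where the real one bump-allocates from its chunks):

#include <cstdlib>

struct NurserySketch
{
    void* allocateBuffer(size_t nbytes) {
        return std::malloc(nbytes);   // stand-in for bump allocation
    }
};

struct AllocContextSketch
{
    bool onHelperThread;
    NurserySketch nursery;
};

void* AllocateObjectBufferSketch(AllocContextSketch* cx, size_t nbytes)
{
    if (!cx->onHelperThread)
        return cx->nursery.allocateBuffer(nbytes);   // nursery fast path
    return std::malloc(nbytes);                      // helper threads malloc
}

int main()
{
    AllocContextSketch cx{/* onHelperThread = */ false, {}};
    void* buf = AllocateObjectBufferSketch(&cx, 64);
    std::free(buf);
    return buf ? 0 : 1;
}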
@ -46,7 +46,7 @@ static const uintptr_t CanaryMagicValue = 0xDEADB15D;

struct js::Nursery::FreeMallocedBuffersTask : public GCParallelTask
{
explicit FreeMallocedBuffersTask(FreeOp* fop) : fop_(fop) {}
explicit FreeMallocedBuffersTask(FreeOp* fop) : GCParallelTask(fop->runtime()), fop_(fop) {}
bool init() { return buffers_.init(); }
void transferBuffersToFree(MallocedBuffersSet& buffersToFree,
const AutoLockHelperThreadState& lock);
@ -93,7 +93,7 @@ js::Nursery::NurseryChunk::poisonAndInit(JSRuntime* rt, uint8_t poison)
inline void
js::Nursery::NurseryChunk::init(JSRuntime* rt)
{
new (&trailer) gc::ChunkTrailer(rt, &rt->gc.storeBuffer);
new (&trailer) gc::ChunkTrailer(rt, &rt->zoneGroupFromMainThread()->storeBuffer());
}

/* static */ inline js::Nursery::NurseryChunk*
@ -110,8 +110,8 @@ js::Nursery::NurseryChunk::toChunk(JSRuntime* rt)
return chunk;
}

js::Nursery::Nursery(JSRuntime* rt)
: runtime_(rt)
js::Nursery::Nursery(ZoneGroup* group)
: zoneGroup_(group)
, position_(0)
, currentStartChunk_(0)
, currentStartPosition_(0)
@ -136,7 +136,7 @@ js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
/* maxNurseryBytes parameter is rounded down to a multiple of chunk size. */
maxNurseryChunks_ = maxNurseryBytes >> ChunkShift;

/* If no chunks are specified then the nursery is permenantly disabled. */
/* If no chunks are specified then the nursery is permanently disabled. */
if (maxNurseryChunks_ == 0)
return true;

@ -146,7 +146,7 @@ js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
if (!cellsWithUid_.init())
return false;

freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(runtime()->defaultFreeOp());
freeMallocedBuffersTask = js_new<FreeMallocedBuffersTask>(zoneGroup()->runtime->defaultFreeOp());
if (!freeMallocedBuffersTask || !freeMallocedBuffersTask->init())
return false;

@ -179,7 +179,7 @@ js::Nursery::init(uint32_t maxNurseryBytes, AutoLockGC& lock)
reportTenurings_ = atoi(env);
}

if (!runtime()->gc.storeBuffer.enable())
if (!zoneGroup()->storeBuffer().enable())
return false;

MOZ_ASSERT(isEnabled());
@ -196,8 +196,8 @@ void
js::Nursery::enable()
{
MOZ_ASSERT(isEmpty());
MOZ_ASSERT(!runtime()->gc.isVerifyPreBarriersEnabled());
if (isEnabled())
MOZ_ASSERT(!zoneGroup()->runtime->gc.isVerifyPreBarriersEnabled());
if (isEnabled() || !maxChunks())
return;

updateNumChunks(1);
@ -207,11 +207,11 @@ js::Nursery::enable()
setCurrentChunk(0);
setStartPosition();
#ifdef JS_GC_ZEAL
if (runtime()->hasZealMode(ZealMode::GenerationalGC))
if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
enterZealMode();
#endif

MOZ_ALWAYS_TRUE(runtime()->gc.storeBuffer.enable());
MOZ_ALWAYS_TRUE(zoneGroup()->storeBuffer().enable());
return;
}
@ -223,17 +223,16 @@ js::Nursery::disable()
return;
updateNumChunks(0);
currentEnd_ = 0;
runtime()->gc.storeBuffer.disable();
zoneGroup()->storeBuffer().disable();
}

bool
js::Nursery::isEmpty() const
{
MOZ_ASSERT(runtime_);
if (!isEnabled())
return true;

if (!runtime_->hasZealMode(ZealMode::GenerationalGC)) {
if (!zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC)) {
MOZ_ASSERT(currentStartChunk_ == 0);
MOZ_ASSERT(currentStartPosition_ == chunk(0).start());
}
@ -297,14 +296,14 @@ void*
js::Nursery::allocate(size_t size)
{
MOZ_ASSERT(isEnabled());
MOZ_ASSERT(!runtime()->isHeapBusy());
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
MOZ_ASSERT_IF(currentChunk_ == currentStartChunk_, position() >= currentStartPosition_);
MOZ_ASSERT(position() % gc::CellSize == 0);
MOZ_ASSERT(size % gc::CellSize == 0);

#ifdef JS_GC_ZEAL
static const size_t CanarySize = (sizeof(Nursery::Canary) + CellSize - 1) & ~CellMask;
if (runtime()->gc.hasZealMode(ZealMode::CheckNursery))
if (zoneGroup()->runtime->gc.hasZealMode(ZealMode::CheckNursery))
size += CanarySize;
#endif

@ -320,7 +319,7 @@ js::Nursery::allocate(size_t size)
JS_EXTRA_POISON(thing, JS_ALLOCATED_NURSERY_PATTERN, size);

#ifdef JS_GC_ZEAL
if (runtime()->gc.hasZealMode(ZealMode::CheckNursery)) {
if (zoneGroup()->runtime->gc.hasZealMode(ZealMode::CheckNursery)) {
auto canary = reinterpret_cast<Canary*>(position() - CanarySize);
canary->magicValue = CanaryMagicValue;
canary->next = nullptr;
@ -544,22 +543,23 @@ js::Nursery::maybeEndProfile(ProfileKey key)
}

void
js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
js::Nursery::collect(JS::gcreason::Reason reason)
{
MOZ_ASSERT(!rt->mainThread.suppressGC);
MOZ_RELEASE_ASSERT(CurrentThreadCanAccessRuntime(rt));
MOZ_ASSERT(!TlsContext.get()->suppressGC);
MOZ_RELEASE_ASSERT(TlsContext.get() == zoneGroup()->context);

if (!isEnabled() || isEmpty()) {
// Our barriers are not always exact, and there may be entries in the
// storebuffer even when the nursery is disabled or empty. It's not safe
// to keep these entries as they may refer to tenured cells which may be
// freed after this point.
rt->gc.storeBuffer.clear();
zoneGroup()->storeBuffer().clear();
}

if (!isEnabled())
return;

JSRuntime* rt = zoneGroup()->runtime;
rt->gc.incMinorGcNumber();

#ifdef JS_GC_ZEAL
@ -570,7 +570,7 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
lastCanary_ = nullptr;
#endif

rt->gc.stats.beginNurseryCollection(reason);
rt->gc.stats().beginNurseryCollection(reason);
TraceMinorGCStart();

startProfile(ProfileKey::Total);
@ -582,7 +582,7 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
TenureCountCache tenureCounts;
double promotionRate = 0;
if (!isEmpty())
promotionRate = doCollection(rt, reason, tenureCounts);
promotionRate = doCollection(reason, tenureCounts);

// Resize the nursery.
maybeStartProfile(ProfileKey::Resize);
@ -596,7 +596,7 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
maybeStartProfile(ProfileKey::Pretenure);
uint32_t pretenureCount = 0;
if (promotionRate > 0.8 || reason == JS::gcreason::FULL_STORE_BUFFER) {
JSContext* cx = rt->contextFromMainThread();
JSContext* cx = TlsContext.get();
for (auto& entry : tenureCounts.entries) {
if (entry.count >= 3000) {
ObjectGroup* group = entry.group;
@ -627,11 +627,11 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
|
||||
@ -627,11 +627,11 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
|
||||
rt->addTelemetry(JS_TELEMETRY_GC_NURSERY_BYTES, sizeOfHeapCommitted());
|
||||
rt->addTelemetry(JS_TELEMETRY_GC_PRETENURE_COUNT, pretenureCount);
|
||||
|
||||
rt->gc.stats.endNurseryCollection(reason);
|
||||
rt->gc.stats().endNurseryCollection(reason);
|
||||
TraceMinorGCEnd();
|
||||
|
||||
if (enableProfiling_ && totalTime >= profileThreshold_) {
|
||||
rt->gc.stats.maybePrintProfileHeaders();
|
||||
rt->gc.stats().maybePrintProfileHeaders();
|
||||
|
||||
fprintf(stderr, "MinorGC: %20s %5.1f%% %4u ",
|
||||
JS::gcreason::ExplainReason(reason),
|
||||
@ -651,13 +651,14 @@ js::Nursery::collect(JSRuntime* rt, JS::gcreason::Reason reason)
|
||||
}
|
||||
|
||||
double
|
||||
js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
|
||||
js::Nursery::doCollection(JS::gcreason::Reason reason,
|
||||
TenureCountCache& tenureCounts)
|
||||
{
|
||||
JSRuntime* rt = zoneGroup()->runtime;
|
||||
AutoTraceSession session(rt, JS::HeapState::MinorCollecting);
|
||||
AutoSetThreadIsPerformingGC performingGC;
|
||||
AutoStopVerifyingBarriers av(rt, false);
|
||||
AutoDisableProxyCheck disableStrictProxyChecking(rt);
|
||||
AutoDisableProxyCheck disableStrictProxyChecking;
|
||||
mozilla::DebugOnly<AutoEnterOOMUnsafeRegion> oomUnsafeRegion;
|
||||
|
||||
size_t initialNurserySize = spaceToEnd();
|
||||
@ -666,7 +667,7 @@ js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
|
||||
TenuringTracer mover(rt, this);
|
||||
|
||||
// Mark the store buffer. This must happen first.
|
||||
StoreBuffer& sb = rt->gc.storeBuffer;
|
||||
StoreBuffer& sb = zoneGroup()->storeBuffer();
|
||||
|
||||
// The MIR graph only contains nursery pointers if cancelIonCompilations()
|
||||
// is set on the store buffer, in which case we cancel all compilations.
|
||||
@ -701,13 +702,13 @@ js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
|
||||
|
||||
maybeStartProfile(ProfileKey::MarkDebugger);
|
||||
{
|
||||
gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_MARK_ROOTS);
|
||||
gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_MARK_ROOTS);
|
||||
Debugger::traceAll(&mover);
|
||||
}
|
||||
maybeEndProfile(ProfileKey::MarkDebugger);
|
||||
|
||||
maybeStartProfile(ProfileKey::ClearNewObjectCache);
|
||||
rt->contextFromMainThread()->caches.newObjectCache.clearNurseryObjects(rt);
|
||||
zoneGroup()->caches().newObjectCache.clearNurseryObjects(zoneGroup());
|
||||
maybeEndProfile(ProfileKey::ClearNewObjectCache);
|
||||
|
||||
// Most of the work is done here. This loop iterates over objects that have
|
||||
@ -744,7 +745,7 @@ js::Nursery::doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
|
||||
maybeEndProfile(ProfileKey::Sweep);
|
||||
|
||||
maybeStartProfile(ProfileKey::ClearStoreBuffer);
|
||||
rt->gc.storeBuffer.clear();
|
||||
zoneGroup()->storeBuffer().clear();
|
||||
maybeEndProfile(ProfileKey::ClearStoreBuffer);
|
||||
|
||||
// Make sure hashtables have been updated after the collection.
|
||||
@ -793,7 +794,7 @@ js::Nursery::freeMallocedBuffers()
|
||||
}
|
||||
|
||||
if (!started)
|
||||
freeMallocedBuffersTask->runFromMainThread(runtime());
|
||||
freeMallocedBuffersTask->runFromMainThread(zoneGroup()->runtime);
|
||||
|
||||
MOZ_ASSERT(mallocedBuffers.empty());
|
||||
}
|
||||
@ -828,9 +829,9 @@ js::Nursery::sweep()
|
||||
#ifdef JS_GC_ZEAL
|
||||
/* Poison the nursery contents so touching a freed object will crash. */
|
||||
for (unsigned i = 0; i < numChunks(); i++)
|
||||
chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
|
||||
chunk(i).poisonAndInit(zoneGroup()->runtime, JS_SWEPT_NURSERY_PATTERN);
|
||||
|
||||
if (runtime()->hasZealMode(ZealMode::GenerationalGC)) {
|
||||
if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC)) {
|
||||
/* Only reset the alloc point when we are close to the end. */
|
||||
if (currentChunk_ + 1 == numChunks())
|
||||
setCurrentChunk(0);
|
||||
@ -839,14 +840,14 @@ js::Nursery::sweep()
|
||||
{
|
||||
#ifdef JS_CRASH_DIAGNOSTICS
|
||||
for (unsigned i = 0; i < numChunks(); ++i)
|
||||
chunk(i).poisonAndInit(runtime(), JS_SWEPT_NURSERY_PATTERN);
|
||||
chunk(i).poisonAndInit(zoneGroup()->runtime, JS_SWEPT_NURSERY_PATTERN);
|
||||
#endif
|
||||
setCurrentChunk(0);
|
||||
}
|
||||
|
||||
/* Set current start position for isEmpty checks. */
|
||||
setStartPosition();
|
||||
MemProfiler::SweepNursery(runtime());
|
||||
MemProfiler::SweepNursery(zoneGroup()->runtime);
|
||||
}
|
||||
|
||||
size_t
|
||||
@ -873,7 +874,7 @@ js::Nursery::setCurrentChunk(unsigned chunkno)
|
||||
currentChunk_ = chunkno;
|
||||
position_ = chunk(chunkno).start();
|
||||
currentEnd_ = chunk(chunkno).end();
|
||||
chunk(chunkno).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
|
||||
chunk(chunkno).poisonAndInit(zoneGroup()->runtime, JS_FRESH_NURSERY_PATTERN);
|
||||
}
|
||||
|
||||
MOZ_ALWAYS_INLINE void
|
||||
@ -914,7 +915,7 @@ void
|
||||
js::Nursery::shrinkAllocableSpace()
|
||||
{
|
||||
#ifdef JS_GC_ZEAL
|
||||
if (runtime()->hasZealMode(ZealMode::GenerationalGC))
|
||||
if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
|
||||
return;
|
||||
#endif
|
||||
updateNumChunks(Max(numChunks() - 1, 1u));
|
||||
@ -924,7 +925,7 @@ void
|
||||
js::Nursery::minimizeAllocableSpace()
|
||||
{
|
||||
#ifdef JS_GC_ZEAL
|
||||
if (runtime()->hasZealMode(ZealMode::GenerationalGC))
|
||||
if (zoneGroup()->runtime->hasZealMode(ZealMode::GenerationalGC))
|
||||
return;
|
||||
#endif
|
||||
updateNumChunks(1);
|
||||
@ -935,7 +936,7 @@ js::Nursery::updateNumChunks(unsigned newCount)
|
||||
{
|
||||
if (numChunks() != newCount) {
|
||||
AutoMaybeStartBackgroundAllocation maybeBgAlloc;
|
||||
AutoLockGC lock(runtime());
|
||||
AutoLockGC lock(zoneGroup()->runtime);
|
||||
updateNumChunksLocked(newCount, maybeBgAlloc, lock);
|
||||
}
|
||||
}
|
||||
@ -948,13 +949,15 @@ js::Nursery::updateNumChunksLocked(unsigned newCount,
|
||||
// The GC nursery is an optimization and so if we fail to allocate nursery
|
||||
// chunks we do not report an error.
|
||||
|
||||
MOZ_ASSERT(newCount <= maxChunks());
|
||||
|
||||
unsigned priorCount = numChunks();
|
||||
MOZ_ASSERT(priorCount != newCount);
|
||||
|
||||
if (newCount < priorCount) {
|
||||
// Shrink the nursery and free unused chunks.
|
||||
for (unsigned i = newCount; i < priorCount; i++)
|
||||
runtime()->gc.recycleChunk(chunk(i).toChunk(runtime()), lock);
|
||||
zoneGroup()->runtime->gc.recycleChunk(chunk(i).toChunk(zoneGroup()->runtime), lock);
|
||||
chunks_.shrinkTo(newCount);
|
||||
return;
|
||||
}
|
||||
@ -964,14 +967,14 @@ js::Nursery::updateNumChunksLocked(unsigned newCount,
|
||||
return;
|
||||
|
||||
for (unsigned i = priorCount; i < newCount; i++) {
|
||||
auto newChunk = runtime()->gc.getOrAllocChunk(lock, maybeBgAlloc);
|
||||
auto newChunk = zoneGroup()->runtime->gc.getOrAllocChunk(lock, maybeBgAlloc);
|
||||
if (!newChunk) {
|
||||
chunks_.shrinkTo(i);
|
||||
return;
|
||||
}
|
||||
|
||||
chunks_[i] = NurseryChunk::fromChunk(newChunk);
|
||||
chunk(i).poisonAndInit(runtime(), JS_FRESH_NURSERY_PATTERN);
|
||||
chunk(i).poisonAndInit(zoneGroup()->runtime, JS_FRESH_NURSERY_PATTERN);
|
||||
}
|
||||
}
|
||||
|
||||
|
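The Nursery.cpp hunks above all make the same move: collect() and doCollection() lose their JSRuntime* parameter, and every runtime() call becomes a walk through the owning zone group (zoneGroup()->runtime). A minimal call-site sketch of the new shape; the TriggerMinorGC helper is hypothetical, used purely for illustration:

// Before this patch a caller had to thread the runtime through:
//   rt->gc.nursery.collect(rt, reason);
// Afterwards the nursery hangs off a ZoneGroup and finds the runtime itself:
void TriggerMinorGC(js::ZoneGroup* group, JS::gcreason::Reason reason)
{
    group->nursery().collect(reason);  // internally uses zoneGroup()->runtime
}
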
@ -58,6 +58,7 @@ class ObjectElements;
class NativeObject;
class Nursery;
class HeapSlot;
class ZoneGroup;

void SetGCZeal(JSRuntime*, uint8_t, uint32_t);

@ -134,7 +135,7 @@ class Nursery
static const size_t Alignment = gc::ChunkSize;
static const size_t ChunkShift = gc::ChunkShift;

explicit Nursery(JSRuntime* rt);
explicit Nursery(ZoneGroup* group);
~Nursery();

MOZ_MUST_USE bool init(uint32_t maxNurseryBytes, AutoLockGC& lock);
@ -195,14 +196,14 @@ class Nursery
static const size_t MaxNurseryBufferSize = 1024;

/* Do a minor collection. */
void collect(JSRuntime* rt, JS::gcreason::Reason reason);
void collect(JS::gcreason::Reason reason);

/*
* Check if the thing at |*ref| in the Nursery has been forwarded. If so,
* sets |*ref| to the new location of the object and returns true. Otherwise
* returns false and leaves |*ref| unset.
*/
MOZ_ALWAYS_INLINE MOZ_MUST_USE bool getForwardedPointer(JSObject** ref) const;
MOZ_ALWAYS_INLINE MOZ_MUST_USE static bool getForwardedPointer(JSObject** ref);

/* Forward a slots/elements pointer stored in an Ion frame. */
void forwardBufferPointer(HeapSlot** pSlotsElems);
@ -264,6 +265,9 @@ class Nursery
/* Print total profile times on shutdown. */
void printTotalProfileTimes();

void* addressOfCurrentEnd() const { return (void*)&currentEnd_; }
void* addressOfPosition() const { return (void*)&position_; }

private:
/* The amount of space in the mapped nursery available to allocations. */
static const size_t NurseryChunkUsableSize = gc::ChunkSize - sizeof(gc::ChunkTrailer);
@ -281,12 +285,8 @@ class Nursery
static_assert(sizeof(NurseryChunk) == gc::ChunkSize,
"Nursery chunk size must match gc::Chunk size.");

/*
* The start and end pointers are stored under the runtime so that we can
* inline the isInsideNursery check into embedder code. Use the start()
* and heapEnd() functions to access these values.
*/
JSRuntime* runtime_;
// The set of zones which this is the nursery for.
ZoneGroup* zoneGroup_;

/* Vector of allocated chunks to allocate from. */
Vector<NurseryChunk*, 0, SystemAllocPolicy> chunks_;
@ -406,19 +406,13 @@ class Nursery
}

MOZ_ALWAYS_INLINE uintptr_t currentEnd() const {
MOZ_ASSERT(runtime_);
MOZ_ASSERT(currentEnd_ == chunk(currentChunk_).end());
return currentEnd_;
}
void* addressOfCurrentEnd() const {
MOZ_ASSERT(runtime_);
return (void*)&currentEnd_;
}

uintptr_t position() const { return position_; }
void* addressOfPosition() const { return (void*)&position_; }

JSRuntime* runtime() const { return runtime_; }
ZoneGroup* zoneGroup() const { return zoneGroup_; }

/* Allocates a new GC thing from the tenured generation during minor GC. */
gc::TenuredCell* allocateFromTenured(JS::Zone* zone, gc::AllocKind thingKind);
@ -426,7 +420,7 @@ class Nursery
/* Common internal allocator function. */
void* allocate(size_t size);

double doCollection(JSRuntime* rt, JS::gcreason::Reason reason,
double doCollection(JS::gcreason::Reason reason,
gc::TenureCountCache& tenureCounts);

/*
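Note that addressOfCurrentEnd() and addressOfPosition() move into the public section and drop their MOZ_ASSERT(runtime_) checks: jitted allocation paths bake these two addresses into inline bump allocation. A rough illustration of what a consumer does with them; the helper below is hypothetical (the real consumer is MacroAssembler-emitted code):

// Illustrative inline bump allocation against the nursery's position/end slots.
void* TryBumpAlloc(js::Nursery& nursery, size_t size)
{
    auto* position = static_cast<uintptr_t*>(nursery.addressOfPosition());
    auto* end = static_cast<uintptr_t*>(nursery.addressOfCurrentEnd());
    if (*position + size > *end)
        return nullptr;              // take the out-of-line allocation path
    void* cell = reinterpret_cast<void*>(*position);
    *position += size;               // bump the cursor past the new cell
    return cell;
}
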
@ -66,7 +66,7 @@ TraceExactStackRootList(JSTracer* trc, JS::Rooted<void*>* rooter, const char* na
}

static inline void
TraceStackRoots(JSTracer* trc, RootedListHeads& stackRoots)
TraceStackRoots(JSTracer* trc, JS::RootedListHeads& stackRoots)
{
#define TRACE_ROOTS(name, type, _) \
TraceExactStackRootList<type*>(trc, stackRoots[JS::RootKind::name], "exact-" #name);
@ -80,7 +80,7 @@ JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
}

void
js::RootLists::traceStackRoots(JSTracer* trc)
JS::RootingContext::traceStackRoots(JSTracer* trc)
{
TraceStackRoots(trc, stackRoots_);
}
@ -88,9 +88,7 @@ js::RootLists::traceStackRoots(JSTracer* trc)
static void
TraceExactStackRoots(JSRuntime* rt, JSTracer* trc)
{
for (ZonesIter zone(rt, SkipAtoms); !zone.done(); zone.next())
TraceStackRoots(trc, zone->stackRoots_);
rt->contextFromMainThread()->roots.traceStackRoots(trc);
TlsContext.get()->traceStackRoots(trc);
}

template <typename T, TraceFunction<T> TraceFn = TraceNullableRoot>
@ -103,23 +101,23 @@ TracePersistentRootedList(JSTracer* trc, mozilla::LinkedList<PersistentRooted<vo
}

void
js::RootLists::tracePersistentRoots(JSTracer* trc)
JSRuntime::tracePersistentRoots(JSTracer* trc)
{
#define TRACE_ROOTS(name, type, _) \
TracePersistentRootedList<type*>(trc, heapRoots_[JS::RootKind::name], "persistent-" #name);
TracePersistentRootedList<type*>(trc, heapRoots.ref()[JS::RootKind::name], "persistent-" #name);
JS_FOR_EACH_TRACEKIND(TRACE_ROOTS)
#undef TRACE_ROOTS
TracePersistentRootedList<jsid>(trc, heapRoots_[JS::RootKind::Id], "persistent-id");
TracePersistentRootedList<Value>(trc, heapRoots_[JS::RootKind::Value], "persistent-value");
TracePersistentRootedList<jsid>(trc, heapRoots.ref()[JS::RootKind::Id], "persistent-id");
TracePersistentRootedList<Value>(trc, heapRoots.ref()[JS::RootKind::Value], "persistent-value");
TracePersistentRootedList<ConcreteTraceable,
js::DispatchWrapper<ConcreteTraceable>::TraceWrapped>(trc,
heapRoots_[JS::RootKind::Traceable], "persistent-traceable");
heapRoots.ref()[JS::RootKind::Traceable], "persistent-traceable");
}

static void
TracePersistentRooted(JSRuntime* rt, JSTracer* trc)
{
rt->contextFromMainThread()->roots.tracePersistentRoots(trc);
rt->tracePersistentRoots(trc);
}

template <typename T>
@ -132,14 +130,14 @@ FinishPersistentRootedChain(mozilla::LinkedList<PersistentRooted<void*>>& listAr
}

void
js::RootLists::finishPersistentRoots()
JSRuntime::finishPersistentRoots()
{
#define FINISH_ROOT_LIST(name, type, _) \
FinishPersistentRootedChain<type*>(heapRoots_[JS::RootKind::name]);
#define FINISH_ROOT_LIST(name, type, _) \
FinishPersistentRootedChain<type*>(heapRoots.ref()[JS::RootKind::name]);
JS_FOR_EACH_TRACEKIND(FINISH_ROOT_LIST)
#undef FINISH_ROOT_LIST
FinishPersistentRootedChain<jsid>(heapRoots_[JS::RootKind::Id]);
FinishPersistentRootedChain<Value>(heapRoots_[JS::RootKind::Value]);
FinishPersistentRootedChain<jsid>(heapRoots.ref()[JS::RootKind::Id]);
FinishPersistentRootedChain<Value>(heapRoots.ref()[JS::RootKind::Value]);

// Note that we do not finalize the Traceable list as we do not know how to
// safely clear members. We instead assert that none escape the RootLists.
@ -205,16 +203,16 @@ AutoGCRooter::trace(JSTracer* trc)
/* static */ void
AutoGCRooter::traceAll(JSTracer* trc)
{
for (AutoGCRooter* gcr = trc->runtime()->contextFromMainThread()->roots.autoGCRooters_; gcr; gcr = gcr->down)
for (AutoGCRooter* gcr = TlsContext.get()->autoGCRooters_; gcr; gcr = gcr->down)
gcr->trace(trc);
}

/* static */ void
AutoGCRooter::traceAllWrappers(JSTracer* trc)
{
JSContext* cx = trc->runtime()->contextFromMainThread();
JSContext* cx = TlsContext.get();

for (AutoGCRooter* gcr = cx->roots.autoGCRooters_; gcr; gcr = gcr->down) {
for (AutoGCRooter* gcr = cx->autoGCRooters_; gcr; gcr = gcr->down) {
if (gcr->tag_ == WRAPVECTOR || gcr->tag_ == WRAPPER)
gcr->trace(trc);
}
@ -261,7 +259,7 @@ js::gc::GCRuntime::traceRuntimeForMajorGC(JSTracer* trc, AutoLockForExclusiveAcc
if (rt->isBeingDestroyed())
return;

gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
if (rt->atomsCompartment(lock)->zone()->isCollecting())
traceRuntimeAtoms(trc, lock);
JSCompartment::traceIncomingCrossCompartmentEdgesForZoneGC(trc);
@ -278,7 +276,7 @@ js::gc::GCRuntime::traceRuntimeForMinorGC(JSTracer* trc, AutoLockForExclusiveAcc
// the map. And we can reach its trace function despite having finished the
// roots via the edges stored by the pre-barrier verifier when we finish
// the verifier for the last time.
gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);

// FIXME: As per bug 1298816 comment 12, we should be able to remove this.
jit::JitRuntime::TraceJitcodeGlobalTable(trc);
@ -292,9 +290,9 @@ js::TraceRuntime(JSTracer* trc)
MOZ_ASSERT(!trc->isMarkingTracer());

JSRuntime* rt = trc->runtime();
rt->gc.evictNursery();
AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
rt->zoneGroupFromMainThread()->evictNursery();
AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
rt->gc.traceRuntime(trc, prep.session().lock);
}

@ -303,7 +301,7 @@ js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock)
{
MOZ_ASSERT(!rt->isBeingDestroyed());

gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_ROOTS);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_ROOTS);
traceRuntimeAtoms(trc, lock);
traceRuntimeCommon(trc, TraceRuntime, lock);
}
@ -311,7 +309,7 @@ js::gc::GCRuntime::traceRuntime(JSTracer* trc, AutoLockForExclusiveAccess& lock)
void
js::gc::GCRuntime::traceRuntimeAtoms(JSTracer* trc, AutoLockForExclusiveAccess& lock)
{
gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_RUNTIME_DATA);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_RUNTIME_DATA);
TracePermanentAtoms(trc);
TraceAtoms(trc, lock);
TraceWellKnownSymbols(trc);
@ -322,10 +320,10 @@ void
js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrMark,
AutoLockForExclusiveAccess& lock)
{
MOZ_ASSERT(!rt->mainThread.suppressGC);
MOZ_ASSERT(!TlsContext.get()->suppressGC);

{
gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_STACK);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_STACK);

// Trace active interpreter and JIT stack roots.
TraceInterpreterActivations(rt, trc);
@ -334,7 +332,7 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
// Trace legacy C stack roots.
AutoGCRooter::traceAll(trc);

for (RootRange r = rootsHash.all(); !r.empty(); r.popFront()) {
for (RootRange r = rootsHash.ref().all(); !r.empty(); r.popFront()) {
const RootEntry& entry = r.front();
TraceRoot(trc, entry.key(), entry.value());
}
@ -352,9 +350,9 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
// Trace the shared Intl data.
rt->traceSharedIntlData(trc);

// Trace anything in the single context. Note that this is actually the
// same struct as the JSRuntime, but is still split for historical reasons.
rt->contextFromMainThread()->trace(trc);
// Trace anything in the current thread's context. Ignore other JSContexts,
// as these will only refer to ZoneGroups which we are not collecting/tracing.
TlsContext.get()->trace(trc);

// Trace all compartment roots, but not the compartment itself; it is
// traced via the parent pointer if traceRoots actually traces anything.
@ -362,14 +360,14 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
c->traceRoots(trc, traceOrMark);

// Trace the Gecko Profiler.
rt->geckoProfiler.trace(trc);
rt->geckoProfiler().trace(trc);

// Trace helper thread roots.
HelperThreadState().trace(trc);

// Trace the embedding's black and gray roots.
if (!rt->isHeapMinorCollecting()) {
gcstats::AutoPhase ap(stats, gcstats::PHASE_MARK_EMBEDDING);
if (!JS::CurrentThreadIsHeapMinorCollecting()) {
gcstats::AutoPhase ap(stats(), gcstats::PHASE_MARK_EMBEDDING);

/*
* The embedding can register additional roots here.
@ -378,8 +376,8 @@ js::gc::GCRuntime::traceRuntimeCommon(JSTracer* trc, TraceOrMarkRuntime traceOrM
* the nursery should be in the store buffer, and we want to avoid the
* time taken to trace all these roots.
*/
for (size_t i = 0; i < blackRootTracers.length(); i++) {
const Callback<JSTraceDataOp>& e = blackRootTracers[i];
for (size_t i = 0; i < blackRootTracers.ref().length(); i++) {
const Callback<JSTraceDataOp>& e = blackRootTracers.ref()[i];
(*e.op)(trc, e.data);
}

@ -408,12 +406,14 @@ class AssertNoRootsTracer : public JS::CallbackTracer
void
js::gc::GCRuntime::finishRoots()
{
AutoNoteSingleThreadedRegion anstr;

rt->finishAtoms();

if (rootsHash.initialized())
rootsHash.clear();
if (rootsHash.ref().initialized())
rootsHash.ref().clear();

rt->contextFromMainThread()->roots.finishPersistentRoots();
rt->finishPersistentRoots();

rt->finishSelfHosting();

@ -427,8 +427,8 @@ js::gc::GCRuntime::finishRoots()
grayRootTracer = Callback<JSTraceDataOp>(nullptr, nullptr);

AssertNoRootsTracer trc(rt, TraceWeakMapKeysValues);
AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
gcstats::AutoPhase ap(rt->gc.stats, gcstats::PHASE_TRACE_HEAP);
AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);
gcstats::AutoPhase ap(rt->gc.stats(), gcstats::PHASE_TRACE_HEAP);
traceRuntime(&trc, prep.session().lock);

// Restore the wrapper tracing so that we leak instead of leaving dangling
@ -476,9 +476,9 @@ js::gc::GCRuntime::bufferGrayRoots()
// and the zone's buffers have been cleared.
MOZ_ASSERT(grayBufferState == GrayBufferState::Unused);
for (GCZonesIter zone(rt); !zone.done(); zone.next())
MOZ_ASSERT(zone->gcGrayRoots.empty());
MOZ_ASSERT(zone->gcGrayRoots().empty());

gcstats::AutoPhase ap(stats, gcstats::PHASE_BUFFER_GRAY_ROOTS);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_BUFFER_GRAY_ROOTS);

BufferGrayRootsTracer grayBufferer(rt);
if (JSTraceDataOp op = grayRootTracer.op)
@ -500,7 +500,7 @@ struct SetMaybeAliveFunctor {
void
BufferGrayRootsTracer::onChild(const JS::GCCellPtr& thing)
{
MOZ_ASSERT(runtime()->isHeapBusy());
MOZ_ASSERT(JS::CurrentThreadIsHeapBusy());
MOZ_RELEASE_ASSERT(thing);
// Check if |thing| is corrupt by calling a method that touches the heap.
MOZ_RELEASE_ASSERT(thing.asCell()->getTraceKind() <= JS::TraceKind::Null);
@ -518,7 +518,7 @@ BufferGrayRootsTracer::onChild(const JS::GCCellPtr& thing)
// incremental GCs (when we do gray root buffering).
DispatchTyped(SetMaybeAliveFunctor(), thing);

if (!zone->gcGrayRoots.append(tenured))
if (!zone->gcGrayRoots().append(tenured))
bufferingGrayRootsFailed = true;
}
}
@ -529,7 +529,7 @@ GCRuntime::markBufferedGrayRoots(JS::Zone* zone)
MOZ_ASSERT(grayBufferState == GrayBufferState::Okay);
MOZ_ASSERT(zone->isGCMarkingGray() || zone->isGCCompacting());

for (auto cell : zone->gcGrayRoots)
for (auto cell : zone->gcGrayRoots())
TraceManuallyBarrieredGenericPointerEdge(&marker, &cell, "buffered gray root");
}

@ -539,6 +539,11 @@ GCRuntime::resetBufferedGrayRoots() const
MOZ_ASSERT(grayBufferState != GrayBufferState::Okay,
"Do not clear the gray buffers unless we are Failed or becoming Unused");
for (GCZonesIter zone(rt); !zone.done(); zone.next())
zone->gcGrayRoots.clearAndFree();
zone->gcGrayRoots().clearAndFree();
}

JS_PUBLIC_API(void)
JS::AddPersistentRoot(JS::RootingContext* cx, RootKind kind, PersistentRooted<void*>* root)
{
static_cast<JSContext*>(cx)->runtime()->heapRoots.ref()[kind].insertBack(root);
}
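The recurring substitution in RootMarking.cpp is rt->contextFromMainThread() (and the per-zone stack root lists) giving way to TlsContext.get(): root tracing now asks which JSContext is running on the current thread rather than which context belongs to the runtime. A bare sketch of the thread-local slot this relies on; names are simplified stand-ins, not the real TlsContext implementation:

#include <cassert>

struct JSContextStub;  // stand-in for JSContext in this sketch
thread_local JSContextStub* currentThreadContext = nullptr;

struct TlsContextSlot
{
    JSContextStub* get() const {
        assert(currentThreadContext);  // a context must be active on this thread
        return currentThreadContext;
    }
    void set(JSContextStub* cx) { currentThreadContext = cx; }
};
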
@ -1100,7 +1100,7 @@ Statistics::beginNurseryCollection(JS::gcreason::Reason reason)
{
count(STAT_MINOR_GC);
if (nurseryCollectionCallback) {
(*nurseryCollectionCallback)(runtime->contextFromMainThread(),
(*nurseryCollectionCallback)(TlsContext.get(),
JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START,
reason);
}
@ -1110,7 +1110,7 @@ void
Statistics::endNurseryCollection(JS::gcreason::Reason reason)
{
if (nurseryCollectionCallback) {
(*nurseryCollectionCallback)(runtime->contextFromMainThread(),
(*nurseryCollectionCallback)(TlsContext.get(),
JS::GCNurseryProgress::GC_NURSERY_COLLECTION_END,
reason);
}
@ -1142,7 +1142,7 @@ Statistics::beginSlice(const ZoneGCStats& zoneStats, JSGCInvocationKind gckind,
// Slice callbacks should only fire for the outermost level.
bool wasFullGC = zoneStats.isCollectingAllZones();
if (sliceCallback)
(*sliceCallback)(runtime->contextFromMainThread(),
(*sliceCallback)(TlsContext.get(),
first ? JS::GC_CYCLE_BEGIN : JS::GC_SLICE_BEGIN,
JS::GCDescription(!wasFullGC, gckind, reason));
}
@ -1188,7 +1188,7 @@ Statistics::endSlice()
if (!aborted) {
bool wasFullGC = zoneStats.isCollectingAllZones();
if (sliceCallback)
(*sliceCallback)(runtime->contextFromMainThread(),
(*sliceCallback)(TlsContext.get(),
last ? JS::GC_CYCLE_END : JS::GC_SLICE_END,
JS::GCDescription(!wasFullGC, gckind, slices.back().reason));
}
@ -1405,7 +1405,7 @@ Statistics::maybePrintProfileHeaders()
static int printedHeader = 0;
if ((printedHeader++ % 200) == 0) {
printProfileHeader();
runtime->gc.nursery.printProfileHeader();
runtime->zoneGroupFromMainThread()->nursery().printProfileHeader();
}
}

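These Statistics.cpp hunks hand the embedder the thread's active context rather than one derived from the runtime. A hedged sketch of an embedder callback matching the shape invoked above (registration would go through the usual nursery-collection-callback setter; the body here is purely illustrative):

#include <cstdio>

static void
OnNurseryCollection(JSContext* cx, JS::GCNurseryProgress progress,
                    JS::gcreason::Reason reason)
{
    // |cx| is the context of the thread performing the minor GC.
    if (progress == JS::GCNurseryProgress::GC_NURSERY_COLLECTION_START)
        fprintf(stderr, "minor GC: %s\n", JS::gcreason::ExplainReason(reason));
}
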
@ -89,7 +89,7 @@ StoreBuffer::setAboutToOverflow()
{
if (!aboutToOverflow_) {
aboutToOverflow_ = true;
runtime_->gc.stats.count(gcstats::STAT_STOREBUFFER_OVERFLOW);
runtime_->gc.stats().count(gcstats::STAT_STOREBUFFER_OVERFLOW);
}
runtime_->gc.requestMinorGC(JS::gcreason::FULL_STORE_BUFFER);
}
@ -126,12 +126,11 @@ ArenaCellSet*
js::gc::AllocateWholeCellSet(Arena* arena)
{
Zone* zone = arena->zone;
JSRuntime* rt = zone->runtimeFromMainThread();
if (!rt->gc.nursery.isEnabled())
if (!zone->group()->nursery().isEnabled())
return nullptr;

AutoEnterOOMUnsafeRegion oomUnsafe;
Nursery& nursery = rt->gc.nursery;
Nursery& nursery = zone->group()->nursery();
void* data = nursery.allocateBuffer(zone, sizeof(ArenaCellSet));
if (!data) {
oomUnsafe.crash("Failed to allocate WholeCellSet");
@ -139,12 +138,12 @@ js::gc::AllocateWholeCellSet(Arena* arena)
}

if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
rt->gc.storeBuffer.setAboutToOverflow();
zone->group()->storeBuffer().setAboutToOverflow();

auto cells = static_cast<ArenaCellSet*>(data);
new (cells) ArenaCellSet(arena);
arena->bufferedCells() = cells;
rt->gc.storeBuffer.addToWholeCellBuffer(cells);
zone->group()->storeBuffer().addToWholeCellBuffer(cells);
return cells;
}

@ -342,7 +342,7 @@ class StoreBuffer

template <typename Buffer, typename Edge>
void unput(Buffer& buffer, const Edge& edge) {
MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
if (!isEnabled())
return;
@ -352,7 +352,7 @@ class StoreBuffer

template <typename Buffer, typename Edge>
void put(Buffer& buffer, const Edge& edge) {
MOZ_ASSERT(!JS::shadow::Runtime::asShadowRuntime(runtime_)->isHeapBusy());
MOZ_ASSERT(!JS::CurrentThreadIsHeapBusy());
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtime_));
if (!isEnabled())
return;
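The put/unput assertions swap the runtime-wide isHeapBusy() for the free function JS::CurrentThreadIsHeapBusy(): with multiple cooperating threads, "busy" is a property of the current thread's GC session, not of the runtime as a whole. A simplified sketch of how a per-thread heap state can back such predicates (an assumption about the implementation, not a copy of it):

enum class HeapState { Idle, Tracing, MajorCollecting, MinorCollecting };

thread_local HeapState currentThreadHeapState = HeapState::Idle;

bool CurrentThreadIsHeapBusy()
{
    // Any non-idle state counts as busy for assertion purposes.
    return currentThreadHeapState != HeapState::Idle;
}

bool CurrentThreadIsHeapMinorCollecting()
{
    return currentThreadHeapState == HeapState::MinorCollecting;
}
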
@ -101,8 +101,8 @@ class js::VerifyPreTracer final : public JS::CallbackTracer
NodeMap nodemap;

explicit VerifyPreTracer(JSRuntime* rt)
: JS::CallbackTracer(rt), noggc(rt), number(rt->gc.gcNumber()), count(0), curnode(nullptr),
root(nullptr), edgeptr(nullptr), term(nullptr)
: JS::CallbackTracer(rt), noggc(TlsContext.get()), number(rt->gc.gcNumber()),
count(0), curnode(nullptr), root(nullptr), edgeptr(nullptr), term(nullptr)
{}

~VerifyPreTracer() {
@ -179,7 +179,7 @@ gc::GCRuntime::startVerifyPreBarriers()
if (verifyPreData || isIncrementalGCInProgress())
return;

if (IsIncrementalGCUnsafe(rt) != AbortReason::None || rt->keepAtoms())
if (IsIncrementalGCUnsafe(rt) != AbortReason::None || TlsContext.get()->keepAtoms || rt->exclusiveThreadsPresent())
return;

number++;
@ -188,12 +188,12 @@ gc::GCRuntime::startVerifyPreBarriers()
if (!trc)
return;

AutoPrepareForTracing prep(rt->contextFromMainThread(), WithAtoms);
AutoPrepareForTracing prep(TlsContext.get(), WithAtoms);

for (auto chunk = allNonEmptyChunks(); !chunk.done(); chunk.next())
chunk->bitmap.clear();

gcstats::AutoPhase ap(stats, gcstats::PHASE_TRACE_HEAP);
gcstats::AutoPhase ap(stats(), gcstats::PHASE_TRACE_HEAP);

const size_t size = 64 * 1024 * 1024;
trc->root = (VerifyNode*)js_malloc(size);
@ -350,7 +350,11 @@ gc::GCRuntime::endVerifyPreBarriers()
verifyPreData = nullptr;
incrementalState = State::NotActive;

if (!compartmentCreated && IsIncrementalGCUnsafe(rt) == AbortReason::None && !rt->keepAtoms()) {
if (!compartmentCreated &&
IsIncrementalGCUnsafe(rt) == AbortReason::None &&
!TlsContext.get()->keepAtoms &&
!rt->exclusiveThreadsPresent())
{
CheckEdgeTracer cetrc(rt);

/* Start after the roots. */
@ -409,7 +413,7 @@ gc::GCRuntime::maybeVerifyPreBarriers(bool always)
if (!hasZealMode(ZealMode::VerifierPre))
return;

if (rt->mainThread.suppressGC)
if (TlsContext.get()->suppressGC)
return;

if (verifyPreData) {
@ -433,7 +437,7 @@ void
js::gc::GCRuntime::finishVerifier()
{
if (verifyPreData) {
js_delete(verifyPreData);
js_delete(verifyPreData.ref());
verifyPreData = nullptr;
}
}
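The same three-part guard now appears in both startVerifyPreBarriers and endVerifyPreBarriers: incremental GC must be safe, the current thread must not be holding atoms alive, and no exclusive (off-main) threads may be running. Factored into a helper it reads as follows (a hypothetical refactoring for clarity; the patch itself repeats the condition inline):

static bool
VerifyPreBarriersUnsafe(JSRuntime* rt)
{
    return IsIncrementalGCUnsafe(rt) != AbortReason::None ||
           TlsContext.get()->keepAtoms ||
           rt->exclusiveThreadsPresent();
}
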
@ -23,33 +23,45 @@ using namespace js::gc;

Zone * const Zone::NotOnList = reinterpret_cast<Zone*>(1);

JS::Zone::Zone(JSRuntime* rt)
JS::Zone::Zone(JSRuntime* rt, ZoneGroup* group)
: JS::shadow::Zone(rt, &rt->gc.marker),
debuggers(nullptr),
suppressAllocationMetadataBuilder(false),
arenas(rt),
group_(group),
debuggers(group, nullptr),
uniqueIds_(group),
suppressAllocationMetadataBuilder(group, false),
arenas(rt, group),
types(this),
compartments(),
gcGrayRoots(),
gcWeakKeys(SystemAllocPolicy(), rt->randomHashCodeScrambler()),
typeDescrObjects(this, SystemAllocPolicy()),
gcWeakMapList_(group),
compartments_(),
gcGrayRoots_(group),
gcWeakRefs_(group),
weakCaches_(group),
gcWeakKeys_(group, SystemAllocPolicy(), rt->randomHashCodeScrambler()),
gcZoneGroupEdges_(group),
typeDescrObjects_(group, this, SystemAllocPolicy()),
gcMallocBytes(0),
gcMaxMallocBytes(0),
gcMallocGCTriggered(false),
markedAtoms_(group),
usage(&rt->gc.usage),
threshold(),
gcDelayBytes(0),
propertyTree(this),
baseShapes(this, BaseShapeSet()),
initialShapes(this, InitialShapeSet()),
data(nullptr),
isSystem(false),
propertyTree_(group, this),
baseShapes_(group, this, BaseShapeSet()),
initialShapes_(group, this, InitialShapeSet()),
data(group, nullptr),
isSystem(group, false),
usedByExclusiveThread(false),
jitZone_(nullptr),
#ifdef DEBUG
gcLastZoneGroupIndex(group, 0),
#endif
jitZone_(group, nullptr),
gcState_(NoGC),
gcScheduled_(false),
gcPreserveCode_(false),
jitUsingBarriers_(false),
keepShapeTables_(false),
listNext_(NotOnList)
gcPreserveCode_(group, false),
jitUsingBarriers_(group, false),
keepShapeTables_(group, false),
listNext_(group, NotOnList)
{
/* Ensure that there are no vtables to mess us up here. */
MOZ_ASSERT(reinterpret_cast<JS::shadow::Zone*>(this) ==
@ -62,27 +74,27 @@ JS::Zone::Zone(JSRuntime* rt)

Zone::~Zone()
{
JSRuntime* rt = runtimeFromMainThread();
JSRuntime* rt = runtimeFromAnyThread();
if (this == rt->gc.systemZone)
rt->gc.systemZone = nullptr;

js_delete(debuggers);
js_delete(jitZone_);
js_delete(debuggers.ref());
js_delete(jitZone_.ref());

#ifdef DEBUG
// Avoid assertion destroying the weak map list if the embedding leaked GC things.
if (!rt->gc.shutdownCollectedEverything())
gcWeakMapList.clear();
gcWeakMapList().clear();
#endif
}

bool Zone::init(bool isSystemArg)
{
isSystem = isSystemArg;
return uniqueIds_.init() &&
gcZoneGroupEdges.init() &&
gcWeakKeys.init() &&
typeDescrObjects.init();
return uniqueIds().init() &&
gcZoneGroupEdges().init() &&
gcWeakKeys().init() &&
typeDescrObjects().init();
}

void
@ -147,7 +159,7 @@ Zone::getOrCreateDebuggers(JSContext* cx)
void
Zone::sweepBreakpoints(FreeOp* fop)
{
if (fop->runtime()->debuggerList.isEmpty())
if (!group() || group()->debuggerList().isEmpty())
return;

/*
@ -273,7 +285,7 @@ Zone::discardJitCode(FreeOp* fop, bool discardBaselineCode)
void
JS::Zone::checkUniqueIdTableAfterMovingGC()
{
for (UniqueIdMap::Enum e(uniqueIds_); !e.empty(); e.popFront())
for (UniqueIdMap::Enum e(uniqueIds()); !e.empty(); e.popFront())
js::gc::CheckGCThingAfterMovingGC(e.front().key());
}
#endif
@ -368,10 +380,10 @@ Zone::nextZone() const
void
Zone::clearTables()
{
if (baseShapes.initialized())
baseShapes.clear();
if (initialShapes.initialized())
initialShapes.clear();
if (baseShapes().initialized())
baseShapes().clear();
if (initialShapes().initialized())
initialShapes().clear();
}

void
@ -387,7 +399,7 @@ Zone::addTypeDescrObject(JSContext* cx, HandleObject obj)
// on the set.
MOZ_ASSERT(!IsInsideNursery(obj));

if (!typeDescrObjects.put(obj)) {
if (!typeDescrObjects().put(obj)) {
ReportOutOfMemory(cx);
return false;
}
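Every Zone initializer gains a group argument because the corresponding member is now wrapped in js::ZoneGroupData<T> (or UnprotectedData/GCLockData), a checked-access wrapper. A minimal sketch of the idea, assuming an ownedByCurrentThread() check like the one ZoneGroup provides below; the real wrappers are simplified here and also cover GC-task and lock-protected access:

#include <utility>

template <typename T>
class ZoneGroupDataSketch
{
    ZoneGroup* group_;
    T value_;

  public:
    template <typename... Args>
    explicit ZoneGroupDataSketch(ZoneGroup* group, Args&&... args)
      : group_(group), value_(std::forward<Args>(args)...)
    {}

    // All access funnels through ref(), which asserts that the calling
    // thread currently owns this zone group.
    T& ref() {
        MOZ_ASSERT(!group_ || group_->ownedByCurrentThread());
        return value_;
    }
};
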
js/src/gc/Zone.h (234 lines changed)
@ -32,7 +32,7 @@ namespace gc {
class ZoneHeapThreshold
{
// The "growth factor" for computing our next thresholds after a GC.
double gcHeapGrowthFactor_;
GCLockData<double> gcHeapGrowthFactor_;

// GC trigger threshold for allocations on the GC heap.
mozilla::Atomic<size_t, mozilla::Relaxed> gcTriggerBytes_;
@ -139,10 +139,22 @@ struct Zone : public JS::shadow::Zone,
public js::gc::GraphNodeBase<JS::Zone>,
public js::MallocProvider<JS::Zone>
{
explicit Zone(JSRuntime* rt);
explicit Zone(JSRuntime* rt, js::ZoneGroup* group);
~Zone();
MOZ_MUST_USE bool init(bool isSystem);

private:
js::ZoneGroup* const group_;
public:
js::ZoneGroup* group() const {
return group_;
}

// For JIT use.
static size_t offsetOfGroup() {
return offsetof(Zone, group_);
}

void findOutgoingEdges(js::gc::ZoneComponentFinder& finder);

void discardJitCode(js::FreeOp* fop, bool discardBaselineCode = true);
@ -186,7 +198,7 @@ struct Zone : public JS::shadow::Zone,

bool hasMarkedCompartments();

void scheduleGC() { MOZ_ASSERT(!runtimeFromMainThread()->isHeapBusy()); gcScheduled_ = true; }
void scheduleGC() { MOZ_ASSERT(!CurrentThreadIsHeapBusy()); gcScheduled_ = true; }
void unscheduleGC() { gcScheduled_ = false; }
bool isGCScheduled() { return gcScheduled_ && canCollect(); }

@ -206,7 +218,7 @@ struct Zone : public JS::shadow::Zone,
Compact
};
void setGCState(GCState state) {
MOZ_ASSERT(runtimeFromMainThread()->isHeapBusy());
MOZ_ASSERT(CurrentThreadIsHeapBusy());
MOZ_ASSERT_IF(state != NoGC, canCollect());
gcState_ = state;
if (state == Finished)
@ -214,14 +226,12 @@ struct Zone : public JS::shadow::Zone,
}

bool isCollecting() const {
if (runtimeFromMainThread()->isHeapCollecting())
return gcState_ != NoGC;
else
return needsIncrementalBarrier();
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
return isCollectingFromAnyThread();
}

bool isCollectingFromAnyThread() const {
if (runtimeFromAnyThread()->isHeapCollecting())
if (CurrentThreadIsHeapCollecting())
return gcState_ != NoGC;
else
return needsIncrementalBarrier();
@ -231,11 +241,11 @@ struct Zone : public JS::shadow::Zone,
// tracer.
bool requireGCTracer() const {
JSRuntime* rt = runtimeFromAnyThread();
return rt->isHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
return CurrentThreadIsHeapMajorCollecting() && !rt->gc.isHeapCompacting() && gcState_ != NoGC;
}

bool isGCMarking() {
if (runtimeFromMainThread()->isHeapCollecting())
if (CurrentThreadIsHeapCollecting())
return gcState_ == Mark || gcState_ == MarkGray;
else
return needsIncrementalBarrier();
@ -281,7 +291,7 @@ struct Zone : public JS::shadow::Zone,
using DebuggerVector = js::Vector<js::Debugger*, 0, js::SystemAllocPolicy>;

private:
DebuggerVector* debuggers;
js::ZoneGroupData<DebuggerVector*> debuggers;

void sweepBreakpoints(js::FreeOp* fop);
void sweepUniqueIds(js::FreeOp* fop);
@ -295,7 +305,9 @@ struct Zone : public JS::shadow::Zone,
}

// Side map for storing unique ids for cells, independent of address.
js::gc::UniqueIdMap uniqueIds_;
js::ZoneGroupData<js::gc::UniqueIdMap> uniqueIds_;

js::gc::UniqueIdMap& uniqueIds() { return uniqueIds_.ref(); }

public:
bool hasDebuggers() const { return debuggers && debuggers->length(); }
@ -313,45 +325,67 @@ struct Zone : public JS::shadow::Zone,
* is silly
* And so on.
*/
bool suppressAllocationMetadataBuilder;
js::ZoneGroupData<bool> suppressAllocationMetadataBuilder;

js::gc::ArenaLists arenas;

js::TypeZone types;

private:
/* Live weakmaps in this zone. */
mozilla::LinkedList<js::WeakMapBase> gcWeakMapList;
js::ZoneGroupData<mozilla::LinkedList<js::WeakMapBase>> gcWeakMapList_;
public:
mozilla::LinkedList<js::WeakMapBase>& gcWeakMapList() { return gcWeakMapList_.ref(); }

// The set of compartments in this zone.
typedef js::Vector<JSCompartment*, 1, js::SystemAllocPolicy> CompartmentVector;
CompartmentVector compartments;

private:
// The set of compartments in this zone.
js::UnprotectedData<CompartmentVector> compartments_;
public:
CompartmentVector& compartments() { return compartments_.ref(); }

// This zone's gray roots.
typedef js::Vector<js::gc::Cell*, 0, js::SystemAllocPolicy> GrayRootVector;
GrayRootVector gcGrayRoots;
private:
js::ZoneGroupData<GrayRootVector> gcGrayRoots_;
public:
GrayRootVector& gcGrayRoots() { return gcGrayRoots_.ref(); }

// This zone's weak edges found via graph traversal during marking,
// preserved for re-scanning during sweeping.
using WeakEdges = js::Vector<js::gc::TenuredCell**, 0, js::SystemAllocPolicy>;
WeakEdges gcWeakRefs;
private:
js::ZoneGroupData<WeakEdges> gcWeakRefs_;
public:
WeakEdges& gcWeakRefs() { return gcWeakRefs_.ref(); }

private:
// List of non-ephemeron weak containers to sweep during beginSweepingZoneGroup.
mozilla::LinkedList<WeakCache<void*>> weakCaches_;
js::ZoneGroupData<mozilla::LinkedList<WeakCache<void*>>> weakCaches_;
public:
mozilla::LinkedList<WeakCache<void*>>& weakCaches() { return weakCaches_.ref(); }
void registerWeakCache(WeakCache<void*>* cachep) {
weakCaches_.insertBack(cachep);
weakCaches().insertBack(cachep);
}

private:
/*
* Mapping from not yet marked keys to a vector of all values that the key
* maps to in any live weak map.
*/
js::gc::WeakKeyTable gcWeakKeys;
js::ZoneGroupData<js::gc::WeakKeyTable> gcWeakKeys_;
public:
js::gc::WeakKeyTable& gcWeakKeys() { return gcWeakKeys_.ref(); }

private:
// A set of edges from this zone to other zones.
//
// This is used during GC while calculating zone groups to record edges that
// can't be determined by examining this zone by itself.
ZoneSet gcZoneGroupEdges;
js::ZoneGroupData<ZoneSet> gcZoneGroupEdges_;
public:
ZoneSet& gcZoneGroupEdges() { return gcZoneGroupEdges_.ref(); }

// Keep track of all TypeDescr and related objects in this compartment.
// This is used by the GC to trace them all first when compacting, since the
@ -363,7 +397,10 @@ struct Zone : public JS::shadow::Zone,
using TypeDescrObjectSet = js::GCHashSet<JSObject*,
js::MovableCellHasher<JSObject*>,
js::SystemAllocPolicy>;
JS::WeakCache<TypeDescrObjectSet> typeDescrObjects;
private:
js::ZoneGroupData<JS::WeakCache<TypeDescrObjectSet>> typeDescrObjects_;
public:
JS::WeakCache<TypeDescrObjectSet>& typeDescrObjects() { return typeDescrObjects_.ref(); }

bool addTypeDescrObject(JSContext* cx, HandleObject obj);

@ -373,7 +410,7 @@ struct Zone : public JS::shadow::Zone,
mozilla::Atomic<ptrdiff_t, mozilla::ReleaseAcquire> gcMallocBytes;

// GC trigger threshold for allocations on the C heap.
size_t gcMaxMallocBytes;
js::UnprotectedData<size_t> gcMaxMallocBytes;

// Whether a GC has been triggered as a result of gcMallocBytes falling
// below zero.
@ -382,8 +419,11 @@ struct Zone : public JS::shadow::Zone,
// types.
mozilla::Atomic<uint32_t, mozilla::ReleaseAcquire> gcMallocGCTriggered;

private:
// Bitmap of atoms marked by this zone.
js::gc::AtomMarkingRuntime::Bitmap markedAtoms;
js::ZoneGroupOrGCTaskData<js::gc::AtomMarkingRuntime::Bitmap> markedAtoms_;
public:
js::gc::AtomMarkingRuntime::Bitmap& markedAtoms() { return markedAtoms_.ref(); }

// Track heap usage under this Zone.
js::gc::HeapUsage usage;
@ -393,19 +433,28 @@ struct Zone : public JS::shadow::Zone,

// Amount of data to allocate before triggering a new incremental slice for
// the current GC.
size_t gcDelayBytes;
js::UnprotectedData<size_t> gcDelayBytes;

private:
// Shared Shape property tree.
js::PropertyTree propertyTree;
js::ZoneGroupData<js::PropertyTree> propertyTree_;
public:
js::PropertyTree& propertyTree() { return propertyTree_.ref(); }

private:
// Set of all unowned base shapes in the Zone.
JS::WeakCache<js::BaseShapeSet> baseShapes;
js::ZoneGroupData<JS::WeakCache<js::BaseShapeSet>> baseShapes_;
public:
JS::WeakCache<js::BaseShapeSet>& baseShapes() { return baseShapes_.ref(); }

private:
// Set of initial shapes in the Zone. For certain prototypes -- namely,
// those of various builtin classes -- there are two entries: one for a
// lookup via TaggedProto, and one for a lookup via JSProtoKey. See
// InitialShapeProto.
JS::WeakCache<js::InitialShapeSet> initialShapes;
js::ZoneGroupData<JS::WeakCache<js::InitialShapeSet>> initialShapes_;
public:
JS::WeakCache<js::InitialShapeSet>& initialShapes() { return initialShapes_.ref(); }

#ifdef JSGC_HASH_TABLE_CHECKS
void checkInitialShapesTableAfterMovingGC();
@ -415,14 +464,14 @@ struct Zone : public JS::shadow::Zone,
void fixupAfterMovingGC();

// Per-zone data for use by an embedder.
void* data;
js::ZoneGroupData<void*> data;

bool isSystem;
js::ZoneGroupData<bool> isSystem;

mozilla::Atomic<bool> usedByExclusiveThread;

#ifdef DEBUG
unsigned gcLastZoneGroupIndex;
js::ZoneGroupData<unsigned> gcLastZoneGroupIndex;
#endif

static js::HashNumber UniqueIdToHash(uint64_t uid) {
@ -445,7 +494,7 @@ struct Zone : public JS::shadow::Zone,
MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));

// Get an existing uid, if one has been set.
auto p = uniqueIds_.lookupForAdd(cell);
auto p = uniqueIds().lookupForAdd(cell);
if (p) {
*uidp = p->value();
return true;
@ -453,14 +502,14 @@ struct Zone : public JS::shadow::Zone,

// Set a new uid on the cell.
*uidp = js::gc::NextCellUniqueId(runtimeFromAnyThread());
if (!uniqueIds_.add(p, cell, *uidp))
if (!uniqueIds().add(p, cell, *uidp))
return false;

// If the cell was in the nursery, hopefully unlikely, then we need to
// tell the nursery about it so that it can sweep the uid if the thing
// does not get tenured.
if (!runtimeFromAnyThread()->gc.nursery.addedUniqueIdToCell(cell)) {
uniqueIds_.remove(cell);
if (!group()->nursery().addedUniqueIdToCell(cell)) {
uniqueIds().remove(cell);
return false;
}

@ -482,7 +531,7 @@ struct Zone : public JS::shadow::Zone,
// Return true if this cell has a UID associated with it.
MOZ_MUST_USE bool hasUniqueId(js::gc::Cell* cell) {
MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
return uniqueIds_.has(cell);
return uniqueIds().has(cell);
}

// Transfer an id from another cell. This must only be called on behalf of a
@ -492,25 +541,25 @@ struct Zone : public JS::shadow::Zone,
MOZ_ASSERT(!IsInsideNursery(tgt));
MOZ_ASSERT(CurrentThreadCanAccessRuntime(runtimeFromMainThread()));
MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
uniqueIds_.rekeyIfMoved(src, tgt);
uniqueIds().rekeyIfMoved(src, tgt);
}

// Remove any unique id associated with this Cell.
void removeUniqueId(js::gc::Cell* cell) {
MOZ_ASSERT(js::CurrentThreadCanAccessZone(this));
uniqueIds_.remove(cell);
uniqueIds().remove(cell);
}

// When finished parsing off-thread, transfer any UIDs we created in the
// off-thread zone into the target zone.
void adoptUniqueIds(JS::Zone* source) {
js::AutoEnterOOMUnsafeRegion oomUnsafe;
for (js::gc::UniqueIdMap::Enum e(source->uniqueIds_); !e.empty(); e.popFront()) {
MOZ_ASSERT(!uniqueIds_.has(e.front().key()));
if (!uniqueIds_.put(e.front().key(), e.front().value()))
for (js::gc::UniqueIdMap::Enum e(source->uniqueIds()); !e.empty(); e.popFront()) {
MOZ_ASSERT(!uniqueIds().has(e.front().key()));
if (!uniqueIds().put(e.front().key(), e.front().value()))
oomUnsafe.crash("failed to transfer unique ids from off-main-thread");
}
source->uniqueIds_.clear();
source->uniqueIds().clear();
}

JSContext* contextFromMainThread() {
@ -530,18 +579,18 @@ struct Zone : public JS::shadow::Zone,
}

private:
js::jit::JitZone* jitZone_;
js::ZoneGroupData<js::jit::JitZone*> jitZone_;

GCState gcState_;
bool gcScheduled_;
bool gcPreserveCode_;
bool jitUsingBarriers_;
bool keepShapeTables_;
js::UnprotectedData<GCState> gcState_;
js::UnprotectedData<bool> gcScheduled_;
js::ZoneGroupData<bool> gcPreserveCode_;
js::ZoneGroupData<bool> jitUsingBarriers_;
js::ZoneGroupData<bool> keepShapeTables_;

// Allow zones to be linked into a list
friend class js::gc::ZoneList;
static Zone * const NotOnList;
Zone* listNext_;
js::ZoneGroupOrGCTaskData<Zone*> listNext_;
bool isOnList() const;
Zone* nextZone() const;

@ -565,18 +614,17 @@ enum ZoneSelector {
class ZonesIter
{
gc::AutoEnterIteration iterMarker;
JSRuntime* rt;
JS::Zone** it;
JS::Zone** end;

public:
ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc) {
it = rt->gc.zones.begin();
end = rt->gc.zones.end();

if (selector == SkipAtoms) {
MOZ_ASSERT(atAtomsZone(rt));
it++;
}
ZonesIter(JSRuntime* rt, ZoneSelector selector) : iterMarker(&rt->gc), rt(rt) {
if (selector == WithAtoms && rt->gc.atomsZone)
it = const_cast<JS::Zone**>(&rt->gc.atomsZone.ref());
else
it = rt->zoneGroupFromAnyThread()->zones().begin();
end = rt->zoneGroupFromAnyThread()->zones().end();
}

bool atAtomsZone(JSRuntime* rt);
@ -586,7 +634,10 @@ class ZonesIter
void next() {
MOZ_ASSERT(!done());
do {
it++;
if (it == &rt->gc.atomsZone.ref())
it = rt->zoneGroupFromAnyThread()->zones().begin();
else
it++;
} while (!done() && (*it)->usedByExclusiveThread);
}

@ -602,13 +653,13 @@ class ZonesIter
struct CompartmentsInZoneIter
{
explicit CompartmentsInZoneIter(JS::Zone* zone) : zone(zone) {
it = zone->compartments.begin();
it = zone->compartments().begin();
}

bool done() const {
MOZ_ASSERT(it);
return it < zone->compartments.begin() ||
it >= zone->compartments.end();
return it < zone->compartments().begin() ||
it >= zone->compartments().end();
}
void next() {
MOZ_ASSERT(!done());
@ -744,6 +795,63 @@ class ZoneAllocPolicy
}
};

/*
* Provides a delete policy that can be used for objects which have their
* lifetime managed by the GC and can only safely be destroyed while the nursery
* is empty.
*
* This is necessary when initializing such an object may fail after the initial
* allocation. The partially-initialized object must be destroyed, but it may
* not be safe to do so at the current time. This policy puts the object on a
* queue to be destroyed at a safe time.
*/
template <typename T>
struct GCManagedDeletePolicy
{
void operator()(const T* ptr) {
if (ptr) {
JSContext* cx = TlsContext.get();
if (cx->runtime()->zoneGroupFromMainThread()->nursery().isEnabled()) {
// The object may contain nursery pointers and must only be
// destroyed after a minor GC.
cx->runtime()->zoneGroupFromMainThread()->callAfterMinorGC(deletePtr, const_cast<T*>(ptr));
} else {
// The object cannot contain nursery pointers so can be
// destroyed immediately.
gc::AutoSetThreadIsSweeping threadIsSweeping;
js_delete(const_cast<T*>(ptr));
}
}
}

private:
static void deletePtr(void* data) {
js_delete(reinterpret_cast<T*>(data));
}
};

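The JS::DeletePolicy specializations just below hook this policy into smart pointers, so destruction of GC-managed scope data is deferred automatically while the nursery may still point into it. A hedged usage sketch; since js::UniquePtr defaults its deleter to JS::DeletePolicy, the specialization is picked up implicitly:

// Dropping this pointer while the nursery is enabled queues the delete via
// callAfterMinorGC(); otherwise the object is destroyed immediately.
using FunctionScopeDataPtr = js::UniquePtr<js::FunctionScope::Data>;
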
} // namespace js

namespace JS {

template <typename T>
struct DeletePolicy<js::GCPtr<T>> : public js::GCManagedDeletePolicy<js::GCPtr<T>>
{};

// Scope data that contain GCPtrs must use the correct DeletePolicy.
//
// This is defined here because vm/Scope.h cannot #include "vm/Runtime.h"

template <>
struct DeletePolicy<js::FunctionScope::Data>
: public js::GCManagedDeletePolicy<js::FunctionScope::Data>
{ };

template <>
struct DeletePolicy<js::ModuleScope::Data>
: public js::GCManagedDeletePolicy<js::ModuleScope::Data>
{ };

} // namespace JS

#endif // gc_Zone_h

84
js/src/gc/ZoneGroup.cpp
Normal file
84
js/src/gc/ZoneGroup.cpp
Normal file
@ -0,0 +1,84 @@
|
||||
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sts=4 et sw=4 tw=99:
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#include "gc/ZoneGroup.h"
|
||||
|
||||
#include "jscntxt.h"
|
||||
|
||||
#include "jit/JitCompartment.h"
|
||||
|
||||
namespace js {
|
||||
|
||||
ZoneGroup::ZoneGroup(JSRuntime* runtime)
|
||||
: runtime(runtime),
|
||||
context(TlsContext.get()),
|
||||
enterCount(this, 1),
|
||||
zones_(),
|
||||
nursery_(this),
|
||||
storeBuffer_(this, runtime, nursery()),
|
||||
blocksToFreeAfterMinorGC((size_t) JSContext::TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
|
||||
caches_(this),
|
||||
#ifdef DEBUG
|
||||
ionBailAfter_(this, 0),
|
||||
#endif
|
||||
jitZoneGroup(this, nullptr),
|
||||
debuggerList_(this),
|
||||
profilingScripts(this, false),
|
||||
scriptAndCountsVector(this, nullptr)
|
||||
{}
|
||||
|
||||
bool
|
||||
ZoneGroup::init(size_t maxNurseryBytes)
|
||||
{
|
||||
if (!caches().init())
|
||||
return false;
|
||||
|
||||
AutoLockGC lock(runtime);
|
||||
if (!nursery().init(maxNurseryBytes, lock))
|
||||
return false;
|
||||
|
||||
jitZoneGroup = js_new<jit::JitZoneGroup>(this);
|
||||
if (!jitZoneGroup)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
ZoneGroup::~ZoneGroup()
|
||||
{
|
||||
js_delete(jitZoneGroup.ref());
|
||||
}
|
||||
|
||||
void
|
||||
ZoneGroup::enter()
|
||||
{
|
||||
JSContext* cx = TlsContext.get();
|
||||
if (context == cx) {
|
||||
MOZ_ASSERT(enterCount);
|
||||
} else {
|
||||
JSContext* old = context.exchange(cx);
|
||||
MOZ_RELEASE_ASSERT(old == nullptr);
|
||||
MOZ_ASSERT(enterCount == 0);
|
||||
}
|
||||
enterCount++;
|
||||
}
|
||||
|
||||
void
|
||||
ZoneGroup::leave()
|
||||
{
|
||||
MOZ_ASSERT(ownedByCurrentThread());
|
||||
MOZ_ASSERT(enterCount);
|
||||
if (--enterCount == 0)
|
||||
context = nullptr;
|
||||
}
|
||||
|
||||
bool
|
||||
ZoneGroup::ownedByCurrentThread()
|
||||
{
|
||||
return context == TlsContext.get();
|
||||
}
|
||||
|
||||
} // namespace js
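
The enter/leave protocol above relies on a single atomic owner slot plus a re-entry counter that only the owner touches. A standalone analogue using std::atomic makes the invariants the asserts encode explicit (Group, owner, and enterCount below are illustrative stand-ins, not engine types):

    #include <atomic>
    #include <cassert>
    #include <thread>

    struct Group {
        std::atomic<std::thread::id> owner{};   // thread currently inside the group
        size_t enterCount = 0;                  // re-entry depth, owner-only data

        void enter() {
            std::thread::id self = std::this_thread::get_id();
            if (owner.load() == self) {
                assert(enterCount > 0);          // re-entering on the same thread
            } else {
                // Claim ownership; the previous owner must have fully left.
                std::thread::id old = owner.exchange(self);
                assert(old == std::thread::id());
                assert(enterCount == 0);
            }
            enterCount++;
        }

        void leave() {
            assert(owner.load() == std::this_thread::get_id());
            assert(enterCount > 0);
            if (--enterCount == 0)
                owner.store(std::thread::id()); // release for other threads
        }
    };

    int main() {
        Group g;
        g.enter();
        g.enter();   // nested entry on the owning thread is fine
        g.leave();
        g.leave();   // last leave releases ownership
    }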
|
js/src/gc/ZoneGroup.h (new file, 173 lines)
@ -0,0 +1,173 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sts=4 et sw=4 tw=99:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef gc_ZoneGroup_h
#define gc_ZoneGroup_h

#include "jsgc.h"

#include "gc/Statistics.h"
#include "vm/Caches.h"

namespace js {

namespace jit { class JitZoneGroup; }

class AutoKeepAtoms;

typedef Vector<JS::Zone*, 4, SystemAllocPolicy> ZoneVector;

using ScriptAndCountsVector = GCVector<ScriptAndCounts, 0, SystemAllocPolicy>;

// Zone groups encapsulate data about a group of zones that are logically
// related in some way. Currently, each runtime has a single zone group, and
// all zones except the atoms zone (which has no group) are in that group.
// This will change soon.
//
// When JSRuntimes become multithreaded (also happening soon; see bug 1323066),
// zone groups will be the primary means by which threads ensure exclusive
// access to the data they are using. Most data in a zone group, its zones,
// compartments, GC things and so forth may only be used by the thread that has
// entered the zone group.
//
// This restriction is not quite in place yet: zones used by a parse thread
// are accessed by that thread even though it does not have exclusive access
// to the entire zone group. This will also be changing soon.

class ZoneGroup
{
  public:
    JSRuntime* const runtime;

    // The context with exclusive access to this zone group.
    mozilla::Atomic<JSContext*, mozilla::ReleaseAcquire> context;

    // The number of times the context has entered this zone group.
    ZoneGroupData<size_t> enterCount;

    void enter();
    void leave();
    bool ownedByCurrentThread();

    // All zones in the group.
  private:
    UnprotectedData<ZoneVector> zones_;
  public:
    ZoneVector& zones() { return zones_.ref(); }

    explicit ZoneGroup(JSRuntime* runtime);
    ~ZoneGroup();

    bool init(size_t maxNurseryBytes);

  private:
    UnprotectedData<Nursery> nursery_;
    ZoneGroupData<gc::StoreBuffer> storeBuffer_;
  public:
    Nursery& nursery() { return nursery_.ref(); }
    gc::StoreBuffer& storeBuffer() { return storeBuffer_.ref(); }

    // Free LIFO blocks are transferred to this allocator before being freed
    // after minor GC.
    UnprotectedData<LifoAlloc> blocksToFreeAfterMinorGC;

    void minorGC(JS::gcreason::Reason reason,
                 gcstats::Phase phase = gcstats::PHASE_MINOR_GC) JS_HAZ_GC_CALL;
    void evictNursery(JS::gcreason::Reason reason = JS::gcreason::EVICT_NURSERY) {
        minorGC(reason, gcstats::PHASE_EVICT_NURSERY);
    }
    void freeAllLifoBlocksAfterMinorGC(LifoAlloc* lifo);

    const void* addressOfNurseryPosition() {
        return nursery_.refNoCheck().addressOfPosition();
    }
    const void* addressOfNurseryCurrentEnd() {
        return nursery_.refNoCheck().addressOfCurrentEnd();
    }

    // Queue a thunk to run after the next minor GC.
    void callAfterMinorGC(void (*thunk)(void* data), void* data) {
        nursery().queueSweepAction(thunk, data);
    }

    inline bool isCollecting();
    inline bool isGCScheduled();

  private:
    ZoneGroupData<ZoneGroupCaches> caches_;
  public:
    ZoneGroupCaches& caches() { return caches_.ref(); }

#ifdef DEBUG
  private:
    // The number of possible bailing places encountered before forcefully bailing
    // in that place. Zero means inactive.
    ZoneGroupData<uint32_t> ionBailAfter_;

  public:
    void* addressOfIonBailAfter() { return &ionBailAfter_; }

    // Set after how many bailing places we should forcefully bail.
    // Zero disables this feature.
    void setIonBailAfter(uint32_t after) {
        ionBailAfter_ = after;
    }
#endif

    ZoneGroupData<jit::JitZoneGroup*> jitZoneGroup;

  private:
    /* Linked list of all Debugger objects in the group. */
    ZoneGroupData<mozilla::LinkedList<js::Debugger>> debuggerList_;
  public:
    mozilla::LinkedList<js::Debugger>& debuggerList() { return debuggerList_.ref(); }

    /* If true, new scripts must be created with PC counter information. */
    ZoneGroupOrIonCompileData<bool> profilingScripts;

    /* Strong references on scripts held for PCCount profiling API. */
    ZoneGroupData<JS::PersistentRooted<ScriptAndCountsVector>*> scriptAndCountsVector;
};

class MOZ_RAII AutoAccessZoneGroup
{
    ZoneGroup* group;

  public:
    explicit AutoAccessZoneGroup(ZoneGroup* group)
      : group(group)
    {
        group->enter();
    }

    ~AutoAccessZoneGroup() {
        group->leave();
    }
};

class MOZ_RAII AutoAccessZoneGroups
{
    Vector<ZoneGroup*, 4, SystemAllocPolicy> acquiredGroups;

  public:
    AutoAccessZoneGroups() {}

    ~AutoAccessZoneGroups() {
        for (size_t i = 0; i < acquiredGroups.length(); i++)
            acquiredGroups[i]->leave();
    }

    void access(ZoneGroup* group) {
        group->enter();
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!acquiredGroups.append(group))
            oomUnsafe.crash("acquiredGroups.append failed");
    }
};

} // namespace js

#endif // gc_ZoneGroup_h
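
A sketch of how the two RAII helpers are intended to be used. Everything below (Group, AutoAccess, AutoAccessMany) is a simplified standalone analogue of ZoneGroup, AutoAccessZoneGroup, and AutoAccessZoneGroups, not the engine's API:

    #include <cassert>
    #include <vector>

    struct Group {
        int depth = 0;
        void enter() { depth++; }
        void leave() { assert(depth > 0); depth--; }
    };

    class AutoAccess {
        Group* group_;
      public:
        explicit AutoAccess(Group* g) : group_(g) { group_->enter(); }
        ~AutoAccess() { group_->leave(); }
    };

    class AutoAccessMany {
        std::vector<Group*> acquired_;
      public:
        void access(Group* g) { g->enter(); acquired_.push_back(g); }
        ~AutoAccessMany() {
            for (Group* g : acquired_)
                g->leave();          // release everything on scope exit
        }
    };

    int main() {
        Group a, b;
        {
            AutoAccess one(&a);      // single-group scope
            AutoAccessMany many;
            many.access(&a);         // re-entry on the same thread is fine
            many.access(&b);
        }                            // all groups released here
        assert(a.depth == 0 && b.depth == 0);
    }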
|
@ -79,7 +79,7 @@ class UnwinderTypeCache(object):
        commonFrameLayout = gdb.lookup_type('js::jit::CommonFrameLayout')
        self.d['typeCommonFrameLayout'] = commonFrameLayout
        self.d['typeCommonFrameLayoutPointer'] = commonFrameLayout.pointer()
        self.d['per_tls_data'] = gdb.lookup_global_symbol('js::TlsPerThreadData')
        self.d['per_tls_context'] = gdb.lookup_global_symbol('js::TlsContext')

        self.d['void_starstar'] = gdb.lookup_type('void').pointer().pointer()
        self.d['mod_ExecutableAllocator'] = jsjitExecutableAllocatorCache()
@ -330,8 +330,8 @@ class UnwinderState(object):
        if self.proc_mappings != None:
            return not self.text_address_claimed(pc)

        ptd = self.get_tls_per_thread_data()
        runtime = ptd['runtime_']
        cx = self.get_tls_context()
        runtime = cx['runtime_']
        if runtime == 0:
            return False

@ -352,9 +352,9 @@ class UnwinderState(object):
    def check(self):
        return gdb.selected_thread() is self.thread

    # Essentially js::TlsPerThreadData.get().
    def get_tls_per_thread_data(self):
        return self.typecache.per_tls_data.value()['mValue']
    # Essentially js::TlsContext.get().
    def get_tls_context(self):
        return self.typecache.per_tls_context.value()['mValue']

    # |common| is a pointer to a CommonFrameLayout object. Return a
    # tuple (local_size, header_size, frame_type), where |size| is the
@ -434,9 +434,9 @@ class UnwinderState(object):
            # Reached the end of the list.
            return None
        elif self.activation is None:
            ptd = self.get_tls_per_thread_data()
            self.activation = ptd['runtime_']['jitActivation']
            jittop = ptd['runtime_']['jitTop']
            cx = self.get_tls_context()
            self.activation = cx['jitActivation']
            jittop = cx['jitTop']
        else:
            jittop = self.activation['prevJitTop_']
            self.activation = self.activation['prevJitActivation_']
|
@ -64,10 +64,10 @@ using namespace js::jit;
 * The tempN registers are free to use for computations.
 */

NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(LifoAlloc* alloc, RegExpShared* shared,
                                                       JSRuntime* rt, Mode mode, int registers_to_save)
NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(JSContext* cx, LifoAlloc* alloc, RegExpShared* shared,
                                                       Mode mode, int registers_to_save)
  : RegExpMacroAssembler(*alloc, shared, registers_to_save),
    runtime(rt), mode_(mode)
    cx(cx), mode_(mode)
{
    // Find physical registers for each compiler register.
    AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
@ -157,7 +157,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
    // avoid failing repeatedly when the regex code is called from Ion JIT code,
    // see bug 1208819.
    Label stack_ok;
    void* stack_limit = runtime->addressOfJitStackLimitNoInterrupt();
    void* stack_limit = &cx->runtime()->contextFromMainThread()->jitStackLimitNoInterrupt;
    masm.branchStackPtrRhs(Assembler::Below, AbsoluteAddress(stack_limit), &stack_ok);

@ -272,7 +272,8 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
    }

    // Initialize backtrack stack pointer.
    masm.loadPtr(AbsoluteAddress(runtime->regexpStack.addressOfBase()), backtrack_stack_pointer);
    masm.loadPtr(AbsoluteAddress(cx->runtime()->contextFromMainThread()->regexpStack.ref().addressOfBase()),
                 backtrack_stack_pointer);
    masm.storePtr(backtrack_stack_pointer,
                  Address(masm.getStackPointer(), offsetof(FrameData, backtrackStackBase)));

@ -431,7 +432,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)

    Label grow_failed;

    masm.movePtr(ImmPtr(runtime), temp1);
    masm.movePtr(ImmPtr(cx->runtime()), temp1);

    // Save registers before calling C function
    LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile());
@ -462,7 +463,7 @@ NativeRegExpMacroAssembler::GenerateCode(JSContext* cx, bool match_only)
    Address backtrackStackBaseAddress(temp2, offsetof(FrameData, backtrackStackBase));
    masm.subPtr(backtrackStackBaseAddress, backtrack_stack_pointer);

    masm.loadPtr(AbsoluteAddress(runtime->regexpStack.addressOfBase()), temp1);
    masm.loadPtr(AbsoluteAddress(cx->runtime()->contextFromMainThread()->regexpStack.ref().addressOfBase()), temp1);
    masm.storePtr(temp1, backtrackStackBaseAddress);
    masm.addPtr(temp1, backtrack_stack_pointer);

@ -542,7 +543,7 @@ NativeRegExpMacroAssembler::Backtrack()
    // Check for an interrupt.
    Label noInterrupt;
    masm.branch32(Assembler::Equal,
                  AbsoluteAddress(runtime->addressOfInterruptUint32()), Imm32(0),
                  AbsoluteAddress(&cx->runtime()->contextFromMainThread()->interrupt_), Imm32(0),
                  &noInterrupt);
    masm.movePtr(ImmWord(RegExpRunStatus_Error), temp0);
    masm.jump(&exit_label_);
@ -1100,7 +1101,7 @@ NativeRegExpMacroAssembler::CheckBacktrackStackLimit()
{
    JitSpew(SPEW_PREFIX "CheckBacktrackStackLimit");

    const void* limitAddr = runtime->regexpStack.addressOfLimit();
    const void* limitAddr = cx->runtime()->contextFromMainThread()->regexpStack.ref().addressOfLimit();

    Label no_stack_overflow;
    masm.branchPtr(Assembler::AboveOrEqual, AbsoluteAddress(limitAddr),
|
@ -88,8 +88,8 @@ class MOZ_STACK_CLASS NativeRegExpMacroAssembler final : public RegExpMacroAssem
    // Type of input string to generate code for.
    enum Mode { ASCII = 1, CHAR16 = 2 };

    NativeRegExpMacroAssembler(LifoAlloc* alloc, RegExpShared* shared,
                               JSRuntime* rt, Mode mode, int registers_to_save);
    NativeRegExpMacroAssembler(JSContext* cx, LifoAlloc* alloc, RegExpShared* shared,
                               Mode mode, int registers_to_save);

    // Inherited virtual methods.
    RegExpCode GenerateCode(JSContext* cx, bool match_only);
@ -174,7 +174,7 @@ class MOZ_STACK_CLASS NativeRegExpMacroAssembler final : public RegExpMacroAssem
  private:
    jit::MacroAssembler masm;

    JSRuntime* runtime;
    JSContext* cx;
    Mode mode_;
    jit::Label entry_label_;
    jit::Label start_label_;
|
@ -1787,7 +1787,7 @@ irregexp::CompilePattern(JSContext* cx, RegExpShared* shared, RegExpCompileData*
                         : NativeRegExpMacroAssembler::CHAR16;

        ctx.emplace(cx, (jit::TempAllocator*) nullptr);
        native_assembler.emplace(&alloc, shared, cx->runtime(), mode, (data->capture_count + 1) * 2);
        native_assembler.emplace(cx, &alloc, shared, mode, (data->capture_count + 1) * 2);
        assembler = native_assembler.ptr();
    } else {
        interpreted_assembler.emplace(&alloc, shared, (data->capture_count + 1) * 2);
|
@ -34,6 +34,8 @@
#include "irregexp/RegExpMacroAssembler.h"
#include "vm/MatchPairs.h"

#include "jscntxtinlines.h"

using namespace js;
using namespace js::irregexp;
|
@ -30,13 +30,13 @@

#include "irregexp/RegExpStack.h"

#include "vm/Runtime.h"
#include "jscntxt.h"

using namespace js;
using namespace js::irregexp;

RegExpStackScope::RegExpStackScope(JSRuntime* rt)
  : regexp_stack(&rt->regexpStack)
RegExpStackScope::RegExpStackScope(JSContext* cx)
  : regexp_stack(&cx->regexpStack.ref())
{}

RegExpStackScope::~RegExpStackScope()
@ -47,7 +47,7 @@ RegExpStackScope::~RegExpStackScope()
int
irregexp::GrowBacktrackStack(JSRuntime* rt)
{
    return rt->regexpStack.grow();
    return TlsContext.get()->regexpStack.ref().grow();
}

RegExpStack::RegExpStack()
|
@ -51,7 +51,7 @@ class RegExpStackScope
    // Create and delete an instance to control the life-time of a growing stack.

    // Initializes the stack memory area if necessary.
    explicit RegExpStackScope(JSRuntime* rt);
    explicit RegExpStackScope(JSContext* cx);

    // Releases the stack if it has grown.
    ~RegExpStackScope();
|
@ -29,14 +29,14 @@ using mozilla::IsInRange;
uint32_t
jit::Bailout(BailoutStack* sp, BaselineBailoutInfo** bailoutInfo)
{
    JSContext* cx = GetJSContextFromMainThread();
    JSContext* cx = TlsContext.get();
    MOZ_ASSERT(bailoutInfo);

    // We don't have an exit frame.
    MOZ_ASSERT(IsInRange(FAKE_JIT_TOP_FOR_BAILOUT, 0, 0x1000) &&
               IsInRange(FAKE_JIT_TOP_FOR_BAILOUT + sizeof(CommonFrameLayout), 0, 0x1000),
               "Fake jitTop pointer should be within the first page.");
    cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
    cx->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;

    JitActivationIterator jitActivations(cx->runtime());
    BailoutFrameInfo bailoutData(jitActivations, sp);
@ -94,7 +94,7 @@ jit::Bailout(BailoutStack* sp, BaselineBailoutInfo** bailoutInfo)
    // In both cases, we want to temporarily set the |lastProfilingFrame|
    // to the current frame being bailed out, and then fix it up later.
    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
        cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
        cx->jitActivation->setLastProfilingFrame(currentFramePtr);

    return retval;
}
@ -105,10 +105,10 @@ jit::InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut,
{
    sp->checkInvariants();

    JSContext* cx = GetJSContextFromMainThread();
    JSContext* cx = TlsContext.get();

    // We don't have an exit frame.
    cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
    cx->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;

    JitActivationIterator jitActivations(cx->runtime());
    BailoutFrameInfo bailoutData(jitActivations, sp);
@ -163,7 +163,7 @@ jit::InvalidationBailout(InvalidationBailoutStack* sp, size_t* frameSizeOut,

    // Make the frame being bailed out the top profiled frame.
    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
        cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
        cx->jitActivation->setLastProfilingFrame(currentFramePtr);

    return retval;
}
@ -192,9 +192,9 @@ jit::ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame,
    // operation callback like a timeout handler.
    MOZ_ASSERT_IF(!excInfo.propagatingIonExceptionForDebugMode(), cx->isExceptionPending());

    uint8_t* prevJitTop = cx->runtime()->jitTop;
    auto restoreJitTop = mozilla::MakeScopeExit([&]() { cx->runtime()->jitTop = prevJitTop; });
    cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
    uint8_t* prevJitTop = cx->jitTop;
    auto restoreJitTop = mozilla::MakeScopeExit([&]() { cx->jitTop = prevJitTop; });
    cx->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;

    gc::AutoSuppressGC suppress(cx);

@ -249,7 +249,7 @@ jit::ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame,

    // Make the frame being bailed out the top profiled frame.
    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
        cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);
        cx->jitActivation->setLastProfilingFrame(currentFramePtr);

    return retval;
}
|
@ -632,7 +632,7 @@ InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
{
    // The Baseline frames we will reconstruct on the heap are not rooted, so GC
    // must be suppressed here.
    MOZ_ASSERT(cx->mainThread().suppressGC);
    MOZ_ASSERT(cx->suppressGC);

    MOZ_ASSERT(script->hasBaselineScript());

@ -946,7 +946,7 @@ InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
            Value v;

            if (!iter.moreFrames() && i == exprStackSlots - 1 &&
                cx->runtime()->jitRuntime()->hasIonReturnOverride())
                cx->hasIonReturnOverride())
            {
                // If coming from an invalidation bailout, and this is the topmost
                // value, and a value override has been specified, don't read from the
@ -954,7 +954,7 @@ InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
                MOZ_ASSERT(invalidate);
                iter.skip();
                JitSpew(JitSpew_BaselineBailouts, " [Return Override]");
                v = cx->runtime()->jitRuntime()->takeIonReturnOverride();
                v = cx->takeIonReturnOverride();
            } else if (excInfo && excInfo->propagatingIonExceptionForDebugMode()) {
                // If we are in the middle of propagating an exception from Ion by
                // bailing to baseline due to debug mode, we might not have all
@ -1202,7 +1202,7 @@ InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
        JitSpew(JitSpew_BaselineBailouts, " Set resumeAddr=%p", opReturnAddr);
    }

    if (cx->runtime()->geckoProfiler.enabled()) {
    if (cx->runtime()->geckoProfiler().enabled()) {
        // Register bailout with profiler.
        const char* filename = script->filename();
        if (filename == nullptr)
@ -1220,7 +1220,7 @@ InitFromBailout(JSContext* cx, HandleScript caller, jsbytecode* callerPC,
                                 PCToLineNumber(script, pc),
                                 filename,
                                 script->lineno());
        cx->runtime()->geckoProfiler.markEvent(buf);
        cx->runtime()->geckoProfiler().markEvent(buf);
        js_free(buf);
    }

@ -1810,7 +1810,7 @@ jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
{
    // The caller pushes R0 and R1 on the stack without rooting them.
    // Since GC here is very unlikely just suppress it.
    JSContext* cx = GetJSContextFromMainThread();
    JSContext* cx = TlsContext.get();
    js::gc::AutoSuppressGC suppressGC(cx);

    JitSpew(JitSpew_BaselineBailouts, " Done restoring frames");
@ -1862,7 +1862,7 @@ jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
    // (which must be a baseline frame), and set it as the last profiling
    // frame.
    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
        cx->runtime()->jitActivation->setLastProfilingFrame(iter.prevFp());
        cx->jitActivation->setLastProfilingFrame(iter.prevFp());

    uint32_t frameno = 0;
    while (frameno < numFrames) {
@ -1915,7 +1915,7 @@ jit::FinishBailoutToBaseline(BaselineBailoutInfo* bailoutInfo)
    // values into the baseline frame. We need to do this even when debug mode
    // is off, as we should respect the mutations made while debug mode was
    // on.
    JitActivation* act = cx->runtime()->activation()->asJit();
    JitActivation* act = cx->activation()->asJit();
    if (act->hasRematerializedFrame(outerFp)) {
        JitFrameIterator iter(cx);
        size_t inlineDepth = numFrames;
|
@ -11,6 +11,8 @@
#include "jit/SharedICHelpers.h"
#include "proxy/Proxy.h"

#include "jscntxtinlines.h"

#include "jit/MacroAssembler-inl.h"

using namespace js;
@ -148,7 +150,7 @@ BaselineCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun)
{
    MOZ_ASSERT(inStubFrame_);

    JitCode* code = cx_->jitRuntime()->getVMWrapper(fun);
    JitCode* code = cx_->runtime()->jitRuntime()->getVMWrapper(fun);
    if (!code)
        return false;

@ -753,7 +755,7 @@ BaselineCacheIRCompiler::emitStoreSlotShared(bool isFixed)
        masm.storeValue(val, slot);
    }

    if (cx_->gc.nursery.exists())
    if (cx_->nursery().exists())
        BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
    return true;
}
@ -835,7 +837,7 @@ BaselineCacheIRCompiler::emitAddAndStoreSlotShared(bool isFixed)
        masm.storeValue(val, slot);
    }

    if (cx_->gc.nursery.exists())
    if (cx_->nursery().exists())
        BaselineEmitPostWriteBarrierSlot(masm, obj, val, scratch, LiveGeneralRegisterSet(), cx_);
    return true;
}
|
@ -51,7 +51,7 @@ BaselineCompiler::BaselineCompiler(JSContext* cx, TempAllocator& alloc, JSScript
bool
BaselineCompiler::init()
{
    if (!analysis_.init(alloc_, cx->caches.gsnCache))
    if (!analysis_.init(alloc_, cx->caches().gsnCache))
        return false;

    if (!labels_.init(alloc_, script->length()))
@ -525,7 +525,7 @@ bool
BaselineCompiler::emitStackCheck(bool earlyCheck)
{
    Label skipCall;
    void* limitAddr = cx->runtime()->addressOfJitStackLimit();
    void* limitAddr = &cx->runtime()->contextFromMainThread()->jitStackLimit;
    uint32_t slotsSize = script->nslots() * sizeof(Value);
    uint32_t tolerance = earlyCheck ? slotsSize : 0;

@ -697,7 +697,7 @@ BaselineCompiler::emitInterruptCheck()
    frame.syncStack(0);

    Label done;
    void* interrupt = cx->runtime()->addressOfInterruptUint32();
    void* interrupt = &cx->runtime()->contextFromMainThread()->interrupt_;
    masm.branch32(Assembler::Equal, AbsoluteAddress(interrupt), Imm32(0), &done);

    prepareVMCall();
@ -4418,9 +4418,9 @@ BaselineCompiler::emit_JSOP_RESUME()
    {
        Register scratchReg = scratch2;
        Label skip;
        AbsoluteAddress addressOfEnabled(cx->runtime()->geckoProfiler.addressOfEnabled());
        AbsoluteAddress addressOfEnabled(cx->runtime()->geckoProfiler().addressOfEnabled());
        masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &skip);
        masm.loadPtr(AbsoluteAddress(cx->runtime()->addressOfProfilingActivation()), scratchReg);
        masm.loadPtr(AbsoluteAddress(cx->addressOfProfilingActivation()), scratchReg);
        masm.storePtr(masm.getStackPointer(),
                      Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()));
        masm.bind(&skip);
|
@ -443,7 +443,7 @@ PatchBaselineFramesForDebugMode(JSContext* cx, const Debugger::ExecutionObservab
            MOZ_ASSERT(iter.baselineFrame()->isHandlingException());
            MOZ_ASSERT(iter.baselineFrame()->overridePc() == pc);
            uint8_t* retAddr;
            if (cx->runtime()->geckoProfiler.enabled())
            if (cx->runtime()->geckoProfiler().enabled())
                retAddr = bl->nativeCodeForPC(script, pc);
            else
                retAddr = nullptr;
@ -852,7 +852,7 @@ jit::RecompileOnStackBaselineScriptsForDebugMode(JSContext* cx,

    // When the profiler is enabled, we need to have suppressed sampling,
    // since the baseline jit scripts are in a state of flux.
    MOZ_ASSERT(!cx->runtime()->isProfilerSamplingEnabled());
    MOZ_ASSERT(!cx->isProfilerSamplingEnabled());

    // Invalidate all scripts we are recompiling.
    if (Zone* zone = obs.singleZone()) {
@ -1046,7 +1046,7 @@ JitRuntime::getBaselineDebugModeOSRHandlerAddress(JSContext* cx, bool popFrameRe
        return nullptr;
    return popFrameReg
           ? baselineDebugModeOSRHandler_->raw()
           : baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
           : baselineDebugModeOSRHandlerNoFrameRegPopAddr_.ref();
}

static void
|
@ -94,7 +94,7 @@ class DebugModeOSRVolatileJitFrameIterator : public JitFrameIterator
    explicit DebugModeOSRVolatileJitFrameIterator(JSContext* cx)
      : JitFrameIterator(cx)
    {
        stack = &cx->liveVolatileJitFrameIterators_;
        stack = &cx->liveVolatileJitFrameIterators_.ref();
        prev = *stack;
        *stack = this;
    }
|
@ -133,7 +133,7 @@ BaselineFrame::initForOsr(InterpreterFrame* fp, uint32_t numStackValues)
        *valueSlot(i) = fp->slots()[i];

    if (fp->isDebuggee()) {
        JSContext* cx = GetJSContextFromMainThread();
        JSContext* cx = TlsContext.get();

        // For debuggee frames, update any Debugger.Frame objects for the
        // InterpreterFrame to point to the BaselineFrame.
|
@ -103,7 +103,7 @@ PrepareOsrTempData(JSContext* cx, ICWarmUpCounter_Fallback* stub, BaselineFrame*
    size_t totalSpace = AlignBytes(frameSpace, sizeof(Value)) +
                        AlignBytes(ionOsrTempDataSpace, sizeof(Value));

    IonOsrTempData* info = (IonOsrTempData*)cx->runtime()->getJitRuntime(cx)->allocateOsrTempData(totalSpace);
    IonOsrTempData* info = (IonOsrTempData*)cx->allocateOsrTempData(totalSpace);
    if (!info)
        return nullptr;

@ -152,7 +152,7 @@ DoWarmUpCounterFallbackOSR(JSContext* cx, BaselineFrame* frame, ICWarmUpCounter_
    }

    IonScript* ion = script->ionScript();
    MOZ_ASSERT(cx->runtime()->geckoProfiler.enabled() == ion->hasProfilingInstrumentation());
    MOZ_ASSERT(cx->runtime()->geckoProfiler().enabled() == ion->hasProfilingInstrumentation());
    MOZ_ASSERT(ion->osrPc() == pc);

    JitSpew(JitSpew_BaselineOSR, " OSR possible!");
@ -234,9 +234,9 @@ ICWarmUpCounter_Fallback::Compiler::generateStubCode(MacroAssembler& masm)
    // the frame currently being OSR-ed
    {
        Label checkOk;
        AbsoluteAddress addressOfEnabled(cx->runtime()->geckoProfiler.addressOfEnabled());
        AbsoluteAddress addressOfEnabled(cx->runtime()->geckoProfiler().addressOfEnabled());
        masm.branch32(Assembler::Equal, addressOfEnabled, Imm32(0), &checkOk);
        masm.loadPtr(AbsoluteAddress((void*)&cx->runtime()->jitActivation), scratchReg);
        masm.loadPtr(AbsoluteAddress((void*)&cx->jitActivation), scratchReg);
        masm.loadPtr(Address(scratchReg, JitActivation::offsetOfLastProfilingFrame()), scratchReg);

        // It may be the case that we entered the baseline frame with
@ -2489,7 +2489,7 @@ DoSetPropFallback(JSContext* cx, BaselineFrame* frame, ICSetProp_Fallback* stub_

    RootedPropertyName name(cx);
    if (op == JSOP_SETALIASEDVAR || op == JSOP_INITALIASEDLEXICAL)
        name = EnvironmentCoordinateName(cx->caches.envCoordinateNameCache, script, pc);
        name = EnvironmentCoordinateName(cx->caches().envCoordinateNameCache, script, pc);
    else
        name = script->getName(pc);
    RootedId id(cx, NameToId(name));
|
@ -51,7 +51,7 @@ PCMappingSlotInfo::ToSlotLocation(const StackValue* stackVal)
void
ICStubSpace::freeAllAfterMinorGC(JSRuntime* rt)
{
    rt->gc.freeAllLifoBlocksAfterMinorGC(&allocator_);
    rt->zoneGroupFromMainThread()->freeAllLifoBlocksAfterMinorGC(&allocator_);
}

BaselineScript::BaselineScript(uint32_t prologueOffset, uint32_t epilogueOffset,
@ -160,7 +160,7 @@ EnterBaseline(JSContext* cx, EnterJitData& data)
        data.osrFrame->clearRunningInJit();
    }

    MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
    MOZ_ASSERT(!cx->hasIonReturnOverride());

    // Jit callers wrap primitive constructor return, except for derived
    // class constructors, which are forced to do it themselves.
@ -174,7 +174,7 @@ EnterBaseline(JSContext* cx, EnterJitData& data)
    }

    // Release temporary buffer used for OSR into Ion.
    cx->runtime()->getJitRuntime(cx)->freeOsrTempData();
    cx->freeOsrTempData();

    MOZ_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
    return data.result.isMagic() ? JitExec_Error : JitExec_Ok;
|
@ -13,6 +13,7 @@

#include "jsobjinlines.h"

#include "vm/EnvironmentObject-inl.h"
#include "vm/UnboxedObject-inl.h"

using namespace js;
@ -510,7 +511,7 @@ GetPropIRGenerator::tryAttachWindowProxy(HandleObject obj, ObjOperandId objId, H
    // This must be a WindowProxy for the current Window/global. Else it would
    // be a cross-compartment wrapper and IsWindowProxy returns false for
    // those.
    MOZ_ASSERT(obj->getClass() == cx_->maybeWindowProxyClass());
    MOZ_ASSERT(obj->getClass() == cx_->runtime()->maybeWindowProxyClass());
    MOZ_ASSERT(ToWindowIfWindowProxy(obj) == cx_->global());

    // Now try to do the lookup on the Window (the current global).
|
@ -1226,7 +1226,7 @@ CacheIRCompiler::emitGuardClass()
        clasp = &UnmappedArgumentsObject::class_;
        break;
      case GuardClassKind::WindowProxy:
        clasp = cx_->maybeWindowProxyClass();
        clasp = cx_->runtime()->maybeWindowProxyClass();
        break;
      case GuardClassKind::JSFunction:
        clasp = &JSFunction::class_;
|
@ -283,7 +283,7 @@ CodeGenerator::~CodeGenerator()
    js_delete(scriptCounts_);
}

typedef bool (*StringToNumberFn)(ExclusiveContext*, JSString*, double*);
typedef bool (*StringToNumberFn)(JSContext*, JSString*, double*);
static const VMFunction StringToNumberInfo =
    FunctionInfo<StringToNumberFn>(StringToNumber, "StringToNumber");

@ -940,7 +940,7 @@ CodeGenerator::emitIntToString(Register input, Register output, Label* ool)
    masm.loadPtr(BaseIndex(output, input, ScalePointer), output);
}

typedef JSFlatString* (*IntToStringFn)(ExclusiveContext*, int);
typedef JSFlatString* (*IntToStringFn)(JSContext*, int);
static const VMFunction IntToStringInfo =
    FunctionInfo<IntToStringFn>(Int32ToString<CanGC>, "Int32ToString");

@ -958,7 +958,7 @@ CodeGenerator::visitIntToString(LIntToString* lir)
    masm.bind(ool->rejoin());
}

typedef JSString* (*DoubleToStringFn)(ExclusiveContext*, double);
typedef JSString* (*DoubleToStringFn)(JSContext*, double);
static const VMFunction DoubleToStringInfo =
    FunctionInfo<DoubleToStringFn>(NumberToString<CanGC>, "NumberToString");

@ -3387,7 +3387,7 @@ CodeGenerator::visitMaybeToDoubleElement(LMaybeToDoubleElement* lir)
    masm.bind(&done);
}

typedef bool (*CopyElementsForWriteFn)(ExclusiveContext*, NativeObject*);
typedef bool (*CopyElementsForWriteFn)(JSContext*, NativeObject*);
static const VMFunction CopyElementsForWriteInfo =
    FunctionInfo<CopyElementsForWriteFn>(NativeObject::CopyElementsForWrite,
                                         "NativeObject::CopyElementsForWrite");
@ -7331,7 +7331,7 @@ CodeGenerator::visitIsNullOrLikeUndefinedAndBranchT(LIsNullOrLikeUndefinedAndBra
    }
}

typedef JSString* (*ConcatStringsFn)(ExclusiveContext*, HandleString, HandleString);
typedef JSString* (*ConcatStringsFn)(JSContext*, HandleString, HandleString);
static const VMFunction ConcatStringsInfo =
    FunctionInfo<ConcatStringsFn>(ConcatStrings<CanGC>, "ConcatStrings");

@ -9937,7 +9937,7 @@ CodeGenerator::link(JSContext* cx, CompilerConstraintList* constraints)
    for (size_t i = 0; i < graph.numConstants(); i++) {
        const Value& v = vp[i];
        if (v.isObject() && IsInsideNursery(&v.toObject())) {
            cx->runtime()->gc.storeBuffer.putWholeCell(script);
            cx->zone()->group()->storeBuffer().putWholeCell(script);
            break;
        }
    }
|
@ -29,49 +29,42 @@ CompileRuntime::onMainThread()
    return js::CurrentThreadCanAccessRuntime(runtime());
}

js::PerThreadData*
CompileRuntime::mainThread()
{
    MOZ_ASSERT(onMainThread());
    return &runtime()->mainThread;
}

const void*
CompileRuntime::addressOfJitTop()
{
    return &runtime()->jitTop;
    return &runtime()->unsafeContextFromAnyThread()->jitTop;
}

const void*
CompileRuntime::addressOfJitActivation()
{
    return &runtime()->jitActivation;
    return &runtime()->unsafeContextFromAnyThread()->jitActivation;
}

const void*
CompileRuntime::addressOfProfilingActivation()
{
    return (const void*) &runtime()->profilingActivation_;
    return (const void*) &runtime()->unsafeContextFromAnyThread()->profilingActivation_;
}

const void*
CompileRuntime::addressOfJitStackLimit()
{
    return runtime()->addressOfJitStackLimit();
    return &runtime()->unsafeContextFromAnyThread()->jitStackLimit;
}

#ifdef DEBUG
const void*
CompileRuntime::addressOfIonBailAfter()
{
    return runtime()->addressOfIonBailAfter();
    return runtime()->zoneGroupFromAnyThread()->addressOfIonBailAfter();
}
#endif

const void*
CompileRuntime::addressOfActivation()
{
    return runtime()->addressOfActivation();
    return &runtime()->unsafeContextFromAnyThread()->activation_;
}

#ifdef JS_GC_ZEAL
@ -85,7 +78,7 @@ CompileRuntime::addressOfGCZealModeBits()
const void*
CompileRuntime::addressOfInterruptUint32()
{
    return runtime()->addressOfInterruptUint32();
    return &runtime()->unsafeContextFromAnyThread()->interrupt_;
}

const void*
@ -103,7 +96,7 @@ CompileRuntime::jitRuntime()
GeckoProfiler&
CompileRuntime::geckoProfiler()
{
    return runtime()->geckoProfiler;
    return runtime()->geckoProfiler();
}

bool
@ -121,7 +114,7 @@ CompileRuntime::hadOutOfMemory()
bool
CompileRuntime::profilingScripts()
{
    return runtime()->profilingScripts;
    return runtime()->zoneGroupFromAnyThread()->profilingScripts;
}

const JSAtomState&
@ -178,14 +171,14 @@ CompileRuntime::DOMcallbacks()
const Nursery&
CompileRuntime::gcNursery()
{
    return runtime()->gc.nursery;
    return runtime()->zoneGroupFromAnyThread()->nursery();
}

void
CompileRuntime::setMinorGCShouldCancelIonCompilations()
{
    MOZ_ASSERT(onMainThread());
    runtime()->gc.storeBuffer.setShouldCancelIonCompilations();
    runtime()->zoneGroupFromAnyThread()->storeBuffer().setShouldCancelIonCompilations();
}

bool
@ -304,7 +297,7 @@ JitCompileOptions::JitCompileOptions()
JitCompileOptions::JitCompileOptions(JSContext* cx)
{
    cloneSingletons_ = cx->compartment()->creationOptions().cloneSingletons();
    profilerSlowAssertionsEnabled_ = cx->runtime()->geckoProfiler.enabled() &&
                                     cx->runtime()->geckoProfiler.slowAssertionsEnabled();
    profilerSlowAssertionsEnabled_ = cx->runtime()->geckoProfiler().enabled() &&
                                     cx->runtime()->geckoProfiler().slowAssertionsEnabled();
    offThreadCompilationAvailable_ = OffThreadCompilationAvailable(cx);
}
|
@ -29,8 +29,6 @@ class CompileRuntime

    bool onMainThread();

    js::PerThreadData* mainThread();

    // &runtime()->jitTop
    const void* addressOfJitTop();

|
@ -357,7 +357,8 @@ ExecutableAllocator::reprotectPool(JSRuntime* rt, ExecutablePool* pool, Protecti
{
#ifdef NON_WRITABLE_JIT_CODE
    // Don't race with reprotectAll called from the signal handler.
    MOZ_ASSERT(rt->jitRuntime()->preventBackedgePatching() || rt->handlingJitInterrupt());
    MOZ_ASSERT(rt->jitRuntime()->preventBackedgePatching() ||
               rt->unsafeContextFromAnyThread()->handlingJitInterrupt());

    char* start = pool->m_allocation.pages;
    if (!reprotectRegion(start, pool->m_freePtr - start, protection))
|
@ -108,17 +108,6 @@ JitContext::JitContext(JSContext* cx, TempAllocator* temp)
    SetJitContext(this);
}

JitContext::JitContext(ExclusiveContext* cx, TempAllocator* temp)
  : cx(nullptr),
    temp(temp),
    runtime(CompileRuntime::get(cx->runtime_)),
    compartment(nullptr),
    prev_(CurrentJitContext()),
    assemblerCount_(0)
{
    SetJitContext(this);
}

JitContext::JitContext(CompileRuntime* rt, CompileCompartment* comp, TempAllocator* temp)
  : cx(nullptr),
    temp(temp),
@ -206,22 +195,18 @@ JitRuntime::JitRuntime(JSRuntime* rt)
    debugTrapHandler_(nullptr),
    baselineDebugModeOSRHandler_(nullptr),
    functionWrappers_(nullptr),
    osrTempData_(nullptr),
    preventBackedgePatching_(false),
    backedgeTarget_(BackedgeLoopHeader),
    ionReturnOverride_(MagicValue(JS_ARG_POISON)),
    jitcodeGlobalTable_(nullptr)
{
}

JitRuntime::~JitRuntime()
{
    js_delete(functionWrappers_);
    freeOsrTempData();
    js_delete(functionWrappers_.ref());

    // By this point, the jitcode global table should be empty.
    MOZ_ASSERT_IF(jitcodeGlobalTable_, jitcodeGlobalTable_->empty());
    js_delete(jitcodeGlobalTable_);
    js_delete(jitcodeGlobalTable_.ref());
}

bool
@ -260,17 +245,18 @@ JitRuntime::initialize(JSContext* cx, AutoLockForExclusiveAccess& lock)
    JitSpew(JitSpew_Codegen, "# Emitting bailout tables");

    // Initialize some Ion-only stubs that require floating-point support.
    if (!bailoutTables_.reserve(FrameSizeClass::ClassLimit().classId()))
    BailoutTableVector& bailoutTables = bailoutTables_.writeRef();
    if (!bailoutTables.reserve(FrameSizeClass::ClassLimit().classId()))
        return false;

    for (uint32_t id = 0;; id++) {
        FrameSizeClass class_ = FrameSizeClass::FromClass(id);
        if (class_ == FrameSizeClass::ClassLimit())
            break;
        bailoutTables_.infallibleAppend((JitCode*)nullptr);
        bailoutTables.infallibleAppend((JitCode*)nullptr);
        JitSpew(JitSpew_Codegen, "# Bailout table");
        bailoutTables_[id] = generateBailoutTable(cx, id);
        if (!bailoutTables_[id])
        bailoutTables[id] = generateBailoutTable(cx, id);
        if (!bailoutTables[id])
            return false;
    }

@ -286,7 +272,7 @@ JitRuntime::initialize(JSContext* cx, AutoLockForExclusiveAccess& lock)
    }

    JitSpew(JitSpew_Codegen, "# Emitting sequential arguments rectifier");
    argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_);
    argumentsRectifier_ = generateArgumentsRectifier(cx, &argumentsRectifierReturnAddr_.writeRef());
    if (!argumentsRectifier_)
        return false;

@ -368,33 +354,33 @@ JitRuntime::debugTrapHandler(JSContext* cx)
}

uint8_t*
JitRuntime::allocateOsrTempData(size_t size)
JSContext::allocateOsrTempData(size_t size)
{
    osrTempData_ = (uint8_t*)js_realloc(osrTempData_, size);
    return osrTempData_;
}

void
JitRuntime::freeOsrTempData()
JSContext::freeOsrTempData()
{
    js_free(osrTempData_);
    osrTempData_ = nullptr;
}
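
The OSR temp-data helpers follow a grow-or-reuse scratch-buffer pattern. A standalone sketch of the same shape (osrTempData_ below is a file-local stand-in for the JSContext member; note that, as in the original, the old buffer would leak if realloc failed):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    static uint8_t* osrTempData_ = nullptr;

    uint8_t* allocateOsrTempData(size_t size) {
        // realloc reuses or grows the previous buffer; callers copy their
        // data in again each time, so stale contents are never relied upon.
        osrTempData_ = static_cast<uint8_t*>(realloc(osrTempData_, size));
        return osrTempData_;   // nullptr on OOM, as in the original
    }

    void freeOsrTempData() {
        free(osrTempData_);
        osrTempData_ = nullptr;  // safe to allocate again afterwards
    }

    int main() {
        if (uint8_t* buf = allocateOsrTempData(64))
            buf[0] = 1;          // use the scratch space
        freeOsrTempData();
    }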

void
JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)
JitZoneGroup::patchIonBackedges(JSContext* cx, BackedgeTarget target)
{
    if (target == BackedgeLoopHeader) {
        // We must be on the main thread. The caller must use
        // AutoPreventBackedgePatching to ensure we don't reenter.
        MOZ_ASSERT(preventBackedgePatching_);
        MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
        MOZ_ASSERT(cx->runtime()->jitRuntime()->preventBackedgePatching());
        MOZ_ASSERT(CurrentThreadCanAccessRuntime(cx->runtime()));
    } else {
        // We must be called from InterruptRunningJitCode, or a signal handler
        // triggered there. rt->handlingJitInterrupt() ensures we can't reenter
        // this code.
        MOZ_ASSERT(!preventBackedgePatching_);
        MOZ_ASSERT(rt->handlingJitInterrupt());
        MOZ_ASSERT(!cx->runtime()->jitRuntime()->preventBackedgePatching());
        MOZ_ASSERT(cx->handlingJitInterrupt());
    }

    // Do nothing if we know all backedges are already jumping to `target`.
@ -403,12 +389,12 @@ JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)

    backedgeTarget_ = target;

    backedgeExecAlloc_.makeAllWritable();
    cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllWritable();

    // Patch all loop backedges in Ion code so that they either jump to the
    // normal loop header or to an interrupt handler each time they run.
    for (InlineListIterator<PatchableBackedge> iter(backedgeList_.begin());
         iter != backedgeList_.end();
    for (InlineListIterator<PatchableBackedge> iter(backedgeList().begin());
         iter != backedgeList().end();
         iter++)
    {
        PatchableBackedge* patchableBackedge = *iter;
@ -418,9 +404,14 @@ JitRuntime::patchIonBackedges(JSRuntime* rt, BackedgeTarget target)
        PatchBackedge(patchableBackedge->backedge, patchableBackedge->interruptCheck, target);
    }

    backedgeExecAlloc_.makeAllExecutable();
    cx->runtime()->jitRuntime()->backedgeExecAlloc().makeAllExecutable();
}
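
Conceptually, backedge patching keeps every compiled loop's closing jump pointing at one of two targets, and an interrupt request flips all of them at once. A simplified standalone sketch of that scheme (the types and names below are illustrative only; the real engine rewrites jump instructions in executable memory):

    #include <vector>

    enum class Target { LoopHeader, InterruptCheck };

    struct Backedge {
        Target current = Target::LoopHeader;  // where the loop's jump points now
    };

    struct BackedgeList {
        Target target = Target::LoopHeader;
        std::vector<Backedge*> backedges;

        void patchAll(Target t) {
            if (target == t)
                return;              // already pointing at |t|; nothing to do
            target = t;
            for (Backedge* b : backedges)
                b->current = t;      // real engine: patch a jump in JIT code
        }
    };

    int main() {
        Backedge a, b;
        BackedgeList list;
        list.backedges = { &a, &b };
        list.patchAll(Target::InterruptCheck);  // interrupt requested
        list.patchAll(Target::LoopHeader);      // interrupt handled; restore
    }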
|
||||
|
||||
JitZoneGroup::JitZoneGroup(ZoneGroup* group)
|
||||
: backedgeTarget_(group, BackedgeLoopHeader),
|
||||
backedgeList_(group)
|
||||
{}
|
||||
|
||||
JitCompartment::JitCompartment()
|
||||
: stubCodes_(nullptr),
|
||||
cacheIRStubCodes_(nullptr),
|
||||
@ -602,7 +593,7 @@ jit::LazyLinkTopActivation(JSContext* cx)
|
||||
/* static */ void
|
||||
JitRuntime::Trace(JSTracer* trc, AutoLockForExclusiveAccess& lock)
|
||||
{
|
||||
MOZ_ASSERT(!trc->runtime()->isHeapMinorCollecting());
|
||||
MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());
|
||||
|
||||
// Shared stubs are allocated in the atoms compartment, so do not iterate
|
||||
// them after the atoms heap after it has been "finished."
|
||||
@ -619,7 +610,7 @@ JitRuntime::Trace(JSTracer* trc, AutoLockForExclusiveAccess& lock)
|
||||
/* static */ void
|
||||
JitRuntime::TraceJitcodeGlobalTable(JSTracer* trc)
|
||||
{
|
||||
if (trc->runtime()->geckoProfiler.enabled() &&
|
||||
if (trc->runtime()->geckoProfiler().enabled() &&
|
||||
trc->runtime()->hasJitRuntime() &&
|
||||
trc->runtime()->jitRuntime()->hasJitcodeGlobalTable())
|
||||
{
|
||||
@ -649,7 +640,7 @@ void
|
||||
JitCompartment::trace(JSTracer* trc, JSCompartment* compartment)
|
||||
{
|
||||
// Free temporary OSR buffer.
|
||||
trc->runtime()->jitRuntime()->freeOsrTempData();
|
||||
trc->runtime()->contextFromMainThread()->freeOsrTempData();
|
||||
}
|
||||
|
||||
void
|
||||
@ -729,7 +720,7 @@ JitCode*
|
||||
JitRuntime::getBailoutTable(const FrameSizeClass& frameClass) const
|
||||
{
|
||||
MOZ_ASSERT(frameClass != FrameSizeClass::None());
|
||||
return bailoutTables_[frameClass.classId()];
|
||||
return bailoutTables_.ref()[frameClass.classId()];
|
||||
}
|
||||
|
||||
JitCode*
|
||||
@ -804,7 +795,7 @@ JitCode::traceChildren(JSTracer* trc)
|
||||
}
|
||||
if (dataRelocTableBytes_) {
|
||||
// If we're moving objects, we need writable JIT code.
|
||||
bool movingObjects = trc->runtime()->isHeapMinorCollecting() || zone()->isGCCompacting();
|
||||
bool movingObjects = JS::CurrentThreadIsHeapMinorCollecting() || zone()->isGCCompacting();
|
||||
MaybeAutoWritableJitCode awjc(this, movingObjects ? Reprotect : DontReprotect);
|
||||
|
||||
uint8_t* start = code_ + dataRelocTableOffset();
|
||||
@ -1107,7 +1098,7 @@ IonScript::copyPatchableBackedges(JSContext* cx, JitCode* code,
|
||||
PatchableBackedgeInfo* backedges,
|
||||
MacroAssembler& masm)
|
||||
{
|
||||
JitRuntime* jrt = cx->runtime()->jitRuntime();
|
||||
JitZoneGroup* jzg = cx->zone()->group()->jitZoneGroup;
|
||||
JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime());
|
||||
|
||||
for (size_t i = 0; i < backedgeEntries_; i++) {
|
||||
@ -1122,12 +1113,12 @@ IonScript::copyPatchableBackedges(JSContext* cx, JitCode* code,
|
||||
|
||||
// Point the backedge to either of its possible targets, matching the
|
||||
// other backedges in the runtime.
|
||||
if (jrt->backedgeTarget() == JitRuntime::BackedgeInterruptCheck)
|
||||
PatchBackedge(backedge, interruptCheck, JitRuntime::BackedgeInterruptCheck);
|
||||
if (jzg->backedgeTarget() == JitZoneGroup::BackedgeInterruptCheck)
|
||||
PatchBackedge(backedge, interruptCheck, JitZoneGroup::BackedgeInterruptCheck);
|
||||
else
|
||||
PatchBackedge(backedge, loopHeader, JitRuntime::BackedgeLoopHeader);
|
||||
PatchBackedge(backedge, loopHeader, JitZoneGroup::BackedgeLoopHeader);
|
||||
|
||||
jrt->addPatchableBackedge(patchableBackedge);
|
||||
jzg->addPatchableBackedge(cx->runtime()->jitRuntime(), patchableBackedge);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1380,10 +1371,10 @@ IonScript::unlinkFromRuntime(FreeOp* fop)
|
||||
// The writes to the executable buffer below may clobber backedge jumps, so
|
||||
// make sure that those backedges are unlinked from the runtime and not
|
||||
// reclobbered with garbage if an interrupt is requested.
|
||||
JitRuntime* jrt = fop->runtime()->jitRuntime();
|
||||
JitZoneGroup* jzg = method()->zone()->group()->jitZoneGroup;
|
||||
JitRuntime::AutoPreventBackedgePatching apbp(fop->runtime());
|
||||
for (size_t i = 0; i < backedgeEntries_; i++)
|
||||
jrt->removePatchableBackedge(&backedgeList()[i]);
|
||||
jzg->removePatchableBackedge(fop->runtime()->jitRuntime(), &backedgeList()[i]);
|
||||
|
||||
// Clear the list of backedges, so that this method is idempotent. It is
|
||||
// called during destruction, and may be additionally called when the
|
||||
@ -2174,7 +2165,7 @@ TrackPropertiesForSingletonScopes(JSContext* cx, JSScript* script, BaselineFrame
|
||||
static void
|
||||
TrackIonAbort(JSContext* cx, JSScript* script, jsbytecode* pc, const char* message)
|
||||
{
|
||||
if (!cx->runtime()->jitRuntime()->isOptimizationTrackingEnabled(cx->runtime()))
|
||||
if (!cx->runtime()->jitRuntime()->isOptimizationTrackingEnabled(cx->zone()->group()))
|
||||
return;
|
||||
|
||||
// Only bother tracking aborts of functions we're attempting to
|
||||
@ -2269,7 +2260,7 @@ IonCompile(JSContext* cx, JSScript* script,
|
||||
if (!builder)
|
||||
return AbortReason::Alloc;
|
||||
|
||||
if (cx->runtime()->gc.storeBuffer.cancelIonCompilations())
|
||||
if (cx->zone()->group()->storeBuffer().cancelIonCompilations())
|
||||
builder->setNotSafeForMinorGC();
|
||||
|
||||
MOZ_ASSERT(recompile == builder->script()->hasIonScript());
|
||||
@ -2909,7 +2900,7 @@ EnterIon(JSContext* cx, EnterJitData& data)
|
||||
/* envChain = */ nullptr, 0, data.result.address());
|
||||
}
|
||||
|
||||
MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
|
||||
MOZ_ASSERT(!cx->hasIonReturnOverride());
|
||||
|
||||
// Jit callers wrap primitive constructor return, except for derived class constructors.
|
||||
if (!data.result.isMagic() && data.constructing &&
|
||||
@ -2920,7 +2911,7 @@ EnterIon(JSContext* cx, EnterJitData& data)
|
||||
}
|
||||
|
||||
// Release temporary buffer used for OSR into Ion.
|
||||
cx->runtime()->getJitRuntime(cx)->freeOsrTempData();
|
||||
cx->freeOsrTempData();
|
||||
|
||||
MOZ_ASSERT_IF(data.result.isMagic(), data.result.isMagic(JS_ION_ERROR));
|
||||
return data.result.isMagic() ? JitExec_Error : JitExec_Ok;
|
||||
@ -3049,7 +3040,7 @@ jit::FastInvoke(JSContext* cx, HandleFunction fun, CallArgs& args)
|
||||
CALL_GENERATED_CODE(enter, jitcode, args.length() + 1, args.array() - 1, /* osrFrame = */nullptr,
|
||||
calleeToken, /* envChain = */ nullptr, 0, result.address());
|
||||
|
||||
MOZ_ASSERT(!cx->runtime()->jitRuntime()->hasIonReturnOverride());
|
||||
MOZ_ASSERT(!cx->hasIonReturnOverride());
|
||||
|
||||
args.rval().set(result);
|
||||
|
||||
@ -3328,7 +3319,7 @@ jit::Invalidate(JSContext* cx, JSScript* script, bool resetUses, bool cancelOffT
|
||||
{
|
||||
MOZ_ASSERT(script->hasIonScript());
|
||||
|
||||
if (cx->runtime()->geckoProfiler.enabled()) {
|
||||
if (cx->runtime()->geckoProfiler().enabled()) {
|
||||
// Register invalidation with profiler.
|
||||
// Format of event payload string:
|
// "<filename>:<lineno>"
@ -3343,7 +3334,7 @@ jit::Invalidate(JSContext* cx, JSScript* script, bool resetUses, bool cancelOffT

// Ignore the event on allocation failure.
if (buf) {
cx->runtime()->geckoProfiler.markEvent(buf);
cx->runtime()->geckoProfiler().markEvent(buf);
JS_smprintf_free(buf);
}
}
@ -3399,13 +3390,13 @@ jit::ForbidCompilation(JSContext* cx, JSScript* script)
}

AutoFlushICache*
PerThreadData::autoFlushICache() const
JSContext::autoFlushICache() const
{
return autoFlushICache_;
}

void
PerThreadData::setAutoFlushICache(AutoFlushICache* afc)
JSContext::setAutoFlushICache(AutoFlushICache* afc)
{
autoFlushICache_ = afc;
}
@ -3419,7 +3410,7 @@ void
AutoFlushICache::setRange(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
AutoFlushICache* afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
MOZ_ASSERT(afc);
MOZ_ASSERT(!afc->start_);
JitSpewCont(JitSpew_CacheFlush, "(%" PRIxPTR " %" PRIxSIZE "):", start, len);
@ -3445,15 +3436,15 @@ AutoFlushICache::setRange(uintptr_t start, size_t len)
// indicate a program fault but it might indicate a lost opportunity to merge cache
// flushing. It can be corrected by wrapping the call in an AutoFlushICache to context.
//
// Note this can be called without TLS PerThreadData defined so this case needs
// Note this can be called without TLS JSContext defined so this case needs
// to be guarded against. E.g. when patching instructions from the exception
// handler on MacOS running the ARM simulator.
void
AutoFlushICache::flush(uintptr_t start, size_t len)
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
PerThreadData* pt = TlsPerThreadData.get();
AutoFlushICache* afc = pt ? pt->PerThreadData::autoFlushICache() : nullptr;
JSContext* cx = TlsContext.get();
AutoFlushICache* afc = cx ? cx->autoFlushICache() : nullptr;
if (!afc) {
JitSpewCont(JitSpew_CacheFlush, "#");
ExecutableAllocator::cacheFlush((void*)start, len);
@ -3479,7 +3470,7 @@ void
AutoFlushICache::setInhibit()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
AutoFlushICache* afc = TlsPerThreadData.get()->PerThreadData::autoFlushICache();
AutoFlushICache* afc = TlsContext.get()->autoFlushICache();
MOZ_ASSERT(afc);
MOZ_ASSERT(afc->start_);
JitSpewCont(JitSpew_CacheFlush, "I");
@ -3513,30 +3504,30 @@ AutoFlushICache::AutoFlushICache(const char* nonce, bool inhibit)
#endif
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
PerThreadData* pt = TlsPerThreadData.get();
AutoFlushICache* afc = pt->PerThreadData::autoFlushICache();
JSContext* cx = TlsContext.get();
AutoFlushICache* afc = cx->autoFlushICache();
if (afc)
JitSpew(JitSpew_CacheFlush, "<%s,%s%s ", nonce, afc->name_, inhibit ? " I" : "");
else
JitSpewCont(JitSpew_CacheFlush, "<%s%s ", nonce, inhibit ? " I" : "");

prev_ = afc;
pt->PerThreadData::setAutoFlushICache(this);
cx->setAutoFlushICache(this);
#endif
}

AutoFlushICache::~AutoFlushICache()
{
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
PerThreadData* pt = TlsPerThreadData.get();
MOZ_ASSERT(pt->PerThreadData::autoFlushICache() == this);
JSContext* cx = TlsContext.get();
MOZ_ASSERT(cx->autoFlushICache() == this);

if (!inhibit_ && start_)
ExecutableAllocator::cacheFlush((void*)start_, size_t(stop_ - start_));

JitSpewCont(JitSpew_CacheFlush, "%s%s>", name_, start_ ? "" : " U");
JitSpewFin(JitSpew_CacheFlush);
pt->PerThreadData::setAutoFlushICache(prev_);
cx->setAutoFlushICache(prev_);
#endif
}

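These hunks move the AutoFlushICache slot from PerThreadData to JSContext, but the RAII discipline is unchanged: the innermost scope publishes itself through thread-local state so nested code widens one pending flush range instead of issuing many small icache flushes. A minimal sketch of that pattern, with illustrative names (FlushScope, tlsFlushScope, and cacheFlush are not SpiderMonkey's):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Stand-in for the real instruction-cache flush primitive.
static void cacheFlush(void* addr, size_t len) {
    std::printf("flush %p (%zu bytes)\n", addr, len);
}

class FlushScope;
static thread_local FlushScope* tlsFlushScope = nullptr;  // plays the role of the JSContext slot

class FlushScope {
    FlushScope* prev_;
    uintptr_t start_ = 0;
    uintptr_t stop_ = 0;

  public:
    FlushScope() : prev_(tlsFlushScope) { tlsFlushScope = this; }

    // Merge a code range into the innermost live scope, or flush immediately
    // when no scope is active (the "called without TLS context" case above).
    static void noteRange(uintptr_t start, size_t len) {
        if (FlushScope* scope = tlsFlushScope) {
            scope->start_ = scope->start_ ? std::min(scope->start_, start) : start;
            scope->stop_ = std::max(scope->stop_, start + len);
        } else {
            cacheFlush(reinterpret_cast<void*>(start), len);
        }
    }

    ~FlushScope() {
        if (start_)
            cacheFlush(reinterpret_cast<void*>(start_), size_t(stop_ - start_));
        tlsFlushScope = prev_;  // restore the outer scope, as the destructor above does
    }
};

int main() {
    FlushScope outer;
    FlushScope::noteRange(0x1000, 64);
    FlushScope::noteRange(0x1040, 64);  // merged with the first: one flush at scope exit
}
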
@ -63,7 +63,6 @@ class JitContext
{
public:
JitContext(JSContext* cx, TempAllocator* temp);
JitContext(ExclusiveContext* cx, TempAllocator* temp);
JitContext(CompileRuntime* rt, CompileCompartment* comp, TempAllocator* temp);
JitContext(CompileRuntime* rt, TempAllocator* temp);
explicit JitContext(CompileRuntime* rt);
@ -8432,7 +8432,7 @@ IonBuilder::addTypedArrayLengthAndData(MDefinition* obj,
SharedMem<void*> data = tarr->as<TypedArrayObject>().viewDataEither();
// Bug 979449 - Optimistically embed the elements and use TI to
// invalidate if we move them.
bool isTenured = !tarr->runtimeFromMainThread()->gc.nursery.isInside(data);
bool isTenured = !tarr->zone()->group()->nursery().isInside(data);
if (isTenured && tarr->isSingleton()) {
// The 'data' pointer of TypedArrayObject can change in rare circumstances
// (ArrayBufferObject::changeContents).
@ -8807,7 +8807,7 @@ IonBuilder::setElemTryTypedStatic(bool* emitted, MDefinition* object,
return Ok();

SharedMem<void*> viewData = tarrObj->as<TypedArrayObject>().viewDataEither();
if (tarrObj->runtimeFromMainThread()->gc.nursery.isInside(viewData))
if (tarrObj->zone()->group()->nursery().isInside(viewData))
return Ok();

Scalar::Type viewType = tarrObj->as<TypedArrayObject>().type();
@ -945,7 +945,7 @@ class IonBuilder

TraceLoggerThread *traceLogger() {
// Currently ionbuilder only runs on the main thread.
return TraceLoggerForMainThread(compartment->runtime()->mainThread()->runtimeFromMainThread());
return TraceLoggerForMainThread(compartment->runtime());
}

void actionableAbortLocationAndMessage(JSScript** abortScript, jsbytecode** abortPc,
@ -315,7 +315,7 @@ IonCacheIRCompiler::callVM(MacroAssembler& masm, const VMFunction& fun)
{
MOZ_ASSERT(calledPrepareVMCall_);

JitCode* code = cx_->jitRuntime()->getVMWrapper(fun);
JitCode* code = cx_->runtime()->jitRuntime()->getVMWrapper(fun);
if (!code)
return false;

@ -380,7 +380,7 @@ JitCode*
IonCacheIRCompiler::compile()
{
masm.setFramePushed(ionScript_->frameSize());
if (cx_->geckoProfiler.enabled())
if (cx_->runtime()->geckoProfiler().enabled())
masm.enableProfilingInstrumentation();

do {
@ -66,7 +66,7 @@ class JitcodeGlobalTable;
// if signal handlers are being used to implement interrupts.
class PatchableBackedge : public InlineListNode<PatchableBackedge>
{
friend class JitRuntime;
friend class JitZoneGroup;

CodeLocationJump backedge;
CodeLocationLabel loopHeader;
@ -82,111 +82,77 @@ class PatchableBackedge : public InlineListNode<PatchableBackedge>

class JitRuntime
{
public:
enum BackedgeTarget {
BackedgeLoopHeader,
BackedgeInterruptCheck
};

private:
friend class JitCompartment;

// Executable allocator for all code except wasm code and Ion code with
// patchable backedges (see below).
ExecutableAllocator execAlloc_;
UnprotectedData<ExecutableAllocator> execAlloc_;

// Executable allocator for Ion scripts with patchable backedges.
ExecutableAllocator backedgeExecAlloc_;
UnprotectedData<ExecutableAllocator> backedgeExecAlloc_;

// Shared exception-handler tail.
JitCode* exceptionTail_;
ExclusiveAccessLockWriteOnceData<JitCode*> exceptionTail_;

// Shared post-bailout-handler tail.
JitCode* bailoutTail_;
ExclusiveAccessLockWriteOnceData<JitCode*> bailoutTail_;

// Shared profiler exit frame tail.
JitCode* profilerExitFrameTail_;
ExclusiveAccessLockWriteOnceData<JitCode*> profilerExitFrameTail_;

// Trampoline for entering JIT code. Contains OSR prologue.
JitCode* enterJIT_;
ExclusiveAccessLockWriteOnceData<JitCode*> enterJIT_;

// Trampoline for entering baseline JIT code.
JitCode* enterBaselineJIT_;
ExclusiveAccessLockWriteOnceData<JitCode*> enterBaselineJIT_;

// Vector mapping frame class sizes to bailout tables.
Vector<JitCode*, 4, SystemAllocPolicy> bailoutTables_;
typedef Vector<JitCode*, 4, SystemAllocPolicy> BailoutTableVector;
ExclusiveAccessLockWriteOnceData<BailoutTableVector> bailoutTables_;

// Generic bailout table; used if the bailout table overflows.
JitCode* bailoutHandler_;
ExclusiveAccessLockWriteOnceData<JitCode*> bailoutHandler_;

// Argument-rectifying thunk, in the case of insufficient arguments passed
// to a function call site.
JitCode* argumentsRectifier_;
void* argumentsRectifierReturnAddr_;
ExclusiveAccessLockWriteOnceData<JitCode*> argumentsRectifier_;
ExclusiveAccessLockWriteOnceData<void*> argumentsRectifierReturnAddr_;

// Thunk that invalidates an (Ion compiled) caller on the Ion stack.
JitCode* invalidator_;
ExclusiveAccessLockWriteOnceData<JitCode*> invalidator_;

// Thunk that calls the GC pre barrier.
JitCode* valuePreBarrier_;
JitCode* stringPreBarrier_;
JitCode* objectPreBarrier_;
JitCode* shapePreBarrier_;
JitCode* objectGroupPreBarrier_;
ExclusiveAccessLockWriteOnceData<JitCode*> valuePreBarrier_;
ExclusiveAccessLockWriteOnceData<JitCode*> stringPreBarrier_;
ExclusiveAccessLockWriteOnceData<JitCode*> objectPreBarrier_;
ExclusiveAccessLockWriteOnceData<JitCode*> shapePreBarrier_;
ExclusiveAccessLockWriteOnceData<JitCode*> objectGroupPreBarrier_;

// Thunk to call malloc/free.
JitCode* mallocStub_;
JitCode* freeStub_;
ExclusiveAccessLockWriteOnceData<JitCode*> mallocStub_;
ExclusiveAccessLockWriteOnceData<JitCode*> freeStub_;

// Thunk called to finish compilation of an IonScript.
JitCode* lazyLinkStub_;
ExclusiveAccessLockWriteOnceData<JitCode*> lazyLinkStub_;

// Thunk used by the debugger for breakpoint and step mode.
JitCode* debugTrapHandler_;
ExclusiveAccessLockWriteOnceData<JitCode*> debugTrapHandler_;

// Thunk used to fix up on-stack recompile of baseline scripts.
JitCode* baselineDebugModeOSRHandler_;
void* baselineDebugModeOSRHandlerNoFrameRegPopAddr_;
ExclusiveAccessLockWriteOnceData<JitCode*> baselineDebugModeOSRHandler_;
ExclusiveAccessLockWriteOnceData<void*> baselineDebugModeOSRHandlerNoFrameRegPopAddr_;

// Map VMFunction addresses to the JitCode of the wrapper.
using VMWrapperMap = HashMap<const VMFunction*, JitCode*>;
VMWrapperMap* functionWrappers_;

// Buffer for OSR from baseline to Ion. To avoid holding on to this for
// too long, it's also freed in JitCompartment::mark and in EnterBaseline
// (after returning from JIT code).
uint8_t* osrTempData_;
ExclusiveAccessLockWriteOnceData<VMWrapperMap*> functionWrappers_;

// If true, the signal handler to interrupt Ion code should not attempt to
// patch backedges, as we're busy modifying data structures.
// patch backedges, as some thread is busy modifying data structures.
mozilla::Atomic<bool> preventBackedgePatching_;

// Whether patchable backedges currently jump to the loop header or the
// interrupt check.
BackedgeTarget backedgeTarget_;

// List of all backedges in all Ion code. The backedge edge list is accessed
// asynchronously when the main thread is paused and preventBackedgePatching_
// is false. Thus, the list must only be mutated while preventBackedgePatching_
// is true.
InlineList<PatchableBackedge> backedgeList_;

// In certain cases, we want to optimize certain opcodes to typed instructions,
// to avoid carrying an extra register to feed into an unbox. Unfortunately,
// that's not always possible. For example, a GetPropertyCacheT could return a
// typed double, but if it takes its out-of-line path, it could return an
// object, and trigger invalidation. The invalidation bailout will consider the
// return value to be a double, and create a garbage Value.
//
// To allow the GetPropertyCacheT optimization, we allow the ability for
// GetPropertyCache to override the return value at the top of the stack - the
// value that will be temporarily corrupt. This special override value is set
// only in callVM() targets that are about to return *and* have invalidated
// their callee.
js::Value ionReturnOverride_;

// Global table of jitcode native address => bytecode address mappings.
JitcodeGlobalTable* jitcodeGlobalTable_;
UnprotectedData<JitcodeGlobalTable*> jitcodeGlobalTable_;

private:
JitCode* generateLazyLinkStub(JSContext* cx);
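The mechanical change in this hunk is that every JitRuntime field now sits behind a checked-access template (UnprotectedData, ExclusiveAccessLockWriteOnceData) whose ref() accessor can assert the thread-access policy instead of relying on convention. A simplified sketch of the idea; ProtectedData and CheckSameThread are illustrative names, not the real templates:

#include <cassert>
#include <thread>
#include <utility>

// Debug policy: whichever thread touches the datum first becomes its owner.
struct CheckSameThread {
    static void check(std::thread::id& owner) {
        if (owner == std::thread::id())
            owner = std::this_thread::get_id();
        assert(owner == std::this_thread::get_id());
    }
};

template <typename Check, typename T>
class ProtectedData {
    std::thread::id owner_;
    T value_;

  public:
    template <typename... Args>
    explicit ProtectedData(Args&&... args) : value_(std::forward<Args>(args)...) {}

    // All access funnels through ref(), so every use site is policed; this is
    // why the diff rewrites accessors as execAlloc_.ref() and friends.
    T& ref() {
        Check::check(owner_);
        return value_;
    }
};

// Usage mirroring the accessors above:
ProtectedData<CheckSameThread, int> counter{0};
int& counterRef() { return counter.ref(); }
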
@ -219,19 +185,16 @@ class JitRuntime
~JitRuntime();
MOZ_MUST_USE bool initialize(JSContext* cx, js::AutoLockForExclusiveAccess& lock);

uint8_t* allocateOsrTempData(size_t size);
void freeOsrTempData();

static void Trace(JSTracer* trc, js::AutoLockForExclusiveAccess& lock);
static void TraceJitcodeGlobalTable(JSTracer* trc);
static MOZ_MUST_USE bool MarkJitcodeGlobalTableIteratively(GCMarker* marker);
static void SweepJitcodeGlobalTable(JSRuntime* rt);

ExecutableAllocator& execAlloc() {
return execAlloc_;
return execAlloc_.ref();
}
ExecutableAllocator& backedgeExecAlloc() {
return backedgeExecAlloc_;
return backedgeExecAlloc_.ref();
}

class AutoPreventBackedgePatching
@ -248,7 +211,6 @@ class JitRuntime
jrt_(jrt),
prev_(false) // silence GCC warning
{
MOZ_ASSERT(CurrentThreadCanAccessRuntime(rt));
if (jrt_) {
prev_ = jrt_->preventBackedgePatching_;
jrt_->preventBackedgePatching_ = true;
@ -269,19 +231,6 @@ class JitRuntime
bool preventBackedgePatching() const {
return preventBackedgePatching_;
}
BackedgeTarget backedgeTarget() const {
return backedgeTarget_;
}
void addPatchableBackedge(PatchableBackedge* backedge) {
MOZ_ASSERT(preventBackedgePatching_);
backedgeList_.pushFront(backedge);
}
void removePatchableBackedge(PatchableBackedge* backedge) {
MOZ_ASSERT(preventBackedgePatching_);
backedgeList_.remove(backedge);
}

void patchIonBackedges(JSRuntime* rt, BackedgeTarget target);

JitCode* getVMWrapper(const VMFunction& f) const;
JitCode* debugTrapHandler(JSContext* cx);
@ -349,20 +298,6 @@ class JitRuntime
return lazyLinkStub_;
}

bool hasIonReturnOverride() const {
return !ionReturnOverride_.isMagic(JS_ARG_POISON);
}
js::Value takeIonReturnOverride() {
js::Value v = ionReturnOverride_;
ionReturnOverride_ = js::MagicValue(JS_ARG_POISON);
return v;
}
void setIonReturnOverride(const js::Value& v) {
MOZ_ASSERT(!hasIonReturnOverride());
MOZ_ASSERT(!v.isMagic());
ionReturnOverride_ = v;
}

bool hasJitcodeGlobalTable() const {
return jitcodeGlobalTable_ != nullptr;
}
@ -373,14 +308,52 @@ class JitRuntime
}

bool isProfilerInstrumentationEnabled(JSRuntime* rt) {
return rt->geckoProfiler.enabled();
return rt->geckoProfiler().enabled();
}

bool isOptimizationTrackingEnabled(JSRuntime* rt) {
return isProfilerInstrumentationEnabled(rt);
bool isOptimizationTrackingEnabled(ZoneGroup* group) {
return isProfilerInstrumentationEnabled(group->runtime);
}
};

class JitZoneGroup
{
public:
enum BackedgeTarget {
BackedgeLoopHeader,
BackedgeInterruptCheck
};

private:
// Whether patchable backedges currently jump to the loop header or the
// interrupt check.
ZoneGroupData<BackedgeTarget> backedgeTarget_;

// List of all backedges in all Ion code. The backedge edge list is accessed
// asynchronously when the main thread is paused and preventBackedgePatching_
// is false. Thus, the list must only be mutated while preventBackedgePatching_
// is true.
ZoneGroupData<InlineList<PatchableBackedge>> backedgeList_;
InlineList<PatchableBackedge>& backedgeList() { return backedgeList_.ref(); }

public:
explicit JitZoneGroup(ZoneGroup* group);

BackedgeTarget backedgeTarget() const {
return backedgeTarget_;
}
void addPatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
MOZ_ASSERT(jrt->preventBackedgePatching());
backedgeList().pushFront(backedge);
}
void removePatchableBackedge(JitRuntime* jrt, PatchableBackedge* backedge) {
MOZ_ASSERT(jrt->preventBackedgePatching());
backedgeList().remove(backedge);
}

void patchIonBackedges(JSContext* cx, BackedgeTarget target);
};

enum class CacheKind : uint8_t;
class CacheIRStubInfo;

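JitZoneGroup keeps the protocol the comments describe: the backedge list may be walked asynchronously, so it is only mutated while the runtime-wide atomic preventBackedgePatching_ flag is held. A self-contained sketch of that protocol under illustrative names (BackedgeRegistry and ScopedNoPatching are not the real classes):

#include <atomic>
#include <cassert>
#include <cstdint>
#include <list>

struct Backedge { uintptr_t location; };

class BackedgeRegistry {
    std::atomic<bool> preventPatching_{false};
    std::list<Backedge*> backedges_;  // stands in for InlineList<PatchableBackedge>

  public:
    // RAII guard mirroring JitRuntime::AutoPreventBackedgePatching: while one
    // is alive, the asynchronous patcher must leave the list alone.
    class ScopedNoPatching {
        BackedgeRegistry& reg_;
        bool prev_;
      public:
        explicit ScopedNoPatching(BackedgeRegistry& reg)
          : reg_(reg), prev_(reg.preventPatching_.exchange(true)) {}
        ~ScopedNoPatching() { reg_.preventPatching_.store(prev_); }
    };

    void add(Backedge* b) {
        assert(preventPatching_.load());  // mirrors MOZ_ASSERT(jrt->preventBackedgePatching())
        backedges_.push_front(b);
    }

    // Called from the interrupt path: only traverse when mutation is excluded.
    bool tryPatchAll() {
        if (preventPatching_.load())
            return false;  // a mutator holds the flag; retry later
        for (Backedge* b : backedges_) { (void)b; /* repoint the jump here */ }
        return true;
    }
};
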
@ -675,7 +648,7 @@ class MOZ_STACK_CLASS AutoWritableJitCode
MOZ_CRASH();
}
AutoWritableJitCode(void* addr, size_t size)
: AutoWritableJitCode(TlsPerThreadData.get()->runtimeFromMainThread(), addr, size)
: AutoWritableJitCode(TlsContext.get()->runtime(), addr, size)
{}
explicit AutoWritableJitCode(JitCode* code)
: AutoWritableJitCode(code->runtimeFromMainThread(), code->raw(), code->bufferSize())
@ -107,12 +107,12 @@ JitFrameIterator::JitFrameIterator()
}

JitFrameIterator::JitFrameIterator(JSContext* cx)
: current_(cx->runtime()->jitTop),
: current_(cx->jitTop),
type_(JitFrame_Exit),
returnAddressToFp_(nullptr),
frameSize_(0),
cachedSafepointIndex_(nullptr),
activation_(cx->runtime()->activation()->asJit())
activation_(cx->activation()->asJit())
{
if (activation_->bailoutData()) {
current_ = activation_->bailoutData()->fp();
@ -405,7 +405,7 @@ HandleExceptionIon(JSContext* cx, const InlineFrameIterator& frame, ResumeFromEx
bool shouldBail = Debugger::hasLiveHook(cx->global(), Debugger::OnExceptionUnwind);
RematerializedFrame* rematFrame = nullptr;
if (!shouldBail) {
JitActivation* act = cx->runtime()->activation()->asJit();
JitActivation* act = cx->activation()->asJit();
rematFrame = act->lookupRematerializedFrame(frame.frame().fp(), frame.frameNo());
shouldBail = rematFrame && rematFrame->isDebuggee();
}
@ -768,10 +768,10 @@ struct AutoResetLastProfilerFrameOnReturnFromException
if (!cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
return;

MOZ_ASSERT(cx->runtime()->jitActivation == cx->runtime()->profilingActivation());
MOZ_ASSERT(cx->jitActivation == cx->profilingActivation());

void* lastProfilingFrame = getLastProfilingFrame();
cx->runtime()->jitActivation->setLastProfilingFrame(lastProfilingFrame);
cx->jitActivation->setLastProfilingFrame(lastProfilingFrame);
}

void* getLastProfilingFrame() {
@ -799,7 +799,7 @@ struct AutoResetLastProfilerFrameOnReturnFromException
void
HandleException(ResumeFromException* rfe)
{
JSContext* cx = GetJSContextFromMainThread();
JSContext* cx = TlsContext.get();
TraceLoggerThread* logger = TraceLoggerForMainThread(cx->runtime());

AutoResetLastProfilerFrameOnReturnFromException profFrameReset(cx, rfe);
@ -812,10 +812,10 @@ HandleException(ResumeFromException* rfe)
// This may happen if a callVM function causes an invalidation (setting the
// override), and then fails, bypassing the bailout handlers that would
// otherwise clear the return override.
if (cx->runtime()->jitRuntime()->hasIonReturnOverride())
cx->runtime()->jitRuntime()->takeIonReturnOverride();
if (cx->hasIonReturnOverride())
cx->takeIonReturnOverride();

JitActivation* activation = cx->runtime()->activation()->asJit();
JitActivation* activation = cx->activation()->asJit();

#ifdef CHECK_OSIPOINT_REGISTERS
if (JitOptions.checkOsiPointRegisters)
@ -951,7 +951,7 @@ EnsureBareExitFrame(JSContext* cx, JitFrameLayout* frame)
{
ExitFrameLayout* exitFrame = reinterpret_cast<ExitFrameLayout*>(frame);

if (cx->runtime()->jitTop == (uint8_t*)frame) {
if (cx->jitTop == (uint8_t*)frame) {
// If we already called this function for the current frame, do
// nothing.
MOZ_ASSERT(exitFrame->isBareExit());
@ -964,11 +964,11 @@ EnsureBareExitFrame(JSContext* cx, JitFrameLayout* frame)
++iter;
MOZ_ASSERT(iter.current() == frame, "|frame| must be the top JS frame");

MOZ_ASSERT((uint8_t*)exitFrame->footer() >= cx->runtime()->jitTop,
MOZ_ASSERT((uint8_t*)exitFrame->footer() >= cx->jitTop,
"Must have space for ExitFooterFrame before jitTop");
#endif

cx->runtime()->jitTop = (uint8_t*)frame;
cx->jitTop = (uint8_t*)frame;
*exitFrame->footer()->addressOfJitCode() = ExitFrameLayout::BareToken();
MOZ_ASSERT(exitFrame->isBareExit());
}
@ -1190,7 +1190,7 @@ UpdateIonJSFrameForMinorGC(JSTracer* trc, const JitFrameIterator& frame)
ionScript = frame.ionScriptFromCalleeToken();
}

Nursery& nursery = trc->runtime()->gc.nursery;
Nursery& nursery = ionScript->method()->zone()->group()->nursery();

const SafepointIndex* si = ionScript->getSafepointIndex(frame.returnAddressToFp());
SafepointReader safepoint(ionScript, si);
@ -1526,7 +1526,7 @@ TopmostIonActivationCompartment(JSRuntime* rt)

void UpdateJitActivationsForMinorGC(JSRuntime* rt, JSTracer* trc)
{
MOZ_ASSERT(trc->runtime()->isHeapMinorCollecting());
MOZ_ASSERT(JS::CurrentThreadIsHeapMinorCollecting());
for (JitActivationIterator activations(rt); !activations.done(); ++activations) {
for (JitFrameIterator frames(activations); !frames.done(); ++frames) {
if (frames.type() == JitFrame_IonJS)
@ -1588,13 +1588,13 @@ GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes)
hash = PcScriptCache::Hash(retAddr);

// Lazily initialize the cache. The allocation may safely fail and will not GC.
if (MOZ_UNLIKELY(rt->ionPcScriptCache == nullptr)) {
rt->ionPcScriptCache = (PcScriptCache*)js_malloc(sizeof(struct PcScriptCache));
if (rt->ionPcScriptCache)
rt->ionPcScriptCache->clear(rt->gc.gcNumber());
if (MOZ_UNLIKELY(cx->ionPcScriptCache == nullptr)) {
cx->ionPcScriptCache = (PcScriptCache*)js_malloc(sizeof(struct PcScriptCache));
if (cx->ionPcScriptCache)
cx->ionPcScriptCache->clear(rt->gc.gcNumber());
}

if (rt->ionPcScriptCache && rt->ionPcScriptCache->get(rt, hash, retAddr, scriptRes, pcRes))
if (cx->ionPcScriptCache && cx->ionPcScriptCache->get(rt, hash, retAddr, scriptRes, pcRes))
return;
}

@ -1613,8 +1613,8 @@ GetPcScript(JSContext* cx, JSScript** scriptRes, jsbytecode** pcRes)
*pcRes = pc;

// Add entry to cache.
if (retAddr && rt->ionPcScriptCache)
rt->ionPcScriptCache->add(hash, retAddr, pc, *scriptRes);
if (retAddr && cx->ionPcScriptCache)
cx->ionPcScriptCache->add(hash, retAddr, pc, *scriptRes);
}

uint32_t
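GetPcScript above moves the return-address-to-script cache from the runtime onto the per-thread JSContext while keeping its lazy, fallible setup. A sketch of that pattern; ReturnAddrCache, Entry, and Context are illustrative stand-ins rather than the real PcScriptCache:

#include <cstdint>
#include <cstdlib>
#include <cstring>

struct Entry { uint8_t* retAddr; uint32_t payload; };

struct ReturnAddrCache {
    static constexpr size_t kSize = 73;  // arbitrary small table for the sketch
    uint32_t gcNumber;
    Entry entries[kSize];

    void clear(uint32_t gcNum) {
        std::memset(entries, 0, sizeof(entries));
        gcNumber = gcNum;  // entries go stale once a GC can move scripts
    }
    static uint32_t hash(uint8_t* addr) {
        return uint32_t((reinterpret_cast<uintptr_t>(addr) >> 4) % kSize);
    }
};

struct Context {
    ReturnAddrCache* cache = nullptr;

    // Allocation may fail; callers simply run uncached in that case,
    // mirroring "The allocation may safely fail and will not GC."
    ReturnAddrCache* ensureCache(uint32_t currentGcNumber) {
        if (!cache) {
            cache = static_cast<ReturnAddrCache*>(std::malloc(sizeof(ReturnAddrCache)));
            if (cache)
                cache->clear(currentGcNumber);
        }
        return cache;  // may be null; that is fine
    }
};
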
@ -2306,9 +2306,9 @@ InlineFrameIterator::InlineFrameIterator(JSContext* cx, const JitFrameIterator*
}

InlineFrameIterator::InlineFrameIterator(JSRuntime* rt, const JitFrameIterator* iter)
: calleeTemplate_(rt->contextFromMainThread()),
: calleeTemplate_(TlsContext.get()),
calleeRVA_(),
script_(rt->contextFromMainThread())
script_(TlsContext.get())
{
resetOn(iter);
}
@ -2542,7 +2542,7 @@ InlineFrameIterator::isConstructing() const
{
// Skip the current frame and look at the caller's.
if (more()) {
InlineFrameIterator parent(GetJSContextFromMainThread(), this);
InlineFrameIterator parent(TlsContext.get(), this);
++parent;

// Inlined Getters and Setters are never constructing.
@ -2615,7 +2615,7 @@ JitFrameIterator::dumpBaseline() const
fprintf(stderr, " file %s line %" PRIuSIZE "\n",
script()->filename(), script()->lineno());

JSContext* cx = GetJSContextFromMainThread();
JSContext* cx = TlsContext.get();
RootedScript script(cx);
jsbytecode* pc;
baselineScriptAndPc(script.address(), &pc);
@ -2684,7 +2684,7 @@ InlineFrameIterator::dump() const
else {
if (i - 2 == calleeTemplate()->nargs() && numActualArgs() > calleeTemplate()->nargs()) {
DumpOp d(calleeTemplate()->nargs());
unaliasedForEachActual(GetJSContextFromMainThread(), d, ReadFrame_Overflown, fallback);
unaliasedForEachActual(TlsContext.get(), d, ReadFrame_Overflown, fallback);
}

fprintf(stderr, " slot %d: ", int(i - 2 - calleeTemplate()->nargs()));
@ -2719,7 +2719,7 @@ JitFrameIterator::dump() const
case JitFrame_Bailout:
case JitFrame_IonJS:
{
InlineFrameIterator frames(GetJSContextFromMainThread(), this);
InlineFrameIterator frames(TlsContext.get(), this);
for (;;) {
frames.dump();
if (!frames.more())
@ -2757,17 +2757,17 @@ JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()
if (type_ != JitFrame_IonJS && type_ != JitFrame_BaselineJS)
return true;

JSRuntime* rt = js::TlsPerThreadData.get()->runtimeIfOnOwnerThread();
JSRuntime* rt = TlsContext.get()->runtime();

// Don't verify on non-main-thread.
if (!rt)
if (!CurrentThreadCanAccessRuntime(rt))
return true;

// Don't verify if sampling is being suppressed.
if (!rt->isProfilerSamplingEnabled())
if (!TlsContext.get()->isProfilerSamplingEnabled())
return true;

if (rt->isHeapMinorCollecting())
if (JS::CurrentThreadIsHeapMinorCollecting())
return true;

JitRuntime* jitrt = rt->jitRuntime();
@ -2796,7 +2796,7 @@ JitFrameIterator::verifyReturnAddressUsingNativeToBytecodeMap()

if (type_ == JitFrame_IonJS) {
// Create an InlineFrameIterator here and verify the mapped info against the iterator info.
InlineFrameIterator inlineFrames(GetJSContextFromMainThread(), this);
InlineFrameIterator inlineFrames(TlsContext.get(), this);
for (size_t idx = 0; idx < location.length(); idx++) {
MOZ_ASSERT(idx < location.length());
MOZ_ASSERT_IF(idx < location.length() - 1, inlineFrames.more());
@ -2827,16 +2827,17 @@ JitProfilingFrameIterator::JitProfilingFrameIterator(
{
// If no profilingActivation is live, initialize directly to
// end-of-iteration state.
if (!rt->profilingActivation()) {
JSContext* cx = rt->unsafeContextFromAnyThread();
if (!cx->profilingActivation()) {
type_ = JitFrame_Entry;
fp_ = nullptr;
returnAddressToFp_ = nullptr;
return;
}

MOZ_ASSERT(rt->profilingActivation()->isJit());
MOZ_ASSERT(cx->profilingActivation()->isJit());

JitActivation* act = rt->profilingActivation()->asJit();
JitActivation* act = cx->profilingActivation()->asJit();

// If the top JitActivation has a null lastProfilingFrame, assume that
// it's a trivially empty activation, and initialize directly
@ -2855,7 +2856,7 @@ JitProfilingFrameIterator::JitProfilingFrameIterator(
JitcodeGlobalTable* table = rt->jitRuntime()->getJitcodeGlobalTable();

// Profiler sampling must NOT be suppressed if we are here.
MOZ_ASSERT(rt->isProfilerSamplingEnabled());
MOZ_ASSERT(cx->isProfilerSamplingEnabled());

// Try initializing with sampler pc
if (tryInitWithPC(state.pc))
@ -510,7 +510,7 @@ JitcodeGlobalTable::addEntry(const JitcodeGlobalEntry& entry, JSRuntime* rt)
newEntry->tower_ = newTower;

// Suppress profiler sampling while skiplist is being mutated.
AutoSuppressProfilerSampling suppressSampling(rt);
AutoSuppressProfilerSampling suppressSampling(TlsContext.get());

// Link up entry with forward entries taken from tower.
for (int level = newTower->height() - 1; level >= 0; level--) {
@ -537,7 +537,7 @@ void
JitcodeGlobalTable::removeEntry(JitcodeGlobalEntry& entry, JitcodeGlobalEntry** prevTower,
JSRuntime* rt)
{
MOZ_ASSERT(!rt->isProfilerSamplingEnabled());
MOZ_ASSERT(!TlsContext.get()->isProfilerSamplingEnabled());

// Unlink query entry.
for (int level = entry.tower_->height() - 1; level >= 0; level--) {
@ -716,7 +716,7 @@ JitcodeGlobalTable::verifySkiplist()
void
JitcodeGlobalTable::setAllEntriesAsExpired(JSRuntime* rt)
{
AutoSuppressProfilerSampling suppressSampling(rt);
AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
for (Range r(*this); !r.empty(); r.popFront())
r.front()->setAsExpired();
}
@ -733,9 +733,9 @@ JitcodeGlobalTable::trace(JSTracer* trc)
// Trace all entries unconditionally. This is done during minor collection
// to tenure and update object pointers.

MOZ_ASSERT(trc->runtime()->geckoProfiler.enabled());
MOZ_ASSERT(trc->runtime()->geckoProfiler().enabled());

AutoSuppressProfilerSampling suppressSampling(trc->runtime());
AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
for (Range r(*this); !r.empty(); r.popFront())
r.front()->trace<Unconditionally>(trc);
}
@ -777,14 +777,14 @@ JitcodeGlobalTable::markIteratively(GCMarker* marker)
// The approach above obviates the need for read barriers. The assumption
// above is checked in JitcodeGlobalTable::lookupForSampler.

MOZ_ASSERT(!marker->runtime()->isHeapMinorCollecting());
MOZ_ASSERT(!JS::CurrentThreadIsHeapMinorCollecting());

AutoSuppressProfilerSampling suppressSampling(marker->runtime());
AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
uint32_t gen = marker->runtime()->profilerSampleBufferGen();
uint32_t lapCount = marker->runtime()->profilerSampleBufferLapCount();

// If the profiler is off, all entries are considered to be expired.
if (!marker->runtime()->geckoProfiler.enabled())
if (!marker->runtime()->geckoProfiler().enabled())
gen = UINT32_MAX;

bool markedAny = false;
@ -818,7 +818,7 @@ JitcodeGlobalTable::markIteratively(GCMarker* marker)
void
JitcodeGlobalTable::sweep(JSRuntime* rt)
{
AutoSuppressProfilerSampling suppressSampling(rt);
AutoSuppressProfilerSampling suppressSampling(TlsContext.get());
for (Enum e(*this, rt); !e.empty(); e.popFront()) {
JitcodeGlobalEntry* entry = e.front();

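Every skiplist mutation above is bracketed by AutoSuppressProfilerSampling, which after this patch is keyed off the calling thread's JSContext. A minimal sketch of the suppress-while-mutating idea, with SamplingGate as an illustrative stand-in for the flag behind the real class:

#include <atomic>

class SamplingGate {
    std::atomic<bool> enabled_{true};

  public:
    bool samplingEnabled() const { return enabled_.load(std::memory_order_acquire); }

    // RAII guard: the asynchronous sampler checks samplingEnabled() and backs
    // off while any guard is alive, so list mutation never races a traversal.
    class AutoSuppress {
        SamplingGate& gate_;
        bool prev_;
      public:
        explicit AutoSuppress(SamplingGate& gate)
          : gate_(gate), prev_(gate.enabled_.exchange(false, std::memory_order_acq_rel)) {}
        ~AutoSuppress() { gate_.enabled_.store(prev_, std::memory_order_release); }
    };
};

void mutateTable(SamplingGate& gate) {
    SamplingGate::AutoSuppress suppress(gate);
    // ... unlink/relink skiplist towers here; the sampler is guaranteed out ...
}
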
@ -1648,15 +1648,15 @@ JS::ForEachProfiledFrameOp::FrameHandle::frameKind() const
JS_PUBLIC_API(void)
JS::ForEachProfiledFrame(JSContext* cx, void* addr, ForEachProfiledFrameOp& op)
{
js::jit::JitcodeGlobalTable* table = cx->jitRuntime()->getJitcodeGlobalTable();
js::jit::JitcodeGlobalTable* table = cx->runtime()->jitRuntime()->getJitcodeGlobalTable();
js::jit::JitcodeGlobalEntry& entry = table->lookupInfallible(addr);

// Extract the stack for the entry. Assume maximum inlining depth is <64
const char* labels[64];
uint32_t depth = entry.callStackAtAddr(cx, addr, labels, 64);
uint32_t depth = entry.callStackAtAddr(cx->runtime(), addr, labels, 64);
MOZ_ASSERT(depth < 64);
for (uint32_t i = depth; i != 0; i--) {
JS::ForEachProfiledFrameOp::FrameHandle handle(cx, entry, addr, labels[i - 1], i - 1);
JS::ForEachProfiledFrameOp::FrameHandle handle(cx->runtime(), entry, addr, labels[i - 1], i - 1);
op(handle);
}
}

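JS::ForEachProfiledFrame stays a public entry point; only its internals now reach the JitRuntime through cx->runtime(). A hypothetical embedder-side usage sketch; the header path and the exact FrameHandle accessors are assumptions, not verified against this revision:

#include <cstdio>

#include "js/ProfilingFrameIterator.h"  // assumed location of the API in this tree

// Assumes, as the loop above implies, that ForEachProfiledFrameOp is
// subclassed with an operator() hook and that FrameHandle exposes the
// label captured in labels[].
struct PrintFrameOp : public JS::ForEachProfiledFrameOp {
    void operator()(const JS::ForEachProfiledFrameOp::FrameHandle& frame) override {
        fprintf(stderr, "profiled frame: %s\n", frame.label());
    }
};

void dumpSample(JSContext* cx, void* samplePC) {
    PrintFrameOp op;
    JS::ForEachProfiledFrame(cx, samplePC, op);  // visits each inlined frame at samplePC
}
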
@ -53,7 +53,7 @@ Linker::newCode(JSContext* cx, CodeKind kind, bool hasPatchableBackedges /* = fa
code->copyFrom(masm);
masm.link(code);
if (masm.embedsNurseryPointers())
cx->runtime()->gc.storeBuffer.putWholeCell(code);
cx->zone()->group()->storeBuffer().putWholeCell(code);
return code;
}

@ -454,7 +454,7 @@ IonBuilder::inlineMathFunction(CallInfo& callInfo, MMathFunction::Function funct
if (!IsNumberType(callInfo.getArg(0)->type()))
return InliningStatus_NotInlined;

const MathCache* cache = GetJSContextFromMainThread()->caches.maybeGetMathCache();
const MathCache* cache = TlsContext.get()->caches().maybeGetMathCache();

callInfo.fun()->setImplicitlyUsedUnchecked();
callInfo.thisArg()->setImplicitlyUsedUnchecked();
@ -893,15 +893,15 @@ jit::IonCompilationCanUseNurseryPointers()
{
// If we are doing backend compilation, which could occur on a helper
// thread but might actually be on the main thread, check the flag set on
// the PerThreadData by AutoEnterIonCompilation.
// the JSContext by AutoEnterIonCompilation.
if (CurrentThreadIsIonCompiling())
return !CurrentThreadIsIonCompilingSafeForMinorGC();

// Otherwise, we must be on the main thread during MIR construction. The
// store buffer must have been notified that minor GCs must cancel pending
// or in progress Ion compilations.
JSRuntime* rt = TlsPerThreadData.get()->runtimeFromMainThread();
return rt->gc.storeBuffer.cancelIonCompilations();
JSContext* cx = TlsContext.get();
return cx->zone()->group()->storeBuffer().cancelIonCompilations();
}

#endif // DEBUG
@ -1047,10 +1047,7 @@ class CompilerGCPointer
: ptr_(ptr)
{
MOZ_ASSERT_IF(IsInsideNursery(ptr), IonCompilationCanUseNurseryPointers());
#ifdef DEBUG
PerThreadData* pt = TlsPerThreadData.get();
MOZ_ASSERT_IF(pt->runtimeIfOnOwnerThread(), pt->suppressGC);
#endif
MOZ_ASSERT_IF(!CurrentThreadIsIonCompiling(), TlsContext.get()->suppressGC);
}

operator T() const { return static_cast<T>(ptr_); }
@ -1063,8 +1063,7 @@ JS_FOR_EACH_TYPED_ARRAY(CREATE_TYPED_ARRAY)
return;

nbytes = JS_ROUNDUP(nbytes, sizeof(Value));
Nursery& nursery = cx->runtime()->gc.nursery;
void* buf = nursery.allocateBuffer(obj, nbytes);
void* buf = cx->nursery().allocateBuffer(obj, nbytes);
if (buf) {
obj->initPrivate(buf);
memset(buf, 0, nbytes);
@ -2366,7 +2365,7 @@ MacroAssembler::MacroAssembler(JSContext* cx, IonScript* ion,
#endif
if (ion) {
setFramePushed(ion->frameSize());
if (pc && cx->runtime()->geckoProfiler.enabled())
if (pc && cx->runtime()->geckoProfiler().enabled())
enableProfilingInstrumentation();
}
}

@ -699,7 +699,7 @@ class MacroAssembler : public MacroAssemblerSpecific
inline void leaveExitFrame(size_t extraFrame = 0);

private:
// Save the top of the stack into PerThreadData::jitTop of the main thread,
// Save the top of the stack into JSContext::jitTop of the current thread,
// which should be the location of the latest exit frame.
void linkExitFrame();

@ -694,7 +694,7 @@ ICStubCompiler::PushStubPayload(MacroAssembler& masm, Register scratch)
void
BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs,
JSRuntime* rt)
JSContext* cx)
{
Label skipBarrier;
masm.branchPtrInNurseryChunk(Assembler::Equal, obj, scratch, &skipBarrier);
@ -707,7 +707,7 @@ BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperan
saveRegs.set() = GeneralRegisterSet::Intersect(saveRegs.set(), GeneralRegisterSet::Volatile());
masm.PushRegsInMask(saveRegs);
masm.setupUnalignedABICall(scratch);
masm.movePtr(ImmPtr(rt), scratch);
masm.movePtr(ImmPtr(cx->runtime()), scratch);
masm.passABIArg(scratch);
masm.passABIArg(obj);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void*, PostWriteBarrier));
@ -1123,7 +1123,7 @@ class ICStubCompiler

void BaselineEmitPostWriteBarrierSlot(MacroAssembler& masm, Register obj, ValueOperand val,
Register scratch, LiveGeneralRegisterSet saveRegs,
JSRuntime* rt);
JSContext* cx);

class SharedStubInfo
{
@ -136,7 +136,7 @@ CheckOverRecursed(JSContext* cx)
JS_CHECK_RECURSION(cx, return false);
#endif
gc::MaybeVerifyBarriers(cx);
return cx->runtime()->handleInterrupt(cx);
return cx->handleInterrupt();
}

// This function can get called in two contexts. In the usual context, it's
@ -181,7 +181,7 @@ CheckOverRecursedWithExtra(JSContext* cx, BaselineFrame* frame,
#endif

gc::MaybeVerifyBarriers(cx);
return cx->runtime()->handleInterrupt(cx);
return cx->handleInterrupt();
}

JSObject*
@ -481,7 +481,7 @@ InterruptCheck(JSContext* cx)
{
JSRuntime* rt = cx->runtime();
JitRuntime::AutoPreventBackedgePatching apbp(rt);
rt->jitRuntime()->patchIonBackedges(rt, JitRuntime::BackedgeLoopHeader);
cx->zone()->group()->jitZoneGroup->patchIonBackedges(cx, JitZoneGroup::BackedgeLoopHeader);
}

return CheckForInterrupt(cx);
@ -504,7 +504,7 @@ NewCallObject(JSContext* cx, HandleShape shape, HandleObjectGroup group)
// the initializing writes. The interpreter, however, may have allocated
// the call object tenured, so barrier as needed before re-entering.
if (!IsInsideNursery(obj))
cx->runtime()->gc.storeBuffer.putWholeCell(obj);
cx->zone()->group()->storeBuffer().putWholeCell(obj);

return obj;
}
@ -521,7 +521,7 @@ NewSingletonCallObject(JSContext* cx, HandleShape shape)
// the call object tenured, so barrier as needed before re-entering.
MOZ_ASSERT(!IsInsideNursery(obj),
"singletons are created in the tenured heap");
cx->runtime()->gc.storeBuffer.putWholeCell(obj);
cx->zone()->group()->storeBuffer().putWholeCell(obj);

return obj;
}
@ -626,7 +626,7 @@ void
PostWriteBarrier(JSRuntime* rt, JSObject* obj)
{
MOZ_ASSERT(!IsInsideNursery(obj));
rt->gc.storeBuffer.putWholeCell(obj);
obj->zone()->group()->storeBuffer().putWholeCell(obj);
}

static const size_t MAX_WHOLE_CELL_BUFFER_SIZE = 4096;
@ -644,11 +644,11 @@ PostWriteElementBarrier(JSRuntime* rt, JSObject* obj, int32_t index)
#endif
))
{
rt->gc.storeBuffer.putSlot(&obj->as<NativeObject>(), HeapSlot::Element, index, 1);
obj->zone()->group()->storeBuffer().putSlot(&obj->as<NativeObject>(), HeapSlot::Element, index, 1);
return;
}

rt->gc.storeBuffer.putWholeCell(obj);
obj->zone()->group()->storeBuffer().putWholeCell(obj);
}

void
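The barrier hunks above reroute store-buffer traffic from the single runtime-wide GC to the zone group that owns the object, but the generational logic is untouched: only writes into tenured cells need recording. A toy sketch of that post-write barrier; StoreBuffer, Group, and Cell here are illustrative, not the GC's types:

#include <unordered_set>

struct Cell;

struct StoreBuffer {
    std::unordered_set<Cell*> wholeCells;  // cells to re-scan at the next minor GC
    void putWholeCell(Cell* cell) { wholeCells.insert(cell); }
};

struct Group { StoreBuffer storeBuffer; };

struct Cell {
    Group* group;
    bool inNursery;
};

// Mirrors PostWriteBarrier: only tenured cells need recording, and the
// buffer now hangs off the owning group rather than a global runtime.
inline void postWriteBarrier(Cell* owner) {
    if (!owner->inNursery)
        owner->group->storeBuffer.putWholeCell(owner);
}
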
@ -1180,7 +1180,7 @@ SetDenseOrUnboxedArrayElement(JSContext* cx, HandleObject obj, int32_t index,
void
AutoDetectInvalidation::setReturnOverride()
{
cx_->runtime()->jitRuntime()->setIonReturnOverride(rval_.get());
cx_->setIonReturnOverride(rval_.get());
}

void

@ -461,9 +461,6 @@ template <class> struct MatchContext { };
template <> struct MatchContext<JSContext*> {
static const bool valid = true;
};
template <> struct MatchContext<ExclusiveContext*> {
static const bool valid = true;
};

// Extract the last element of a list of types.
template <typename... ArgTypes>

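MatchContext above is a compile-time whitelist: VMFunction wrappers only accept context-parameter types that have a specialization, and this patch retires ExclusiveContext simply by deleting its entry. A self-contained sketch of the trait technique (the names here are illustrative; the real primary template just leaves the member undefined):

struct FakeJSContext {};
struct FakeHelperContext {};

template <class> struct IsAllowedContext { static const bool valid = false; };
template <> struct IsAllowedContext<FakeJSContext*> { static const bool valid = true; };

template <typename Ctx>
void defineWrapper(Ctx cx) {
    // Rejects any context type that lacks a specialization, which is exactly
    // how removing the ExclusiveContext specialization retires it everywhere.
    static_assert(IsAllowedContext<Ctx>::valid, "unsupported context type");
    (void)cx;
}

int main() {
    FakeJSContext jsCx;
    defineWrapper(&jsCx);          // OK
    // FakeHelperContext helperCx;
    // defineWrapper(&helperCx);   // compile error: unsupported context type
}
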
@ -1129,7 +1129,7 @@ PatchJump(CodeLocationJump& jump_, CodeLocationLabel label,
ReprotectCode reprotect = DontReprotect);

static inline void
PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitRuntime::BackedgeTarget target)
PatchBackedge(CodeLocationJump& jump_, CodeLocationLabel label, JitZoneGroup::BackedgeTarget target)
{
PatchJump(jump_, label);
}
Some files were not shown because too many files have changed in this diff.