Bug 675078 - rm JSThreadData and JSThread (JSRuntime is now officially single-threaded) (r=igor,rs=mccr8)

--HG--
extra : rebase_source : f6d2c2c6083f4589691ede571159bfd7e7ef8701
Luke Wagner 2011-07-18 14:54:48 -07:00
parent 2afe66178f
commit d85fb51b34
47 changed files with 575 additions and 2859 deletions
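At a glance, the embedder-visible change is that per-context thread binding disappears and the surviving thread-affinity and request APIs move from JSContext to JSRuntime. The sketch below is illustrative only, not code from this patch; it assumes a cx that is already inside a request on the runtime's owner thread and uses only entry points that appear in the diff (JS_GetRuntime, JS_IsInRequest, JS_TriggerRuntimeOperationCallback).

#include "jsapi.h"

// Illustrative migration sketch (hypothetical helper, not part of the patch).
static void
PokeRuntime(JSContext *cx)
{
    JSRuntime *rt = JS_GetRuntime(cx);

    // Was JS_IsInRequest(cx): request depth now lives on the runtime.
    if (!JS_IsInRequest(rt))
        return;

    // Was JS_TriggerAllOperationCallbacks(rt): there is now a single
    // rt->interrupt flag, so one call reaches the one owner thread.
    JS_TriggerRuntimeOperationCallback(rt);

    // JS_GetContextThread, JS_SetContextThread, and JS_ClearContextThread are
    // removed; the runtime is pinned to its owner thread from creation.
}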

View File

@ -122,10 +122,10 @@ struct IterateData
: runtimeObject(0)
, runtimeAtomsTable(0)
, runtimeContexts(0)
, runtimeThreadsNormal(0)
, runtimeThreadsTemporary(0)
, runtimeThreadsRegexpCode(0)
, runtimeThreadsStackCommitted(0)
, runtimeNormal(0)
, runtimeTemporary(0)
, runtimeRegexpCode(0)
, runtimeStackCommitted(0)
, gcHeapChunkTotal(0)
, gcHeapChunkCleanUnused(0)
, gcHeapChunkDirtyUnused(0)
@ -153,10 +153,10 @@ struct IterateData
int64_t runtimeObject;
int64_t runtimeAtomsTable;
int64_t runtimeContexts;
int64_t runtimeThreadsNormal;
int64_t runtimeThreadsTemporary;
int64_t runtimeThreadsRegexpCode;
int64_t runtimeThreadsStackCommitted;
int64_t runtimeNormal;
int64_t runtimeTemporary;
int64_t runtimeRegexpCode;
int64_t runtimeStackCommitted;
int64_t gcHeapChunkTotal;
int64_t gcHeapChunkCleanUnused;
int64_t gcHeapChunkDirtyUnused;

View File

@ -130,7 +130,6 @@ CPPSRCS = \
jsinfer.cpp \
jsinterp.cpp \
jsiter.cpp \
jslock.cpp \
jslog2.cpp \
jsmath.cpp \
jsnativestack.cpp \
@ -619,7 +618,7 @@ check-malloc-function-usage: $(filter-out %jsalloc.h %jscntxt.h %jsutil.h, $(ALL
# We desire these numbers to go down, not up. See "User guide to memory
# management within SpiderMonkey" in jsutil.h.
$(srcdir)/config/check_source_count.py OffTheBooks:: 59 \
$(srcdir)/config/check_source_count.py OffTheBooks:: 58 \
"in Makefile.in" "{cx,rt}->{new_,array_new,malloc_,calloc_,realloc_}" $^
# This should go to zero, if possible.
$(srcdir)/config/check_source_count.py UnwantedForeground:: 31 \
@ -700,10 +699,6 @@ DEFINES += -DJS_HAS_CTYPES
DEFINES += -DDLL_PREFIX=\"$(DLL_PREFIX)\" -DDLL_SUFFIX=\"$(DLL_SUFFIX)\"
endif
ifdef JS_NO_THIN_LOCKS
DEFINES += -DJS_USE_ONLY_NSPR_LOCKS
endif
ifdef JS_VERSION
DEFINES += -DJS_VERSION=$(JS_VERSION)
endif
@ -781,20 +776,6 @@ EXTRA_LIBS += -lposix4 -ldl -lnsl -lsocket
endif
endif
ifdef SOLARIS_SUNPRO_CXX
ifeq ($(TARGET_CPU),sparc)
# Sun Studio SPARC doesn't work well with gcc inline asm, use lock_SunOS_sparc*.il
jslock.o: jslock.cpp Makefile.in lock_sparcv8plus.il lock_sparcv9.il
$(REPORT_BUILD)
@$(MAKE_DEPS_AUTO_CXX)
ifeq (sparcv9,$(findstring sparcv9,$(OS_TEST)))
$(CXX) -o $@ -c $(COMPILE_CFLAGS) $(srcdir)/lock_sparcv9.il $<
else
$(CXX) -o $@ -c $(COMPILE_CFLAGS) $(srcdir)/lock_sparcv8plus.il $<
endif # sparcv9
endif # sparc
endif # SOLARIS_SUNPRO_CXX
# An AIX optimization bug causes PR_dtoa() & JS_dtoa to produce a wrong result.
# This suppresses optimization for this single compilation unit.
ifeq ($(OS_ARCH),AIX)

View File

@ -219,37 +219,26 @@ CollectCompartmentStatsForRuntime(JSRuntime *rt, IterateData *data)
data->runtimeObject = data->mallocSizeOf(rt, sizeof(JSRuntime));
size_t normal, temporary, regexpCode, stackCommitted;
rt->sizeOfExcludingThis(data->mallocSizeOf,
&normal,
&temporary,
&regexpCode,
&stackCommitted);
data->runtimeNormal = normal;
data->runtimeTemporary = temporary;
data->runtimeRegexpCode = regexpCode;
data->runtimeStackCommitted = stackCommitted;
// Nb: we use sizeOfExcludingThis() because atomState.atoms is within
// JSRuntime, and so counted when JSRuntime is counted.
data->runtimeAtomsTable =
rt->atomState.atoms.sizeOfExcludingThis(data->mallocSizeOf);
{
// Need the GC lock to call JS_ContextIteratorUnlocked() and to
// access rt->threads.
AutoLockGC lock(rt);
JSContext *acx, *iter = NULL;
while ((acx = JS_ContextIteratorUnlocked(rt, &iter)) != NULL) {
data->runtimeContexts +=
acx->sizeOfIncludingThis(data->mallocSizeOf);
}
for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
JSThread *thread = r.front().value;
size_t normal, temporary, regexpCode, stackCommitted;
thread->sizeOfIncludingThis(data->mallocSizeOf,
&normal,
&temporary,
&regexpCode,
&stackCommitted);
data->runtimeThreadsNormal += normal;
data->runtimeThreadsTemporary += temporary;
data->runtimeThreadsRegexpCode += regexpCode;
data->runtimeThreadsStackCommitted += stackCommitted;
}
}
JSContext *acx, *iter = NULL;
while ((acx = JS_ContextIteratorUnlocked(rt, &iter)) != NULL)
data->runtimeContexts += acx->sizeOfIncludingThis(data->mallocSizeOf);
}
JS_DestroyContextNoGC(cx);
@ -346,26 +335,17 @@ GetExplicitNonHeapForRuntime(JSRuntime *rt, int64_t *amount,
IterateCompartments(cx, &n, ExplicitNonHeapCompartmentCallback);
*amount += n;
{
// Need the GC lock to call JS_ContextIteratorUnlocked() and to
// access rt->threads.
AutoLockGC lock(rt);
// explicit/runtime/regexp-code
// explicit/runtime/stack-committed
size_t regexpCode, stackCommitted;
rt->sizeOfExcludingThis(mallocSizeOf,
NULL,
NULL,
&regexpCode,
&stackCommitted);
// explicit/runtime/threads/regexp-code
// explicit/runtime/threads/stack-committed
for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
JSThread *thread = r.front().value;
size_t regexpCode, stackCommitted;
thread->sizeOfIncludingThis(mallocSizeOf,
NULL,
NULL,
&regexpCode,
&stackCommitted);
*amount += regexpCode;
*amount += stackCommitted;
}
}
*amount += regexpCode;
*amount += stackCommitted;
}
JS_DestroyContextNoGC(cx);

View File

@ -61,19 +61,6 @@ using namespace std;
namespace js {
namespace ctypes {
/*******************************************************************************
** Helper classes
*******************************************************************************/
class ScopedContextThread
{
public:
ScopedContextThread(JSContext* cx) : mCx(cx) { JS_SetContextThread(cx); }
~ScopedContextThread() { JS_ClearContextThread(mCx); }
private:
JSContext* mCx;
};
/*******************************************************************************
** JSAPI function prototypes
*******************************************************************************/
@ -2791,7 +2778,6 @@ CType::FinalizeProtoClass(JSContext* cx, JSObject* obj)
return;
JSContext* closureCx = static_cast<JSContext*>(JSVAL_TO_PRIVATE(slot));
JS_SetContextThread(closureCx);
JS_DestroyContextNoGC(closureCx);
}
@ -5383,15 +5369,8 @@ CClosure::Create(JSContext* cx,
JS_DestroyContextNoGC(cinfo->cx);
return NULL;
}
JS_ClearContextThread(cinfo->cx);
}
#ifdef DEBUG
// We want *this* context's thread here so use cx instead of cinfo->cx.
cinfo->cxThread = JS_GetContextThread(cx);
#endif
// Prepare the error sentinel value. It's important to do this now, because
// we might be unable to convert the value to the proper type. If so, we want
// the caller to know about it _now_, rather than some uncertain time in the
@ -5505,11 +5484,6 @@ CClosure::ClosureStub(ffi_cif* cif, void* result, void** args, void* userData)
JSObject* thisObj = cinfo->thisObj;
JSObject* jsfnObj = cinfo->jsfnObj;
ScopedContextThread scopedThread(cx);
// Assert that we're on the thread we were created from.
JS_ASSERT(cinfo->cxThread == JS_GetContextThread(cx));
JS_AbortIfWrongThread(JS_GetRuntime(cx));
JSAutoRequest ar(cx);
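The closure stub above no longer binds its JSContext to the calling thread; it simply enforces the runtime's single-owner-thread invariant before entering a request. A minimal sketch of that replacement pattern (the helper name is hypothetical, not from the patch):

// Hypothetical helper mirroring what CClosure::ClosureStub now does in place
// of the removed ScopedContextThread: assert the owner-thread invariant.
static void
AssertOnRuntimeOwnerThread(JSContext* cx)
{
    // See JS_AbortIfWrongThread in jsapi.cpp: it enforces that the caller is
    // on the runtime's owner thread.
    JS_AbortIfWrongThread(JS_GetRuntime(cx));
}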

View File

@ -331,9 +331,6 @@ struct ClosureInfo
JSObject* jsfnObj; // JS function
void* errResult; // Result that will be returned if the closure throws
ffi_closure* closure; // The C closure itself
#ifdef DEBUG
intptr_t cxThread; // The thread on which the context may be used
#endif
// Anything conditionally freed in the destructor should be initialized to
// NULL here.

View File

@ -60,6 +60,7 @@
#include "jscntxt.h"
#include "jsversion.h"
#include "jsdate.h"
#include "jsdtoa.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
@ -68,6 +69,7 @@
#include "jsiter.h"
#include "jslock.h"
#include "jsmath.h"
#include "jsnativestack.h"
#include "jsnum.h"
#include "json.h"
#include "jsobj.h"
@ -88,7 +90,8 @@
#include "frontend/BytecodeCompiler.h"
#include "frontend/BytecodeEmitter.h"
#include "js/MemoryMetrics.h"
#include "mozilla/Util.h" // DebugOnly
#include "mozilla/Util.h"
#include "yarr/BumpPointerAllocator.h"
#include "jsatominlines.h"
#include "jsinferinlines.h"
@ -692,11 +695,27 @@ JS_IsBuiltinFunctionConstructor(JSFunction *fun)
static JSBool js_NewRuntimeWasCalled = JS_FALSE;
JSRuntime::JSRuntime()
: atomsCompartment(NULL),
: interrupt(0),
atomsCompartment(NULL),
#ifdef JS_THREADSAFE
ownerThread_(NULL),
#endif
tempLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
execAlloc_(NULL),
bumpAlloc_(NULL),
repCache_(NULL),
interpreterFrames(NULL),
cxCallback(NULL),
compartmentCallback(NULL),
activityCallback(NULL),
activityCallbackArg(NULL),
#ifdef JS_THREADSAFE
suspendCount(0),
requestDepth(0),
# ifdef DEBUG
checkRequestDepth(0),
# endif
#endif
gcSystemAvailableChunkListHead(NULL),
gcUserAvailableChunkListHead(NULL),
gcKeepAtoms(0),
@ -746,10 +765,7 @@ JSRuntime::JSRuntime()
data(NULL),
#ifdef JS_THREADSAFE
gcLock(NULL),
gcDone(NULL),
requestDone(NULL),
requestCount(0),
gcThread(NULL),
gcHelperThread(thisFromCtor()),
#endif
debuggerMutations(0),
@ -757,24 +773,21 @@ JSRuntime::JSRuntime()
structuredCloneCallbacks(NULL),
telemetryCallback(NULL),
propertyRemovals(0),
scriptFilenameTable(NULL),
#ifdef JS_THREADSAFE
scriptFilenameTableLock(NULL),
#endif
thousandsSeparator(0),
decimalSeparator(0),
numGrouping(0),
anynameObject(NULL),
functionNamespaceObject(NULL),
#ifdef JS_THREADSAFE
interruptCounter(0),
#else
threadData(thisFromCtor()),
#endif
waiveGCQuota(false),
dtoaState(NULL),
pendingProxyOperation(NULL),
trustedPrincipals_(NULL),
wrapObjectCallback(NULL),
wrapObjectCallback(TransparentObjectWrapper),
preWrapObjectCallback(NULL),
preserveWrapperCallback(NULL),
#ifdef DEBUG
noGCOrAllocationCheck(0),
#endif
inOOMReport(0)
{
/* Initialize infallibly first, so we can goto bad and JS_DestroyRuntime. */
@ -789,7 +802,7 @@ bool
JSRuntime::init(uint32_t maxbytes)
{
#ifdef JS_THREADSAFE
ownerThread_ = js_CurrentThreadId();
ownerThread_ = PR_GetCurrentThread();
#endif
#ifdef JS_METHODJIT_SPEW
@ -812,25 +825,28 @@ JSRuntime::init(uint32_t maxbytes)
if (!js_InitAtomState(this))
return false;
wrapObjectCallback = js::TransparentObjectWrapper;
#ifdef JS_THREADSAFE
/* this is asymmetric with JS_ShutDown: */
if (!js_SetupLocks(8, 16))
return false;
#endif
debugMode = false;
if (!js_InitThreads(this))
return false;
if (!InitRuntimeNumberState(this))
return false;
dtoaState = js_NewDtoaState();
if (!dtoaState)
return false;
if (!stackSpace.init())
return false;
conservativeGC.nativeStackBase = GetNativeStackBase();
return true;
}
JSRuntime::~JSRuntime()
{
JS_ASSERT(onOwnerThread());
delete_<JSC::ExecutableAllocator>(execAlloc_);
delete_<WTF::BumpPointerAllocator>(bumpAlloc_);
JS_ASSERT(!repCache_);
#ifdef DEBUG
/* Don't hurt everyone in leaky ol' Mozilla with a fatal JS_ASSERT! */
if (!JS_CLIST_IS_EMPTY(&contextList)) {
@ -849,17 +865,15 @@ JSRuntime::~JSRuntime()
#endif
FinishRuntimeNumberState(this);
js_FinishThreads(this);
js_FinishAtomState(this);
if (dtoaState)
js_DestroyDtoaState(dtoaState);
js_FinishGC(this);
#ifdef JS_THREADSAFE
if (gcLock)
JS_DESTROY_LOCK(gcLock);
if (gcDone)
JS_DESTROY_CONDVAR(gcDone);
if (requestDone)
JS_DESTROY_CONDVAR(requestDone);
PR_DestroyLock(gcLock);
#endif
}
@ -867,23 +881,27 @@ JSRuntime::~JSRuntime()
void
JSRuntime::setOwnerThread()
{
JS_ASSERT(ownerThread_ == (void *)-1);
ownerThread_ = js_CurrentThreadId();
JS_ASSERT(ownerThread_ == (void *)0xc1ea12); /* "clear" */
JS_ASSERT(requestDepth == 0);
ownerThread_ = PR_GetCurrentThread();
conservativeGC.nativeStackBase = GetNativeStackBase();
}
void
JSRuntime::clearOwnerThread()
{
JS_ASSERT(onOwnerThread());
ownerThread_ = (void *)-1;
JS_ASSERT(requestDepth == 0);
ownerThread_ = (void *)0xc1ea12; /* "clear" */
conservativeGC.nativeStackBase = 0;
}
JS_FRIEND_API(bool)
JSRuntime::onOwnerThread() const
{
return ownerThread_ == js_CurrentThreadId();
return ownerThread_ == PR_GetCurrentThread();
}
#endif
#endif /* JS_THREADSAFE */
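The setOwnerThread/clearOwnerThread pair above is the machinery behind the public runtime-thread calls, which replace the old per-context binding for embeddings that genuinely hand a runtime from one thread to another. A hedged sketch of that usage; JS_ClearRuntimeThread appears later in this file, and its JS_SetRuntimeThread counterpart in jsapi.h is assumed here:

// Hypothetical embedder helpers, not part of this patch.
static void
ReleaseRuntimeFromThisThread(JSRuntime *rt)
{
    // Must run on the current owner thread with no active requests
    // (clearOwnerThread asserts requestDepth == 0).
    JS_ClearRuntimeThread(rt);
}

static void
AdoptRuntimeOnThisThread(JSRuntime *rt)
{
    // Assumed counterpart declared alongside JS_ClearRuntimeThread in jsapi.h.
    JS_SetRuntimeThread(rt);
}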
JS_PUBLIC_API(JSRuntime *)
JS_NewRuntime(uint32_t maxbytes)
@ -943,10 +961,6 @@ JS_PUBLIC_API(void)
JS_ShutDown(void)
{
Probes::shutdown();
#ifdef JS_THREADSAFE
js_CleanupLocks();
#endif
PRMJ_NowShutdown();
}
@ -990,31 +1004,17 @@ JS::UserCompartmentCount(const JSRuntime *rt)
static void
StartRequest(JSContext *cx)
{
JSThread *t = cx->thread();
JS_ASSERT(CURRENT_THREAD_IS_ME(t));
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->onOwnerThread());
if (t->data.requestDepth) {
t->data.requestDepth++;
if (rt->requestDepth) {
rt->requestDepth++;
} else {
JSRuntime *rt = cx->runtime;
AutoLockGC lock(rt);
/* Wait until the GC is finished. */
if (rt->gcThread != cx->thread()) {
while (rt->gcThread)
JS_AWAIT_GC_DONE(rt);
}
/* Indicate that a request is running. */
rt->requestCount++;
t->data.requestDepth = 1;
/*
* Adjust rt->interruptCounter to reflect any interrupts added while the
* thread was suspended.
*/
if (t->data.interruptFlags)
JS_ATOMIC_INCREMENT(&rt->interruptCounter);
rt->requestDepth = 1;
if (rt->requestCount == 1 && rt->activityCallback)
rt->activityCallback(rt->activityCallbackArg, true);
@ -1024,32 +1024,23 @@ StartRequest(JSContext *cx)
static void
StopRequest(JSContext *cx)
{
JSThread *t = cx->thread();
JS_ASSERT(CURRENT_THREAD_IS_ME(t));
JS_ASSERT(t->data.requestDepth != 0);
if (t->data.requestDepth != 1) {
t->data.requestDepth--;
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->onOwnerThread());
JS_ASSERT(rt->requestDepth != 0);
if (rt->requestDepth != 1) {
rt->requestDepth--;
} else {
t->data.conservativeGC.updateForRequestEnd(t->suspendCount);
rt->conservativeGC.updateForRequestEnd(rt->suspendCount);
/* Lock before clearing to interlock with ClaimScope, in jslock.c. */
JSRuntime *rt = cx->runtime;
AutoLockGC lock(rt);
t->data.requestDepth = 0;
/*
* Adjust rt->interruptCounter to reflect any interrupts added while the
* thread still had active requests.
*/
if (t->data.interruptFlags)
JS_ATOMIC_DECREMENT(&rt->interruptCounter);
rt->requestDepth = 0;
/* Give the GC a chance to run if this was the last request running. */
JS_ASSERT(rt->requestCount > 0);
rt->requestCount--;
if (rt->requestCount == 0) {
JS_NOTIFY_REQUEST_DONE(rt);
if (rt->activityCallback)
rt->activityCallback(rt->activityCallbackArg, false);
}
@ -1090,15 +1081,15 @@ JS_PUBLIC_API(jsrefcount)
JS_SuspendRequest(JSContext *cx)
{
#ifdef JS_THREADSAFE
JSThread *t = cx->thread();
JS_ASSERT(CURRENT_THREAD_IS_ME(t));
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->onOwnerThread());
jsrefcount saveDepth = t->data.requestDepth;
jsrefcount saveDepth = rt->requestDepth;
if (!saveDepth)
return 0;
t->suspendCount++;
t->data.requestDepth = 1;
rt->suspendCount++;
rt->requestDepth = 1;
StopRequest(cx);
return saveDepth;
#else
@ -1110,25 +1101,36 @@ JS_PUBLIC_API(void)
JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth)
{
#ifdef JS_THREADSAFE
JSThread *t = cx->thread();
JS_ASSERT(CURRENT_THREAD_IS_ME(t));
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->onOwnerThread());
if (saveDepth == 0)
return;
JS_ASSERT(saveDepth >= 1);
JS_ASSERT(!t->data.requestDepth);
JS_ASSERT(t->suspendCount);
JS_ASSERT(!rt->requestDepth);
JS_ASSERT(rt->suspendCount);
StartRequest(cx);
t->data.requestDepth = saveDepth;
t->suspendCount--;
rt->requestDepth = saveDepth;
rt->suspendCount--;
#endif
}
JS_PUBLIC_API(JSBool)
JS_IsInRequest(JSContext *cx)
JS_IsInRequest(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread()));
return JS_THREAD_DATA(cx)->requestDepth != 0;
JS_ASSERT(rt->onOwnerThread());
return rt->requestDepth != 0;
#else
return false;
#endif
}
JS_PUBLIC_API(JSBool)
JS_IsInSuspendedRequest(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
JS_ASSERT(rt->onOwnerThread());
return rt->suspendCount != 0;
#else
return false;
#endif
@ -3019,15 +3021,11 @@ JS_SetThreadStackLimit(JSContext *cx, uintptr_t limitAddr)
JS_PUBLIC_API(void)
JS_SetNativeStackQuota(JSContext *cx, size_t stackSize)
{
#ifdef JS_THREADSAFE
JS_ASSERT(cx->thread());
#endif
#if JS_STACK_GROWTH_DIRECTION > 0
if (stackSize == 0) {
cx->stackLimit = UINTPTR_MAX;
} else {
uintptr_t stackBase = reinterpret_cast<uintptr_t>(JS_THREAD_DATA(cx)->nativeStackBase);
uintptr_t stackBase = cx->runtime->nativeStackBase;
JS_ASSERT(stackBase <= size_t(-1) - stackSize);
cx->stackLimit = stackBase + stackSize - 1;
}
@ -3035,7 +3033,7 @@ JS_SetNativeStackQuota(JSContext *cx, size_t stackSize)
if (stackSize == 0) {
cx->stackLimit = 0;
} else {
uintptr_t stackBase = reinterpret_cast<uintptr_t>(JS_THREAD_DATA(cx)->nativeStackBase);
uintptr_t stackBase = uintptr_t(cx->runtime->conservativeGC.nativeStackBase);
JS_ASSERT(stackBase >= stackSize);
cx->stackLimit = stackBase - (stackSize - 1);
}
@ -5518,9 +5516,6 @@ JS_New(JSContext *cx, JSObject *ctor, uintN argc, jsval *argv)
JS_PUBLIC_API(JSOperationCallback)
JS_SetOperationCallback(JSContext *cx, JSOperationCallback callback)
{
#ifdef JS_THREADSAFE
JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread()));
#endif
JSOperationCallback old = cx->operationCallback;
cx->operationCallback = callback;
return old;
@ -5538,26 +5533,21 @@ JS_TriggerOperationCallback(JSContext *cx)
#ifdef JS_THREADSAFE
AutoLockGC lock(cx->runtime);
#endif
TriggerOperationCallback(cx);
cx->runtime->triggerOperationCallback();
}
JS_PUBLIC_API(void)
JS_TriggerAllOperationCallbacks(JSRuntime *rt)
JS_TriggerRuntimeOperationCallback(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
AutoLockGC lock(rt);
#endif
TriggerAllOperationCallbacks(rt);
rt->triggerOperationCallback();
}
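With the per-thread bookkeeping gone, an embedder-side watchdog only needs the runtime pointer. A hedged usage sketch (the callback and helper names are hypothetical; JS_SetOperationCallback, JS_GetRuntime, and JS_TriggerRuntimeOperationCallback are the real entry points shown in this patch):

// Hypothetical embedder code, not part of this patch.
static JSBool
WatchdogOperationCallback(JSContext *cx)
{
    /* Return JS_FALSE here to abort the currently running script. */
    return JS_TRUE;
}

static void
InstallAndTriggerWatchdog(JSContext *cx)
{
    JS_SetOperationCallback(cx, WatchdogOperationCallback);

    // Typically called from a separate watchdog thread: it only sets the
    // runtime's single interrupt flag, which js_InvokeOperationCallback
    // later clears on the owner thread.
    JS_TriggerRuntimeOperationCallback(JS_GetRuntime(cx));
}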
JS_PUBLIC_API(JSBool)
JS_IsRunning(JSContext *cx)
{
#ifdef JS_THREADSAFE
if (!cx->thread())
return false;
#endif
StackFrame *fp = cx->maybefp();
while (fp && fp->isDummyFrame())
fp = fp->prev();
@ -6589,51 +6579,14 @@ JS_ThrowStopIteration(JSContext *cx)
JS_PUBLIC_API(intptr_t)
JS_GetCurrentThread()
{
return reinterpret_cast<intptr_t>(js_CurrentThreadId());
}
/*
* Get the owning thread id of a context. Returns 0 if the context is not
* owned by any thread.
*/
JS_PUBLIC_API(intptr_t)
JS_GetContextThread(JSContext *cx)
{
#ifdef JS_THREADSAFE
return cx->thread() ? reinterpret_cast<intptr_t>(cx->thread()->id) : 0;
return reinterpret_cast<intptr_t>(PR_GetCurrentThread());
#else
return 0;
#endif
}
/*
* Set the current thread as the owning thread of a context. Returns the
* old owning thread id, or -1 if the operation failed.
*/
JS_PUBLIC_API(intptr_t)
JS_SetContextThread(JSContext *cx)
{
/* This function can be called by a finalizer. */
JS_AbortIfWrongThread(cx->runtime);
#ifdef JS_THREADSAFE
JS_ASSERT(!cx->outstandingRequests);
if (cx->thread()) {
JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread()));
return reinterpret_cast<intptr_t>(cx->thread()->id);
}
if (!js_InitContextThreadAndLockGC(cx)) {
js_ReportOutOfMemory(cx);
return -1;
}
JS_UNLOCK_GC(cx->runtime);
#endif
return 0;
}
extern JS_PUBLIC_API(void)
JS_ClearRuntimeThread(JSRuntime *rt)
{
@ -6661,44 +6614,6 @@ JS_AbortIfWrongThread(JSRuntime *rt)
#endif
}
JS_PUBLIC_API(intptr_t)
JS_ClearContextThread(JSContext *cx)
{
JS_AbortIfWrongThread(cx->runtime);
AssertNoGC(cx);
#ifdef JS_THREADSAFE
/*
* cx must have exited all requests it entered and, if cx is associated
* with a thread, this must be called only from that thread. If not, this
* is a harmless no-op.
*/
JS_ASSERT(cx->outstandingRequests == 0);
JSThread *t = cx->thread();
if (!t)
return 0;
JS_ASSERT(CURRENT_THREAD_IS_ME(t));
/*
* We must not race with a GC that accesses cx->thread for all threads,
* see bug 476934.
*/
JSRuntime *rt = cx->runtime;
AutoLockGC lock(rt);
js_WaitForGC(rt);
js_ClearContextThread(cx);
JS_ASSERT_IF(JS_CLIST_IS_EMPTY(&t->contextList), !t->data.requestDepth);
/*
* We can access t->id as long as the GC lock is held and we cannot race
* with the GC that may delete t.
*/
return reinterpret_cast<intptr_t>(t->id);
#else
return 0;
#endif
}
#ifdef JS_GC_ZEAL
JS_PUBLIC_API(void)
JS_SetGCZeal(JSContext *cx, uint8_t zeal, uint32_t frequency, JSBool compartment)

View File

@ -2323,6 +2323,9 @@ JS_ShutDown(void);
JS_PUBLIC_API(void *)
JS_GetRuntimePrivate(JSRuntime *rt);
extern JS_PUBLIC_API(JSRuntime *)
JS_GetRuntime(JSContext *cx);
JS_PUBLIC_API(void)
JS_SetRuntimePrivate(JSRuntime *rt, void *data);
@ -2343,7 +2346,10 @@ extern JS_PUBLIC_API(void)
JS_ResumeRequest(JSContext *cx, jsrefcount saveDepth);
extern JS_PUBLIC_API(JSBool)
JS_IsInRequest(JSContext *cx);
JS_IsInRequest(JSRuntime *rt);
extern JS_PUBLIC_API(JSBool)
JS_IsInSuspendedRequest(JSRuntime *rt);
#ifdef __cplusplus
JS_END_EXTERN_C
@ -2425,14 +2431,14 @@ class JSAutoCheckRequest {
JSAutoCheckRequest(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM) {
#if defined JS_THREADSAFE && defined DEBUG
mContext = cx;
JS_ASSERT(JS_IsInRequest(cx));
JS_ASSERT(JS_IsInRequest(JS_GetRuntime(cx)));
#endif
JS_GUARD_OBJECT_NOTIFIER_INIT;
}
~JSAutoCheckRequest() {
#if defined JS_THREADSAFE && defined DEBUG
JS_ASSERT(JS_IsInRequest(mContext));
JS_ASSERT(JS_IsInRequest(JS_GetRuntime(mContext)));
#endif
}
@ -4489,7 +4495,7 @@ extern JS_PUBLIC_API(void)
JS_TriggerOperationCallback(JSContext *cx);
extern JS_PUBLIC_API(void)
JS_TriggerAllOperationCallbacks(JSRuntime *rt);
JS_TriggerRuntimeOperationCallback(JSRuntime *rt);
extern JS_PUBLIC_API(JSBool)
JS_IsRunning(JSContext *cx);
@ -5261,24 +5267,6 @@ JS_ThrowStopIteration(JSContext *cx);
extern JS_PUBLIC_API(intptr_t)
JS_GetCurrentThread();
/*
* Associate the current thread with the given context. This is done
* implicitly by JS_NewContext.
*
* Returns the old thread id for this context, which should be treated as
* an opaque value. This value is provided for comparison to 0, which
* indicates that ClearContextThread has been called on this context
* since the last SetContextThread, or non-0, which indicates the opposite.
*/
extern JS_PUBLIC_API(intptr_t)
JS_GetContextThread(JSContext *cx);
extern JS_PUBLIC_API(intptr_t)
JS_SetContextThread(JSContext *cx);
extern JS_PUBLIC_API(intptr_t)
JS_ClearContextThread(JSContext *cx);
/*
* A JS runtime always has an "owner thread". The owner thread is set when the
* runtime is created (to the current thread) and practically all entry points

View File

@ -333,11 +333,8 @@ js_InitAtomState(JSRuntime *rt)
if (!state->atoms.init(JS_STRING_HASH_COUNT))
return false;
#ifdef JS_THREADSAFE
js_InitLock(&state->lock);
#endif
JS_ASSERT(state->atoms.initialized());
return JS_TRUE;
return true;
}
void
@ -355,10 +352,6 @@ js_FinishAtomState(JSRuntime *rt)
for (AtomSet::Range r = state->atoms.all(); !r.empty(); r.popFront())
r.front().asPtr()->finalize(rt);
#ifdef JS_THREADSAFE
js_FinishLock(&state->lock);
#endif
}
bool
@ -433,7 +426,6 @@ AtomIsInterned(JSContext *cx, JSAtom *atom)
if (StaticStrings::isStatic(atom))
return true;
AutoLockAtomsCompartment lock(cx);
AtomSet::Ptr p = cx->runtime->atomState.atoms.lookup(atom);
if (!p)
return false;
@ -462,8 +454,6 @@ AtomizeInline(JSContext *cx, const jschar **pchars, size_t length,
if (JSAtom *s = cx->runtime->staticStrings.lookup(chars, length))
return s;
AutoLockAtomsCompartment lock(cx);
AtomSet &atoms = cx->runtime->atomState.atoms;
AtomSet::AddPtr p = atoms.lookupForAdd(AtomHasher::Lookup(chars, length));
@ -522,9 +512,6 @@ js_AtomizeString(JSContext *cx, JSString *str, InternBehavior ib)
if (ib != InternAtom || js::StaticStrings::isStatic(&atom))
return &atom;
/* Here we have to check whether the atom is already interned. */
AutoLockAtomsCompartment lock(cx);
AtomSet &atoms = cx->runtime->atomState.atoms;
AtomSet::Ptr p = atoms.lookup(AtomHasher::Lookup(&atom));
JS_ASSERT(p); /* Non-static atom must exist in atom state set. */
@ -604,9 +591,9 @@ js_GetExistingStringAtom(JSContext *cx, const jschar *chars, size_t length)
{
if (JSAtom *atom = cx->runtime->staticStrings.lookup(chars, length))
return atom;
AutoLockAtomsCompartment lock(cx);
AtomSet::Ptr p = cx->runtime->atomState.atoms.lookup(AtomHasher::Lookup(chars, length));
return p ? p->asPtr() : NULL;
if (AtomSet::Ptr p = cx->runtime->atomState.atoms.lookup(AtomHasher::Lookup(chars, length)))
return p->asPtr();
return NULL;
}
#ifdef DEBUG

View File

@ -268,10 +268,6 @@ struct JSAtomState
{
js::AtomSet atoms;
#ifdef JS_THREADSAFE
JSThinLock lock;
#endif
/*
* From this point until the end of struct definition the struct must
* contain only js::PropertyName fields. We use this to access the storage

View File

@ -64,7 +64,6 @@
#include "jscntxt.h"
#include "jsversion.h"
#include "jsdbgapi.h"
#include "jsdtoa.h"
#include "jsexn.h"
#include "jsfun.h"
#include "jsgc.h"
@ -72,7 +71,6 @@
#include "jsiter.h"
#include "jslock.h"
#include "jsmath.h"
#include "jsnativestack.h"
#include "jsnum.h"
#include "jsobj.h"
#include "jsopcode.h"
@ -96,57 +94,10 @@
using namespace js;
using namespace js::gc;
namespace js {
ThreadData::ThreadData(JSRuntime *rt)
: rt(rt),
interruptFlags(0),
#ifdef JS_THREADSAFE
requestDepth(0),
#endif
tempLifoAlloc(TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE),
execAlloc(NULL),
bumpAlloc(NULL),
repCache(NULL),
dtoaState(NULL),
nativeStackBase(GetNativeStackBase()),
pendingProxyOperation(NULL),
interpreterFrames(NULL)
{
#ifdef DEBUG
noGCOrAllocationCheck = 0;
#endif
}
ThreadData::~ThreadData()
{
JS_ASSERT(!repCache);
rt->delete_<JSC::ExecutableAllocator>(execAlloc);
rt->delete_<WTF::BumpPointerAllocator>(bumpAlloc);
if (dtoaState)
js_DestroyDtoaState(dtoaState);
}
bool
ThreadData::init()
{
JS_ASSERT(!repCache);
return stackSpace.init() && !!(dtoaState = js_NewDtoaState());
}
#ifdef JS_THREADSAFE
void
ThreadData::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal, size_t *temporary,
size_t *regexpCode, size_t *stackCommitted)
JSRuntime::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal, size_t *temporary,
size_t *regexpCode, size_t *stackCommitted)
{
/*
* There are other ThreadData members that could be measured; the ones
* below have been seen by DMD to be worth measuring. More stuff may be
* added later.
*/
/*
* The computedSize is 0 because sizeof(DtoaState) isn't available here and
* it's not worth making it available.
@ -159,8 +110,8 @@ ThreadData::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal,
if (regexpCode) {
size_t method = 0, regexp = 0, unused = 0;
if (execAlloc)
execAlloc->sizeOfCode(&method, &regexp, &unused);
if (execAlloc_)
execAlloc_->sizeOfCode(&method, &regexp, &unused);
JS_ASSERT(method == 0); /* this execAlloc is only used for regexp code */
*regexpCode = regexp + unused;
}
@ -168,261 +119,65 @@ ThreadData::sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal,
if (stackCommitted)
*stackCommitted = stackSpace.sizeOfCommitted();
}
#endif
void
ThreadData::triggerOperationCallback(JSRuntime *rt)
JS_FRIEND_API(void)
JSRuntime::triggerOperationCallback()
{
JS_ASSERT(rt == this->rt);
/*
* Use JS_ATOMIC_SET and JS_ATOMIC_INCREMENT in the hope that it ensures
* the write will become immediately visible to other processors polling
* the flag. Note that we only care about visibility here, not read/write
* ordering: this field can only be written with the GC lock held.
* Use JS_ATOMIC_SET in the hope that it ensures the write will become
* immediately visible to other processors polling the flag.
*/
if (interruptFlags)
return;
JS_ATOMIC_SET(&interruptFlags, 1);
#ifdef JS_THREADSAFE
/* rt->interruptCounter does not reflect suspended threads. */
if (requestDepth != 0)
JS_ATOMIC_INCREMENT(&rt->interruptCounter);
#endif
JS_ATOMIC_SET(&interrupt, 1);
}
JSC::ExecutableAllocator *
ThreadData::createExecutableAllocator(JSContext *cx)
JSRuntime::createExecutableAllocator(JSContext *cx)
{
JS_ASSERT(!execAlloc);
JS_ASSERT(cx->runtime == rt);
JS_ASSERT(!execAlloc_);
JS_ASSERT(cx->runtime == this);
execAlloc = rt->new_<JSC::ExecutableAllocator>();
if (!execAlloc)
execAlloc_ = new_<JSC::ExecutableAllocator>();
if (!execAlloc_)
js_ReportOutOfMemory(cx);
return execAlloc;
return execAlloc_;
}
WTF::BumpPointerAllocator *
ThreadData::createBumpPointerAllocator(JSContext *cx)
JSRuntime::createBumpPointerAllocator(JSContext *cx)
{
JS_ASSERT(!bumpAlloc);
JS_ASSERT(cx->runtime == rt);
JS_ASSERT(!bumpAlloc_);
JS_ASSERT(cx->runtime == this);
bumpAlloc = rt->new_<WTF::BumpPointerAllocator>();
if (!bumpAlloc)
bumpAlloc_ = new_<WTF::BumpPointerAllocator>();
if (!bumpAlloc_)
js_ReportOutOfMemory(cx);
return bumpAlloc;
return bumpAlloc_;
}
RegExpPrivateCache *
ThreadData::createRegExpPrivateCache(JSContext *cx)
JSRuntime::createRegExpPrivateCache(JSContext *cx)
{
JS_ASSERT(!repCache);
JS_ASSERT(cx->runtime == rt);
JS_ASSERT(!repCache_);
JS_ASSERT(cx->runtime == this);
RegExpPrivateCache *newCache = rt->new_<RegExpPrivateCache>(rt);
RegExpPrivateCache *newCache = new_<RegExpPrivateCache>(this);
if (!newCache || !newCache->init()) {
js_ReportOutOfMemory(cx);
rt->delete_<RegExpPrivateCache>(newCache);
delete_<RegExpPrivateCache>(newCache);
return NULL;
}
repCache = newCache;
return repCache;
repCache_ = newCache;
return repCache_;
}
void
ThreadData::purgeRegExpPrivateCache()
{
rt->delete_<RegExpPrivateCache>(repCache);
repCache = NULL;
}
} /* namespace js */
JSScript *
js_GetCurrentScript(JSContext *cx)
{
return cx->hasfp() ? cx->fp()->maybeScript() : NULL;
}
#ifdef JS_THREADSAFE
void
JSThread::sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal, size_t *temporary,
size_t *regexpCode, size_t *stackCommitted)
{
data.sizeOfExcludingThis(mallocSizeOf, normal, temporary, regexpCode, stackCommitted);
if (normal)
*normal += mallocSizeOf(this, sizeof(JSThread));
}
JSThread *
js_CurrentThreadAndLockGC(JSRuntime *rt)
{
void *id = js_CurrentThreadId();
JS_LOCK_GC(rt);
/*
* We must not race with a GC that accesses cx->thread for JSContext
* instances on all threads, see bug 476934.
*/
js_WaitForGC(rt);
JSThread *thread;
JSThread::Map::AddPtr p = rt->threads.lookupForAdd(id);
if (p) {
thread = p->value;
/*
* If thread has no contexts, it might be left over from a previous
* thread with the same id but a different stack address.
*/
if (JS_CLIST_IS_EMPTY(&thread->contextList))
thread->data.nativeStackBase = GetNativeStackBase();
} else {
JS_UNLOCK_GC(rt);
thread = OffTheBooks::new_<JSThread>(rt, id);
if (!thread || !thread->init()) {
Foreground::delete_(thread);
return NULL;
}
JS_LOCK_GC(rt);
js_WaitForGC(rt);
if (!rt->threads.relookupOrAdd(p, id, thread)) {
JS_UNLOCK_GC(rt);
Foreground::delete_(thread);
return NULL;
}
/* Another thread cannot add an entry for the current thread id. */
JS_ASSERT(p->value == thread);
}
JS_ASSERT(thread->id == id);
/*
* We skip the assert under glibc due to an apparent bug there, see
* bug 608526.
*/
#ifndef __GLIBC__
JS_ASSERT(GetNativeStackBase() == thread->data.nativeStackBase);
#endif
return thread;
}
JSBool
js_InitContextThreadAndLockGC(JSContext *cx)
{
JSThread *thread = js_CurrentThreadAndLockGC(cx->runtime);
if (!thread)
return false;
JS_APPEND_LINK(&cx->threadLinks, &thread->contextList);
cx->setThread(thread);
return true;
}
void
JSContext::setThread(JSThread *thread)
{
thread_ = thread;
stack.threadReset();
}
void
js_ClearContextThread(JSContext *cx)
{
JS_ASSERT(CURRENT_THREAD_IS_ME(cx->thread()));
JS_REMOVE_AND_INIT_LINK(&cx->threadLinks);
cx->setThread(NULL);
}
#endif /* JS_THREADSAFE */
ThreadData *
js_CurrentThreadData(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
JSThread *thread = js_CurrentThreadAndLockGC(rt);
if (!thread)
return NULL;
return &thread->data;
#else
return &rt->threadData;
#endif
}
JSBool
js_InitThreads(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
return rt->threads.init(4);
#else
return rt->threadData.init();
#endif
}
void
js_FinishThreads(JSRuntime *rt)
{
#ifdef JS_THREADSAFE
if (!rt->threads.initialized())
return;
for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
JSThread *thread = r.front().value;
Foreground::delete_(thread);
}
rt->threads.clear();
#endif
}
void
js_PurgeThreads(JSContext *cx)
{
#ifdef JS_THREADSAFE
for (JSThread::Map::Enum e(cx->runtime->threads);
!e.empty();
e.popFront()) {
JSThread *thread = e.front().value;
if (JS_CLIST_IS_EMPTY(&thread->contextList)) {
JS_ASSERT(cx->thread() != thread);
Foreground::delete_(thread);
e.removeFront();
} else {
thread->data.purge(cx);
}
}
#else
cx->runtime->threadData.purge(cx);
#endif
}
void
js_PurgeThreads_PostGlobalSweep(JSContext *cx)
{
#ifdef JS_THREADSAFE
for (JSThread::Map::Enum e(cx->runtime->threads);
!e.empty();
e.popFront())
{
JSThread *thread = e.front().value;
JS_ASSERT(!JS_CLIST_IS_EMPTY(&thread->contextList));
thread->data.purgeRegExpPrivateCache();
}
#else
cx->runtime->threadData.purgeRegExpPrivateCache();
#endif
}
JSContext *
js_NewContext(JSRuntime *rt, size_t stackChunkSize)
{
@ -444,20 +199,12 @@ js_NewContext(JSRuntime *rt, size_t stackChunkSize)
return NULL;
}
#ifdef JS_THREADSAFE
if (!js_InitContextThreadAndLockGC(cx)) {
Foreground::delete_(cx);
return NULL;
}
#endif
/*
* Here the GC lock is still held after js_InitContextThreadAndLockGC took it and
* the GC is not running on another thread.
*/
bool first = JS_CLIST_IS_EMPTY(&rt->contextList);
JS_APPEND_LINK(&cx->link, &rt->contextList);
JS_UNLOCK_GC(rt);
js_InitRandom(cx);
@ -501,32 +248,20 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
JSRuntime *rt = cx->runtime;
JS_AbortIfWrongThread(rt);
JSContextCallback cxCallback;
JS_ASSERT(!cx->enumerators);
#ifdef JS_THREADSAFE
/*
* For API compatibility we allow to destroy contexts without a thread in
* optimized builds. We assume that the embedding knows that an OOM error
* cannot happen in JS_SetContextThread.
*/
JS_ASSERT(cx->thread() && CURRENT_THREAD_IS_ME(cx->thread()));
if (!cx->thread())
JS_SetContextThread(cx);
/*
* For API compatibility we support destroying contexts with non-zero
* cx->outstandingRequests but we assume that all JS_BeginRequest calls
* on this cx contributes to cx->thread->data.requestDepth and there is no
* JS_SuspendRequest calls that set aside the counter.
*/
JS_ASSERT(cx->outstandingRequests <= cx->thread()->data.requestDepth);
JS_ASSERT(cx->outstandingRequests <= cx->runtime->requestDepth);
#endif
if (mode != JSDCM_NEW_FAILED) {
cxCallback = rt->cxCallback;
if (cxCallback) {
if (JSContextCallback cxCallback = rt->cxCallback) {
/*
* JSCONTEXT_DESTROY callback is not allowed to fail and must
* return true.
@ -537,14 +272,6 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
}
JS_LOCK_GC(rt);
#ifdef JS_THREADSAFE
/*
* Typically we are called outside a request, so ensure that the GC is not
* running before removing the context from rt->contextList, see bug 477021.
*/
if (cx->thread()->data.requestDepth == 0)
js_WaitForGC(rt);
#endif
JS_REMOVE_LINK(&cx->link);
bool last = !rt->hasContexts();
if (last || mode == JSDCM_FORCE_GC || mode == JSDCM_MAYBE_GC
@ -566,7 +293,7 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
* that we wait for any racing GC started on a not-last context to
* finish, before we plow ahead and unpin atoms.
*/
if (cx->thread()->data.requestDepth == 0)
if (cx->runtime->requestDepth == 0)
JS_BeginRequest(cx);
#endif
@ -607,16 +334,8 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
JS_MaybeGC(cx);
JS_LOCK_GC(rt);
js_WaitForGC(rt);
}
}
#ifdef JS_THREADSAFE
#ifdef DEBUG
JSThread *t = cx->thread();
#endif
js_ClearContextThread(cx);
JS_ASSERT_IF(JS_CLIST_IS_EMPTY(&t->contextList), !t->data.requestDepth);
#endif
#ifdef JS_THREADSAFE
rt->gcHelperThread.waitBackgroundSweepEnd();
#endif
@ -645,7 +364,7 @@ js_NextActiveContext(JSRuntime *rt, JSContext *cx)
JSContext *iter = cx;
#ifdef JS_THREADSAFE
while ((cx = js_ContextIterator(rt, JS_FALSE, &iter)) != NULL) {
if (cx->outstandingRequests && cx->thread()->data.requestDepth)
if (cx->outstandingRequests && cx->runtime->requestDepth)
break;
}
return cx;
@ -738,9 +457,6 @@ js_ReportOutOfMemory(JSContext *cx)
{
cx->runtime->hadOutOfMemory = true;
/* AtomizeInline can call this indirectly when it creates the string. */
AutoUnlockAtomsCompartmentWhenLocked unlockAtomsCompartment(cx);
JSErrorReport report;
JSErrorReporter onError = cx->errorReporter;
@ -1206,23 +922,17 @@ js_GetErrorMessage(void *userRef, const char *locale, const uintN errorNumber)
JSBool
js_InvokeOperationCallback(JSContext *cx)
{
JSRuntime *rt = cx->runtime;
ThreadData *td = JS_THREAD_DATA(cx);
JS_ASSERT_REQUEST_DEPTH(cx);
JS_ASSERT(td->interruptFlags != 0);
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->interrupt != 0);
/*
* Reset the callback counter first, then run GC and yield. If another
* thread is racing us here we will accumulate another callback request
* which will be serviced at the next opportunity.
*/
JS_LOCK_GC(rt);
td->interruptFlags = 0;
#ifdef JS_THREADSAFE
JS_ATOMIC_DECREMENT(&rt->interruptCounter);
#endif
JS_UNLOCK_GC(rt);
JS_ATOMIC_SET(&rt->interrupt, 0);
if (rt->gcIsNeeded)
js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL, rt->gcTriggerReason);
@ -1256,43 +966,11 @@ JSBool
js_HandleExecutionInterrupt(JSContext *cx)
{
JSBool result = JS_TRUE;
if (JS_THREAD_DATA(cx)->interruptFlags)
if (cx->runtime->interrupt)
result = js_InvokeOperationCallback(cx) && result;
return result;
}
namespace js {
void
TriggerOperationCallback(JSContext *cx)
{
/*
* We allow for cx to come from another thread. Thus we must deal with
* possible JS_ClearContextThread calls when accessing cx->thread. But we
* assume that the calling thread is in a request so JSThread cannot be
* GC-ed.
*/
ThreadData *td;
#ifdef JS_THREADSAFE
JSThread *thread = cx->thread();
if (!thread)
return;
td = &thread->data;
#else
td = JS_THREAD_DATA(cx);
#endif
td->triggerOperationCallback(cx->runtime);
}
void
TriggerAllOperationCallbacks(JSRuntime *rt)
{
for (ThreadDataIter i(rt); !i.empty(); i.popFront())
i.threadData()->triggerOperationCallback(rt);
}
} /* namespace js */
StackFrame *
js_GetScriptedCaller(JSContext *cx, StackFrame *fp)
{
@ -1354,9 +1032,6 @@ JSContext::JSContext(JSRuntime *rt)
#endif
runtime(rt),
compartment(NULL),
#ifdef JS_THREADSAFE
thread_(NULL),
#endif
stack(thisDuringConstruction()), /* depends on cx->thread_ */
parseMapPool_(NULL),
globalObject(NULL),
@ -1367,7 +1042,6 @@ JSContext::JSContext(JSRuntime *rt)
data(NULL),
data2(NULL),
#ifdef JS_THREADSAFE
atomsCompartmentIsLocked(false),
outstandingRequests(0),
#endif
autoGCRooters(NULL),
@ -1407,10 +1081,6 @@ JSContext::JSContext(JSRuntime *rt)
JSContext::~JSContext()
{
#ifdef JS_THREADSAFE
JS_ASSERT(!thread_);
#endif
/* Free the stuff hanging off of cx. */
if (parseMapPool_)
Foreground::delete_<ParseMapPool>(parseMapPool_);
@ -1509,15 +1179,6 @@ JSContext::runningWithTrustedPrincipals() const
JS_FRIEND_API(void)
JSRuntime::onTooMuchMalloc()
{
#ifdef JS_THREADSAFE
AutoLockGC lock(this);
/*
* We can be called outside a request and can race against a GC that
* mutates the JSThread set during the sweeping phase.
*/
js_WaitForGC(this);
#endif
TriggerGC(this, gcstats::TOOMUCHMALLOC);
}
@ -1548,6 +1209,19 @@ JSRuntime::onOutOfMemory(void *p, size_t nbytes, JSContext *cx)
return NULL;
}
void
JSRuntime::purge(JSContext *cx)
{
tempLifoAlloc.freeUnused();
gsnCache.purge();
/* FIXME: bug 506341 */
propertyCache.purge(cx);
delete_<RegExpPrivateCache>(repCache_);
repCache_ = NULL;
}
void
JSContext::purge()
{
@ -1657,16 +1331,15 @@ namespace JS {
AutoCheckRequestDepth::AutoCheckRequestDepth(JSContext *cx)
: cx(cx)
{
JS_ASSERT(cx->thread());
JS_ASSERT(cx->thread()->data.requestDepth || cx->thread() == cx->runtime->gcThread);
JS_ASSERT(cx->runtime->requestDepth || cx->runtime->gcRunning);
JS_ASSERT(cx->runtime->onOwnerThread());
cx->thread()->checkRequestDepth++;
cx->runtime->checkRequestDepth++;
}
AutoCheckRequestDepth::~AutoCheckRequestDepth()
{
JS_ASSERT(cx->thread()->checkRequestDepth != 0);
cx->thread()->checkRequestDepth--;
JS_ASSERT(cx->runtime->checkRequestDepth != 0);
cx->runtime->checkRequestDepth--;
}
#endif

View File

@ -123,221 +123,71 @@ struct PendingProxyOperation {
JSObject *object;
};
struct ThreadData {
JSRuntime *rt;
/*
* If non-zero, we were been asked to call the operation callback as soon
* as possible. If the thread has an active request, this contributes
* towards rt->interruptCounter.
*/
volatile int32_t interruptFlags;
#ifdef JS_THREADSAFE
/* The request depth for this thread. */
unsigned requestDepth;
#endif
/* Keeper of the contiguous stack used by all contexts in this thread. */
StackSpace stackSpace;
/* Temporary arena pool used while compiling and decompiling. */
static const size_t TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
LifoAlloc tempLifoAlloc;
private:
/*
* Both of these allocators are used for regular expression code which is shared at the
* thread-data level.
*/
JSC::ExecutableAllocator *execAlloc;
WTF::BumpPointerAllocator *bumpAlloc;
js::RegExpPrivateCache *repCache;
JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx);
WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx);
js::RegExpPrivateCache *createRegExpPrivateCache(JSContext *cx);
public:
JSC::ExecutableAllocator *getOrCreateExecutableAllocator(JSContext *cx) {
if (execAlloc)
return execAlloc;
return createExecutableAllocator(cx);
}
WTF::BumpPointerAllocator *getOrCreateBumpPointerAllocator(JSContext *cx) {
if (bumpAlloc)
return bumpAlloc;
return createBumpPointerAllocator(cx);
}
js::RegExpPrivateCache *getRegExpPrivateCache() {
return repCache;
}
js::RegExpPrivateCache *getOrCreateRegExpPrivateCache(JSContext *cx) {
if (repCache)
return repCache;
return createRegExpPrivateCache(cx);
}
/* Called at the end of the global GC sweep phase to deallocate repCache memory. */
void purgeRegExpPrivateCache();
/*
* The GSN cache is per thread since even multi-cx-per-thread embeddings
* do not interleave js_GetSrcNote calls.
*/
GSNCache gsnCache;
/* Property cache for faster call/get/set invocation. */
PropertyCache propertyCache;
/* State used by jsdtoa.cpp. */
DtoaState *dtoaState;
typedef Vector<ScriptOpcodeCountsPair, 0, SystemAllocPolicy> ScriptOpcodeCountsVector;
struct ConservativeGCData
{
/* Base address of the native stack for the current thread. */
uintptr_t *nativeStackBase;
/* List of currently pending operations on proxies. */
PendingProxyOperation *pendingProxyOperation;
/*
* The GC scans conservatively between ThreadData::nativeStackBase and
* nativeStackTop unless the latter is NULL.
*/
uintptr_t *nativeStackTop;
ConservativeGCThreadData conservativeGC;
#ifdef DEBUG
size_t noGCOrAllocationCheck;
#endif
ThreadData(JSRuntime *rt);
~ThreadData();
bool init();
void mark(JSTracer *trc) {
stackSpace.mark(trc);
}
void purge(JSContext *cx) {
tempLifoAlloc.freeUnused();
gsnCache.purge();
/* FIXME: bug 506341. */
propertyCache.purge(cx);
}
#ifdef JS_THREADSAFE
void sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal, size_t *temporary,
size_t *regexpCode, size_t *stackCommitted);
#endif
/* This must be called with the GC lock held. */
void triggerOperationCallback(JSRuntime *rt);
union {
jmp_buf jmpbuf;
uintptr_t words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))];
} registerSnapshot;
/*
 * Frames currently running in js::Interpret. See InterpreterFrames for
 * details.
 * The cycle collector uses this to communicate that the native stack of the
 * GC thread should be scanned only if the thread has more than the given
 * threshold of requests.
 */
InterpreterFrames *interpreterFrames;
unsigned requestThreshold;
ConservativeGCData()
: nativeStackBase(NULL), nativeStackTop(NULL), requestThreshold(0)
{}
~ConservativeGCData() {
#ifdef JS_THREADSAFE
/*
* The conservative GC scanner should be disabled when the thread leaves
* the last request.
*/
JS_ASSERT(!hasStackToScan());
#endif
}
JS_NEVER_INLINE void recordStackTop();
#ifdef JS_THREADSAFE
void updateForRequestEnd(unsigned suspendCount) {
if (suspendCount)
recordStackTop();
else
nativeStackTop = NULL;
}
#endif
bool hasStackToScan() const {
return !!nativeStackTop;
}
};
} /* namespace js */
#ifdef JS_THREADSAFE
/*
* Structure uniquely representing a thread. It holds thread-private data
* that can be accessed without a global lock.
*/
struct JSThread {
typedef js::HashMap<void *,
JSThread *,
js::DefaultHasher<void *>,
js::SystemAllocPolicy> Map;
/* Linked list of all contexts in use on this thread. */
JSCList contextList;
/* Opaque thread-id, from NSPR's PR_GetCurrentThread(). */
void *id;
/* Number of JS_SuspendRequest calls without JS_ResumeRequest. */
unsigned suspendCount;
# ifdef DEBUG
unsigned checkRequestDepth;
# endif
/* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
js::ThreadData data;
JSThread(JSRuntime *rt, void *id)
: id(id),
suspendCount(0),
# ifdef DEBUG
checkRequestDepth(0),
# endif
data(rt)
{
JS_INIT_CLIST(&contextList);
}
~JSThread() {
/* The thread must have zero contexts. */
JS_ASSERT(JS_CLIST_IS_EMPTY(&contextList));
}
bool init() {
return data.init();
}
JS_FRIEND_API(void) sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal,
size_t *temporary, size_t *regexpCode,
size_t *stackCommitted);
};
#define JS_THREAD_DATA(cx) (&(cx)->thread()->data)
extern JSThread *
js_CurrentThreadAndLockGC(JSRuntime *rt);
/*
* The function takes the GC lock and does not release in successful return.
* On error (out of memory) the function releases the lock but delegates
* the error reporting to the caller.
*/
extern JSBool
js_InitContextThreadAndLockGC(JSContext *cx);
/*
* On entrance the GC lock must be held and it will be held on exit.
*/
extern void
js_ClearContextThread(JSContext *cx);
#endif /* JS_THREADSAFE */
typedef enum JSDestroyContextMode {
JSDCM_NO_GC,
JSDCM_MAYBE_GC,
JSDCM_FORCE_GC,
JSDCM_NEW_FAILED
} JSDestroyContextMode;
typedef struct JSPropertyTreeEntry {
JSDHashEntryHdr hdr;
js::Shape *child;
} JSPropertyTreeEntry;
namespace js {
typedef Vector<ScriptOpcodeCountsPair, 0, SystemAllocPolicy> ScriptOpcodeCountsVector;
}
struct JSRuntime
{
/*
 * If non-zero, we have been asked to call the operation callback as soon
 * as possible.
 */
volatile int32_t interrupt;
/* Default compartment. */
JSCompartment *atomsCompartment;
@ -347,6 +197,7 @@ struct JSRuntime
/* See comment for JS_AbortIfWrongThread in jsapi.h. */
#ifdef JS_THREADSAFE
public:
void *ownerThread() const { return ownerThread_; }
void clearOwnerThread();
void setOwnerThread();
JS_FRIEND_API(bool) onOwnerThread() const;
@ -358,6 +209,46 @@ struct JSRuntime
bool onOwnerThread() const { return true; }
#endif
/* Keeper of the contiguous stack used by all contexts in this thread. */
js::StackSpace stackSpace;
/* Temporary arena pool used while compiling and decompiling. */
static const size_t TEMP_LIFO_ALLOC_PRIMARY_CHUNK_SIZE = 1 << 12;
js::LifoAlloc tempLifoAlloc;
private:
/*
 * Both of these allocators are used for regular expression code, which is
 * now shared at the runtime level.
 */
JSC::ExecutableAllocator *execAlloc_;
WTF::BumpPointerAllocator *bumpAlloc_;
js::RegExpPrivateCache *repCache_;
JSC::ExecutableAllocator *createExecutableAllocator(JSContext *cx);
WTF::BumpPointerAllocator *createBumpPointerAllocator(JSContext *cx);
js::RegExpPrivateCache *createRegExpPrivateCache(JSContext *cx);
public:
JSC::ExecutableAllocator *getExecutableAllocator(JSContext *cx) {
return execAlloc_ ? execAlloc_ : createExecutableAllocator(cx);
}
WTF::BumpPointerAllocator *getBumpPointerAllocator(JSContext *cx) {
return bumpAlloc_ ? bumpAlloc_ : createBumpPointerAllocator(cx);
}
js::RegExpPrivateCache *maybeRegExpPrivateCache() {
return repCache_;
}
js::RegExpPrivateCache *getRegExpPrivateCache(JSContext *cx) {
return repCache_ ? repCache_ : createRegExpPrivateCache(cx);
}
/*
* Frames currently running in js::Interpret. See InterpreterFrames for
* details.
*/
js::InterpreterFrames *interpreterFrames;
/* Context create/destroy callback. */
JSContextCallback cxCallback;
@ -367,6 +258,18 @@ struct JSRuntime
js::ActivityCallback activityCallback;
void *activityCallbackArg;
#ifdef JS_THREADSAFE
/* Number of JS_SuspendRequest calls without JS_ResumeRequest. */
unsigned suspendCount;
/* The request depth for this thread. */
unsigned requestDepth;
# ifdef DEBUG
unsigned checkRequestDepth;
# endif
#endif
/* Garbage collector state, used by jsgc.c. */
/*
@ -519,7 +422,7 @@ struct JSRuntime
JSAtom *emptyString;
/* List of active contexts sharing this runtime; protected by gcLock. */
/* List of active contexts sharing this runtime. */
JSCList contextList;
bool hasContexts() const {
@ -550,20 +453,9 @@ struct JSRuntime
#ifdef JS_THREADSAFE
/* These combine to interlock the GC and new requests. */
PRLock *gcLock;
PRCondVar *gcDone;
PRCondVar *requestDone;
uint32_t requestCount;
JSThread *gcThread;
js::GCHelperThread gcHelperThread;
/*
* Mapping from NSPR thread identifiers to JSThreads.
*
* This map can be accessed by the GC thread; or by the thread that holds
* gcLock, if GC is not running.
*/
JSThread::Map threads;
#endif /* JS_THREADSAFE */
uint32_t debuggerMutations;
@ -587,12 +479,6 @@ struct JSRuntime
*/
int32_t propertyRemovals;
/* Script filename table. */
struct JSHashTable *scriptFilenameTable;
#ifdef JS_THREADSAFE
PRLock *scriptFilenameTableLock;
#endif
/* Number localization, used by jsnum.c */
const char *thousandsSeparator;
const char *decimalSeparator;
@ -608,14 +494,28 @@ struct JSRuntime
JSObject *anynameObject;
JSObject *functionNamespaceObject;
#ifdef JS_THREADSAFE
/* Number of threads with active requests and unhandled interrupts. */
volatile int32_t interruptCounter;
#else
js::ThreadData threadData;
/*
* Flag indicating that we are waiving any soft limits on the GC heap
* because we want allocations to be infallible (except when we hit OOM).
*/
bool waiveGCQuota;
#define JS_THREAD_DATA(cx) (&(cx)->runtime->threadData)
#endif
/*
* The GSN cache is per thread since even multi-cx-per-thread embeddings
* do not interleave js_GetSrcNote calls.
*/
js::GSNCache gsnCache;
/* Property cache for faster call/get/set invocation. */
js::PropertyCache propertyCache;
/* State used by jsdtoa.cpp. */
DtoaState *dtoaState;
/* List of currently pending operations on proxies. */
js::PendingProxyOperation *pendingProxyOperation;
js::ConservativeGCData conservativeGC;
private:
JSPrincipals *trustedPrincipals_;
@ -633,6 +533,10 @@ struct JSRuntime
JSPreWrapCallback preWrapObjectCallback;
js::PreserveWrapperCallback preserveWrapperCallback;
#ifdef DEBUG
size_t noGCOrAllocationCheck;
#endif
/*
* To ensure that cx->malloc does not cause a GC, we set this flag during
* OOM reporting (in js_ReportOutOfMemory). If a GC is requested while
@ -739,13 +643,20 @@ struct JSRuntime
* The function must be called outside the GC lock.
*/
JS_FRIEND_API(void *) onOutOfMemory(void *p, size_t nbytes, JSContext *cx);
JS_FRIEND_API(void) triggerOperationCallback();
void sizeOfExcludingThis(JSMallocSizeOfFun mallocSizeOf, size_t *normal, size_t *temporary,
size_t *regexpCode, size_t *stackCommitted);
void purge(JSContext *cx);
};
/* Common macros to access thread-local caches in JSThread or JSRuntime. */
#define JS_PROPERTY_CACHE(cx) (JS_THREAD_DATA(cx)->propertyCache)
/* Common macros to access per-runtime caches in JSRuntime. */
#define JS_PROPERTY_CACHE(cx) (cx->runtime->propertyCache)
#define JS_KEEP_ATOMS(rt) JS_ATOMIC_INCREMENT(&(rt)->gcKeepAtoms);
#define JS_UNKEEP_ATOMS(rt) JS_ATOMIC_DECREMENT(&(rt)->gcKeepAtoms);
#define JS_KEEP_ATOMS(rt) (rt)->gcKeepAtoms++;
#define JS_UNKEEP_ATOMS(rt) (rt)->gcKeepAtoms--;
#ifdef JS_ARGUMENT_FORMATTER_DEFINED
/*
@ -899,7 +810,7 @@ struct JSContext
/* Limit pointer for checking native stack consumption during recursion. */
uintptr_t stackLimit;
/* Data shared by threads in an address space. */
/* Data shared by contexts and compartments in an address space. */
JSRuntime *const runtime;
/* GC heap compartment. */
@ -907,16 +818,6 @@ struct JSContext
inline void setCompartment(JSCompartment *compartment);
#ifdef JS_THREADSAFE
private:
JSThread *thread_;
public:
JSThread *thread() const { return thread_; }
void setThread(JSThread *thread);
static const size_t threadOffset() { return offsetof(JSContext, thread_); }
#endif
/* Current execution stack. */
js::ContextStack stack;
@ -1044,17 +945,10 @@ struct JSContext
bool hasAtLineOption() const { return hasRunOption(JSOPTION_ATLINE); }
bool hasJITHardeningOption() const { return !hasRunOption(JSOPTION_SOFTEN); }
js::LifoAlloc &tempLifoAlloc() { return JS_THREAD_DATA(this)->tempLifoAlloc; }
js::LifoAlloc &tempLifoAlloc() { return runtime->tempLifoAlloc; }
inline js::LifoAlloc &typeLifoAlloc();
#ifdef JS_THREADSAFE
/*
* AtomizeInline uses this flag to tell RunLastDitchGC and
* js_ReportOutOfMemory that they should temporarily unlock the atoms
* compartment.
*/
bool atomsCompartmentIsLocked;
unsigned outstandingRequests;/* number of JS_BeginRequest calls
without the corresponding
JS_EndRequest. */
@ -1165,8 +1059,6 @@ struct JSContext
js::GCHelperThread *gcBackgroundFree;
#endif
js::ThreadData *threadData() { return JS_THREAD_DATA(this); }
inline void* malloc_(size_t bytes) {
return runtime->malloc_(bytes, this);
}
@ -1251,13 +1143,6 @@ struct JSContext
return reinterpret_cast<JSContext *>(uintptr_t(link) - offsetof(JSContext, link));
}
#ifdef JS_THREADSAFE
static inline JSContext *fromThreadLinks(JSCList *link) {
JS_ASSERT(link);
return reinterpret_cast<JSContext *>(uintptr_t(link) - offsetof(JSContext, threadLinks));
}
#endif
private:
/*
* The allocation code calls the function to indicate either OOM failure
@ -1325,6 +1210,14 @@ class AutoXMLRooter : private AutoGCRooter {
};
#endif /* JS_HAS_XML_SUPPORT */
#ifdef JS_THREADSAFE
# define JS_LOCK_GC(rt) PR_Lock((rt)->gcLock)
# define JS_UNLOCK_GC(rt) PR_Unlock((rt)->gcLock)
#else
# define JS_LOCK_GC(rt)
# define JS_UNLOCK_GC(rt)
#endif
class AutoUnlockGC {
private:
JSRuntime *rt;
@ -1341,63 +1234,6 @@ class AutoUnlockGC {
~AutoUnlockGC() { JS_LOCK_GC(rt); }
};
class AutoLockAtomsCompartment {
private:
JSContext *cx;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
AutoLockAtomsCompartment(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: cx(cx)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
#ifdef JS_THREADSAFE
JS_ASSERT(!cx->atomsCompartmentIsLocked);
JS_LOCK(cx, &cx->runtime->atomState.lock);
cx->atomsCompartmentIsLocked = true;
#endif
}
~AutoLockAtomsCompartment() {
#ifdef JS_THREADSAFE
JS_ASSERT(cx->atomsCompartmentIsLocked);
cx->atomsCompartmentIsLocked = false;
JS_UNLOCK(cx, &cx->runtime->atomState.lock);
#endif
}
};
class AutoUnlockAtomsCompartmentWhenLocked {
JSContext *cx;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
public:
AutoUnlockAtomsCompartmentWhenLocked(JSContext *cx
JS_GUARD_OBJECT_NOTIFIER_PARAM)
: cx(NULL)
{
JS_GUARD_OBJECT_NOTIFIER_INIT;
#ifdef JS_THREADSAFE
if (cx->atomsCompartmentIsLocked) {
this->cx = cx;
cx->atomsCompartmentIsLocked = false;
JS_UNLOCK(cx, &cx->runtime->atomState.lock);
}
#endif
}
~AutoUnlockAtomsCompartmentWhenLocked() {
#ifdef JS_THREADSAFE
if (cx) {
JS_ASSERT(!cx->atomsCompartmentIsLocked);
JS_LOCK(cx, &cx->runtime->atomState.lock);
cx->atomsCompartmentIsLocked = true;
}
#endif
}
};
class AutoKeepAtoms {
JSRuntime *rt;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
@ -1478,62 +1314,8 @@ class JSAutoResolveFlags
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};
extern js::ThreadData *
js_CurrentThreadData(JSRuntime *rt);
extern JSBool
js_InitThreads(JSRuntime *rt);
extern void
js_FinishThreads(JSRuntime *rt);
extern void
js_PurgeThreads(JSContext *cx);
extern void
js_PurgeThreads_PostGlobalSweep(JSContext *cx);
namespace js {
#ifdef JS_THREADSAFE
/* Iterator over ThreadData from all JSThread instances. */
class ThreadDataIter : public JSThread::Map::Range
{
public:
ThreadDataIter(JSRuntime *rt) : JSThread::Map::Range(rt->threads.all()) {}
ThreadData *threadData() const {
return &front().value->data;
}
};
#else /* !JS_THREADSAFE */
class ThreadDataIter
{
JSRuntime *runtime;
bool done;
public:
ThreadDataIter(JSRuntime *rt) : runtime(rt), done(false) {}
bool empty() const {
return done;
}
void popFront() {
JS_ASSERT(!done);
done = true;
}
ThreadData *threadData() const {
JS_ASSERT(!done);
return &runtime->threadData;
}
};
#endif /* !JS_THREADSAFE */
/*
* Enumerate all contexts in a runtime that are in the same thread as a given
* context.
@ -1544,11 +1326,7 @@ class ThreadContextRange {
public:
explicit ThreadContextRange(JSContext *cx) {
#ifdef JS_THREADSAFE
end = &cx->thread()->contextList;
#else
end = &cx->runtime->contextList;
#endif
begin = end->next;
}
@ -1556,11 +1334,7 @@ public:
void popFront() { JS_ASSERT(!empty()); begin = begin->next; }
JSContext *front() const {
#ifdef JS_THREADSAFE
return JSContext::fromThreadLinks(begin);
#else
return JSContext::fromLinkField(begin);
#endif
}
};
@ -1573,6 +1347,13 @@ public:
extern JSContext *
js_NewContext(JSRuntime *rt, size_t stackChunkSize);
typedef enum JSDestroyContextMode {
JSDCM_NO_GC,
JSDCM_MAYBE_GC,
JSDCM_FORCE_GC,
JSDCM_NEW_FAILED
} JSDestroyContextMode;
extern void
js_DestroyContext(JSContext *cx, JSDestroyContextMode mode);
@ -1658,8 +1439,7 @@ js_ReportValueErrorFlags(JSContext *cx, uintN flags, const uintN errorNumber,
extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
#ifdef JS_THREADSAFE
# define JS_ASSERT_REQUEST_DEPTH(cx) (JS_ASSERT((cx)->thread()), \
JS_ASSERT((cx)->thread()->data.requestDepth >= 1))
# define JS_ASSERT_REQUEST_DEPTH(cx) JS_ASSERT((cx)->runtime->requestDepth >= 1)
#else
# define JS_ASSERT_REQUEST_DEPTH(cx) ((void) 0)
#endif
@ -1671,7 +1451,7 @@ extern JSErrorFormatString js_ErrorFormatString[JSErr_Limit];
*/
#define JS_CHECK_OPERATION_LIMIT(cx) \
(JS_ASSERT_REQUEST_DEPTH(cx), \
(!JS_THREAD_DATA(cx)->interruptFlags || js_InvokeOperationCallback(cx)))
(!cx->runtime->interrupt || js_InvokeOperationCallback(cx)))
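/*
 * Illustrative usage sketch, not part of this patch: a long-running native
 * loop polls the (now runtime-wide) interrupt flag through this macro.
 *
 *     for (size_t i = 0; i < n; i++) {
 *         if (!JS_CHECK_OPERATION_LIMIT(cx))
 *             return false;   // the operation callback asked us to stop
 *         // ... one unit of work ...
 *     }
 */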
/*
* Invoke the operation callback and return false if the current execution
@ -1683,18 +1463,6 @@ js_InvokeOperationCallback(JSContext *cx);
extern JSBool
js_HandleExecutionInterrupt(JSContext *cx);
namespace js {
/* These must be called with GC lock taken. */
void
TriggerOperationCallback(JSContext *cx);
void
TriggerAllOperationCallbacks(JSRuntime *rt);
} /* namespace js */
/*
* Get the topmost scripted frame in a context. Note: if the topmost frame is
* in the middle of an inline call, that call will be expanded. To avoid this,


@ -87,7 +87,7 @@ GetGlobalForScopeChain(JSContext *cx)
inline GSNCache *
GetGSNCache(JSContext *cx)
{
return &JS_THREAD_DATA(cx)->gsnCache;
return &cx->runtime->gsnCache;
}
class AutoNamespaceArray : protected AutoGCRooter {


@ -561,10 +561,10 @@ CallContextDebugHandler(JSContext *cx, JSScript *script, jsbytecode *bc, Value *
}
#ifdef JS_THREADSAFE
JSThread *
GetContextThread(const JSContext *cx)
void *
GetOwnerThread(const JSContext *cx)
{
return cx->thread();
return cx->runtime->ownerThread();
}
JS_FRIEND_API(unsigned)
@ -585,18 +585,18 @@ AutoSkipConservativeScan::AutoSkipConservativeScan(JSContext *cx
{
MOZ_GUARD_OBJECT_NOTIFIER_INIT;
ThreadData &threadData = context->thread()->data;
JS_ASSERT(threadData.requestDepth >= 1);
JS_ASSERT(!threadData.conservativeGC.requestThreshold);
if (threadData.requestDepth == 1)
threadData.conservativeGC.requestThreshold = 1;
JSRuntime *rt = context->runtime;
JS_ASSERT(rt->requestDepth >= 1);
JS_ASSERT(!rt->conservativeGC.requestThreshold);
if (rt->requestDepth == 1)
rt->conservativeGC.requestThreshold = 1;
}
AutoSkipConservativeScan::~AutoSkipConservativeScan()
{
ThreadData &threadData = context->thread()->data;
if (threadData.requestDepth == 1)
threadData.conservativeGC.requestThreshold = 0;
JSRuntime *rt = context->runtime;
if (rt->requestDepth == 1)
rt->conservativeGC.requestThreshold = 0;
}
#endif
@ -626,12 +626,9 @@ IsContextRunningJS(JSContext *cx)
}
JS_FRIEND_API(void)
TriggerOperationCallbacksForActiveContexts(JSRuntime *rt)
TriggerOperationCallback(JSRuntime *rt)
{
JSContext* cx = NULL;
while ((cx = js_NextActiveContext(rt, cx))) {
TriggerOperationCallback(cx);
}
rt->triggerOperationCallback();
}
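/*
 * Illustrative usage sketch, not part of this patch: an embedder's cancel
 * path (cf. CancelExecution in the shell changes below) now pokes the single
 * runtime-wide callback instead of iterating active contexts; gEmbedderRuntime
 * is a hypothetical embedder global.
 *
 *     static JSRuntime *gEmbedderRuntime;
 *
 *     static void
 *     CancelRunningScript()
 *     {
 *         js::TriggerOperationCallback(gEmbedderRuntime);
 *     }
 */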
JS_FRIEND_API(const CompartmentVector&)


@ -475,8 +475,8 @@ JS_FRIEND_API(JSString *)
GetPCCountScriptContents(JSContext *cx, size_t script);
#ifdef JS_THREADSAFE
JS_FRIEND_API(JSThread *)
GetContextThread(const JSContext *cx);
JS_FRIEND_API(void *)
GetOwnerThread(const JSContext *cx);
JS_FRIEND_API(unsigned)
GetContextOutstandingRequests(const JSContext *cx);
@ -562,7 +562,7 @@ IsContextRunningJS(JSContext *cx);
/* Must be called with GC lock taken. */
extern JS_FRIEND_API(void)
TriggerOperationCallbacksForActiveContexts(JSRuntime *rt);
TriggerOperationCallback(JSRuntime *rt);
class SystemAllocPolicy;
typedef Vector<JSCompartment*, 0, SystemAllocPolicy> CompartmentVector;


@ -106,6 +106,12 @@
# include <valgrind/memcheck.h>
#endif
#ifdef XP_WIN
# include "jswin.h"
#else
# include <unistd.h>
#endif
using namespace mozilla;
using namespace js;
using namespace js::gc;
@ -875,15 +881,9 @@ js_InitGC(JSRuntime *rt, uint32_t maxbytes)
return false;
#ifdef JS_THREADSAFE
rt->gcLock = JS_NEW_LOCK();
rt->gcLock = PR_NewLock();
if (!rt->gcLock)
return false;
rt->gcDone = JS_NEW_CONDVAR(rt->gcLock);
if (!rt->gcDone)
return false;
rt->requestDone = JS_NEW_CONDVAR(rt->gcLock);
if (!rt->requestDone)
return false;
if (!rt->gcHelperThread.init())
return false;
#endif
@ -1079,23 +1079,31 @@ MarkRangeConservatively(JSTracer *trc, const uintptr_t *begin, const uintptr_t *
MarkWordConservatively(trc, *i);
}
static void
MarkThreadDataConservatively(JSTracer *trc, ThreadData *td)
static JS_NEVER_INLINE void
MarkConservativeStackRoots(JSTracer *trc, JSRuntime *rt)
{
ConservativeGCThreadData *ctd = &td->conservativeGC;
JS_ASSERT(ctd->hasStackToScan());
ConservativeGCData *cgcd = &rt->conservativeGC;
if (!cgcd->hasStackToScan()) {
#ifdef JS_THREADSAFE
JS_ASSERT(!rt->suspendCount);
JS_ASSERT(rt->requestDepth <= cgcd->requestThreshold);
#endif
return;
}
uintptr_t *stackMin, *stackEnd;
#if JS_STACK_GROWTH_DIRECTION > 0
stackMin = td->nativeStackBase;
stackEnd = ctd->nativeStackTop;
stackMin = rt->conservativeGC.nativeStackBase;
stackEnd = cgcd->nativeStackTop;
#else
stackMin = ctd->nativeStackTop + 1;
stackEnd = td->nativeStackBase;
stackMin = cgcd->nativeStackTop + 1;
stackEnd = rt->conservativeGC.nativeStackBase;
#endif
JS_ASSERT(stackMin <= stackEnd);
MarkRangeConservatively(trc, stackMin, stackEnd);
MarkRangeConservatively(trc, ctd->registerSnapshot.words,
ArrayEnd(ctd->registerSnapshot.words));
MarkRangeConservatively(trc, cgcd->registerSnapshot.words,
ArrayEnd(cgcd->registerSnapshot.words));
}
void
@ -1134,36 +1142,16 @@ MarkStackRangeConservatively(JSTracer *trc, Value *beginv, Value *endv)
#endif
}
void
MarkConservativeStackRoots(JSTracer *trc)
{
#ifdef JS_THREADSAFE
for (JSThread::Map::Range r = trc->runtime->threads.all(); !r.empty(); r.popFront()) {
JSThread *thread = r.front().value;
ConservativeGCThreadData *ctd = &thread->data.conservativeGC;
if (ctd->hasStackToScan()) {
JS_ASSERT_IF(!thread->data.requestDepth, thread->suspendCount);
MarkThreadDataConservatively(trc, &thread->data);
} else {
JS_ASSERT(!thread->suspendCount);
JS_ASSERT(thread->data.requestDepth <= ctd->requestThreshold);
}
}
#else
MarkThreadDataConservatively(trc, &trc->runtime->threadData);
#endif
}
JS_NEVER_INLINE void
ConservativeGCThreadData::recordStackTop()
ConservativeGCData::recordStackTop()
{
/* Update the native stack pointer if it points to a bigger stack. */
uintptr_t dummy;
nativeStackTop = &dummy;
/*
* To record and update the register snapshot for the conservative
* scanning with the latest values we use setjmp.
* To record and update the register snapshot for the conservative scanning
* with the latest values we use setjmp.
*/
#if defined(_MSC_VER)
# pragma warning(push)
@ -1175,18 +1163,18 @@ ConservativeGCThreadData::recordStackTop()
#endif
}
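/*
 * Illustrative sketch, not part of this patch: the "register snapshot" the
 * comment above refers to is, roughly, a setjmp spill into scannable memory:
 *
 *     setjmp(registerSnapshot.jmpbuf);   // callee-saved registers land in
 *                                        // registerSnapshot.words[], which
 *                                        // MarkConservativeStackRoots scans
 */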
static inline void
void
RecordNativeStackTopForGC(JSContext *cx)
{
ConservativeGCThreadData *ctd = &JS_THREAD_DATA(cx)->conservativeGC;
ConservativeGCData *cgcd = &cx->runtime->conservativeGC;
#ifdef JS_THREADSAFE
/* Record the stack top here only if we are called from a request. */
JS_ASSERT(cx->thread()->data.requestDepth >= ctd->requestThreshold);
if (cx->thread()->data.requestDepth == ctd->requestThreshold)
JS_ASSERT(cx->runtime->requestDepth >= cgcd->requestThreshold);
if (cx->runtime->requestDepth == cgcd->requestThreshold)
return;
#endif
ctd->recordStackTop();
cgcd->recordStackTop();
}
} /* namespace js */
@ -1264,7 +1252,6 @@ js_AddRootRT(JSRuntime *rt, jsval *vp, const char *name)
* rt->gcLock across the mark phase (including the root hashtable mark).
*/
AutoLockGC lock(rt);
js_WaitForGC(rt);
return !!rt->gcRootsHash.put((void *)vp,
RootInfo(name, JS_GC_ROOT_VALUE_PTR));
@ -1281,7 +1268,6 @@ js_AddGCThingRootRT(JSRuntime *rt, void **rp, const char *name)
* rt->gcLock across the mark phase (including the root hashtable mark).
*/
AutoLockGC lock(rt);
js_WaitForGC(rt);
return !!rt->gcRootsHash.put((void *)rp,
RootInfo(name, JS_GC_ROOT_GCTHING_PTR));
@ -1295,7 +1281,6 @@ js_RemoveRoot(JSRuntime *rt, void *rp)
* Same synchronization drill as above in js_AddRoot.
*/
AutoLockGC lock(rt);
js_WaitForGC(rt);
rt->gcRootsHash.remove(rp);
rt->gcPoke = JS_TRUE;
return JS_TRUE;
@ -1660,9 +1645,6 @@ RunLastDitchGC(JSContext *cx)
{
JSRuntime *rt = cx->runtime;
/* The atoms are locked when we create a string in AtomizeInline. */
AutoUnlockAtomsCompartmentWhenLocked unlockAtomsCompartment(cx);
/* The last ditch GC preserves all atoms. */
AutoKeepAtoms keep(rt);
js_GC(cx, rt->gcTriggerCompartment, GC_NORMAL, gcstats::LASTDITCH);
@ -2100,7 +2082,7 @@ MarkRuntime(JSTracer *trc)
JSRuntime *rt = trc->runtime;
if (rt->hasContexts())
MarkConservativeStackRoots(trc);
MarkConservativeStackRoots(trc, rt);
for (RootRange r = rt->gcRootsHash.all(); !r.empty(); r.popFront())
gc_root_traversal(trc, r.front());
@ -2141,8 +2123,7 @@ MarkRuntime(JSTracer *trc)
}
}
for (ThreadDataIter i(rt); !i.empty(); i.popFront())
i.threadData()->mark(trc);
rt->stackSpace.mark(trc);
/* The embedding can register additional roots here. */
if (JSTraceDataOp op = rt->gcBlackRootsTraceOp)
@ -2158,17 +2139,16 @@ MarkRuntime(JSTracer *trc)
void
TriggerGC(JSRuntime *rt, gcstats::Reason reason)
{
JS_ASSERT(rt->onOwnerThread());
if (rt->gcRunning || rt->gcIsNeeded)
return;
/*
* Trigger the GC when it is safe to call an operation callback on any
* thread.
*/
/* Trigger the GC when it is safe to call an operation callback. */
rt->gcIsNeeded = true;
rt->gcTriggerCompartment = NULL;
rt->gcTriggerReason = reason;
TriggerAllOperationCallbacks(rt);
rt->triggerOperationCallback();
}
void
@ -2208,7 +2188,7 @@ TriggerCompartmentGC(JSCompartment *comp, gcstats::Reason reason)
rt->gcIsNeeded = true;
rt->gcTriggerCompartment = comp;
rt->gcTriggerReason = reason;
TriggerAllOperationCallbacks(comp->rt);
comp->rt->triggerOperationCallback();
}
void
@ -2381,6 +2361,23 @@ ExpireChunksAndArenas(JSRuntime *rt, bool shouldShrink)
#ifdef JS_THREADSAFE
static unsigned
GetCPUCount()
{
static unsigned ncpus = 0;
if (ncpus == 0) {
# ifdef XP_WIN
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
ncpus = unsigned(sysinfo.dwNumberOfProcessors);
# else
long n = sysconf(_SC_NPROCESSORS_ONLN);
ncpus = (n > 0) ? unsigned(n) : 1;
# endif
}
return ncpus;
}
bool
GCHelperThread::init()
{
@ -2394,7 +2391,7 @@ GCHelperThread::init()
if (!thread)
return false;
backgroundAllocation = (js_GetCPUCount() >= 2);
backgroundAllocation = (GetCPUCount() >= 2);
return true;
}
@ -2680,7 +2677,7 @@ BeginMarkPhase(JSContext *cx, GCMarker *gcmarker, JSGCInvocationKind gckind)
for (GCCompartmentsIter c(rt); !c.done(); c.next())
c->purge(cx);
js_PurgeThreads(cx);
rt->purge(cx);
{
JSContext *iter = NULL;
@ -2844,13 +2841,7 @@ SweepPhase(JSContext *cx, GCMarker *gcmarker, JSGCInvocationKind gckind)
}
}
/*
* Perform mark-and-sweep GC.
*
* In a JS_THREADSAFE build, the calling thread must be rt->gcThread and each
* other thread must be either outside all requests or blocked waiting for GC
* to finish. The caller must hold rt->gcLock.
*/
/* Perform mark-and-sweep GC. If comp is set, we perform a single-compartment GC. */
static void
MarkAndSweep(JSContext *cx, JSGCInvocationKind gckind)
{
@ -2880,74 +2871,6 @@ MarkAndSweep(JSContext *cx, JSGCInvocationKind gckind)
SweepPhase(cx, &gcmarker, gckind);
}
#ifdef JS_THREADSAFE
/*
* If the GC is running and we're called on another thread, wait for this GC
* activation to finish. We can safely wait here without fear of deadlock (in
* the case where we are called within a request on another thread's context)
* because the GC doesn't set rt->gcRunning until after it has waited for all
* active requests to end.
*
* We call js_CurrentThreadId() here only after checking rt->gcRunning to avoid
* an expensive call when the GC is not running.
*/
void
js_WaitForGC(JSRuntime *rt)
{
if (rt->gcRunning && rt->gcThread->id != js_CurrentThreadId()) {
do {
JS_AWAIT_GC_DONE(rt);
} while (rt->gcRunning);
}
}
/*
* GC is running on another thread. Temporarily suspend all requests running
* on the current thread and wait until the GC is done.
*/
static void
LetOtherGCFinish(JSContext *cx)
{
JSRuntime *rt = cx->runtime;
JS_ASSERT(rt->gcThread);
JS_ASSERT(cx->thread() != rt->gcThread);
size_t requestDebit = cx->thread()->data.requestDepth ? 1 : 0;
JS_ASSERT(requestDebit <= rt->requestCount);
if (requestDebit != 0) {
rt->requestCount -= requestDebit;
if (rt->requestCount == 0)
JS_NOTIFY_REQUEST_DONE(rt);
/*
* Update the native stack before we wait so the GC thread sees the
* correct stack bounds.
*/
RecordNativeStackTopForGC(cx);
}
/*
* Check that we did not release the GC lock above and let the GC
* finish before we wait.
*/
JS_ASSERT(rt->gcThread);
/*
* Wait for GC to finish on the other thread, even if requestDebit is 0
* and even if GC has not started yet because the gcThread is waiting in
* AutoGCSession. This ensures that js_GC never returns without a full GC
* cycle happening.
*/
do {
JS_AWAIT_GC_DONE(rt);
} while (rt->gcThread);
rt->requestCount += requestDebit;
}
#endif
class AutoGCSession {
public:
explicit AutoGCSession(JSContext *cx);
@ -2960,88 +2883,27 @@ class AutoGCSession {
void operator=(const AutoGCSession&) MOZ_DELETE;
};
/*
* Start a new GC session. Together with LetOtherGCFinish this function
* contains the rendezvous algorithm by which we stop the world for GC.
*
* This thread becomes the GC thread. Wait for all other threads to quiesce.
* Then set rt->gcRunning and return.
*/
/* Start a new GC session. */
AutoGCSession::AutoGCSession(JSContext *cx)
: context(cx)
{
JS_ASSERT(!JS_THREAD_DATA(cx)->noGCOrAllocationCheck);
JS_ASSERT(!cx->runtime->noGCOrAllocationCheck);
JSRuntime *rt = cx->runtime;
#ifdef JS_THREADSAFE
if (rt->gcThread && rt->gcThread != cx->thread())
LetOtherGCFinish(cx);
#endif
JS_ASSERT(!rt->gcRunning);
#ifdef JS_THREADSAFE
/* No other thread is in GC, so indicate that we're now in GC. */
JS_ASSERT(!rt->gcThread);
rt->gcThread = cx->thread();
/*
* Notify operation callbacks on other threads, which will give them a
* chance to yield their requests. Threads without requests perform their
* callback at some later point, when it will be unnecessary but harmless.
*/
for (JSThread::Map::Range r = rt->threads.all(); !r.empty(); r.popFront()) {
JSThread *thread = r.front().value;
if (thread != cx->thread())
thread->data.triggerOperationCallback(rt);
}
/*
* Discount the request on the current thread from contributing to
* rt->requestCount before we wait for all other requests to finish.
* JS_NOTIFY_REQUEST_DONE, which will wake us up, is only called when
* rt->requestCount transitions to 0.
*/
size_t requestDebit = cx->thread()->data.requestDepth ? 1 : 0;
JS_ASSERT(requestDebit <= rt->requestCount);
if (requestDebit != rt->requestCount) {
rt->requestCount -= requestDebit;
do {
JS_AWAIT_REQUEST_DONE(rt);
} while (rt->requestCount > 0);
rt->requestCount += requestDebit;
}
#endif /* JS_THREADSAFE */
/*
* Set rt->gcRunning here within the GC lock, and after waiting for any
* active requests to end. This way, js_WaitForGC called outside a request
* does not block on a GC that is still waiting (with rt->gcThread set) for
* other requests to finish, whereas JS_BeginRequest does perform that wait.
*/
rt->gcRunning = true;
}
/* End the current GC session and allow other threads to proceed. */
AutoGCSession::~AutoGCSession()
{
JSRuntime *rt = context->runtime;
rt->gcRunning = false;
#ifdef JS_THREADSAFE
JS_ASSERT(rt->gcThread == context->thread());
rt->gcThread = NULL;
JS_NOTIFY_GC_DONE(rt);
#endif
}
/*
* GC, repeatedly if necessary, until we think we have not created any new
* garbage and no other threads are demanding more GC. We disable inlining
* to ensure that the bottom of the stack with possible GC roots recorded in
* js_GC excludes any pointers we use during the marking implementation.
* garbage. We disable inlining to ensure that the bottom of the stack with
* possible GC roots recorded in js_GC excludes any pointers we use during the
* marking implementation.
*/
static JS_NEVER_INLINE void
GCCycle(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind)
@ -3051,29 +2913,13 @@ GCCycle(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind)
JS_ASSERT_IF(comp, comp != rt->atomsCompartment);
JS_ASSERT_IF(comp, rt->gcMode == JSGC_MODE_COMPARTMENT);
/*
* Recursive GC is a no-op, and a call from another thread waits for the
* in-progress GC cycle to finish.
*/
if (rt->gcMarkAndSweep) {
#ifdef JS_THREADSAFE
JS_ASSERT(rt->gcThread);
if (rt->gcThread != cx->thread()) {
/* We do not return until another GC finishes. */
LetOtherGCFinish(cx);
}
#endif
/* Recursive GC is a no-op. */
if (rt->gcMarkAndSweep)
return;
}
AutoGCSession gcsession(cx);
/*
* Don't GC if any thread is reporting an OOM. We check the flag after we
* have set up the GC session and know that the thread that reported OOM
* is either the current thread or waits for the GC to complete on this
* thread.
*/
/* Don't GC if we are reporting an OOM. */
if (rt->inOOMReport)
return;
@ -3103,9 +2949,6 @@ GCCycle(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind)
MarkAndSweep(cx, gckind);
if (!comp)
js_PurgeThreads_PostGlobalSweep(cx);
#ifdef JS_THREADSAFE
if (cx->gcBackgroundFree) {
JS_ASSERT(cx->gcBackgroundFree == &rt->gcHelperThread);
@ -3216,7 +3059,7 @@ TraceRuntime(JSTracer *trc)
{
JSContext *cx = trc->context;
JSRuntime *rt = cx->runtime;
if (rt->gcThread != cx->thread()) {
if (!rt->gcRunning) {
AutoLockGC lock(rt);
AutoGCSession gcsession(cx);


@ -1320,11 +1320,6 @@ typedef HashMap<Value, Value, WrapperHasher, SystemAllocPolicy> WrapperMap;
} /* namespace js */
#ifdef DEBUG
extern bool
CheckAllocation(JSContext *cx);
#endif
extern JS_FRIEND_API(JSGCTraceKind)
js_GetGCThingTraceKind(void *thing);
@ -1416,22 +1411,6 @@ typedef enum JSGCInvocationKind {
extern void
js_GC(JSContext *cx, JSCompartment *comp, JSGCInvocationKind gckind, js::gcstats::Reason r);
#ifdef JS_THREADSAFE
/*
* This is a helper for code that can potentially run outside a JS request to
* ensure that the GC is not running when the function returns.
*
* This function must be called with the GC lock held.
*/
extern void
js_WaitForGC(JSRuntime *rt);
#else /* !JS_THREADSAFE */
# define js_WaitForGC(rt) ((void) 0)
#endif
namespace js {
#ifdef JS_THREADSAFE
@ -1585,57 +1564,6 @@ struct GCChunkHasher {
typedef HashSet<js::gc::Chunk *, GCChunkHasher, SystemAllocPolicy> GCChunkSet;
struct ConservativeGCThreadData {
/*
* The GC scans conservatively between ThreadData::nativeStackBase and
* nativeStackTop unless the latter is NULL.
*/
uintptr_t *nativeStackTop;
union {
jmp_buf jmpbuf;
uintptr_t words[JS_HOWMANY(sizeof(jmp_buf), sizeof(uintptr_t))];
} registerSnapshot;
/*
* The cycle collector uses this to communicate that the native stack of the
* GC thread should be scanned only if the thread has more than the given
* threshold of requests.
*/
unsigned requestThreshold;
ConservativeGCThreadData()
: nativeStackTop(NULL), requestThreshold(0)
{
}
~ConservativeGCThreadData() {
#ifdef JS_THREADSAFE
/*
* The conservative GC scanner should be disabled when the thread leaves
* the last request.
*/
JS_ASSERT(!hasStackToScan());
#endif
}
JS_NEVER_INLINE void recordStackTop();
#ifdef JS_THREADSAFE
void updateForRequestEnd(unsigned suspendCount) {
if (suspendCount)
recordStackTop();
else
nativeStackTop = NULL;
}
#endif
bool hasStackToScan() const {
return !!nativeStackTop;
}
};
template<class T>
struct MarkStack {
T *stack;


@ -346,7 +346,7 @@ class CellIter: public CellIterImpl
lists->copyFreeListToArena(kind);
}
#ifdef DEBUG
counter = &JS_THREAD_DATA(cx)->noGCOrAllocationCheck;
counter = &cx->runtime->noGCOrAllocationCheck;
++*counter;
#endif
init(comp, kind);
@ -384,7 +384,7 @@ NewGCThing(JSContext *cx, js::gc::AllocKind kind, size_t thingSize)
kind == js::gc::FINALIZE_STRING || kind == js::gc::FINALIZE_SHORT_STRING);
#endif
JS_ASSERT(!cx->runtime->gcRunning);
JS_ASSERT(!JS_THREAD_DATA(cx)->noGCOrAllocationCheck);
JS_ASSERT(!cx->runtime->noGCOrAllocationCheck);
#ifdef JS_GC_ZEAL
if (cx->runtime->needZealousGC())
@ -411,7 +411,7 @@ TryNewGCThing(JSContext *cx, js::gc::AllocKind kind, size_t thingSize)
kind == js::gc::FINALIZE_STRING || kind == js::gc::FINALIZE_SHORT_STRING);
#endif
JS_ASSERT(!cx->runtime->gcRunning);
JS_ASSERT(!JS_THREAD_DATA(cx)->noGCOrAllocationCheck);
JS_ASSERT(!cx->runtime->noGCOrAllocationCheck);
#ifdef JS_GC_ZEAL
if (cx->runtime->needZealousGC())


@ -1258,13 +1258,13 @@ inline InterpreterFrames::InterpreterFrames(JSContext *cx, FrameRegs *regs,
const InterruptEnablerBase &enabler)
: context(cx), regs(regs), enabler(enabler)
{
older = JS_THREAD_DATA(cx)->interpreterFrames;
JS_THREAD_DATA(cx)->interpreterFrames = this;
older = cx->runtime->interpreterFrames;
cx->runtime->interpreterFrames = this;
}
inline InterpreterFrames::~InterpreterFrames()
{
JS_THREAD_DATA(context)->interpreterFrames = older;
context->runtime->interpreterFrames = older;
}
#if defined(DEBUG) && !defined(JS_THREADSAFE)
@ -1554,7 +1554,7 @@ js::Interpret(JSContext *cx, StackFrame *entryFrame, InterpMode interpMode)
*/
#define CHECK_BRANCH() \
JS_BEGIN_MACRO \
if (JS_THREAD_DATA(cx)->interruptFlags && !js_HandleExecutionInterrupt(cx)) \
if (cx->runtime->interrupt && !js_HandleExecutionInterrupt(cx)) \
goto error; \
JS_END_MACRO


@ -1,743 +0,0 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
*
* ***** BEGIN LICENSE BLOCK *****
* Version: MPL 1.1/GPL 2.0/LGPL 2.1
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* The Original Code is Mozilla Communicator client code, released
* March 31, 1998.
*
* The Initial Developer of the Original Code is
* Netscape Communications Corporation.
* Portions created by the Initial Developer are Copyright (C) 1998
* the Initial Developer. All Rights Reserved.
*
* Contributor(s):
*
* Alternatively, the contents of this file may be used under the terms of
* either of the GNU General Public License Version 2 or later (the "GPL"),
* or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
* in which case the provisions of the GPL or the LGPL are applicable instead
* of those above. If you wish to allow use of your version of this file only
* under the terms of either the GPL or the LGPL, and not to allow others to
* use your version of this file under the terms of the MPL, indicate your
* decision by deleting the provisions above and replace them with the notice
* and other provisions required by the GPL or the LGPL. If you do not delete
* the provisions above, a recipient may use your version of this file under
* the terms of any one of the MPL, the GPL or the LGPL.
*
* ***** END LICENSE BLOCK ***** */
#ifdef JS_THREADSAFE
/*
* JS locking stubs.
*/
#include <stdlib.h>
#include <string.h>
#ifdef XP_WIN
# include "jswin.h"
#else
# include <unistd.h>
#endif
#include "jspubtd.h"
#include "jstypes.h"
#include "jsutil.h"
#include "jsstdint.h"
#include "jscntxt.h"
#include "jsgc.h"
#include "jslock.h"
#include "jsscope.h"
#include "jsstr.h"
#include "jsscopeinlines.h"
using namespace js;
#define ReadWord(W) (W)
#if !defined(__GNUC__)
# define __asm__ asm
# define __volatile__ volatile
#endif
/* Implement NativeCompareAndSwap. */
#if defined(_MSC_VER) && defined(_M_IX86)
// TODO: Bug 716204 - undo this pragma.
#pragma warning( disable : 4035 )
JS_BEGIN_EXTERN_C
extern long __cdecl
_InterlockedCompareExchange(long *volatile dest, long exchange, long comp);
JS_END_EXTERN_C
#pragma intrinsic(_InterlockedCompareExchange)
JS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(long));
static JS_ALWAYS_INLINE int
NativeCompareAndSwapHelper(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
_InterlockedCompareExchange((long*) w, nv, ov);
__asm {
sete al
}
}
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
return (NativeCompareAndSwapHelper(w, ov, nv) & 1);
}
#elif defined(_MSC_VER) && (defined(_M_AMD64) || defined(_M_X64))
/*
* Unlike _InterlockedCompareExchange in the 32-bit case above, MSVC declares
* _InterlockedCompareExchange64 through <windows.h>.
*/
#pragma intrinsic(_InterlockedCompareExchange64)
JS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(long long));
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
return _InterlockedCompareExchange64((long long *volatile)w, nv, ov) == ov;
}
#elif defined(XP_MACOSX) || defined(DARWIN)
#include <libkern/OSAtomic.h>
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
/* Details on these functions are available in the atomic(3) manpage. */
return OSAtomicCompareAndSwapPtrBarrier(reinterpret_cast<void *>(ov),
reinterpret_cast<void *>(nv),
reinterpret_cast<void * volatile *>(w));
}
#elif defined(__i386) && (defined(__GNUC__) || defined(__SUNPRO_CC))
/* Note: this fails on 386 CPUs; cmpxchgl is a 486-or-later instruction. */
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
unsigned int res;
__asm__ __volatile__ (
"lock\n"
"cmpxchgl %2, (%1)\n"
"sete %%al\n"
"andl $1, %%eax\n"
: "=a" (res)
#ifdef __SUNPRO_CC
/* Different code for Sun Studio because of a bug of SS12U1 */
: "c" (w), "d" (nv), "a" (ov)
#else
: "r" (w), "r" (nv), "a" (ov)
#endif
: "cc", "memory");
return (int)res;
}
#elif defined(__x86_64) && (defined(__GNUC__) || defined(__SUNPRO_CC))
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
unsigned int res;
__asm__ __volatile__ (
"lock\n"
"cmpxchgq %2, (%1)\n"
"sete %%al\n"
"movzbl %%al, %%eax\n"
: "=a" (res)
: "r" (w), "r" (nv), "a" (ov)
: "cc", "memory");
return (int)res;
}
#elif defined(__sparc)
#if defined(__GNUC__)
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
unsigned int res;
__asm__ __volatile__ (
"membar #StoreLoad | #LoadLoad\n"
#if JS_BITS_PER_WORD == 32
"cas [%1],%2,%3\n"
#else
"casx [%1],%2,%3\n"
#endif
"membar #StoreLoad | #LoadLoad\n"
"cmp %2,%3\n"
"be,a 1f\n"
"mov 1,%0\n"
"mov 0,%0\n"
"1:"
: "=r" (res)
: "r" (w), "r" (ov), "r" (nv));
return (int)res;
}
#elif defined(__SUNPRO_CC)
/* Implementation in lock_sparc*.il */
extern "C" int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv);
#endif
#elif defined(AIX)
#include <sys/atomic_op.h>
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
int res;
JS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(long));
res = compare_and_swaplp((atomic_l)w, &ov, nv);
if (res)
__asm__("isync");
return res;
}
#elif defined(__arm__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)
JS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(int));
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
return __sync_bool_compare_and_swap(w, ov, nv);
}
#elif defined(USE_ARM_KUSER)
/* See https://bugzilla.mozilla.org/show_bug.cgi?id=429387 for a
* description of this ABI; this is a function provided at a fixed
* location by the kernel in the memory space of each process.
*/
typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *)0xffff0fc0)
JS_STATIC_ASSERT(sizeof(intptr_t) == sizeof(int));
static JS_ALWAYS_INLINE int
NativeCompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
volatile int *vp = (volatile int *) w;
PRInt32 failed = 1;
/* Loop until a __kernel_cmpxchg succeeds. See bug 446169 */
do {
failed = __kernel_cmpxchg(ov, nv, vp);
} while (failed && *vp == ov);
return !failed;
}
#elif JS_HAS_NATIVE_COMPARE_AND_SWAP
#error "JS_HAS_NATIVE_COMPARE_AND_SWAP should be 0 if your platform lacks a compare-and-swap instruction."
#endif /* arch-tests */
#if JS_HAS_NATIVE_COMPARE_AND_SWAP
JSBool
js_CompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
return !!NativeCompareAndSwap(w, ov, nv);
}
#elif defined(NSPR_LOCK)
# ifdef __GNUC__
# warning "js_CompareAndSwap is implemented using NSPR lock"
# endif
JSBool
js_CompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv)
{
int result;
static PRLock *CompareAndSwapLock = JS_NEW_LOCK();
JS_ACQUIRE_LOCK(CompareAndSwapLock);
result = (*w == ov);
if (result)
*w = nv;
JS_RELEASE_LOCK(CompareAndSwapLock);
return result;
}
#else /* !defined(NSPR_LOCK) */
#error "NSPR_LOCK should be on when the platform lacks native compare-and-swap."
#endif
void
js_AtomicSetMask(volatile intptr_t *w, intptr_t mask)
{
intptr_t ov, nv;
do {
ov = *w;
nv = ov | mask;
} while (!js_CompareAndSwap(w, ov, nv));
}
void
js_AtomicClearMask(volatile intptr_t *w, intptr_t mask)
{
intptr_t ov, nv;
do {
ov = *w;
nv = ov & ~mask;
} while (!js_CompareAndSwap(w, ov, nv));
}
unsigned
js_GetCPUCount()
{
static unsigned ncpus = 0;
if (ncpus == 0) {
# ifdef XP_WIN
SYSTEM_INFO sysinfo;
GetSystemInfo(&sysinfo);
ncpus = unsigned(sysinfo.dwNumberOfProcessors);
# else
long n = sysconf(_SC_NPROCESSORS_ONLN);
ncpus = (n > 0) ? unsigned(n) : 1;
# endif
}
return ncpus;
}
#ifndef NSPR_LOCK
struct JSFatLock {
int susp;
PRLock *slock;
PRCondVar *svar;
JSFatLock *next;
JSFatLock **prevp;
};
typedef struct JSFatLockTable {
JSFatLock *free_;
JSFatLock *taken;
} JSFatLockTable;
#define GLOBAL_LOCK_INDEX(id) (((uint32_t)(uintptr_t)(id)>>2) & global_locks_mask)
static void
js_Dequeue(JSThinLock *);
static PRLock **global_locks;
static uint32_t global_lock_count = 1;
static uint32_t global_locks_log2 = 0;
static uint32_t global_locks_mask = 0;
static void
js_LockGlobal(void *id)
{
uint32_t i = GLOBAL_LOCK_INDEX(id);
PR_Lock(global_locks[i]);
}
static void
js_UnlockGlobal(void *id)
{
uint32_t i = GLOBAL_LOCK_INDEX(id);
PR_Unlock(global_locks[i]);
}
#endif /* !NSPR_LOCK */
void
js_InitLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
tl->owner = 0;
tl->fat = (JSFatLock*)JS_NEW_LOCK();
#else
PodZero(tl);
#endif
}
void
js_FinishLock(JSThinLock *tl)
{
#ifdef NSPR_LOCK
tl->owner = 0xdeadbeef;
if (tl->fat)
JS_DESTROY_LOCK(((JSLock*)tl->fat));
#else
JS_ASSERT(tl->owner == 0);
JS_ASSERT(tl->fat == NULL);
#endif
}
#ifndef NSPR_LOCK
static JSFatLock *
NewFatlock()
{
JSFatLock *fl = (JSFatLock *) OffTheBooks::malloc_(sizeof(JSFatLock)); /* for now */
if (!fl) return NULL;
fl->susp = 0;
fl->next = NULL;
fl->prevp = NULL;
fl->slock = PR_NewLock();
fl->svar = PR_NewCondVar(fl->slock);
return fl;
}
static void
DestroyFatlock(JSFatLock *fl)
{
PR_DestroyLock(fl->slock);
PR_DestroyCondVar(fl->svar);
UnwantedForeground::free_(fl);
}
static JSFatLock *
ListOfFatlocks(int listc)
{
JSFatLock *m;
JSFatLock *m0;
int i;
JS_ASSERT(listc>0);
m0 = m = NewFatlock();
for (i=1; i<listc; i++) {
m->next = NewFatlock();
m = m->next;
}
return m0;
}
static void
DeleteListOfFatlocks(JSFatLock *m)
{
JSFatLock *m0;
for (; m; m=m0) {
m0 = m->next;
DestroyFatlock(m);
}
}
static JSFatLockTable *fl_list_table = NULL;
static uint32_t fl_list_table_len = 0;
static uint32_t fl_list_chunk_len = 0;
static JSFatLock *
GetFatlock(void *id)
{
JSFatLock *m;
uint32_t i = GLOBAL_LOCK_INDEX(id);
if (fl_list_table[i].free_ == NULL) {
#ifdef DEBUG
if (fl_list_table[i].taken)
printf("Ran out of fat locks!\n");
#endif
fl_list_table[i].free_ = ListOfFatlocks(fl_list_chunk_len);
}
m = fl_list_table[i].free_;
fl_list_table[i].free_ = m->next;
m->susp = 0;
m->next = fl_list_table[i].taken;
m->prevp = &fl_list_table[i].taken;
if (fl_list_table[i].taken)
fl_list_table[i].taken->prevp = &m->next;
fl_list_table[i].taken = m;
return m;
}
static void
PutFatlock(JSFatLock *m, void *id)
{
uint32_t i;
if (m == NULL)
return;
/* Unlink m from fl_list_table[i].taken. */
*m->prevp = m->next;
if (m->next)
m->next->prevp = m->prevp;
/* Insert m in fl_list_table[i].free. */
i = GLOBAL_LOCK_INDEX(id);
m->next = fl_list_table[i].free_;
fl_list_table[i].free_ = m;
}
#endif /* !NSPR_LOCK */
JSBool
js_SetupLocks(int listc, int globc)
{
#ifndef NSPR_LOCK
uint32_t i;
if (global_locks)
return JS_TRUE;
#ifdef DEBUG
if (listc > 10000 || listc < 0) /* listc == fat lock list chunk length */
printf("Bad number %d in js_SetupLocks()!\n", listc);
if (globc > 100 || globc < 0) /* globc == number of global locks */
printf("Bad number %d in js_SetupLocks()!\n", listc);
#endif
global_locks_log2 = JS_CEILING_LOG2W(globc);
global_locks_mask = JS_BITMASK(global_locks_log2);
global_lock_count = JS_BIT(global_locks_log2);
global_locks = (PRLock **) OffTheBooks::malloc_(global_lock_count * sizeof(PRLock*));
if (!global_locks)
return JS_FALSE;
for (i = 0; i < global_lock_count; i++) {
global_locks[i] = PR_NewLock();
if (!global_locks[i]) {
global_lock_count = i;
js_CleanupLocks();
return JS_FALSE;
}
}
fl_list_table = (JSFatLockTable *) OffTheBooks::malloc_(i * sizeof(JSFatLockTable));
if (!fl_list_table) {
js_CleanupLocks();
return JS_FALSE;
}
fl_list_table_len = global_lock_count;
for (i = 0; i < global_lock_count; i++)
fl_list_table[i].free_ = fl_list_table[i].taken = NULL;
fl_list_chunk_len = listc;
#endif /* !NSPR_LOCK */
return JS_TRUE;
}
void
js_CleanupLocks()
{
#ifndef NSPR_LOCK
uint32_t i;
if (global_locks) {
for (i = 0; i < global_lock_count; i++)
PR_DestroyLock(global_locks[i]);
UnwantedForeground::free_(global_locks);
global_locks = NULL;
global_lock_count = 1;
global_locks_log2 = 0;
global_locks_mask = 0;
}
if (fl_list_table) {
for (i = 0; i < fl_list_table_len; i++) {
DeleteListOfFatlocks(fl_list_table[i].free_);
fl_list_table[i].free_ = NULL;
DeleteListOfFatlocks(fl_list_table[i].taken);
fl_list_table[i].taken = NULL;
}
UnwantedForeground::free_(fl_list_table);
fl_list_table = NULL;
fl_list_table_len = 0;
}
#endif /* !NSPR_LOCK */
}
#ifdef NSPR_LOCK
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, intptr_t me)
{
JS_ACQUIRE_LOCK((JSLock *) tl->fat);
tl->owner = me;
}
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, intptr_t /*me*/)
{
tl->owner = 0;
JS_RELEASE_LOCK((JSLock *) tl->fat);
}
#else
/*
* Fast locking and unlocking is implemented by delaying the allocation of a
* system lock (fat lock) until contention. As long as a locking thread A
* runs uncontended, the lock is represented solely by storing A's identity in
* the object being locked.
*
* If another thread B tries to lock the object currently locked by A, B is
* enqueued into a fat lock structure (which might have to be allocated and
* pointed to by the object), and suspended using NSPR condition variables
* (wait). A wait bit (Bacon bit) is set in the lock word of the object,
* signalling to A that when releasing the lock, B must be dequeued and
* notified.
*
* The basic operation of the locking primitives (js_Lock, js_Unlock,
* js_Enqueue, and js_Dequeue) is compare-and-swap. Hence, when locking into
* the word pointed at by p, compare-and-swap(p, 0, A) success implies that p
* is unlocked. Similarly, when unlocking p, if compare-and-swap(p, A, 0)
* succeeds this implies that p is uncontended (no one is waiting because the
* wait bit is not set).
*
* When dequeueing, the lock is released, and one of the threads suspended on
* the lock is notified. If other threads still are waiting, the wait bit is
* kept (in js_Enqueue), and if not, the fat lock is deallocated.
*
* The functions js_Enqueue, js_Dequeue, js_SuspendThread, and js_ResumeThread
* are serialized using a global lock. For scalability, a hashtable of global
* locks is used, which is indexed modulo the thin lock pointer.
*/
/*
* Invariants:
* (i) global lock is held
* (ii) fl->susp >= 0
*/
static int
js_SuspendThread(JSThinLock *tl)
{
JSFatLock *fl;
if (tl->fat == NULL)
fl = tl->fat = GetFatlock(tl);
else
fl = tl->fat;
JS_ASSERT(fl->susp >= 0);
fl->susp++;
PR_Lock(fl->slock);
js_UnlockGlobal(tl);
DebugOnly<PRStatus> stat = PR_WaitCondVar(fl->svar, PR_INTERVAL_NO_TIMEOUT);
JS_ASSERT(stat != PR_FAILURE);
PR_Unlock(fl->slock);
js_LockGlobal(tl);
fl->susp--;
if (fl->susp == 0) {
PutFatlock(fl, tl);
tl->fat = NULL;
}
return tl->fat == NULL;
}
/*
* (i) global lock is held
* (ii) fl->susp > 0
*/
static void
js_ResumeThread(JSThinLock *tl)
{
JSFatLock *fl = tl->fat;
JS_ASSERT(fl != NULL);
JS_ASSERT(fl->susp > 0);
PR_Lock(fl->slock);
js_UnlockGlobal(tl);
DebugOnly<PRStatus> stat = PR_NotifyCondVar(fl->svar);
JS_ASSERT(stat != PR_FAILURE);
PR_Unlock(fl->slock);
}
static void
js_Enqueue(JSThinLock *tl, intptr_t me)
{
intptr_t o, n;
js_LockGlobal(tl);
for (;;) {
o = ReadWord(tl->owner);
n = Thin_SetWait(o);
if (o != 0 && NativeCompareAndSwap(&tl->owner, o, n)) {
if (js_SuspendThread(tl))
me = Thin_RemoveWait(me);
else
me = Thin_SetWait(me);
}
else if (NativeCompareAndSwap(&tl->owner, 0, me)) {
js_UnlockGlobal(tl);
return;
}
}
}
static void
js_Dequeue(JSThinLock *tl)
{
intptr_t o;
js_LockGlobal(tl);
o = ReadWord(tl->owner);
JS_ASSERT(Thin_GetWait(o) != 0);
JS_ASSERT(tl->fat != NULL);
if (!NativeCompareAndSwap(&tl->owner, o, 0)) /* release it */
JS_ASSERT(0);
js_ResumeThread(tl);
}
static JS_ALWAYS_INLINE void
ThinLock(JSThinLock *tl, intptr_t me)
{
JS_ASSERT(CURRENT_THREAD_IS_ME(me));
if (NativeCompareAndSwap(&tl->owner, 0, me))
return;
if (Thin_RemoveWait(ReadWord(tl->owner)) != me)
js_Enqueue(tl, me);
#ifdef DEBUG
else
JS_ASSERT(0);
#endif
}
static JS_ALWAYS_INLINE void
ThinUnlock(JSThinLock *tl, intptr_t me)
{
JS_ASSERT(CURRENT_THREAD_IS_ME(me));
/*
* Since we can race with the NativeCompareAndSwap in js_Enqueue, we need
* to use a C_A_S here as well -- Arjan van de Ven 30/1/08
*/
if (NativeCompareAndSwap(&tl->owner, me, 0))
return;
JS_ASSERT(Thin_GetWait(tl->owner));
if (Thin_RemoveWait(ReadWord(tl->owner)) == me)
js_Dequeue(tl);
#ifdef DEBUG
else
JS_ASSERT(0); /* unbalanced unlock */
#endif
}
#endif /* !NSPR_LOCK */
void
js_Lock(JSContext *cx, JSThinLock *tl)
{
ThinLock(tl, CX_THINLOCK_ID(cx));
}
void
js_Unlock(JSContext *cx, JSThinLock *tl)
{
ThinUnlock(tl, CX_THINLOCK_ID(cx));
}
#endif /* JS_THREADSAFE */


@ -39,185 +39,34 @@
#ifndef jslock_h__
#define jslock_h__
#include "jstypes.h"
#include "jsapi.h"
#include "jsprvtd.h"
#ifdef JS_THREADSAFE
# include "pratom.h"
# include "prlock.h"
# include "prcvar.h"
# include "prthread.h"
# include "prinit.h"
#endif
#ifdef JS_THREADSAFE
# define JS_ATOMIC_INCREMENT(p) PR_ATOMIC_INCREMENT((PRInt32 *)(p))
# define JS_ATOMIC_DECREMENT(p) PR_ATOMIC_DECREMENT((PRInt32 *)(p))
# define JS_ATOMIC_ADD(p,v) PR_ATOMIC_ADD((PRInt32 *)(p), (PRInt32)(v))
# define JS_ATOMIC_SET(p,v) PR_ATOMIC_SET((PRInt32 *)(p), (PRInt32)(v))
#if (defined(_WIN32) && defined(_M_IX86)) || \
(defined(_WIN64) && (defined(_M_AMD64) || defined(_M_X64))) || \
(defined(__i386) && (defined(__GNUC__) || defined(__SUNPRO_CC))) || \
(defined(__x86_64) && (defined(__GNUC__) || defined(__SUNPRO_CC))) || \
(defined(__sparc) && (defined(__GNUC__) || defined(__SUNPRO_CC))) || \
(defined(__arm__) && defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_4)) || \
defined(AIX) || \
defined(USE_ARM_KUSER)
# define JS_HAS_NATIVE_COMPARE_AND_SWAP 1
#else
# define JS_HAS_NATIVE_COMPARE_AND_SWAP 0
#endif
#else /* JS_THREADSAFE */
#if defined(JS_USE_ONLY_NSPR_LOCKS) || !JS_HAS_NATIVE_COMPARE_AND_SWAP
# define NSPR_LOCK 1
#else
# undef NSPR_LOCK
#endif
# define JS_ATOMIC_INCREMENT(p) (++*(p))
# define JS_ATOMIC_DECREMENT(p) (--*(p))
# define JS_ATOMIC_ADD(p,v) (*(p) += (v))
# define JS_ATOMIC_SET(p,v) (*(p) = (v))
#define Thin_GetWait(W) ((intptr_t)(W) & 0x1)
#define Thin_SetWait(W) ((intptr_t)(W) | 0x1)
#define Thin_RemoveWait(W) ((intptr_t)(W) & ~0x1)
typedef struct JSFatLock JSFatLock;
typedef struct JSThinLock {
intptr_t owner;
JSFatLock *fat;
} JSThinLock;
#define CX_THINLOCK_ID(cx) ((intptr_t)(cx)->thread())
#define CURRENT_THREAD_IS_ME(me) (((JSThread *)me)->id == js_CurrentThreadId())
typedef PRLock JSLock;
/*
* Atomic increment and decrement for a reference counter, given jsrefcount *p.
* NB: jsrefcount is int32, aka PRInt32, so that pratom.h functions work.
*/
#define JS_ATOMIC_INCREMENT(p) PR_ATOMIC_INCREMENT((PRInt32 *)(p))
#define JS_ATOMIC_DECREMENT(p) PR_ATOMIC_DECREMENT((PRInt32 *)(p))
#define JS_ATOMIC_ADD(p,v) PR_ATOMIC_ADD((PRInt32 *)(p), (PRInt32)(v))
#define JS_ATOMIC_SET(p,v) PR_ATOMIC_SET((PRInt32 *)(p), (PRInt32)(v))
#define js_CurrentThreadId() PR_GetCurrentThread()
#define JS_NEW_LOCK() PR_NewLock()
#define JS_DESTROY_LOCK(l) PR_DestroyLock(l)
#define JS_ACQUIRE_LOCK(l) PR_Lock(l)
#define JS_RELEASE_LOCK(l) PR_Unlock(l)
#define JS_NEW_CONDVAR(l) PR_NewCondVar(l)
#define JS_DESTROY_CONDVAR(cv) PR_DestroyCondVar(cv)
#define JS_WAIT_CONDVAR(cv,to) PR_WaitCondVar(cv,to)
#define JS_NO_TIMEOUT PR_INTERVAL_NO_TIMEOUT
#define JS_NOTIFY_CONDVAR(cv) PR_NotifyCondVar(cv)
#define JS_NOTIFY_ALL_CONDVAR(cv) PR_NotifyAllCondVar(cv)
#define JS_LOCK(cx, tl) js_Lock(cx, tl)
#define JS_UNLOCK(cx, tl) js_Unlock(cx, tl)
extern void js_Lock(JSContext *cx, JSThinLock *tl);
extern void js_Unlock(JSContext *cx, JSThinLock *tl);
extern int js_SetupLocks(int,int);
extern void js_CleanupLocks();
extern void js_InitLock(JSThinLock *);
extern void js_FinishLock(JSThinLock *);
#else /* !JS_THREADSAFE */
#define JS_ATOMIC_INCREMENT(p) (++*(p))
#define JS_ATOMIC_DECREMENT(p) (--*(p))
#define JS_ATOMIC_ADD(p,v) (*(p) += (v))
#define JS_ATOMIC_SET(p,v) (*(p) = (v))
#define js_CurrentThreadId() ((void*)NULL)
#define JS_NEW_LOCK() NULL
#define JS_DESTROY_LOCK(l) ((void)0)
#define JS_ACQUIRE_LOCK(l) ((void)0)
#define JS_RELEASE_LOCK(l) ((void)0)
#define JS_LOCK(cx, tl) ((void)0)
#define JS_UNLOCK(cx, tl) ((void)0)
#define JS_NEW_CONDVAR(l) NULL
#define JS_DESTROY_CONDVAR(cv) ((void)0)
#define JS_WAIT_CONDVAR(cv,to) ((void)0)
#define JS_NOTIFY_CONDVAR(cv) ((void)0)
#define JS_NOTIFY_ALL_CONDVAR(cv) ((void)0)
#endif /* !JS_THREADSAFE */
#define JS_LOCK_GC(rt) JS_ACQUIRE_LOCK((rt)->gcLock)
#define JS_UNLOCK_GC(rt) JS_RELEASE_LOCK((rt)->gcLock)
#define JS_AWAIT_GC_DONE(rt) JS_WAIT_CONDVAR((rt)->gcDone, JS_NO_TIMEOUT)
#define JS_NOTIFY_GC_DONE(rt) JS_NOTIFY_ALL_CONDVAR((rt)->gcDone)
#define JS_AWAIT_REQUEST_DONE(rt) JS_WAIT_CONDVAR((rt)->requestDone, \
JS_NO_TIMEOUT)
#define JS_NOTIFY_REQUEST_DONE(rt) JS_NOTIFY_CONDVAR((rt)->requestDone)
#ifndef JS_SET_OBJ_INFO
#define JS_SET_OBJ_INFO(obj,f,l) ((void)0)
#endif
#ifndef JS_SET_TITLE_INFO
#define JS_SET_TITLE_INFO(title,f,l) ((void)0)
#endif
#ifdef JS_THREADSAFE
extern JSBool
js_CompareAndSwap(volatile intptr_t *w, intptr_t ov, intptr_t nv);
/* Atomically bitwise-or the mask into the word *w using compare and swap. */
extern void
js_AtomicSetMask(volatile intptr_t *w, intptr_t mask);
/*
* Atomically bitwise-and the complement of the mask into the word *w using
* compare and swap.
*/
extern void
js_AtomicClearMask(volatile intptr_t *w, intptr_t mask);
#define JS_ATOMIC_SET_MASK(w, mask) js_AtomicSetMask(w, mask)
#define JS_ATOMIC_CLEAR_MASK(w, mask) js_AtomicClearMask(w, mask)
extern unsigned
js_GetCPUCount();
#else
static inline JSBool
js_CompareAndSwap(intptr_t *w, intptr_t ov, intptr_t nv)
{
return (*w == ov) ? *w = nv, JS_TRUE : JS_FALSE;
}
#define JS_ATOMIC_SET_MASK(w, mask) (*(w) |= (mask))
#define JS_ATOMIC_CLEAR_MASK(w, mask) (*(w) &= ~(mask))
static inline unsigned
js_GetCPUCount()
{
return 1;
}
#endif
#ifdef __cplusplus
#endif /* JS_THREADSAFE */
namespace js {
#ifdef JS_THREADSAFE
class AutoLock {
private:
JSLock *lock;
public:
AutoLock(JSLock *lock) : lock(lock) { JS_ACQUIRE_LOCK(lock); }
~AutoLock() { JS_RELEASE_LOCK(lock); }
};
# define JS_AUTO_LOCK_GUARD(name, l) AutoLock name((l));
#else
# define JS_AUTO_LOCK_GUARD(name, l)
#endif
class AutoAtomicIncrement {
class AutoAtomicIncrement
{
int32_t *p;
JS_DECL_USE_GUARD_OBJECT_NOTIFIER
@ -233,8 +82,6 @@ class AutoAtomicIncrement {
}
};
} /* namespace js */
#endif
} /* namespace js */
#endif /* jslock_h___ */


@ -1,60 +0,0 @@
; -*- Mode: asm; tab-width: 8; c-basic-offset: 4 -*-
; ***** BEGIN LICENSE BLOCK *****
; Version: MPL 1.1/GPL 2.0/LGPL 2.1
;
; The contents of this file are subject to the Mozilla Public License Version
; 1.1 (the "License"); you may not use this file except in compliance with
; the License. You may obtain a copy of the License at
; http://www.mozilla.org/MPL/
;
; Software distributed under the License is distributed on an "AS IS" basis,
; WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
; for the specific language governing rights and limitations under the
; License.
;
; The Original Code is an OS/2 implementation of js_CompareAndSwap in assembly.
;
; The Initial Developer of the Original Code is
; IBM Corporation.
; Portions created by the Initial Developer are Copyright (C) 2001
; the Initial Developer. All Rights Reserved.
;
; Contributor(s):
;
; Alternatively, the contents of this file may be used under the terms of
; either the GNU General Public License Version 2 or later (the "GPL"), or
; the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
; in which case the provisions of the GPL or the LGPL are applicable instead
; of those above. If you wish to allow use of your version of this file only
; under the terms of either the GPL or the LGPL, and not to allow others to
; use your version of this file under the terms of the MPL, indicate your
; decision by deleting the provisions above and replace them with the notice
; and other provisions required by the GPL or the LGPL. If you do not delete
; the provisions above, a recipient may use your version of this file under
; the terms of any one of the MPL, the GPL or the LGPL.
;
; ***** END LICENSE BLOCK *****
.486P
.MODEL FLAT, OPTLINK
.STACK
.CODE
;;;---------------------------------------------------------------------
;;; int _Optlink js_CompareAndSwap(intptr_t *w, intptr_t ov, intptr_t nv)
;;;---------------------------------------------------------------------
js_CompareAndSwap PROC OPTLINK EXPORT
push ebx
mov ebx, eax
mov eax, edx
mov edx, ebx
lock cmpxchg [ebx], ecx
sete al
and eax, 1h
pop ebx
ret
js_CompareAndSwap endp
END


@ -108,7 +108,7 @@ ComputeAccurateDecimalInteger(JSContext *cx, const jschar *start, const jschar *
char *estr;
int err = 0;
*dp = js_strtod_harder(JS_THREAD_DATA(cx)->dtoaState, cstr, &estr, &err);
*dp = js_strtod_harder(cx->runtime->dtoaState, cstr, &estr, &err);
if (err == JS_DTOA_ENOMEM) {
JS_ReportOutOfMemory(cx);
cx->free_(cstr);
@ -816,7 +816,7 @@ num_to(JSContext *cx, Native native, JSDToStrMode zeroArgMode, JSDToStrMode oneA
}
}
numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, buf, sizeof buf,
numStr = js_dtostr(cx->runtime->dtoaState, buf, sizeof buf,
oneArgMode, (jsint)precision + precisionOffset, d);
if (!numStr) {
JS_ReportOutOfMemory(cx);
@ -1087,10 +1087,10 @@ FracNumberToCString(JSContext *cx, ToCStringBuf *cbuf, jsdouble d, jsint base =
*/
numStr = v8::internal::DoubleToCString(d, cbuf->sbuf, cbuf->sbufSize);
if (!numStr)
numStr = js_dtostr(JS_THREAD_DATA(cx)->dtoaState, cbuf->sbuf, cbuf->sbufSize,
numStr = js_dtostr(cx->runtime->dtoaState, cbuf->sbuf, cbuf->sbufSize,
DTOSTR_STANDARD, 0, d);
} else {
numStr = cbuf->dbuf = js_dtobasestr(JS_THREAD_DATA(cx)->dtoaState, base, d);
numStr = cbuf->dbuf = js_dtobasestr(cx->runtime->dtoaState, base, d);
}
return numStr;
}
@ -1421,7 +1421,7 @@ js_strtod(JSContext *cx, const jschar *s, const jschar *send,
estr = istr + 8;
} else {
int err;
d = js_strtod_harder(JS_THREAD_DATA(cx)->dtoaState, cstr, &estr, &err);
d = js_strtod_harder(cx->runtime->dtoaState, cstr, &estr, &err);
if (d == HUGE_VAL)
d = js_PositiveInfinity;
else if (d == -HUGE_VAL)


@ -1519,21 +1519,6 @@ struct JSObject_Slots16 : JSObject { js::Value fslots[16]; };
#define JSSLOT_FREE(clasp) JSCLASS_RESERVED_SLOTS(clasp)
#ifdef JS_THREADSAFE
/*
* The GC runs only when all threads except the one on which the GC is active
* are suspended at GC-safe points, so calling obj->getSlot() from the GC's
* thread is safe when rt->gcRunning is set. See jsgc.cpp for details.
*/
#define THREAD_IS_RUNNING_GC(rt, thread) \
((rt)->gcRunning && (rt)->gcThread == (thread))
#define CX_THREAD_IS_RUNNING_GC(cx) \
THREAD_IS_RUNNING_GC((cx)->runtime, (cx)->thread)
#endif /* JS_THREADSAFE */
class JSValueArray {
public:
jsval *array;


@ -462,7 +462,7 @@ ToDisassemblySource(JSContext *cx, jsval v, JSAutoByteString *bytes)
return true;
}
if (cx->runtime->gcRunning || JS_THREAD_DATA(cx)->noGCOrAllocationCheck) {
if (cx->runtime->gcRunning || cx->runtime->noGCOrAllocationCheck) {
char *source = JS_sprintf_append(NULL, "<value>");
if (!source)
return false;


@ -83,7 +83,7 @@ GetFunctionProxyConstruct(JSObject *proxy)
static bool
OperationInProgress(JSContext *cx, JSObject *proxy)
{
PendingProxyOperation *op = JS_THREAD_DATA(cx)->pendingProxyOperation;
PendingProxyOperation *op = cx->runtime->pendingProxyOperation;
while (op) {
if (op->object == proxy)
return true;
@ -710,18 +710,18 @@ ScriptedProxyHandler::iterate(JSContext *cx, JSObject *proxy, uintN flags, Value
ScriptedProxyHandler ScriptedProxyHandler::singleton;
class AutoPendingProxyOperation {
ThreadData *data;
JSRuntime *rt;
PendingProxyOperation op;
public:
AutoPendingProxyOperation(JSContext *cx, JSObject *proxy) : data(JS_THREAD_DATA(cx)) {
op.next = data->pendingProxyOperation;
AutoPendingProxyOperation(JSContext *cx, JSObject *proxy) : rt(cx->runtime) {
op.next = rt->pendingProxyOperation;
op.object = proxy;
data->pendingProxyOperation = &op;
rt->pendingProxyOperation = &op;
}
~AutoPendingProxyOperation() {
JS_ASSERT(data->pendingProxyOperation == &op);
data->pendingProxyOperation = op.next;
JS_ASSERT(rt->pendingProxyOperation == &op);
rt->pendingProxyOperation = op.next;
}
};


@ -86,7 +86,6 @@ typedef struct JSGenerator JSGenerator;
typedef struct JSNativeEnumerator JSNativeEnumerator;
typedef struct JSProperty JSProperty;
typedef struct JSSharpObjectMap JSSharpObjectMap;
typedef struct JSThread JSThread;
typedef struct JSTryNote JSTryNote;
/* Friend "Advanced API" typedefs. */


@ -781,7 +781,7 @@ JSScript::initCounts(JSContext *cx)
/* Enable interrupts in any interpreter frames running on this script. */
InterpreterFrames *frames;
for (frames = JS_THREAD_DATA(cx)->interpreterFrames; frames; frames = frames->older)
for (frames = cx->runtime->interpreterFrames; frames; frames = frames->older)
frames->enableInterruptsIfRunning(this);
return true;
@ -1735,7 +1735,7 @@ JSScript::ensureHasDebug(JSContext *cx)
* debug state is destroyed.
*/
InterpreterFrames *frames;
for (frames = JS_THREAD_DATA(cx)->interpreterFrames; frames; frames = frames->older)
for (frames = cx->runtime->interpreterFrames; frames; frames = frames->older)
frames->enableInterruptsIfRunning(this);
return true;


@ -1,84 +0,0 @@
!
! ***** BEGIN LICENSE BLOCK *****
! Version: MPL 1.1/GPL 2.0/LGPL 2.1
!
! The contents of this file are subject to the Mozilla Public License Version
! 1.1 (the "License"); you may not use this file except in compliance with
! the License. You may obtain a copy of the License at
! http://www.mozilla.org/MPL/
!
! Software distributed under the License is distributed on an "AS IS" basis,
! WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
! for the specific language governing rights and limitations under the
! License.
!
! The Original Code is Mozilla Communicator client code, released
! March 31, 1998.
!
! The Initial Developer of the Original Code is
! Netscape Communications Corporation.
! Portions created by the Initial Developer are Copyright (C) 1998-1999
! the Initial Developer. All Rights Reserved.
!
! Contributor(s):
!
! Alternatively, the contents of this file may be used under the terms of
! either the GNU General Public License Version 2 or later (the "GPL"), or
! the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
! in which case the provisions of the GPL or the LGPL are applicable instead
! of those above. If you wish to allow use of your version of this file only
! under the terms of either the GPL or the LGPL, and not to allow others to
! use your version of this file under the terms of the MPL, indicate your
! decision by deleting the provisions above and replace them with the notice
! and other provisions required by the GPL or the LGPL. If you do not delete
! the provisions above, a recipient may use your version of this file under
! the terms of any one of the MPL, the GPL or the LGPL.
!
! ***** END LICENSE BLOCK *****
!
! atomic compare-and-swap routines for V8+ (ultrasparc)
!
! ======================================================================
!
! Perform the sequence *a = b atomically with respect to the previous value
! of *a (a0). If *a==a0 then assign b to *a, all in one atomic operation.
! Returns 1 if assignment happened, and 0 otherwise.
!
! usage : old_val = compare_and_swap(address, oldval, newval)
!
! -----------------------
! Note on REGISTER USAGE:
! as this is a LEAF procedure, a new stack frame is not created;
! we use the caller stack frame so what would normally be %i (input)
! registers are actually %o (output registers). Also, we must not
! overwrite the contents of %l (local) registers as they are not
! assumed to be volatile during calls.
!
! So, the registers used are:
! %o0 [input] - the address of the value to compare and swap
! %o1 [input] - the old value to compare with
! %o2 [input] - the new value to set for [%o0]
! %o3 [local] - work register
! -----------------------
! ======================================================================
!
! v8plus
.inline NativeCompareAndSwap,3
stbar
cas [%o0],%o1,%o2 ! compare *w with old value and set to new if equal
cmp %o1,%o2 ! did we succeed?
be,a 1f ! yes
mov 1,%o0 ! return true (annulled when no jump)
mov 0,%o0 ! return false
1:
.end
!
! end
!
! ======================================================================
!


@ -1,84 +0,0 @@
!
! ***** BEGIN LICENSE BLOCK *****
! Version: MPL 1.1/GPL 2.0/LGPL 2.1
!
! The contents of this file are subject to the Mozilla Public License Version
! 1.1 (the "License"); you may not use this file except in compliance with
! the License. You may obtain a copy of the License at
! http://www.mozilla.org/MPL/
!
! Software distributed under the License is distributed on an "AS IS" basis,
! WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
! for the specific language governing rights and limitations under the
! License.
!
! The Original Code is Mozilla Communicator client code, released
! March 31, 1998.
!
! The Initial Developer of the Original Code is
! Netscape Communications Corporation.
! Portions created by the Initial Developer are Copyright (C) 1998-1999
! the Initial Developer. All Rights Reserved.
!
! Contributor(s):
!
! Alternatively, the contents of this file may be used under the terms of
! either the GNU General Public License Version 2 or later (the "GPL"), or
! the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
! in which case the provisions of the GPL or the LGPL are applicable instead
! of those above. If you wish to allow use of your version of this file only
! under the terms of either the GPL or the LGPL, and not to allow others to
! use your version of this file under the terms of the MPL, indicate your
! decision by deleting the provisions above and replace them with the notice
! and other provisions required by the GPL or the LGPL. If you do not delete
! the provisions above, a recipient may use your version of this file under
! the terms of any one of the MPL, the GPL or the LGPL.
!
! ***** END LICENSE BLOCK *****
!
! atomic compare-and-swap routines for V9 (ultrasparc)
!
! ======================================================================
!
! Perform the sequence *a = b atomically with respect to the previous value
! of *a (a0). If *a==a0 then assign b to *a, all in one atomic operation.
! Returns 1 if assignment happened, and 0 otherwise.
!
! usage : old_val = compare_and_swap(address, oldval, newval)
!
! -----------------------
! Note on REGISTER USAGE:
! as this is a LEAF procedure, a new stack frame is not created;
! we use the caller stack frame so what would normally be %i (input)
! registers are actually %o (output registers). Also, we must not
! overwrite the contents of %l (local) registers as they are not
! assumed to be volatile during calls.
!
! So, the registers used are:
! %o0 [input] - the address of the value to compare and swap
! %o1 [input] - the old value to compare with
! %o2 [input] - the new value to set for [%o0]
! %o3 [local] - work register
! -----------------------
! ======================================================================
!
! v9
.inline NativeCompareAndSwap,3
stbar
casx [%o0],%o1,%o2 ! compare *w with old value and set to new if equal
cmp %o1,%o2 ! did we succeed?
be,a 1f ! yes
mov 1,%o0 ! return true (annulled when no jump)
mov 0,%o0 ! return false
1:
.end
!
! end
!
! ======================================================================
!


@ -3933,19 +3933,7 @@ mjit::Compiler::interruptCheckHelper()
/* For barrier verification, always take the interrupt so we can verify. */
jump = masm.jump();
} else {
/*
* Bake in and test the address of the interrupt counter for the runtime.
* This is faster than doing two additional loads for the context's
* thread data, but will cause this thread to run slower if there are
* pending interrupts on some other thread. For non-JS_THREADSAFE builds
* we can skip this, as there is only one flag to poll.
*/
#ifdef JS_THREADSAFE
void *interrupt = (void*) &cx->runtime->interruptCounter;
#else
void *interrupt = (void*) &JS_THREAD_DATA(cx)->interruptFlags;
#endif
void *interrupt = (void*) &cx->runtime->interrupt;
#if defined(JS_CPU_X86) || defined(JS_CPU_ARM) || defined(JS_CPU_MIPS)
jump = masm.branch32(Assembler::NotEqual, AbsoluteAddress(interrupt), Imm32(0));
#else
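With per-thread interrupt flags gone, both the JIT (above) and the C++ paths poll one runtime-wide flag. The following is a standalone sketch of that pattern using simplified stand-in types, not the real JSRuntime/JSContext.

    // Sketch of the single runtime-wide interrupt flag this patch moves to.
    // Runtime, Context, and CheckForInterrupt are illustrative stand-ins.
    #include <atomic>

    struct Runtime { std::atomic<int> interrupt{0}; };
    struct Context { Runtime *runtime; };

    // The JIT bakes in &runtime->interrupt and branches when it is non-zero;
    // C++ callers do the equivalent check and then invoke the handler.
    inline bool
    CheckForInterrupt(Context *cx, bool (*handleInterrupt)(Context *))
    {
        if (cx->runtime->interrupt.load(std::memory_order_relaxed) && !handleInterrupt(cx))
            return false;   // the handler asked us to stop execution
        return true;
    }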

View File

@@ -1897,7 +1897,7 @@ stubs::Exception(VMFrame &f)
{
// Check the interrupt flag to allow interrupting deeply nested exception
// handling.
if (JS_THREAD_DATA(f.cx)->interruptFlags && !js_HandleExecutionInterrupt(f.cx))
if (f.cx->runtime->interrupt && !js_HandleExecutionInterrupt(f.cx))
THROW();
f.regs.sp[0] = f.cx->getPendingException();

View File

@@ -3455,7 +3455,7 @@ CancelExecution(JSRuntime *rt)
if (gWorkerThreadPool)
js::workers::terminateAll(gWorkerThreadPool);
#endif
JS_TriggerAllOperationCallbacks(rt);
JS_TriggerRuntimeOperationCallback(rt);
static const char msg[] = "Script runs for too long, terminating.\n";
#if defined(XP_UNIX) && !defined(JS_THREADSAFE)

View File

@@ -55,6 +55,16 @@
extern size_t gMaxStackSize;
class AutoLock
{
private:
PRLock *lock;
public:
AutoLock(PRLock *lock) : lock(lock) { PR_Lock(lock); }
~AutoLock() { PR_Unlock(lock); }
};
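The shell workers now use raw NSPR primitives in place of the removed JS_* lock macros, with AutoLock pairing PR_Lock/PR_Unlock. Below is an illustrative sketch of the lock-plus-condvar pattern that ThreadSafeQueue relies on; SketchQueue and its members are assumptions made for the example, while the PR_* calls and AutoLock come from this patch and NSPR.

    // Illustrative NSPR lock + condition variable usage; not shell code.
    #include "prlock.h"
    #include "prcvar.h"

    struct SketchQueue {
        PRLock *lock;
        PRCondVar *condvar;

        bool init() {
            lock = PR_NewLock();
            condvar = lock ? PR_NewCondVar(lock) : NULL;
            return lock && condvar;
        }
        void waitForWork() {
            AutoLock hold(lock);                              // PR_Lock/PR_Unlock via RAII
            PR_WaitCondVar(condvar, PR_INTERVAL_NO_TIMEOUT);  // the lock must be held here
        }
        void wakeAll() {
            AutoLock hold(lock);
            PR_NotifyAllCondVar(condvar);
        }
        void destroy() {
            if (condvar)
                PR_DestroyCondVar(condvar);
            if (lock)
                PR_DestroyLock(lock);
        }
    };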
/*
* JavaScript shell workers.
*
@@ -150,7 +160,7 @@ class WorkerParent {
bool initWorkerParent() { return children.init(8); }
public:
virtual JSLock *getLock() = 0;
virtual PRLock *getLock() = 0;
virtual ThreadPool *getThreadPool() = 0;
virtual bool post(Event *item) = 0; // false on OOM or queue closed
virtual void trace(JSTracer *trc) = 0;
@@ -177,7 +187,7 @@ class ThreadSafeQueue
{
protected:
Queue<T, SystemAllocPolicy> queue;
JSLock *lock;
PRLock *lock;
PRCondVar *condvar;
bool closed;
@@ -189,9 +199,9 @@ class ThreadSafeQueue
~ThreadSafeQueue() {
if (condvar)
JS_DESTROY_CONDVAR(condvar);
PR_DestroyCondVar(condvar);
if (lock)
JS_DESTROY_LOCK(lock);
PR_DestroyLock(lock);
}
// Called by take() with the lock held.
@@ -201,7 +211,7 @@ class ThreadSafeQueue
bool initThreadSafeQueue() {
JS_ASSERT(!lock);
JS_ASSERT(!condvar);
return (lock = JS_NEW_LOCK()) && (condvar = JS_NEW_CONDVAR(lock));
return (lock = PR_NewLock()) && (condvar = PR_NewCondVar(lock));
}
bool post(T t) {
@@ -209,7 +219,7 @@ class ThreadSafeQueue
if (closed)
return false;
if (queue.empty())
JS_NOTIFY_ALL_CONDVAR(condvar);
PR_NotifyAllCondVar(condvar);
return queue.push(t);
}
@@ -217,7 +227,7 @@ class ThreadSafeQueue
AutoLock hold(lock);
closed = true;
queue.clear();
JS_NOTIFY_ALL_CONDVAR(condvar);
PR_NotifyAllCondVar(condvar);
}
// The caller must hold the lock.
@@ -225,7 +235,7 @@ class ThreadSafeQueue
while (queue.empty()) {
if (shouldStop())
return false;
JS_WAIT_CONDVAR(condvar, JS_NO_TIMEOUT);
PR_WaitCondVar(condvar, PR_INTERVAL_NO_TIMEOUT);
}
*t = queue.pop();
busy.append(*t);
@@ -253,7 +263,7 @@ class ThreadSafeQueue
void wake() {
AutoLock hold(lock);
JS_NOTIFY_ALL_CONDVAR(condvar);
PR_NotifyAllCondVar(condvar);
}
void trace(JSTracer *trc) {
@@ -371,7 +381,7 @@ class MainQueue MOZ_FINAL : public EventQueue, public WorkerParent
delete this;
}
virtual JSLock *getLock() { return lock; }
virtual PRLock *getLock() { return lock; }
virtual ThreadPool *getThreadPool() { return threadPool; }
protected:
@@ -390,7 +400,7 @@ class MainQueue MOZ_FINAL : public EventQueue, public WorkerParent
Event *event;
while (take(&event)) {
JS_RELEASE_LOCK(lock);
PR_Unlock(lock);
Event::Result result;
{
JSAutoRequest req(cx);
@@ -414,7 +424,7 @@ class MainQueue MOZ_FINAL : public EventQueue, public WorkerParent
result = Event::ok;
}
}
JS_ACQUIRE_LOCK(lock);
PR_Lock(lock);
drop(event);
event->destroy(cx);
if (result != Event::ok)
@@ -596,7 +606,7 @@ class Worker MOZ_FINAL : public WorkerParent
JSObject *object; // Worker object exposed to parent
JSRuntime *runtime;
JSContext *context;
JSLock *lock;
PRLock *lock;
Queue<Event *, SystemAllocPolicy> events; // owning pointers to pending events
Event *current;
bool terminated;
@@ -616,7 +626,7 @@ class Worker MOZ_FINAL : public WorkerParent
threadPool = parent->getThreadPool();
this->parent = parent;
this->object = obj;
lock = JS_NEW_LOCK();
lock = PR_NewLock();
return lock &&
createRuntime(parentcx) &&
createContext(parentcx, parent) &&
@@ -681,7 +691,6 @@ class Worker MOZ_FINAL : public WorkerParent
js::SetFunctionNativeReserved(post, 0, PRIVATE_TO_JSVAL(this));
JS_EndRequest(context);
JS_ClearContextThread(context);
return true;
bad:
@@ -769,13 +778,12 @@ class Worker MOZ_FINAL : public WorkerParent
while (!events.empty())
events.pop()->destroy(context);
if (lock) {
JS_DESTROY_LOCK(lock);
PR_DestroyLock(lock);
lock = NULL;
}
if (runtime)
JS_SetRuntimeThread(runtime);
if (context) {
JS_SetContextThread(context);
JS_DestroyContextNoGC(context);
context = NULL;
}
@@ -801,7 +809,7 @@ class Worker MOZ_FINAL : public WorkerParent
WorkerParent *getParent() { return parent; }
virtual JSLock *getLock() { return lock; }
virtual PRLock *getLock() { return lock; }
virtual ThreadPool *getThreadPool() { return threadPool; }
@@ -1030,15 +1038,15 @@ WorkerQueue::work() {
Worker *w;
while (take(&w)) { // can block outside the mutex
JS_RELEASE_LOCK(lock);
PR_Unlock(lock);
w->processOneEvent(); // enters request on w->context
JS_ACQUIRE_LOCK(lock);
PR_Lock(lock);
drop(w);
if (lockedIsIdle()) {
JS_RELEASE_LOCK(lock);
PR_Unlock(lock);
main->wake();
JS_ACQUIRE_LOCK(lock);
PR_Lock(lock);
}
}
}
@@ -1136,8 +1144,6 @@ Worker::processOneEvent()
}
JS_SetRuntimeThread(runtime);
JS_SetContextThread(context);
JS_SetNativeStackQuota(context, gMaxStackSize);
Event::Result result;
{
@@ -1171,7 +1177,6 @@ Worker::processOneEvent()
if (event)
event->destroy(context);
JS_ClearContextThread(context);
JS_ClearRuntimeThread(runtime);
{

View File

@@ -263,22 +263,12 @@ RegExpObject::setSticky(bool enabled)
/* RegExpPrivate inlines. */
inline RegExpPrivateCache *
detail::RegExpPrivate::getOrCreateCache(JSContext *cx)
{
if (RegExpPrivateCache *cache = cx->threadData()->getOrCreateRegExpPrivateCache(cx))
return cache;
js_ReportOutOfMemory(cx);
return NULL;
}
inline bool
detail::RegExpPrivate::cacheLookup(JSContext *cx, JSAtom *atom, RegExpFlag flags,
RegExpPrivateCacheKind targetKind,
AlreadyIncRefed<RegExpPrivate> *result)
{
RegExpPrivateCache *cache = getOrCreateCache(cx);
RegExpPrivateCache *cache = cx->runtime->getRegExpPrivateCache(cx);
if (!cache)
return false;
@@ -307,7 +297,7 @@ detail::RegExpPrivate::cacheInsert(JSContext *cx, JSAtom *atom, RegExpPrivateCac
* so we have to re-lookup the cache (and inside the cache) after the
* allocation is performed.
*/
RegExpPrivateCache *cache = getOrCreateCache(cx);
RegExpPrivateCache *cache = cx->runtime->getRegExpPrivateCache(cx);
if (!cache)
return false;
@@ -401,7 +391,7 @@ detail::RegExpPrivateCode::compile(JSContext *cx, JSLinearString &pattern, Token
#ifdef JS_METHODJIT
if (isJITRuntimeEnabled(cx) && !yarrPattern.m_containsBackreferences) {
JSC::ExecutableAllocator *execAlloc = cx->threadData()->getOrCreateExecutableAllocator(cx);
JSC::ExecutableAllocator *execAlloc = cx->runtime->getExecutableAllocator(cx);
if (!execAlloc) {
js_ReportOutOfMemory(cx);
return false;
@@ -414,7 +404,7 @@ detail::RegExpPrivateCode::compile(JSContext *cx, JSLinearString &pattern, Token
}
#endif
WTF::BumpPointerAllocator *bumpAlloc = cx->threadData()->getOrCreateBumpPointerAllocator(cx);
WTF::BumpPointerAllocator *bumpAlloc = cx->runtime->getBumpPointerAllocator(cx);
if (!bumpAlloc) {
js_ReportOutOfMemory(cx);
return false;
@@ -510,11 +500,13 @@ detail::RegExpPrivate::decref(JSContext *cx)
if (--refCount != 0)
return;
RegExpPrivateCache *cache;
if (source->isAtom() && (cache = cx->threadData()->getRegExpPrivateCache())) {
RegExpPrivateCache::Ptr ptr = cache->lookup(&source->asAtom());
if (ptr && ptr->value.rep() == this)
cache->remove(ptr);
if (RegExpPrivateCache *cache = cx->runtime->maybeRegExpPrivateCache()) {
if (source->isAtom()) {
if (RegExpPrivateCache::Ptr p = cache->lookup(&source->asAtom())) {
if (p->value.rep() == this)
cache->remove(p);
}
}
}
#ifdef DEBUG

View File

@@ -388,7 +388,6 @@ class RegExpPrivate
createUncached(JSContext *cx, JSLinearString *source, RegExpFlag flags,
TokenStream *tokenStream);
static RegExpPrivateCache *getOrCreateCache(JSContext *cx);
static bool cacheLookup(JSContext *cx, JSAtom *atom, RegExpFlag flags,
RegExpPrivateCacheKind kind, AlreadyIncRefed<RegExpPrivate> *result);
static bool cacheInsert(JSContext *cx, JSAtom *atom,

View File

@@ -492,39 +492,15 @@ StackSpace::sizeOfCommitted()
ContextStack::ContextStack(JSContext *cx)
: seg_(NULL),
space_(&JS_THREAD_DATA(cx)->stackSpace),
space_(&cx->runtime->stackSpace),
cx_(cx)
{
threadReset();
}
{}
ContextStack::~ContextStack()
{
JS_ASSERT(!seg_);
}
void
ContextStack::threadReset()
{
#ifdef JS_THREADSAFE
if (cx_->thread())
space_ = &JS_THREAD_DATA(cx_)->stackSpace;
else
space_ = NULL;
#else
space_ = &JS_THREAD_DATA(cx_)->stackSpace;
#endif
}
#ifdef DEBUG
void
ContextStack::assertSpaceInSync() const
{
JS_ASSERT(space_);
JS_ASSERT(space_ == &JS_THREAD_DATA(cx_)->stackSpace);
}
#endif
bool
ContextStack::onTop() const
{

View File

@@ -317,7 +317,6 @@ public:
private:
JSContext* mContext;
intN mContextThread;
nsIThreadJSContextStack* mContextStack;
char* mBuf;
@@ -1355,23 +1354,18 @@ mozJSComponentLoader::ModuleEntry::GetFactory(const mozilla::Module& module,
//----------------------------------------------------------------------
JSCLContextHelper::JSCLContextHelper(mozJSComponentLoader *loader)
: mContext(loader->mContext), mContextThread(0),
: mContext(loader->mContext),
mContextStack(loader->mContextStack),
mBuf(nsnull)
{
mContextStack->Push(mContext);
mContextThread = JS_GetContextThread(mContext);
if (mContextThread) {
JS_BeginRequest(mContext);
}
JS_BeginRequest(mContext);
}
JSCLContextHelper::~JSCLContextHelper()
{
if (mContextStack) {
if (mContextThread) {
JS_EndRequest(mContext);
}
JS_EndRequest(mContext);
mContextStack->Pop(nsnull);
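Since a context is no longer bound to a thread, the helper above begins and ends a request unconditionally. The same bracketing can be expressed as a small RAII class; SpiderMonkey's existing JSAutoRequest (used in the shell changes above) already provides this, so the sketch below is purely illustrative.

    // Illustrative RAII request bracket, equivalent to what JSAutoRequest does.
    class AutoRequestSketch
    {
        JSContext *mCx;
      public:
        explicit AutoRequestSketch(JSContext *cx) : mCx(cx) { JS_BeginRequest(mCx); }
        ~AutoRequestSketch() { JS_EndRequest(mCx); }
    };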

View File

@@ -946,7 +946,7 @@ XPCJSRuntime::WatchdogMain(void *arg)
#endif
PR_WaitCondVar(self->mWatchdogWakeup, sleepInterval);
JS_ASSERT(status == PR_SUCCESS);
js::TriggerOperationCallbacksForActiveContexts(self->mJSRuntime);
js::TriggerOperationCallback(self->mJSRuntime);
}
/* Wake up the main thread waiting for the watchdog to terminate. */
@@ -1043,7 +1043,6 @@ XPCJSRuntime::GetJSCycleCollectionContext()
mJSCycleCollectionContext = JS_NewContext(mJSRuntime, 0);
if (!mJSCycleCollectionContext)
return nsnull;
JS_ClearContextThread(mJSCycleCollectionContext);
}
return mJSCycleCollectionContext;
}
@@ -1067,10 +1066,8 @@ XPCJSRuntime::~XPCJSRuntime()
mWatchdogWakeup = nsnull;
}
if (mJSCycleCollectionContext) {
JS_SetContextThread(mJSCycleCollectionContext);
if (mJSCycleCollectionContext)
JS_DestroyContextNoGC(mJSCycleCollectionContext);
}
#ifdef XPC_DUMP_AT_SHUTDOWN
{
@@ -1639,30 +1636,30 @@ ReportJSRuntimeStats(const JS::IterateData &data, const nsACString &pathPrefix,
"hanging off them." SLOP_BYTES_STRING,
callback, closure);
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/threads/normal"),
nsIMemoryReporter::KIND_HEAP, data.runtimeThreadsNormal,
"Memory used by JSThread objects and their data, "
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/normal"),
nsIMemoryReporter::KIND_HEAP, data.runtimeNormal,
"Memory used by a JSRuntime, "
"excluding memory that is reported by "
"other reporters under 'explicit/js/runtime/'." SLOP_BYTES_STRING,
callback, closure);
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/threads/temporary"),
nsIMemoryReporter::KIND_HEAP, data.runtimeThreadsTemporary,
"Memory held transiently in JSThreads and used during "
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/temporary"),
nsIMemoryReporter::KIND_HEAP, data.runtimeTemporary,
"Memory held transiently in JSRuntime and used during "
"compilation. It mostly holds parse nodes."
SLOP_BYTES_STRING,
callback, closure);
ReportMemoryBytes0(pathPrefix + NS_LITERAL_CSTRING("runtime/threads/regexp-code"),
nsIMemoryReporter::KIND_NONHEAP, data.runtimeThreadsRegexpCode,
ReportMemoryBytes0(pathPrefix + NS_LITERAL_CSTRING("runtime/regexp-code"),
nsIMemoryReporter::KIND_NONHEAP, data.runtimeRegexpCode,
"Memory used by the regexp JIT to hold generated code.",
callback, closure);
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/threads/stack-committed"),
nsIMemoryReporter::KIND_NONHEAP, data.runtimeThreadsStackCommitted,
"Memory used for the thread stacks. This is the committed portions "
"of the stacks; any uncommitted portions are not measured because they "
"hardly cost anything.",
ReportMemoryBytes(pathPrefix + NS_LITERAL_CSTRING("runtime/stack-committed"),
nsIMemoryReporter::KIND_NONHEAP, data.runtimeStackCommitted,
"Memory used for the JS call stack. This is the committed portion "
"of the stack; the uncommitted portion is not measured because it "
"hardly costs anything.",
callback, closure);
ReportGCHeapBytes(pathPrefix +
@@ -1965,9 +1962,9 @@ XPCJSRuntime::XPCJSRuntime(nsXPConnect* aXPConnect)
JS_EnumerateDiagnosticMemoryRegions(DiagnosticMemoryCallback);
#endif
JS_SetAccumulateTelemetryCallback(mJSRuntime, AccumulateTelemetryCallback);
mWatchdogWakeup = JS_NEW_CONDVAR(js::GetRuntimeGCLock(mJSRuntime));
mWatchdogWakeup = PR_NewCondVar(js::GetRuntimeGCLock(mJSRuntime));
if (!mWatchdogWakeup)
NS_RUNTIMEABORT("JS_NEW_CONDVAR failed.");
NS_RUNTIMEABORT("PR_NewCondVar failed.");
js::SetActivityCallback(mJSRuntime, ActivityCallback, this);
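The watchdog now sleeps on a plain NSPR condition variable created against the runtime's GC lock and, when it wakes, triggers the single runtime-wide operation callback. A minimal sketch of that loop follows; the function and parameter names are illustrative, while the PR_* calls and js::TriggerOperationCallback come from this patch and NSPR.

    // Illustrative watchdog loop: sleep on the GC-lock condvar, then request
    // an operation callback for the whole runtime (there is no per-thread
    // callback left to trigger).
    static void
    WatchdogLoopSketch(JSRuntime *rt, PRLock *gcLock, PRCondVar *wakeup, bool *shuttingDown)
    {
        PR_Lock(gcLock);
        while (!*shuttingDown) {
            PR_WaitCondVar(wakeup, PR_TicksPerSecond());   // wake at least once per second
            if (!*shuttingDown)
                js::TriggerOperationCallback(rt);
        }
        PR_Unlock(gcLock);
    }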

View File

@@ -55,7 +55,6 @@ using namespace mozilla;
XPCJSContextStack::~XPCJSContextStack()
{
if (mOwnSafeJSContext) {
JS_SetContextThread(mOwnSafeJSContext);
JS_DestroyContext(mOwnSafeJSContext);
mOwnSafeJSContext = nsnull;
}
@@ -109,7 +108,6 @@ GetPrincipalFromCx(JSContext *cx)
bool
XPCJSContextStack::Push(JSContext *cx)
{
MOZ_ASSERT_IF(cx, JS_GetContextThread(cx));
if (mStack.Length() == 0) {
mStack.AppendElement(cx);
return true;
@@ -423,7 +421,7 @@ XPCPerThreadData::GetDataImpl(JSContext *cx)
}
if (cx && !sMainJSThread && NS_IsMainThread()) {
sMainJSThread = js::GetContextThread(cx);
sMainJSThread = js::GetOwnerThread(cx);
sMainThreadData = data;

View File

@@ -68,10 +68,8 @@ bool AutoScriptEvaluate::StartEvaluating(JSObject *scope, JSErrorReporter errorR
JS_SetErrorReporter(mJSContext, errorReporter);
mErrorReporterSet = true;
}
mContextHasThread = JS_GetContextThread(mJSContext);
if (mContextHasThread)
JS_BeginRequest(mJSContext);
JS_BeginRequest(mJSContext);
if (!mEnterCompartment.enter(mJSContext, scope))
return false;
@@ -102,8 +100,7 @@ AutoScriptEvaluate::~AutoScriptEvaluate()
else
JS_ClearPendingException(mJSContext);
if (mContextHasThread)
JS_EndRequest(mJSContext);
JS_EndRequest(mJSContext);
// If this is a JSContext that has a private context that provides a
// nsIXPCScriptNotify interface, then notify the object the script has
@@ -543,8 +540,8 @@ GetContextFromObject(JSObject *obj)
if (xpcc) {
JSContext *cx = xpcc->GetJSContext();
if (JS_GetContextThread(cx) == JS_GetCurrentThread())
return cx;
JS_AbortIfWrongThread(JS_GetRuntime(cx));
return cx;
}
return nsnull;
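The change above replaces the old "which thread owns this context?" query with an assertion that the caller is already on the runtime's owner thread. A one-function sketch of the new guard; the helper name is an assumption made for the example, while JS_AbortIfWrongThread and JS_GetRuntime are the calls used above.

    // Illustrative guard: abort early if used off the owning thread, instead
    // of comparing JS_GetContextThread() against the current thread.
    static JSContext *
    AssertSameThreadAndGet(JSContext *cx)
    {
        JS_AbortIfWrongThread(JS_GetRuntime(cx));
        return cx;
    }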

View File

@@ -554,14 +554,10 @@ nsXPConnect::BeginCycleCollection(nsCycleCollectionTraversalCallback &cb,
if (!cx)
return NS_ERROR_OUT_OF_MEMORY;
// Clear after mCycleCollectionContext is destroyed
JS_SetContextThread(cx);
NS_ASSERTION(!mCycleCollectionContext, "Didn't call FinishTraverse?");
mCycleCollectionContext = new XPCCallContext(NATIVE_CALLER, cx);
if (!mCycleCollectionContext->IsValid()) {
mCycleCollectionContext = nsnull;
JS_ClearContextThread(cx);
return NS_ERROR_FAILURE;
}
@@ -610,11 +606,15 @@ nsXPConnect::BeginCycleCollection(nsCycleCollectionTraversalCallback &cb,
return NS_OK;
}
void
bool
nsXPConnect::NotifyLeaveMainThread()
{
NS_ABORT_IF_FALSE(NS_IsMainThread(), "Off main thread");
JS_ClearRuntimeThread(mRuntime->GetJSRuntime());
JSRuntime *rt = mRuntime->GetJSRuntime();
if (JS_IsInRequest(rt) || JS_IsInSuspendedRequest(rt))
return false;
JS_ClearRuntimeThread(rt);
return true;
}
void
@@ -641,11 +641,8 @@ nsXPConnect::NotifyEnterMainThread()
nsresult
nsXPConnect::FinishTraverse()
{
if (mCycleCollectionContext) {
JSContext *cx = mCycleCollectionContext->GetJSContext();
if (mCycleCollectionContext)
mCycleCollectionContext = nsnull;
JS_ClearContextThread(cx);
}
return NS_OK;
}

View File

@@ -539,7 +539,7 @@ public:
nsCycleCollectionTraversalCallback &cb);
// nsCycleCollectionLanguageRuntime
virtual void NotifyLeaveMainThread();
virtual bool NotifyLeaveMainThread();
virtual void NotifyEnterCycleCollectionThread();
virtual void NotifyLeaveCycleCollectionThread();
virtual void NotifyEnterMainThread();
@@ -3665,9 +3665,7 @@ public:
JS_Assert("NS_IsMainThread()", __FILE__, __LINE__);
if (cx) {
NS_ASSERTION(js::GetContextThread(cx), "Uh, JS context w/o a thread?");
if (js::GetContextThread(cx) == sMainJSThread)
if (js::GetOwnerThread(cx) == sMainJSThread)
return sMainThreadData;
} else if (sMainThreadData && sMainThreadData->mThread == PR_GetCurrentThread()) {
return sMainThreadData;
@@ -3770,7 +3768,7 @@ public:
{sMainJSThread = nsnull; sMainThreadData = nsnull;}
static bool IsMainThread(JSContext *cx)
{ return js::GetContextThread(cx) == sMainJSThread; }
{ return js::GetOwnerThread(cx) == sMainJSThread; }
private:
XPCPerThreadData();

View File

@@ -75,8 +75,6 @@ _CHROME_FILES = \
test_getweakmapkeys.xul \
test_weakmaps.xul \
test_bug706301.xul \
test_ccbeginfail.xul \
test_ccdump.xul \
$(NULL)
# Disabled until this test gets updated to test the new proxy based

View File

@@ -1,67 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="chrome://global/skin"?>
<?xml-stylesheet type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css"?>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=710761
-->
<window title="Mozilla Bug 710761"
xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"/>
<!-- test results are displayed in the html:body -->
<body xmlns="http://www.w3.org/1999/xhtml">
<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=710761"
target="_blank">Mozilla Bug 710761</a>
</body>
<!-- test code goes here -->
<script type="application/javascript">
<![CDATA[
/** Test for Bug 710761 **/
let noCallbacks = true;
var beginFailListener = {
QueryInterface: function QueryInterface(aIID) {
if (aIID.equals(Components.interfaces.nsICycleCollectorListener) ||
aIID.equals(Components.interfaces.nsISupports))
return this;
throw Components.results.NS_NOINTERFACE;
},
/* nsICycleCollectorListener */
begin: function () {
throw Components.results.NS_ERROR_FAILURE;
},
noteRefCountedObject: function (addr, rc, descr) {
noCallbacks = false;
},
noteGCedObject: function (addr, marked, descr) {
noCallbacks = false;
},
noteEdge: function (addr, descr) {
noCallbacks = false;
},
beginResults: function () {
noCallbacks = false;
},
describeRoot: function (addr, known) {
noCallbacks = false;
},
describeGarbage: function (addr) {
noCallbacks = false;
},
end: function () {
noCallbacks = false;
},
};
window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
.getInterface(Components.interfaces.nsIDOMWindowUtils)
.cycleCollect(beginFailListener);
ok(noCallbacks, "If cycle collector listener begin fails, no further callbacks should be called.");
]]>
</script>
</window>

View File

@@ -1,49 +0,0 @@
<?xml version="1.0"?>
<?xml-stylesheet type="text/css" href="chrome://global/skin"?>
<?xml-stylesheet type="text/css" href="chrome://mochikit/content/tests/SimpleTest/test.css"?>
<!--
https://bugzilla.mozilla.org/show_bug.cgi?id=709162
-->
<window title="Mozilla Bug 709162"
xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<script type="application/javascript" src="chrome://mochikit/content/tests/SimpleTest/SimpleTest.js"/>
<!-- test results are displayed in the html:body -->
<body xmlns="http://www.w3.org/1999/xhtml">
<a href="https://bugzilla.mozilla.org/show_bug.cgi?id=709162"
target="_blank">Mozilla Bug 709162</a>
</body>
<!-- test code goes here -->
<script type="application/javascript">
<![CDATA[
/** Test for Bug 709162 **/
var emptyListener = {
QueryInterface: function QueryInterface(aIID) {
if (aIID.equals(Components.interfaces.nsICycleCollectorListener) ||
aIID.equals(Components.interfaces.nsISupports))
return this;
throw Components.results.NS_NOINTERFACE;
},
/* nsICycleCollectorListener */
begin: function () {},
noteRefCountedObject: function (addr, rc, descr) {},
noteGCedObject: function (addr, marked, descr) {},
noteEdge: function (addr, descr) {},
beginResults: function () {},
describeRoot: function (addr, known) {},
describeGarbage: function (addr) {},
end: function () {},
};
window.QueryInterface(Components.interfaces.nsIInterfaceRequestor)
.getInterface(Components.interfaces.nsIDOMWindowUtils)
.cycleCollect(emptyListener);
ok(true, "Dump cycle collector graph without crashing.");
]]>
</script>
</window>

View File

@@ -3748,10 +3748,13 @@ public:
aListener = nsnull;
mListener = aListener;
GetJSRuntime()->NotifyLeaveMainThread();
mRequest.Notify();
mReply.Wait();
GetJSRuntime()->NotifyEnterMainThread();
if (GetJSRuntime()->NotifyLeaveMainThread()) {
mRequest.Notify();
mReply.Wait();
GetJSRuntime()->NotifyEnterMainThread();
} else {
mCollected = mCollector->BeginCollection(mListener);
}
mListener = nsnull;

View File

@@ -91,8 +91,6 @@ struct nsCycleCollectionJSRuntime : public nsCycleCollectionLanguageRuntime
{
/**
* Called before/after transitioning to/from the main thread.
*
* NotifyLeaveMainThread may return 'false' to prevent the cycle collector
* from leaving the main thread.
*/
virtual void NotifyLeaveMainThread() = 0;
virtual bool NotifyLeaveMainThread() = 0;
virtual void NotifyEnterCycleCollectionThread() = 0;
virtual void NotifyLeaveCycleCollectionThread() = 0;
virtual void NotifyEnterMainThread() = 0;
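For reference, a caller-side sketch of honoring the new return value, paralleling the nsCycleCollectorRunner change earlier in this patch; every name below other than the two Notify methods is an illustrative stand-in.

    // Illustrative caller: only move collection off the main thread when the
    // runtime says it is safe (i.e. it is not inside a JS request).
    static bool
    CollectRespectingMainThread(nsCycleCollectionJSRuntime *rt,
                                bool (*collectOffMainThread)(),
                                bool (*collectOnMainThread)())
    {
        if (rt->NotifyLeaveMainThread()) {
            bool collected = collectOffMainThread();   // e.g. notify the collector thread and wait
            rt->NotifyEnterMainThread();
            return collected;
        }
        // A request is active: run this collection on the main thread instead.
        return collectOnMainThread();
    }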