Move serialized cycle detector for __proto__ and __parent__ into js_GC (414452, r=igor+shaver).

brendan@mozilla.org 2008-01-29 22:29:49 -08:00
parent 7fb4bb6085
commit 6ac8056119
6 changed files with 226 additions and 216 deletions

js/src/jsapi.c

@@ -732,12 +732,6 @@ JS_NewRuntime(uint32 maxbytes)
rt->stateChange = JS_NEW_CONDVAR(rt->gcLock);
if (!rt->stateChange)
goto bad;
rt->setSlotLock = JS_NEW_LOCK();
if (!rt->setSlotLock)
goto bad;
rt->setSlotDone = JS_NEW_CONDVAR(rt->setSlotLock);
if (!rt->setSlotDone)
goto bad;
rt->scopeSharingDone = JS_NEW_CONDVAR(rt->gcLock);
if (!rt->scopeSharingDone)
goto bad;
@@ -798,10 +792,6 @@ JS_DestroyRuntime(JSRuntime *rt)
JS_DESTROY_LOCK(rt->rtLock);
if (rt->stateChange)
JS_DESTROY_CONDVAR(rt->stateChange);
if (rt->setSlotLock)
JS_DESTROY_LOCK(rt->setSlotLock);
if (rt->setSlotDone)
JS_DESTROY_CONDVAR(rt->setSlotDone);
if (rt->scopeSharingDone)
JS_DESTROY_CONDVAR(rt->scopeSharingDone);
if (rt->debuggerLock)

js/src/jscntxt.h

@@ -163,6 +163,16 @@ typedef struct JSPropertyTreeEntry {
*/
typedef struct JSNativeIteratorState JSNativeIteratorState;
typedef struct JSSetSlotRequest JSSetSlotRequest;
struct JSSetSlotRequest {
JSObject *obj; /* object containing slot to set */
JSObject *pobj; /* new proto or parent reference */
uint16 slot; /* which to set, proto or parent */
uint16 errnum; /* JSMSG_NOT_AN_ERROR or error result */
JSSetSlotRequest *next; /* next request in GC worklist */
};
struct JSRuntime {
/* Runtime state, synchronized by the stateChange/gcLock condvar/lock. */
JSRuntimeState state;
@@ -218,6 +228,14 @@ struct JSRuntime {
JSTraceDataOp gcExtraRootsTraceOp;
void *gcExtraRootsData;
/*
* Used to serialize cycle checks when setting __proto__ or __parent__ by
* requesting that the GC handle the required cycle detection. If the GC hasn't
* been poked, it won't scan for garbage. This member is protected by
* rt->gcLock.
*/
JSSetSlotRequest *setSlotRequests;
/* Random number generator state, used by jsmath.c. */
JSBool rngInitialized;
int64 rngMultiplier;
@@ -276,12 +294,6 @@ struct JSRuntime {
/* Used to synchronize down/up state change; protected by gcLock. */
PRCondVar *stateChange;
/* Used to serialize cycle checks when setting __proto__ or __parent__. */
PRLock *setSlotLock;
PRCondVar *setSlotDone;
JSBool setSlotBusy;
JSScope *setSlotScope; /* deadlock avoidance, see jslock.c */
/*
* State for sharing single-threaded scopes, once a second thread tries to
* lock a scope. The scopeSharingDone condvar is protected by rt->gcLock,
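The new setSlotRequests member turns proto and parent slot setting into a
GC-serviced worklist: the caller links a request node, which lives on the
caller's own stack, onto the list under rt->gcLock, and js_GC drains the
list while every other thread is excluded. A minimal standalone sketch of
that handoff, using pthreads and illustrative names rather than the
engine's actual macros and types:

#include <pthread.h>
#include <stdio.h>

typedef struct Request {
    int            slot;      /* which slot to set (illustrative) */
    int            errnum;    /* 0 once serviced without error */
    struct Request *next;     /* next request in the worklist */
} Request;

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static Request *requests = NULL;    /* list head, protected by lock */

static void ServiceAll(void)        /* caller holds lock */
{
    Request *r;
    while ((r = requests) != NULL) {
        requests = r->next;
        r->errnum = 0;              /* "set the slot", record no error */
    }
}

static int SetSlotViaRequest(int slot)
{
    Request r;                      /* on-stack node: no allocation */
    r.slot = slot;
    r.errnum = -1;
    pthread_mutex_lock(&lock);
    r.next = requests;
    requests = &r;
    ServiceAll();                   /* engine: js_GC(cx, GC_SET_SLOT_REQUEST) */
    pthread_mutex_unlock(&lock);
    return r.errnum;                /* safe: the request was serviced */
}

int main(void)
{
    printf("errnum = %d\n", SetSlotViaRequest(1));
    return 0;
}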

js/src/jsgc.c

@@ -2317,9 +2317,95 @@ js_TraceRuntime(JSTracer *trc, JSBool allAtoms)
rt->gcExtraRootsTraceOp(trc, rt->gcExtraRootsData);
}
static void
ProcessSetSlotRequest(JSContext *cx, JSSetSlotRequest *ssr)
{
JSObject *obj, *pobj;
uint32 slot;
obj = ssr->obj;
pobj = ssr->pobj;
slot = ssr->slot;
while (pobj) {
JSClass *clasp = STOBJ_GET_CLASS(pobj);
if (clasp->flags & JSCLASS_IS_EXTENDED) {
JSExtendedClass *xclasp = (JSExtendedClass *) clasp;
if (xclasp->wrappedObject) {
/* If there is no wrapped object, use the wrapper. */
JSObject *wrapped = xclasp->wrappedObject(cx, pobj);
if (wrapped)
pobj = wrapped;
}
}
if (pobj == obj) {
ssr->errnum = JSMSG_CYCLIC_VALUE;
return;
}
pobj = JSVAL_TO_OBJECT(STOBJ_GET_SLOT(pobj, slot));
}
pobj = ssr->pobj;
if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
JSScope *scope, *newscope;
JSObject *oldproto;
/* Check to see whether obj shares its prototype's scope. */
scope = OBJ_SCOPE(obj);
oldproto = STOBJ_GET_PROTO(obj);
if (oldproto && OBJ_SCOPE(oldproto) == scope) {
/* Either obj needs a new empty scope, or it should share pobj's. */
if (!pobj ||
!OBJ_IS_NATIVE(pobj) ||
OBJ_GET_CLASS(cx, pobj) != STOBJ_GET_CLASS(oldproto)) {
/*
* With no proto and no scope of its own, obj is truly empty.
*
* If pobj is not native, obj needs its own empty scope -- it
* should not continue to share oldproto's scope once oldproto
* is not on obj's prototype chain. That would put properties
* from oldproto's scope ahead of properties defined by pobj,
* in lookup order.
*
* If pobj's class differs from oldproto's, we may need a new
* scope to handle differences in private and reserved slots,
* so we suboptimally but safely make one.
*/
if (!js_GetMutableScope(cx, obj)) {
ssr->errnum = JSMSG_OUT_OF_MEMORY;
return;
}
} else if (OBJ_SCOPE(pobj) != scope) {
newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
obj->map = &newscope->map;
js_DropObjectMap(cx, &scope->map, obj);
JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
}
}
#if 0
/*
* Regenerate property cache type ids for all of the scopes along the
* old prototype chain, in case any property cache entries were filled
* by looking up starting from obj.
*/
while (oldproto && OBJ_IS_NATIVE(oldproto)) {
scope = OBJ_SCOPE(oldproto);
SCOPE_GENERATE_PCTYPE(cx, scope);
oldproto = STOBJ_GET_PROTO(scope->object);
}
#endif
}
/* Finally, do the deed. */
STOBJ_SET_SLOT(obj, slot, OBJECT_TO_JSVAL(pobj));
}
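The first loop in ProcessSetSlotRequest above is a plain reachability walk:
starting from the proposed proto or parent, it follows the same slot link
and reports a cycle if it ever reaches the object being mutated. Because
the walk runs inside the GC with all other threads excluded, it needs no
per-object locking. A standalone sketch of the check, with an illustrative
node type standing in for JSObject:

#include <stdio.h>

typedef struct Node {
    struct Node *proto;     /* stands in for the proto or parent slot */
} Node;

/* Return 0 on success, -1 if obj->proto = pobj would close a cycle,
 * i.e. if obj is already reachable from pobj along proto links. */
static int SetProto(Node *obj, Node *pobj)
{
    Node *p;
    for (p = pobj; p; p = p->proto) {
        if (p == obj)
            return -1;      /* the engine reports JSMSG_CYCLIC_VALUE */
    }
    obj->proto = pobj;
    return 0;
}

int main(void)
{
    Node a = {NULL}, b = {NULL}, c = {NULL};
    printf("%d\n", SetProto(&b, &a));   /* 0: b -> a is fine */
    printf("%d\n", SetProto(&c, &b));   /* 0: c -> b -> a is fine */
    printf("%d\n", SetProto(&a, &c));   /* -1: would close a cycle */
    return 0;
}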
/*
* When gckind is GC_LAST_DITCH, it indicates a call from js_NewGCThing with
* rt->gcLock already held and when the lock should be kept on return.
* The gckind flag bit GC_LOCK_HELD indicates a call from js_NewGCThing with
* rt->gcLock already held, so the lock should be kept on return.
*/
void
js_GC(JSContext *cx, JSGCInvocationKind gckind)
@@ -2350,8 +2436,11 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
JS_ASSERT(!JS_IS_RUNTIME_LOCKED(rt));
#endif
if (gckind == GC_LAST_DITCH) {
/* The last ditch GC preserves all atoms and weak roots. */
if (gckind & GC_KEEP_ATOMS) {
/*
* The set slot request and last ditch GC kinds preserve all atoms and
* weak roots.
*/
keepAtoms = JS_TRUE;
} else {
/* Keep atoms when a suspended compile is running on another context. */
@@ -2379,17 +2468,17 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
if (callback) {
JSBool ok;
if (gckind == GC_LAST_DITCH)
if (gckind & GC_LOCK_HELD)
JS_UNLOCK_GC(rt);
ok = callback(cx, JSGC_BEGIN);
if (gckind == GC_LAST_DITCH)
if (gckind & GC_LOCK_HELD)
JS_LOCK_GC(rt);
if (!ok && gckind != GC_LAST_CONTEXT)
return;
}
/* Lock out other GC allocator and collector invocations. */
if (gckind != GC_LAST_DITCH)
if (!(gckind & GC_LOCK_HELD))
JS_LOCK_GC(rt);
METER(rt->gcStats.poke++);
@@ -2403,7 +2492,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
JS_ASSERT(rt->gcLevel > 0);
rt->gcLevel++;
METER_UPDATE_MAX(rt->gcStats.maxlevel, rt->gcLevel);
if (gckind != GC_LAST_DITCH)
if (!(gckind & GC_LOCK_HELD))
JS_UNLOCK_GC(rt);
return;
}
@@ -2459,7 +2548,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
JS_AWAIT_GC_DONE(rt);
if (requestDebit)
rt->requestCount += requestDebit;
if (gckind != GC_LAST_DITCH)
if (!(gckind & GC_LOCK_HELD))
JS_UNLOCK_GC(rt);
return;
}
@@ -2491,11 +2580,46 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
* waiting for GC to finish.
*/
rt->gcRunning = JS_TRUE;
if (gckind == GC_SET_SLOT_REQUEST) {
JSSetSlotRequest *ssr;
while ((ssr = rt->setSlotRequests) != NULL) {
rt->setSlotRequests = ssr->next;
JS_UNLOCK_GC(rt);
ssr->next = NULL;
ProcessSetSlotRequest(cx, ssr);
JS_LOCK_GC(rt);
}
/*
* We assume here that killing links to parent and prototype objects
* does not create garbage (such objects typically are long-lived and
* widely shared, e.g. global objects, Function.prototype, etc.). We
* collect garbage only if a racing thread attempted GC and is waiting
* for us to finish (gcLevel > 1) or if someone already poked us.
*/
if (rt->gcLevel == 1 && !rt->gcPoke)
goto done_running;
rt->gcLevel = 1;
rt->gcPoke = JS_FALSE;
}
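Note the locking shape of the drain loop above: rt->gcLock guards only the
list head, so each request is unlinked while the lock is held and then
processed with the lock released, since processing may take object or
scope locks. A pthreads sketch of that pattern, with illustrative names:

#include <pthread.h>

typedef struct Item { struct Item *next; } Item;

static pthread_mutex_t listLock = PTHREAD_MUTEX_INITIALIZER;
static Item *head = NULL;               /* protected by listLock */

static void Process(Item *it) { (void) it; /* may block or allocate */ }

static void DrainAll(void)              /* caller holds listLock */
{
    Item *it;
    while ((it = head) != NULL) {
        head = it->next;                /* unlink under the lock... */
        pthread_mutex_unlock(&listLock);
        it->next = NULL;
        Process(it);                    /* ...process without it... */
        pthread_mutex_lock(&listLock);  /* ...retake to recheck the head */
    }
}

int main(void)
{
    Item a = { NULL }, b = { &a };
    head = &b;
    pthread_mutex_lock(&listLock);
    DrainAll();
    pthread_mutex_unlock(&listLock);
    return 0;
}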
JS_UNLOCK_GC(rt);
/* Reset malloc counter. */
rt->gcMallocBytes = 0;
#if 0
/*
* Clear property cache weak references and disable the cache so nothing
* can fill it during GC (this is paranoia, since scripts should not run
* during GC).
*/
js_DisablePropertyCache(cx);
js_FlushPropertyCache(cx);
#endif
#ifdef JS_DUMP_SCOPE_METERS
{ extern void js_DumpScopeMeters(JSRuntime *rt);
js_DumpScopeMeters(rt);
@@ -2521,17 +2645,25 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
continue;
memset(acx->thread->gcFreeLists, 0, sizeof acx->thread->gcFreeLists);
GSN_CACHE_CLEAR(&acx->thread->gsnCache);
#if 0
js_FlushPropertyCache(acx);
#endif
}
#else
/* The thread-unsafe case just has to clear the runtime's GSN cache. */
GSN_CACHE_CLEAR(&rt->gsnCache);
#endif
restart:
rt->gcNumber++;
JS_ASSERT(!rt->gcUntracedArenaStackTop);
JS_ASSERT(rt->gcTraceLaterCount == 0);
#if 0
/* Reset the property cache's type id generator so we can compress ids. */
rt->pcTypeGen = 0;
#endif
/*
* Mark phase.
*/
@@ -2762,8 +2894,15 @@ restart:
JS_UNLOCK_GC(rt);
goto restart;
}
rt->gcLevel = 0;
#if 0
if (!(rt->pcTypeGen & PCTYPE_OVERFLOW_BIT))
js_EnablePropertyCache(cx);
#endif
rt->gcLastBytes = rt->gcBytes;
done_running:
rt->gcLevel = 0;
rt->gcRunning = JS_FALSE;
#ifdef JS_THREADSAFE
@@ -2774,9 +2913,9 @@ restart:
JS_NOTIFY_GC_DONE(rt);
/*
* Unlock unless we have GC_LAST_DITCH which requires locked GC on return.
* Unlock unless we have GC_LOCK_HELD which requires locked GC on return.
*/
if (gckind != GC_LAST_DITCH)
if (!(gckind & GC_LOCK_HELD))
JS_UNLOCK_GC(rt);
#endif
@@ -2790,11 +2929,11 @@ restart:
JSWeakRoots savedWeakRoots;
JSTempValueRooter tvr;
if (gckind == GC_LAST_DITCH) {
if (gckind & GC_KEEP_ATOMS) {
/*
* We allow JSGC_END implementation to force a full GC or allocate
* new GC things. Thus we must protect the weak roots from GC or
* overwrites.
* new GC things. Thus we must protect the weak roots from garbage
* collection and overwrites.
*/
savedWeakRoots = cx->weakRoots;
JS_PUSH_TEMP_ROOT_WEAK_COPY(cx, &savedWeakRoots, &tvr);
@ -2804,7 +2943,7 @@ restart:
(void) callback(cx, JSGC_END);
if (gckind == GC_LAST_DITCH) {
if (gckind & GC_KEEP_ATOMS) {
JS_LOCK_GC(rt);
JS_UNKEEP_ATOMS(rt);
JS_POP_TEMP_ROOT(cx, &tvr);

js/src/jsgc.h

@@ -234,19 +234,33 @@ js_TraceContext(JSTracer *trc, JSContext *acx);
*/
typedef enum JSGCInvocationKind {
/* Normal invocation. */
GC_NORMAL,
GC_NORMAL = 0,
/*
* Called from js_DestroyContext for last JSContext in a JSRuntime, when
* it is imperative that rt->gcPoke gets cleared early in js_GC.
*/
GC_LAST_CONTEXT,
GC_LAST_CONTEXT = 1,
/*
* Flag bit telling js_GC that the caller has already acquired rt->gcLock.
* Currently, this flag is set for the invocation kinds that also preserve
* atoms and weak roots, so we don't need another bit for GC_KEEP_ATOMS.
*/
GC_LOCK_HELD = 2,
GC_KEEP_ATOMS = GC_LOCK_HELD,
/*
* Called from js_SetProtoOrParent with a request to set an object's proto
* or parent slot inserted on rt->setSlotRequests.
*/
GC_SET_SLOT_REQUEST = GC_LOCK_HELD | 0,
/*
* Called from js_NewGCThing as a last-ditch GC attempt. See comments
* before js_GC definition for details.
* in jsgc.c just before js_GC's definition for details.
*/
GC_LAST_DITCH
GC_LAST_DITCH = GC_LOCK_HELD | 1
} JSGCInvocationKind;
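Because GC_LOCK_HELD is a flag bit rather than a distinct enumerator value,
call sites test it with a bitwise AND instead of equality, so one test now
covers both GC_SET_SLOT_REQUEST and GC_LAST_DITCH. A compilable sketch of
the pattern, with illustrative names mirroring the enum above:

#include <stdio.h>

typedef enum InvocationKind {
    KIND_NORMAL       = 0,
    KIND_LAST_CONTEXT = 1,
    KIND_LOCK_HELD    = 2,                  /* flag bit */
    KIND_SET_SLOT     = KIND_LOCK_HELD | 0, /* == 2 */
    KIND_LAST_DITCH   = KIND_LOCK_HELD | 1  /* == 3 */
} InvocationKind;

static void Invoke(InvocationKind kind)
{
    if (!(kind & KIND_LOCK_HELD))
        printf("kind %d: take the lock here\n", kind);
    else
        printf("kind %d: caller already holds the lock\n", kind);
}

int main(void)
{
    Invoke(KIND_NORMAL);        /* takes the lock */
    Invoke(KIND_LAST_CONTEXT);  /* takes the lock */
    Invoke(KIND_SET_SLOT);      /* lock already held */
    Invoke(KIND_LAST_DITCH);    /* lock already held */
    return 0;
}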
extern void

js/src/jslock.c

@@ -353,30 +353,7 @@ ShareScope(JSContext *cx, JSScope *scope)
JS_NOTIFY_ALL_CONDVAR(rt->scopeSharingDone);
}
js_InitLock(&scope->lock);
if (scope == rt->setSlotScope) {
/*
* Nesting locks on another thread that's using scope->ownercx: give
* the held lock a reentrancy count of 1 and set its lock.owner field
* directly (no compare-and-swap needed while scope->ownercx is still
* non-null). See below in ClaimScope, before the ShareScope call,
* for more on why this is necessary.
*
* If NSPR_LOCK is defined, we cannot deadlock holding rt->gcLock and
* acquiring scope->lock.fat here, against another thread holding that
* fat lock and trying to grab rt->gcLock. This is because no other
* thread can attempt to acquire scope->lock.fat until scope->ownercx
* is null *and* our thread has released rt->gcLock, which interlocks
* scope->ownercx's transition to null against tests of that member
* in ClaimScope.
*/
scope->lock.owner = CX_THINLOCK_ID(scope->ownercx);
#ifdef NSPR_LOCK
JS_ACQUIRE_LOCK((JSLock*)scope->lock.fat);
#endif
scope->u.count = 1;
} else {
scope->u.count = 0;
}
scope->u.count = 0;
js_FinishSharingScope(cx, scope);
}
@@ -470,30 +447,19 @@ ClaimScope(JSScope *scope, JSContext *cx)
/*
* Avoid deadlock if scope's owner context is waiting on a scope that
* we own, by revoking scope's ownership. This approach to deadlock
* avoidance works because the engine never nests scope locks, except
* for the notable case of js_SetProtoOrParent (see jsobj.c).
* avoidance works because the engine never nests scope locks.
*
* If cx could hold locks on ownercx->scopeToShare, or if ownercx
* could hold locks on scope, we would need to keep reentrancy counts
* for all such "flyweight" (ownercx != NULL) locks, so that control
* would unwind properly once these locks became "thin" or "fat".
* Apart from the js_SetProtoOrParent exception, the engine promotes
* a scope from exclusive to shared access only when locking, never
* when holding or unlocking.
*
* If ownercx's thread is calling js_SetProtoOrParent, trying to lock
* the inner scope (the scope of the object being set as the prototype
* of the outer object), ShareScope will find the outer object's scope
* at rt->setSlotScope. If it's the same as scope, we give it a lock
* held by ownercx's thread with reentrancy count of 1, then we return
* here and break. After that we unwind to js_[GS]etSlotThreadSafe or
* js_LockScope (our caller), where we wait on the newly-fattened lock
* until ownercx's thread unwinds from js_SetProtoOrParent.
* The engine promotes a scope from exclusive to shared access only
* when locking, never when holding or unlocking.
*
* Avoid deadlock before any of this scope/context cycle detection if
* cx is on the active GC's thread, because in that case, no requests
* will run until the GC completes. Any scope wanted by the GC (from
* a finalizer) that can't be claimed must be slated for sharing.
* a finalizer) that can't be claimed must become shared.
*/
if (rt->gcThread == cx->thread ||
(ownercx->scopeToShare &&

js/src/jsobj.c

@@ -274,84 +274,38 @@ out:
JSBool
js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj)
{
JSSetSlotRequest ssr;
JSRuntime *rt;
JSObject *obj2, *oldproto;
JSClass *clasp;
JSScope *scope, *newscope;
/*
* Serialize all proto and parent setting in order to detect cycles.
* We nest locks in this function, and only here, in the following orders:
*
* (1) rt->setSlotLock < pobj's scope lock;
* rt->setSlotLock < pobj's proto-or-parent's scope lock;
* rt->setSlotLock < pobj's grand-proto-or-parent's scope lock;
* etc...
* (2) rt->setSlotLock < obj's scope lock < pobj's scope lock.
*
* We avoid AB-BA deadlock by restricting obj from being on pobj's parent
* or proto chain (pobj may already be on obj's parent or proto chain; it
* could be moving up or down). We finally order obj with respect to pobj
* at the bottom of this routine (just before releasing rt->setSlotLock),
* by making pobj be obj's prototype or parent.
*
* After we have set the slot and released rt->setSlotLock, another call
* to js_SetProtoOrParent could nest locks according to the first order
* list above, but it cannot deadlock with any other thread. For there
* to be a deadlock, other parts of the engine would have to nest scope
* locks in the opposite order. XXXbe ensure they don't!
*/
rt = cx->runtime;
#ifdef JS_THREADSAFE
JS_ACQUIRE_LOCK(rt->setSlotLock);
while (rt->setSlotBusy) {
jsrefcount saveDepth;
/* Take pains to avoid nesting rt->gcLock inside rt->setSlotLock! */
JS_RELEASE_LOCK(rt->setSlotLock);
saveDepth = JS_SuspendRequest(cx);
JS_ACQUIRE_LOCK(rt->setSlotLock);
if (rt->setSlotBusy)
JS_WAIT_CONDVAR(rt->setSlotDone, JS_NO_TIMEOUT);
JS_RELEASE_LOCK(rt->setSlotLock);
JS_ResumeRequest(cx, saveDepth);
JS_ACQUIRE_LOCK(rt->setSlotLock);
}
rt->setSlotBusy = JS_TRUE;
JS_RELEASE_LOCK(rt->setSlotLock);
#define SET_SLOT_DONE(rt) \
JS_BEGIN_MACRO \
JS_ACQUIRE_LOCK((rt)->setSlotLock); \
(rt)->setSlotBusy = JS_FALSE; \
JS_NOTIFY_ALL_CONDVAR((rt)->setSlotDone); \
JS_RELEASE_LOCK((rt)->setSlotLock); \
JS_END_MACRO
#else
#define SET_SLOT_DONE(rt) /* nothing */
#endif
obj2 = pobj;
while (obj2) {
clasp = OBJ_GET_CLASS(cx, obj2);
if (clasp->flags & JSCLASS_IS_EXTENDED) {
JSExtendedClass *xclasp = (JSExtendedClass *) clasp;
if (xclasp->wrappedObject) {
/* If there is no wrapped object, just use the wrapper. */
JSObject *wrapped = xclasp->wrappedObject(cx, obj2);
if (wrapped)
obj2 = wrapped;
}
/* Optimize the null case to avoid the unnecessary overhead of js_GC. */
if (!pobj) {
JS_LOCK_OBJ(cx, obj);
if (slot == JSSLOT_PROTO && !js_GetMutableScope(cx, obj)) {
JS_UNLOCK_OBJ(cx, obj);
return JS_FALSE;
}
LOCKED_OBJ_SET_SLOT(obj, slot, JSVAL_NULL);
JS_UNLOCK_OBJ(cx, obj);
return JS_TRUE;
}
if (obj2 == obj) {
SET_SLOT_DONE(rt);
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL,
JSMSG_CYCLIC_VALUE,
ssr.obj = obj;
ssr.pobj = pobj;
ssr.slot = (uint16) slot;
ssr.errnum = (uint16) JSMSG_NOT_AN_ERROR;
rt = cx->runtime;
JS_LOCK_GC(rt);
ssr.next = rt->setSlotRequests;
rt->setSlotRequests = &ssr;
js_GC(cx, GC_SET_SLOT_REQUEST);
JS_UNLOCK_GC(rt);
if (ssr.errnum != JSMSG_NOT_AN_ERROR) {
if (ssr.errnum == JSMSG_OUT_OF_MEMORY) {
JS_ReportOutOfMemory(cx);
} else {
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, ssr.errnum,
#if JS_HAS_OBJ_PROTO_PROP
object_props[slot].name
#else
@@ -359,75 +313,10 @@ js_SetProtoOrParent(JSContext *cx, JSObject *obj, uint32 slot, JSObject *pobj)
: js_parent_str
#endif
);
return JS_FALSE;
}
obj2 = JSVAL_TO_OBJECT(OBJ_GET_SLOT(cx, obj2, slot));
return JS_FALSE;
}
if (slot == JSSLOT_PROTO && OBJ_IS_NATIVE(obj)) {
/* Check to see whether obj shares its prototype's scope. */
JS_LOCK_OBJ(cx, obj);
scope = OBJ_SCOPE(obj);
oldproto = LOCKED_OBJ_GET_PROTO(obj);
if (oldproto && OBJ_SCOPE(oldproto) == scope) {
/* Either obj needs a new empty scope, or it should share pobj's. */
if (!pobj ||
!OBJ_IS_NATIVE(pobj) ||
OBJ_GET_CLASS(cx, pobj) != LOCKED_OBJ_GET_CLASS(oldproto)) {
/*
* With no proto and no scope of its own, obj is truly empty.
*
* If pobj is not native, obj needs its own empty scope -- it
* should not continue to share oldproto's scope once oldproto
* is not on obj's prototype chain. That would put properties
* from oldproto's scope ahead of properties defined by pobj,
* in lookup order.
*
* If pobj's class differs from oldproto's, we may need a new
* scope to handle differences in private and reserved slots,
* so we suboptimally but safely make one.
*/
scope = js_GetMutableScope(cx, obj);
if (!scope) {
JS_UNLOCK_OBJ(cx, obj);
SET_SLOT_DONE(rt);
return JS_FALSE;
}
} else if (OBJ_SCOPE(pobj) != scope) {
#ifdef JS_THREADSAFE
/*
* We are about to nest scope locks. Help jslock.c:ShareScope
* keep scope->u.count balanced for the JS_UNLOCK_SCOPE, while
* avoiding deadlock, by recording scope in rt->setSlotScope.
*/
if (scope->ownercx) {
JS_ASSERT(scope->ownercx == cx);
rt->setSlotScope = scope;
}
#endif
/* We can't deadlock because we checked for cycles above (2). */
JS_LOCK_OBJ(cx, pobj);
newscope = (JSScope *) js_HoldObjectMap(cx, pobj->map);
obj->map = &newscope->map;
js_DropObjectMap(cx, &scope->map, obj);
JS_TRANSFER_SCOPE_LOCK(cx, scope, newscope);
scope = newscope;
#ifdef JS_THREADSAFE
rt->setSlotScope = NULL;
#endif
}
}
LOCKED_OBJ_SET_PROTO(obj, pobj);
JS_UNLOCK_SCOPE(cx, scope);
} else {
OBJ_SET_SLOT(cx, obj, slot, OBJECT_TO_JSVAL(pobj));
}
SET_SLOT_DONE(rt);
return JS_TRUE;
#undef SET_SLOT_DONE
}
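The rewritten caller has two paths: a null proto or parent can never close
a cycle, so it is stored inline, while any other value goes through an
on-stack request whose errnum field carries the verdict back from the GC.
A condensed sketch of that control flow, with illustrative C types in
place of the engine's (the inline cycle check stands in for the serviced
request):

#include <stdio.h>

typedef struct Node { struct Node *parent; } Node;

enum { ERR_NONE, ERR_CYCLE };

/* Stand-in for queueing a JSSetSlotRequest and reading ssr.errnum back:
 * here the check simply runs inline. */
static int RequestSetParent(Node *obj, Node *pobj)
{
    Node *p;
    for (p = pobj; p; p = p->parent) {
        if (p == obj)
            return ERR_CYCLE;
    }
    obj->parent = pobj;
    return ERR_NONE;
}

static int SetParent(Node *obj, Node *pobj)
{
    /* Null can never form a cycle: skip the heavyweight path, as the
     * "optimize the null case" branch above does. */
    if (!pobj) {
        obj->parent = NULL;
        return 1;
    }
    if (RequestSetParent(obj, pobj) != ERR_NONE) {
        fprintf(stderr, "cyclic value\n");  /* JSMSG_CYCLIC_VALUE analog */
        return 0;
    }
    return 1;
}

int main(void)
{
    Node a = {NULL}, b = {NULL};
    printf("%d\n", SetParent(&a, &b));   /* 1: ok */
    printf("%d\n", SetParent(&b, &a));   /* 0: would close a cycle */
    printf("%d\n", SetParent(&a, NULL)); /* 1: null fast path */
    return 0;
}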
JS_STATIC_DLL_CALLBACK(JSHashNumber)