Mirror of https://github.com/mozilla/gecko-dev.git (synced 2025-02-26 20:30:41 +00:00)

commit 1ed03f8f31

    Merge tracemonkey to mozilla-central.
js/src/Makefile.in:

@@ -66,6 +66,10 @@ DIRS += jsapi-tests
endif
endif

ifdef ENABLE_TESTS
DIRS += tests
endif

MODULE          = js
LIBRARY_NAME    = mozjs
STATIC_LIBRARY_NAME = js_static
@@ -231,6 +235,7 @@ INSTALLED_HEADERS += \
        Native$(NANOJIT_ARCH).h \
        RegAlloc.h \
        nanojit.h \
        VMPI.h \
        $(NULL)

CPPSRCS += \
@@ -245,6 +250,7 @@ CPPSRCS += \
        avmplus.cpp \
        Native$(NANOJIT_ARCH).cpp \
        jsbuiltins.cpp \
        VMPI.cpp \
        $(NULL)

ifdef WINCE
js/src/configure.in:

@@ -5183,6 +5183,7 @@ MAKEFILES="
shell/Makefile
lirasm/Makefile
jsapi-tests/Makefile
tests/Makefile
config/Makefile
config/autoconf.mk
config/mkdepend/Makefile
js/src/jsapi.cpp:

@@ -2603,12 +2603,10 @@ JS_RemoveExternalStringFinalizer(JSStringFinalizeOp finalizer)
JS_PUBLIC_API(JSString *)
JS_NewExternalString(JSContext *cx, jschar *chars, size_t length, intN type)
{
    JSString *str;

    CHECK_REQUEST(cx);
    JS_ASSERT((uintN) type < (uintN) (GCX_NTYPES - GCX_EXTERNAL_STRING));
    JS_ASSERT(uintN(type) < JS_EXTERNAL_STRING_LIMIT);

    str = js_NewGCString(cx, (uintN) type + GCX_EXTERNAL_STRING);
    JSString *str = js_NewGCExternalString(cx, uintN(type));
    if (!str)
        return NULL;
    str->initFlat(chars, length);
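
For orientation, a minimal embedding-side sketch of the API this hunk rewires (hypothetical helper names, not part of this commit): the type index comes from JS_AddExternalStringFinalizer and, after this patch, must be below JS_EXTERNAL_STRING_LIMIT rather than GCX_NTYPES - GCX_EXTERNAL_STRING.

/* Sketch: register one external-string type at startup, then allocate
 * strings whose characters the embedding owns and frees. */
static intN sharedStringType = -1;

static void
FinalizeSharedChars(JSContext *cx, JSString *str)
{
    free(JS_GetStringChars(str));   /* chars were malloc'ed by the embedding */
}

JSBool
InitSharedStrings(void)
{
    sharedStringType = JS_AddExternalStringFinalizer(FinalizeSharedChars);
    return sharedStringType >= 0;   /* -1 when all type slots are taken */
}

JSString *
NewSharedString(JSContext *cx, jschar *chars, size_t length)
{
    return JS_NewExternalString(cx, chars, length, sharedStringType);
}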
@@ -4869,7 +4867,7 @@ JS_CompileUCFunctionForPrincipals(JSContext *cx, JSObject *obj,
#endif

  out:
    cx->weakRoots.newborn[JSTRACE_OBJECT] = FUN_OBJECT(fun);
    cx->weakRoots.newbornObject = FUN_OBJECT(fun);
    JS_POP_TEMP_ROOT(cx, &tvr);

  out2:
@@ -5369,6 +5367,18 @@ JS_GetStringLength(JSString *str)
    return str->length();
}

JS_PUBLIC_API(const char *)
JS_GetStringBytesZ(JSContext *cx, JSString *str)
{
    return js_GetStringBytes(cx, str);
}

JS_PUBLIC_API(const jschar *)
JS_GetStringCharsZ(JSContext *cx, JSString *str)
{
    return js_UndependString(cx, str);
}

JS_PUBLIC_API(intN)
JS_CompareStrings(JSString *str1, JSString *str2)
{
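
A usage note on the two getters added in the previous hunk: unlike JS_GetStringBytes and JS_GetStringChars, the *Z variants take a context, and since they simply wrap js_GetStringBytes/js_UndependString they can return NULL on out-of-memory, so callers are expected to check. An assumed caller pattern, not from this commit:

const jschar *chars = JS_GetStringCharsZ(cx, str);
if (!chars)
    return JS_FALSE;    /* OOM: an error has already been reported on cx */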
js/src/jsapi.h:

@@ -584,7 +584,9 @@ JS_END_EXTERN_C

class JSAutoRequest {
  public:
    JSAutoRequest(JSContext *cx) : mContext(cx), mSaveDepth(0) {
    JSAutoRequest(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx), mSaveDepth(0) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_BeginRequest(mContext);
    }
    ~JSAutoRequest() {
@@ -601,6 +603,7 @@ class JSAutoRequest {
  protected:
    JSContext *mContext;
    jsrefcount mSaveDepth;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER

#if 0
  private:
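
The JS_GUARD_OBJECT_NOTIFIER_* macros threaded through these constructors are a DEBUG-only device for catching an RAII misuse: constructing the guard as an unnamed temporary, which destroys it at the end of the full expression instead of at the end of the scope. A minimal sketch of the bug class, in hypothetical embedding code:

void
Wrong(JSContext *cx)
{
    JSAutoRequest(cx);      /* unnamed temporary: the request ends right here */
    /* ... subsequent JSAPI calls run outside the request ... */
}

void
Right(JSContext *cx)
{
    JSAutoRequest ar(cx);   /* named guard: the request spans the whole scope */
    /* ... */
}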
@@ -611,7 +614,9 @@ class JSAutoRequest {

class JSAutoSuspendRequest {
  public:
    JSAutoSuspendRequest(JSContext *cx) : mContext(cx), mSaveDepth(0) {
    JSAutoSuspendRequest(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx), mSaveDepth(0) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        if (mContext) {
            mSaveDepth = JS_SuspendRequest(mContext);
        }
@@ -630,6 +635,7 @@ class JSAutoSuspendRequest {
  protected:
    JSContext *mContext;
    jsrefcount mSaveDepth;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER

#if 0
  private:
@@ -989,7 +995,9 @@ JS_END_EXTERN_C

class JSAutoLocalRootScope {
  public:
    JSAutoLocalRootScope(JSContext *cx) : mContext(cx) {
    JSAutoLocalRootScope(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_EnterLocalRootScope(mContext);
    }
    ~JSAutoLocalRootScope() {
@@ -1002,6 +1010,7 @@ class JSAutoLocalRootScope {

  protected:
    JSContext *mContext;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER

#if 0
  private:
@@ -2418,6 +2427,12 @@ JS_GetStringChars(JSString *str);
extern JS_PUBLIC_API(size_t)
JS_GetStringLength(JSString *str);

extern JS_PUBLIC_API(const char *)
JS_GetStringBytesZ(JSContext *cx, JSString *str);

extern JS_PUBLIC_API(const jschar *)
JS_GetStringCharsZ(JSContext *cx, JSString *str);

extern JS_PUBLIC_API(intN)
JS_CompareStrings(JSString *str1, JSString *str2);
js/src/jsarray.cpp:

@@ -327,7 +327,7 @@ ResizeSlots(JSContext *cx, JSObject *obj, uint32 oldlen, uint32 newlen)
    }

    slots = obj->dslots ? obj->dslots - 1 : NULL;
    newslots = (jsval *) cx->realloc(slots, (newlen + 1) * sizeof(jsval));
    newslots = (jsval *) cx->realloc(slots, (size_t(newlen) + 1) * sizeof(jsval));
    if (!newslots)
        return JS_FALSE;

@@ -3391,7 +3391,7 @@ js_NewEmptyArray(JSContext* cx, JSObject* proto)
{
    JS_ASSERT(OBJ_IS_ARRAY(cx, proto));

    JSObject* obj = js_NewGCObject(cx, GCX_OBJECT);
    JSObject* obj = js_NewGCObject(cx);
    if (!obj)
        return NULL;

@@ -3463,7 +3463,7 @@ js_NewArrayObject(JSContext *cx, jsuint length, jsval *vector, JSBool holey)
    JS_POP_TEMP_ROOT(cx, &tvr);

    /* Set/clear newborn root, in case we lost it. */
    cx->weakRoots.newborn[GCX_OBJECT] = obj;
    cx->weakRoots.newbornObject = obj;
    return obj;
}
js/src/jsatom.cpp:

@@ -454,8 +454,7 @@ js_string_uninterner(JSDHashTable *table, JSDHashEntryHdr *hdr,
    JS_ASSERT(entry->keyAndFlags != 0);
    str = (JSString *)ATOM_ENTRY_KEY(entry);

    /* Pass null as context. */
    js_FinalizeStringRT(rt, str, js_GetExternalStringGCType(str), NULL);
    js_FinalizeStringRT(rt, str);
    return JS_DHASH_NEXT;
}
js/src/jsbuiltins.cpp:

@@ -1,5 +1,5 @@
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4; -*-
 * vim: set ts=8 sw=4 et tw=99:
 * vim: set ts=4 sw=4 et tw=99:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
@@ -151,6 +151,24 @@ js_UnboxInt32(jsval v)
}
JS_DEFINE_CALLINFO_1(extern, INT32, js_UnboxInt32, JSVAL, 1, 1)

JSBool FASTCALL
js_TryUnboxInt32(jsval v, int32* i32p)
{
    if (JS_LIKELY(JSVAL_IS_INT(v))) {
        *i32p = JSVAL_TO_INT(v);
        return JS_TRUE;
    }
    if (!JSVAL_IS_DOUBLE(v))
        return JS_FALSE;
    int32 i;
    jsdouble d = *JSVAL_TO_DOUBLE(v);
    if (!JSDOUBLE_IS_INT(d, i))
        return JS_FALSE;
    *i32p = i;
    return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(extern, BOOL, js_TryUnboxInt32, JSVAL, INT32PTR, 1, 1)

int32 FASTCALL
js_DoubleToInt32(jsdouble d)
{
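
js_TryUnboxInt32 above is the fallible counterpart of js_UnboxInt32: it succeeds for int-tagged jsvals and for doubles that hold exact int32 values, and returns JS_FALSE for everything else. A self-contained sketch of the double case in plain C++ (not the patch's code, which uses the JSDOUBLE_IS_INT macro):

#include <stdint.h>

static bool
TryDoubleToInt32(double d, int32_t *out)
{
    /* Reject NaN and out-of-range values before converting, since an
     * out-of-range double-to-int conversion is undefined behavior. */
    if (!(d >= -2147483648.0 && d <= 2147483647.0))
        return false;
    int32_t i = (int32_t) d;
    if ((double) i != d)    /* rejects fractional values */
        return false;
    /* NB: the engine's JSDOUBLE_IS_INT additionally rejects -0.0, which
     * must stay represented as a double. */
    *out = i;
    return true;
}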
@@ -389,7 +407,7 @@ js_NewNullClosure(JSContext* cx, JSObject* funobj, JSObject* proto, JSObject* pa
    JSFunction *fun = (JSFunction*) funobj;
    JS_ASSERT(GET_FUNCTION_PRIVATE(cx, funobj) == fun);

    JSObject* closure = js_NewGCObject(cx, GCX_OBJECT);
    JSObject* closure = js_NewGCObject(cx);
    if (!closure)
        return NULL;

@@ -406,6 +424,44 @@ js_NewNullClosure(JSContext* cx, JSObject* funobj, JSObject* proto, JSObject* pa
}
JS_DEFINE_CALLINFO_4(extern, OBJECT, js_NewNullClosure, CONTEXT, OBJECT, OBJECT, OBJECT, 0, 0)

JS_REQUIRES_STACK JSBool FASTCALL
js_PopInterpFrame(JSContext* cx, InterpState* state)
{
    JS_ASSERT(cx->fp && cx->fp->down);
    JSInlineFrame* ifp = (JSInlineFrame*)cx->fp;

    /*
     * Mirror frame popping code from inline_return in js_Interpret. There are
     * some things we just don't want to handle. In those cases, the trace will
     * MISMATCH_EXIT.
     */
    if (ifp->hookData)
        return JS_FALSE;
    if (cx->version != ifp->callerVersion)
        return JS_FALSE;
    if (cx->fp->flags & JSFRAME_CONSTRUCTING)
        return JS_FALSE;
    if (cx->fp->imacpc)
        return JS_FALSE;

    /* Update display table. */
    if (cx->fp->script->staticLevel < JS_DISPLAY_SIZE)
        cx->display[cx->fp->script->staticLevel] = cx->fp->displaySave;

    /* Pop the frame and its memory. */
    cx->fp = cx->fp->down;
    JS_ASSERT(cx->fp->regs == &ifp->callerRegs);
    cx->fp->regs = ifp->frame.regs;

    /* Don't release |ifp->mark| yet, since ExecuteTree uses |cx->stackPool|. */
    state->stackMark = ifp->mark;

    /* Update the inline call count. */
    *state->inlineCallCountp = *state->inlineCallCountp - 1;
    return JS_TRUE;
}
JS_DEFINE_CALLINFO_2(extern, BOOL, js_PopInterpFrame, CONTEXT, INTERPSTATE, 0, 0)

JSString* FASTCALL
js_ConcatN(JSContext *cx, JSString **strArray, uint32 size)
{
js/src/jsbuiltins.h:

@@ -203,6 +203,7 @@ struct ClosureVarInfo;
#define _JS_CTYPE_INT32           _JS_CTYPE(int32, _JS_I32, "","i", INFALLIBLE)
#define _JS_CTYPE_INT32_RETRY     _JS_CTYPE(int32, _JS_I32, --, --, FAIL_NEG)
#define _JS_CTYPE_INT32_FAIL      _JS_CTYPE(int32, _JS_I32, --, --, FAIL_STATUS)
#define _JS_CTYPE_INT32PTR        _JS_CTYPE(int32 *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_UINT32          _JS_CTYPE(uint32, _JS_I32, "","i", INFALLIBLE)
#define _JS_CTYPE_UINT32_RETRY    _JS_CTYPE(uint32, _JS_I32, --, --, FAIL_NEG)
#define _JS_CTYPE_UINT32_FAIL     _JS_CTYPE(uint32, _JS_I32, --, --, FAIL_STATUS)
@@ -227,6 +228,7 @@ struct ClosureVarInfo;
#define _JS_CTYPE_CHARPTR         _JS_CTYPE(char *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_APNPTR          _JS_CTYPE(js_ArgsPrivateNative *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_CVIPTR          _JS_CTYPE(const ClosureVarInfo *, _JS_PTR, --, --, INFALLIBLE)
#define _JS_CTYPE_FRAMEINFO       _JS_CTYPE(FrameInfo *, _JS_PTR, --, --, INFALLIBLE)

#define _JS_EXPAND(tokens) tokens

@@ -502,6 +504,7 @@ JS_DECLARE_CALLINFO(js_BoxDouble)
JS_DECLARE_CALLINFO(js_BoxInt32)
JS_DECLARE_CALLINFO(js_UnboxDouble)
JS_DECLARE_CALLINFO(js_UnboxInt32)
JS_DECLARE_CALLINFO(js_TryUnboxInt32)
JS_DECLARE_CALLINFO(js_dmod)
JS_DECLARE_CALLINFO(js_imod)
JS_DECLARE_CALLINFO(js_DoubleToInt32)
@@ -521,5 +524,6 @@ JS_DECLARE_CALLINFO(js_BooleanOrUndefinedToString)
JS_DECLARE_CALLINFO(js_Arguments)
JS_DECLARE_CALLINFO(js_NewNullClosure)
JS_DECLARE_CALLINFO(js_ConcatN)
JS_DECLARE_CALLINFO(js_PopInterpFrame)

#endif /* jsbuiltins_h___ */
js/src/jscntxt.cpp:

@@ -116,7 +116,6 @@ PurgeThreadData(JSContext *cx, JSThreadData *data)

# ifdef JS_TRACER
    JSTraceMonitor *tm = &data->traceMonitor;
    tm->reservedDoublePoolPtr = tm->reservedDoublePool;

    /*
     * If we are about to regenerate shapes, we have to flush the JIT cache,
@@ -126,12 +125,14 @@ PurgeThreadData(JSContext *cx, JSThreadData *data)
    tm->needFlush = JS_TRUE;

    /*
     * We want to keep tm->reservedObjects after the GC. So, unless we are
     * shutting down, we don't purge them here and rather mark them during
     * We want to keep reserved doubles and objects after the GC. So, unless we
     * are shutting down, we don't purge them here and rather mark them during
     * the GC, see MarkReservedObjects in jsgc.cpp.
     */
    if (cx->runtime->state == JSRTS_LANDING)
    if (cx->runtime->state == JSRTS_LANDING) {
        tm->reservedDoublePoolPtr = tm->reservedDoublePool;
        tm->reservedObjects = NULL;
    }
# endif

    /* Destroy eval'ed scripts. */
@@ -719,22 +720,10 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
#endif

    if (last) {
        /* Clear builtin functions, which are recreated on demand. */
        memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);

        js_GC(cx, GC_LAST_CONTEXT);
        DUMP_EVAL_CACHE_METER(cx);
        DUMP_FUNCTION_METER(cx);

        /*
         * Free the script filename table if it exists and is empty. Do this
         * after the last GC to avoid finalizers tripping on free memory.
         */
        if (rt->scriptFilenameTable &&
            rt->scriptFilenameTable->nentries == 0) {
            js_FinishRuntimeScriptState(rt);
        }

        /* Take the runtime down, now that it has no contexts or atoms. */
        JS_LOCK_GC(rt);
        rt->state = JSRTS_DOWN;
js/src/jscntxt.h:

@@ -126,6 +126,7 @@ struct VMFragment;
#ifdef __cplusplus
struct REHashKey;
struct REHashFn;
class FrameInfoCache;
typedef nanojit::HashMap<REHashKey, nanojit::Fragment*, REHashFn> REHashMap;
#endif

@@ -154,12 +155,30 @@ struct JSTraceMonitor {
     */
    JSContext               *tracecx;

    CLS(VMAllocator)        dataAlloc;   /* A chunk allocator for LIR. */
    /*
     * There are 3 allocators here. This might seem like overkill, but they
     * have different lifecycles, and by keeping them separate we keep the
     * amount of retained memory down significantly.
     *
     * The dataAlloc has the lifecycle of the monitor. It's flushed only
     * when the monitor is flushed.
     *
     * The traceAlloc has the same flush lifecycle as the dataAlloc, but
     * it is also *marked* when a recording starts and rewinds to the mark
     * point if recording aborts. So you can put things in it that are only
     * reachable on a successful record/compile cycle.
     *
     * The tempAlloc is flushed after each recording, successful or not.
     */

    CLS(VMAllocator)        dataAlloc;   /* A chunk allocator for fragments. */
    CLS(VMAllocator)        traceAlloc;  /* An allocator for trace metadata. */
    CLS(VMAllocator)        tempAlloc;   /* A temporary chunk allocator. */
    CLS(nanojit::CodeAlloc) codeAlloc;   /* An allocator for native code. */
    CLS(nanojit::Assembler) assembler;
    CLS(nanojit::LirBuffer) lirbuf;
    CLS(nanojit::LirBuffer) reLirBuf;
    CLS(FrameInfoCache)     frameCache;
#ifdef DEBUG
    CLS(nanojit::LabelMap)  labels;
#endif
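
The three-allocator comment above describes a mark/rewind discipline for traceAlloc. A compact, self-contained sketch of that pattern with toy types (the real VMAllocator lives elsewhere in the tracer, not in this hunk):

#include <cstddef>

/* Bump allocator with a mark/rewind protocol, as described above. */
struct ArenaAllocator {
    char *base, *cursor, *end;

    void *alloc(size_t n) {
        char *p = cursor;
        cursor += n;
        return cursor <= end ? p : 0;   /* the real code grows a chunk chain */
    }
    char *mark() const { return cursor; }
    void rewind(char *m) { cursor = m; }    /* drop everything past the mark */
};

/* Recording protocol: mark before recording, rewind on abort, so only
 * successfully compiled traces retain their metadata. */
void record(ArenaAllocator &traceAlloc, bool recordingSucceeded) {
    char *m = traceAlloc.mark();
    /* ... allocate trace metadata from traceAlloc while recording ... */
    if (!recordingSucceeded)
        traceAlloc.rewind(m);
}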
@@ -344,7 +363,7 @@ struct JSThread {
    /* Indicates that the thread is waiting in ClaimTitle from jslock.cpp. */
    JSTitle             *titleToShare;

    JSGCThing           *gcFreeLists[GC_NUM_FREELISTS];
    JSGCThing           *gcFreeLists[FINALIZE_LIMIT];

    /* Factored out of JSThread for !JS_THREADSAFE embedding in JSRuntime. */
    JSThreadData        data;
@@ -431,7 +450,7 @@ struct JSRuntime {

    /* Garbage collector state, used by jsgc.c. */
    JSGCChunkInfo       *gcChunkList;
    JSGCArenaList       gcArenaList[GC_NUM_FREELISTS];
    JSGCArenaList       gcArenaList[FINALIZE_LIMIT];
    JSGCDoubleArenaList gcDoubleArenaList;
    JSDHashTable        gcRootsHash;
    JSDHashTable        *gcLocksHash;
@@ -1246,20 +1265,28 @@ FrameAtomBase(JSContext *cx, JSStackFrame *fp)
class JSAutoTempValueRooter
{
  public:
    JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec)
    JSAutoTempValueRooter(JSContext *cx, size_t len, jsval *vec
                          JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_PUSH_TEMP_ROOT(mContext, len, vec, &mTvr);
    }
    explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL)
    explicit JSAutoTempValueRooter(JSContext *cx, jsval v = JSVAL_NULL
                                   JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_PUSH_SINGLE_TEMP_ROOT(mContext, v, &mTvr);
    }
    JSAutoTempValueRooter(JSContext *cx, JSString *str)
    JSAutoTempValueRooter(JSContext *cx, JSString *str
                          JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_PUSH_TEMP_ROOT_STRING(mContext, str, &mTvr);
    }
    JSAutoTempValueRooter(JSContext *cx, JSObject *obj)
    JSAutoTempValueRooter(JSContext *cx, JSObject *obj
                          JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_PUSH_TEMP_ROOT_OBJECT(mContext, obj, &mTvr);
    }

@@ -1280,13 +1307,16 @@ class JSAutoTempValueRooter
#endif

    JSTempValueRooter mTvr;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

class JSAutoTempIdRooter
{
  public:
    explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0))
    explicit JSAutoTempIdRooter(JSContext *cx, jsid id = INT_TO_JSID(0)
                                JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_PUSH_SINGLE_TEMP_ROOT(mContext, ID_TO_VALUE(id), &mTvr);
    }

@@ -1300,11 +1330,15 @@ class JSAutoTempIdRooter
  private:
    JSContext *mContext;
    JSTempValueRooter mTvr;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

class JSAutoIdArray {
  public:
    JSAutoIdArray(JSContext *cx, JSIdArray *ida) : cx(cx), idArray(ida) {
    JSAutoIdArray(JSContext *cx, JSIdArray *ida
                  JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : cx(cx), idArray(ida) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        if (ida)
            JS_PUSH_TEMP_ROOT(cx, ida->length, ida->vector, &tvr);
    }
@@ -1329,15 +1363,18 @@ class JSAutoIdArray {
    JSContext * const cx;
    JSIdArray * const idArray;
    JSTempValueRooter tvr;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

/* The auto-root for enumeration object and its state. */
class JSAutoEnumStateRooter : public JSTempValueRooter
{
  public:
    JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep)
    JSAutoEnumStateRooter(JSContext *cx, JSObject *obj, jsval *statep
                          JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx), mStatep(statep)
    {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        JS_ASSERT(obj);
        JS_ASSERT(statep);
        JS_PUSH_TEMP_ROOT_COMMON(cx, obj, this, JSTVU_ENUMERATOR, object);
@@ -1355,13 +1392,16 @@ class JSAutoEnumStateRooter : public JSTempValueRooter
  private:
    JSContext *mContext;
    jsval *mStatep;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

class JSAutoResolveFlags
{
  public:
    JSAutoResolveFlags(JSContext *cx, uintN flags)
    JSAutoResolveFlags(JSContext *cx, uintN flags
                       JS_GUARD_OBJECT_NOTIFIER_PARAM)
        : mContext(cx), mSaved(cx->resolveFlags) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        cx->resolveFlags = flags;
    }

@@ -1370,6 +1410,7 @@ class JSAutoResolveFlags
  private:
    JSContext *mContext;
    uintN mSaved;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

#endif /* __cpluscplus */
js/src/jsemit.cpp:

@@ -2159,7 +2159,9 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)

#ifdef DEBUG
        JSStackFrame *caller = cg->compiler->callerFrame;
#endif
        JS_ASSERT(caller);
        JS_ASSERT(caller->script);

        JSTreeContext *tc = cg;
        while (tc->staticLevel != level)
@@ -2168,10 +2170,14 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)

        JSCodeGenerator *evalcg = (JSCodeGenerator *) tc;
        JS_ASSERT(evalcg->flags & TCF_COMPILE_N_GO);
        JS_ASSERT(!(evalcg->flags & TCF_IN_FOR_INIT));
        JS_ASSERT(caller->script);
        JS_ASSERT(caller->fun && caller->varobj == evalcg->scopeChain);
#endif

        /*
         * Don't generate upvars on the left side of a for loop. See
         * bug 470758 and bug 520513.
         */
        if (evalcg->flags & TCF_IN_FOR_INIT)
            return JS_TRUE;

        if (cg->staticLevel == level) {
            pn->pn_op = JSOP_GETUPVAR;
@@ -6269,26 +6275,39 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
      case TOK_NEW:
      case TOK_LP:
      {
        bool callop = (PN_TYPE(pn) == TOK_LP);
        uintN oldflags;

        /*
         * Emit function call or operator new (constructor call) code.
         * Emit callable invocation or operator new (constructor call) code.
         * First, emit code for the left operand to evaluate the callable or
         * constructable object expression.
         *
         * For operator new applied to other expressions than E4X ones, we emit
         * JSOP_GETPROP instead of JSOP_CALLPROP, etc. This is necessary to
         * interpose the lambda-initialized method read barrier -- see the code
         * in jsops.cpp for JSOP_LAMBDA followed by JSOP_{SET,INIT}PROP.
         *
         * Then (or in a call case that has no explicit reference-base object)
         * we emit JSOP_NULL as a placeholder local GC root to hold the |this|
         * parameter: in the operator new case, the newborn instance; in the
         * base-less call case, a cookie meaning "use the global object as the
         * |this| value" (or in ES5 strict mode, "use undefined", so we should
         * use JSOP_PUSH instead of JSOP_NULL -- see bug 514570).
         */
        pn2 = pn->pn_head;
        switch (pn2->pn_type) {
          case TOK_NAME:
            if (!EmitNameOp(cx, cg, pn2, JS_TRUE))
            if (!EmitNameOp(cx, cg, pn2, callop))
                return JS_FALSE;
            break;
          case TOK_DOT:
            if (!EmitPropOp(cx, pn2, PN_OP(pn2), cg, JS_TRUE))
            if (!EmitPropOp(cx, pn2, PN_OP(pn2), cg, callop))
                return JS_FALSE;
            break;
          case TOK_LB:
            JS_ASSERT(pn2->pn_op == JSOP_GETELEM);
            if (!EmitElemOp(cx, pn2, JSOP_CALLELEM, cg))
            if (!EmitElemOp(cx, pn2, callop ? JSOP_CALLELEM : JSOP_GETELEM, cg))
                return JS_FALSE;
            break;
          case TOK_UNARYOP:
@@ -6296,6 +6315,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
            if (pn2->pn_op == JSOP_XMLNAME) {
                if (!EmitXMLName(cx, pn2, JSOP_CALLXMLNAME, cg))
                    return JS_FALSE;
                callop = true;          /* suppress JSOP_NULL after */
                break;
            }
#endif
@@ -6307,9 +6327,11 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
             */
            if (!js_EmitTree(cx, cg, pn2))
                return JS_FALSE;
            if (js_Emit1(cx, cg, JSOP_NULL) < 0)
                return JS_FALSE;
            callop = false;             /* trigger JSOP_NULL after */
            break;
        }
        if (!callop && js_Emit1(cx, cg, JSOP_NULL) < 0)
            return JS_FALSE;

        /* Remember start of callable-object bytecode for decompilation hint. */
        off = top;
@@ -6332,6 +6354,11 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
        argc = pn->pn_count - 1;
        if (js_Emit3(cx, cg, PN_OP(pn), ARGC_HI(argc), ARGC_LO(argc)) < 0)
            return JS_FALSE;
        if (PN_OP(pn) == JSOP_CALL) {
            /* Add a trace hint opcode for recursion. */
            if (js_Emit1(cx, cg, JSOP_TRACE) < 0)
                return JS_FALSE;
        }
        if (PN_OP(pn) == JSOP_EVAL)
            EMIT_UINT16_IMM_OP(JSOP_LINENO, pn->pn_pos.begin.lineno);
        break;
js/src/jsfun.cpp:

@@ -1410,10 +1410,14 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
    fun = GET_FUNCTION_PRIVATE(cx, obj);

    /*
     * No need to reflect fun.prototype in 'fun.prototype = ... '.
     * No need to reflect fun.prototype in 'fun.prototype = ... '. Assert that
     * fun is not a compiler-created function object, which must never leak to
     * script or embedding code and then be mutated.
     */
    if (flags & JSRESOLVE_ASSIGNING)
    if (flags & JSRESOLVE_ASSIGNING) {
        JS_ASSERT(!js_IsInternalFunctionObject(obj));
        return JS_TRUE;
    }

    /*
     * Ok, check whether id is 'prototype' and bootstrap the function object's
@@ -1421,7 +1425,7 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
     */
    atom = cx->runtime->atomState.classPrototypeAtom;
    if (id == ATOM_KEY(atom)) {
        JSObject *proto;
        JS_ASSERT(!js_IsInternalFunctionObject(obj));

        /*
         * Beware of the wacky case of a user function named Object -- trying
@@ -1434,7 +1438,8 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,
         * Make the prototype object to have the same parent as the function
         * object itself.
         */
        proto = js_NewObject(cx, &js_ObjectClass, NULL, OBJ_GET_PARENT(cx, obj));
        JSObject *proto =
            js_NewObject(cx, &js_ObjectClass, NULL, OBJ_GET_PARENT(cx, obj));
        if (!proto)
            return JS_FALSE;

@@ -1457,6 +1462,8 @@ fun_resolve(JSContext *cx, JSObject *obj, jsval id, uintN flags,

    atom = OFFSET_TO_ATOM(cx->runtime, lfp->atomOffset);
    if (id == ATOM_KEY(atom)) {
        JS_ASSERT(!js_IsInternalFunctionObject(obj));

        if (!js_DefineNativeProperty(cx, obj,
                                     ATOM_TO_JSID(atom), JSVAL_VOID,
                                     fun_getProperty, JS_PropertyStub,
js/src/jsfun.h:

@@ -1,4 +1,4 @@
/* -*- Mode: C; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
@@ -107,7 +107,7 @@ typedef union JSLocalNames {
#define JSFUN_KINDMASK      0xc000  /* encode interp vs. native and closure
                                       optimization level -- see above */

#define FUN_OBJECT(fun)      (&(fun)->object)
#define FUN_OBJECT(fun)      (static_cast<JSObject *>(fun))
#define FUN_KIND(fun)        ((fun)->flags & JSFUN_KINDMASK)
#define FUN_SET_KIND(fun,k)  ((fun)->flags = ((fun)->flags & ~JSFUN_KINDMASK) | (k))
#define FUN_INTERPRETED(fun) (FUN_KIND(fun) >= JSFUN_INTERPRETED)
@@ -128,8 +128,7 @@ typedef union JSLocalNames {
                                 JS_ASSERT((fun)->flags & JSFUN_TRCINFO),     \
                                 fun->u.n.trcinfo)

struct JSFunction {
    JSObject        object;       /* GC'ed object header */
struct JSFunction : public JSObject {
    uint16          nargs;        /* maximum number of specified arguments,
                                     reflected as f.length/f.arity */
    uint16          flags;        /* flags, see JSFUN_* below and in jsapi.h */
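
Worth noting for this hunk: JSFunction previously embedded its GC'ed JSObject header as a first member named object, and the patch switches to C++ inheritance, so FUN_OBJECT becomes a plain upcast. A two-line sketch of both directions (assumes the caller knows funobj really holds a function):

JSObject   *obj = FUN_OBJECT(fun);                  /* upcast: static_cast<JSObject *>(fun) */
JSFunction *fn  = static_cast<JSFunction *>(obj);   /* downcast: valid only for function objects */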
@@ -161,8 +160,8 @@ struct JSFunction {
    } u;
    JSAtom          *atom;        /* name for diagnostics and decompiling */

    bool optimizedClosure() { return FUN_KIND(this) > JSFUN_INTERPRETED; }
    bool needsWrapper()     { return FUN_NULL_CLOSURE(this) && u.i.skipmin != 0; }
    bool optimizedClosure() const { return FUN_KIND(this) > JSFUN_INTERPRETED; }
    bool needsWrapper()     const { return FUN_NULL_CLOSURE(this) && u.i.skipmin != 0; }

    uintN countArgsAndVars() const {
        JS_ASSERT(FUN_INTERPRETED(this));
@@ -222,6 +221,19 @@ extern JS_FRIEND_DATA(JSClass) js_FunctionClass;
    (JS_ASSERT(HAS_FUNCTION_CLASS(funobj)),                                   \
     (JSFunction *) (funobj)->getPrivate())

/*
 * Return true if this is a compiler-created internal function accessed by
 * its own object. Such a function object must not be accessible to script
 * or embedding code.
 */
inline bool
js_IsInternalFunctionObject(JSObject *funobj)
{
    JS_ASSERT(HAS_FUNCTION_CLASS(funobj));
    JSFunction *fun = (JSFunction *) funobj->getPrivate();
    return funobj == fun && (fun->flags & JSFUN_LAMBDA) && !funobj->getParent();
}

struct js_ArgsPrivateNative;

inline js_ArgsPrivateNative *
js/src/jsgc.cpp: 682 lines changed (diff suppressed because it is too large).
js/src/jsgc.h:

@@ -58,30 +58,7 @@ JS_BEGIN_EXTERN_C
 */
#define JSTRACE_LIMIT       4

/*
 * We use the trace kinds as the types for all GC things except external
 * strings.
 */
#define GCX_OBJECT              JSTRACE_OBJECT      /* JSObject */
#define GCX_DOUBLE              JSTRACE_DOUBLE      /* jsdouble */
#define GCX_STRING              JSTRACE_STRING      /* JSString */
#define GCX_XML                 JSTRACE_XML         /* JSXML */
#define GCX_EXTERNAL_STRING     JSTRACE_LIMIT       /* JSString with external
                                                       chars */
/*
 * The number of defined GC types and the maximum limit for the number of
 * possible GC types.
 */
#define GCX_NTYPES              (GCX_EXTERNAL_STRING + 8)
#define GCX_LIMIT_LOG2          4                   /* type index bits */
#define GCX_LIMIT               JS_BIT(GCX_LIMIT_LOG2)

/* GC flag definitions, must fit in 8 bits (type index goes in the low bits). */
#define GCF_TYPEMASK            JS_BITMASK(GCX_LIMIT_LOG2)
#define GCF_MARK                JS_BIT(GCX_LIMIT_LOG2)
#define GCF_FINAL               JS_BIT(GCX_LIMIT_LOG2 + 1)
#define GCF_LOCKSHIFT           (GCX_LIMIT_LOG2 + 2)  /* lock bit shift */
#define GCF_LOCK                JS_BIT(GCF_LOCKSHIFT) /* lock request bit in API */
const uintN JS_EXTERNAL_STRING_LIMIT = 8;

/*
 * Get the type of the external string or -1 if the string was not created
@@ -149,19 +126,6 @@ typedef struct JSPtrTable {
extern JSBool
js_RegisterCloseableIterator(JSContext *cx, JSObject *obj);

/*
 * The private JSGCThing struct, which describes a gcFreeList element.
 */
struct JSGCThing {
    JSGCThing   *next;
    uint8       *flagp;
};

#define GC_NBYTES_MAX           (10 * sizeof(JSGCThing))
#define GC_NUM_FREELISTS        (GC_NBYTES_MAX / sizeof(JSGCThing))
#define GC_FREELIST_NBYTES(i)   (((i) + 1) * sizeof(JSGCThing))
#define GC_FREELIST_INDEX(n)    (((n) / sizeof(JSGCThing)) - 1)

/*
 * Allocates a new GC thing of the given size. After a successful allocation
 * the caller must fully initialize the thing before calling any function that
@@ -169,16 +133,19 @@ struct JSGCThing {
 * values stored in the partially initialized thing.
 */
extern JSObject*
js_NewGCObject(JSContext *cx, uintN flags);
js_NewGCObject(JSContext *cx);

extern JSString*
js_NewGCString(JSContext *cx, uintN flags);
js_NewGCString(JSContext *cx);

extern JSString*
js_NewGCExternalString(JSContext *cx, uintN type);

extern JSFunction*
js_NewGCFunction(JSContext *cx, uintN flags);
js_NewGCFunction(JSContext *cx);

extern JSXML*
js_NewGCXML(JSContext *cx, uintN flags);
js_NewGCXML(JSContext *cx);

/*
 * Allocate a new double jsval and store the result in *vp. vp must be a root.
@@ -284,6 +251,36 @@ typedef enum JSGCInvocationKind {
extern void
js_GC(JSContext *cx, JSGCInvocationKind gckind);

/*
 * The kind of GC thing with a finalizer. The external strings follow the
 * ordinary string to simplify js_GetExternalStringGCType.
 */
enum JSFinalizeGCThingKind {
    FINALIZE_OBJECT,
    FINALIZE_FUNCTION,
#if JS_HAS_XML_SUPPORT
    FINALIZE_XML,
#endif
    FINALIZE_STRING,
    FINALIZE_EXTERNAL_STRING0,
    FINALIZE_EXTERNAL_STRING1,
    FINALIZE_EXTERNAL_STRING2,
    FINALIZE_EXTERNAL_STRING3,
    FINALIZE_EXTERNAL_STRING4,
    FINALIZE_EXTERNAL_STRING5,
    FINALIZE_EXTERNAL_STRING6,
    FINALIZE_EXTERNAL_STRING7,
    FINALIZE_EXTERNAL_STRING_LAST = FINALIZE_EXTERNAL_STRING7,
    FINALIZE_LIMIT
};

static inline bool
IsFinalizableStringKind(unsigned thingKind)
{
    return unsigned(FINALIZE_STRING) <= thingKind &&
           thingKind <= unsigned(FINALIZE_EXTERNAL_STRING_LAST);
}

typedef struct JSGCArenaInfo JSGCArenaInfo;
typedef struct JSGCArenaList JSGCArenaList;
typedef struct JSGCChunkInfo JSGCChunkInfo;
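
Given the enum ordering above, with the external-string kinds placed directly after FINALIZE_STRING, mapping an external string type index to its finalize kind is plain arithmetic; presumably this is what js_NewGCExternalString relies on. A sketch, not the commit's code:

static inline unsigned
ExternalStringKind(uintN type)
{
    JS_ASSERT(type < JS_EXTERNAL_STRING_LIMIT);   /* 8 external types */
    return unsigned(FINALIZE_EXTERNAL_STRING0) + type;
}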
@@ -292,6 +289,7 @@ struct JSGCArenaList {
    JSGCArenaInfo   *last;          /* last allocated GC arena */
    uint32          lastCount;      /* number of allocated things in the last
                                       arena */
    uint32          thingKind;      /* one of JSFinalizeGCThingKind */
    uint32          thingSize;      /* size of things to allocate on this list
                                     */
    JSGCThing       *freeList;      /* list of free GC things */
@@ -315,7 +313,13 @@ js_DestroyScriptsToGC(JSContext *cx, JSThreadData *data);

struct JSWeakRoots {
    /* Most recently created things by type, members of the GC's root set. */
    void            *newborn[GCX_NTYPES];
    JSObject        *newbornObject;
    jsdouble        *newbornDouble;
    JSString        *newbornString;
#if JS_HAS_XML_SUPPORT
    JSXML           *newbornXML;
#endif
    JSString        *newbornExternalString[JS_EXTERNAL_STRING_LIMIT];

    /* Atom root for the last-looked-up atom on this context. */
    jsval           lastAtom;
@@ -348,15 +352,8 @@ class JSFreePointerListTask : public JSBackgroundTask {
};
#endif

/*
 * Free the chars held by str when it is finalized by the GC. When type is
 * less then zero, it denotes an internal string. Otherwise it denotes the
 * type of the external string allocated with JS_NewExternalString.
 *
 * This function always needs rt but can live with null cx.
 */
extern void
js_FinalizeStringRT(JSRuntime *rt, JSString *str, intN type, JSContext *cx);
js_FinalizeStringRT(JSRuntime *rt, JSString *str);

#ifdef DEBUG_notme
#define JS_GCMETER 1
@@ -405,7 +402,7 @@ typedef struct JSGCStats {
    uint32  closelater;     /* number of close hooks scheduled to run */
    uint32  maxcloselater;  /* max number of close hooks scheduled to run */

    JSGCArenaStats  arenaStats[GC_NUM_FREELISTS];
    JSGCArenaStats  arenaStats[FINALIZE_LIST_LIMIT];
    JSGCArenaStats  doubleArenaStats;
} JSGCStats;
js/src/jsinterp.cpp:

@@ -2787,12 +2787,6 @@ js_Interpret(JSContext *cx)

#endif /* !JS_THREADED_INTERP */

#ifdef JS_TRACER
    /* We cannot reenter the interpreter while recording. */
    if (TRACE_RECORDER(cx))
        js_AbortRecording(cx, "attempt to reenter interpreter while recording");
#endif

    /* Check for too deep of a native thread stack. */
    JS_CHECK_RECURSION(cx, return JS_FALSE);

@@ -2854,27 +2848,32 @@ js_Interpret(JSContext *cx)
#define MONITOR_BRANCH_TRACEVIS
#endif

#define MONITOR_BRANCH()                                                      \
#define RESTORE_INTERP_VARS()                                                 \
    JS_BEGIN_MACRO                                                            \
        fp = cx->fp;                                                          \
        script = fp->script;                                                  \
        atoms = FrameAtomBase(cx, fp);                                        \
        currentVersion = (JSVersion) script->version;                         \
        JS_ASSERT(fp->regs == &regs);                                         \
        if (cx->throwing)                                                     \
            goto error;                                                       \
    JS_END_MACRO

#define MONITOR_BRANCH(reason)                                                \
    JS_BEGIN_MACRO                                                            \
        if (TRACING_ENABLED(cx)) {                                            \
            if (js_MonitorLoopEdge(cx, inlineCallCount)) {                    \
            if (js_MonitorLoopEdge(cx, inlineCallCount, reason)) {            \
                JS_ASSERT(TRACE_RECORDER(cx));                                \
                MONITOR_BRANCH_TRACEVIS;                                      \
                ENABLE_INTERRUPTS();                                          \
            }                                                                 \
            fp = cx->fp;                                                      \
            script = fp->script;                                              \
            atoms = FrameAtomBase(cx, fp);                                    \
            currentVersion = (JSVersion) script->version;                     \
            JS_ASSERT(fp->regs == &regs);                                     \
            if (cx->throwing)                                                 \
                goto error;                                                   \
            RESTORE_INTERP_VARS();                                            \
        }                                                                     \
    JS_END_MACRO

#else /* !JS_TRACER */

#define MONITOR_BRANCH() ((void) 0)
#define MONITOR_BRANCH(reason) ((void) 0)

#endif /* !JS_TRACER */
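
A self-contained miniature of the idea behind RESTORE_INTERP_VARS, with toy types rather than engine code: the interpreter caches hot VM fields in locals for speed, so anything that can replace the current frame (here, running a trace) must be followed by a reload of every cached local.

struct Frame { const unsigned char *pc; Frame *down; };
struct VM    { Frame *fp; bool throwing; };

/* Returns false when an exception is pending (the macro's `goto error`). */
static bool
AfterPossibleTrace(VM &vm, Frame *&fp, const unsigned char *&pc)
{
    fp = vm.fp;     /* re-derive every cached value from the VM state, */
    pc = fp->pc;    /* since a trace may have pushed or popped frames  */
    return !vm.throwing;
}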
@@ -2900,13 +2899,13 @@ js_Interpret(JSContext *cx)
        CHECK_BRANCH();                                                       \
        if (op == JSOP_NOP) {                                                 \
            if (TRACE_RECORDER(cx)) {                                         \
                MONITOR_BRANCH();                                             \
                MONITOR_BRANCH(Monitor_Branch);                               \
                op = (JSOp) *regs.pc;                                         \
            } else {                                                          \
                op = (JSOp) *++regs.pc;                                       \
            }                                                                 \
        } else if (op == JSOP_TRACE) {                                        \
            MONITOR_BRANCH();                                                 \
            MONITOR_BRANCH(Monitor_Branch);                                   \
            op = (JSOp) *regs.pc;                                             \
        }                                                                     \
    }                                                                         \
@@ -2989,6 +2988,15 @@ js_Interpret(JSContext *cx)
    }
#endif /* JS_HAS_GENERATORS */

#ifdef JS_TRACER
    /*
     * We cannot reenter the interpreter while recording; wait to abort until
     * after cx->fp->regs is set.
     */
    if (TRACE_RECORDER(cx))
        js_AbortRecording(cx, "attempt to reenter interpreter while recording");
#endif

    /*
     * It is important that "op" be initialized before calling DO_OP because
     * it is possible for "op" to be specially assigned during the normal
js/src/jsnum.cpp:

@@ -762,10 +762,6 @@ js_FinishRuntimeNumberState(JSContext *cx)
{
    JSRuntime *rt = cx->runtime;

    js_UnlockGCThingRT(rt, rt->jsNaN);
    js_UnlockGCThingRT(rt, rt->jsNegativeInfinity);
    js_UnlockGCThingRT(rt, rt->jsPositiveInfinity);

    rt->jsNaN = NULL;
    rt->jsNegativeInfinity = NULL;
    rt->jsPositiveInfinity = NULL;
js/src/jsobj.cpp:

@@ -2216,14 +2216,14 @@ js_NewObjectWithGivenProto(JSContext *cx, JSClass *clasp, JSObject *proto,
     */
    JSObject* obj;
    if (clasp == &js_FunctionClass && !objectSize) {
        obj = (JSObject*) js_NewGCFunction(cx, GCX_OBJECT);
        obj = (JSObject*) js_NewGCFunction(cx);
#ifdef DEBUG
        memset((uint8 *) obj + sizeof(JSObject), JS_FREE_PATTERN,
               sizeof(JSFunction) - sizeof(JSObject));
#endif
    } else {
        JS_ASSERT(!objectSize || objectSize == sizeof(JSObject));
        obj = js_NewGCObject(cx, GCX_OBJECT);
        obj = js_NewGCObject(cx);
    }
    if (!obj)
        goto out;
@@ -2248,7 +2248,7 @@ js_NewObjectWithGivenProto(JSContext *cx, JSClass *clasp, JSObject *proto,
    }

    /* Check that the newborn root still holds the object. */
    JS_ASSERT_IF(!cx->localRootStack, cx->weakRoots.newborn[GCX_OBJECT] == obj);
    JS_ASSERT_IF(!cx->localRootStack, cx->weakRoots.newbornObject == obj);

    /*
     * Do not call debug hooks on trace, because we might be in a non-_FAIL
@@ -2260,7 +2260,7 @@ js_NewObjectWithGivenProto(JSContext *cx, JSClass *clasp, JSObject *proto,
        cx->debugHooks->objectHook(cx, obj, JS_TRUE,
                                   cx->debugHooks->objectHookData);
        JS_UNKEEP_ATOMS(cx->runtime);
        cx->weakRoots.newborn[GCX_OBJECT] = obj;
        cx->weakRoots.newbornObject = obj;
    }

  out:
@@ -2325,7 +2325,7 @@ NewNativeObject(JSContext* cx, JSClass* clasp, JSObject* proto,
                JSObject *parent, jsval privateSlotValue)
{
    JS_ASSERT(JS_ON_TRACE(cx));
    JSObject* obj = js_NewGCObject(cx, GCX_OBJECT);
    JSObject* obj = js_NewGCObject(cx);
    if (!obj)
        return NULL;

@@ -2664,7 +2664,7 @@ js_CloneBlockObject(JSContext *cx, JSObject *proto, JSStackFrame *fp)
    JS_ASSERT(!OBJ_IS_CLONED_BLOCK(proto));
    JS_ASSERT(STOBJ_GET_CLASS(proto) == &js_BlockClass);

    JSObject *clone = js_NewGCObject(cx, GCX_OBJECT);
    JSObject *clone = js_NewGCObject(cx);
    if (!clone)
        return NULL;

@@ -3264,7 +3264,7 @@ js_NewNativeObject(JSContext *cx, JSClass *clasp, JSObject *proto,
    JS_ASSERT(proto->map->ops == &js_ObjectOps);
    JS_ASSERT(OBJ_GET_CLASS(cx, proto) == clasp);

    JSObject* obj = js_NewGCObject(cx, GCX_OBJECT);
    JSObject* obj = js_NewGCObject(cx);
    if (!obj)
        return NULL;

@@ -3852,11 +3852,11 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
            }
        }

        added = !scope->lookup(id);
        sprop = scope->add(cx, id, getter, setter, SPROP_INVALID_SLOT, attrs,
                           flags, shortid);
        if (!sprop)
            goto error;
        added = true;
    }

    /* Store value before calling addProperty, in case the latter GC's. */
@@ -5513,7 +5513,7 @@ js_GetClassPrototype(JSContext *cx, JSObject *scope, jsid id,
             * instance that delegates to this object, or just query the
             * prototype for its class.
             */
            cx->weakRoots.newborn[GCX_OBJECT] = JSVAL_TO_GCTHING(v);
            cx->weakRoots.newbornObject = JSVAL_TO_OBJECT(v);
        }
    }
    *protop = JSVAL_IS_OBJECT(v) ? JSVAL_TO_OBJECT(v) : NULL;
js/src/jsopcode.cpp:

@@ -4099,6 +4099,9 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
                 * compiler optimizes that to |if (true)|.
                 */
                pc2 = pc + len;
                op = JSOp(*pc2);
                if (op == JSOP_TRACE || op == JSOP_NOP)
                    pc2 += JSOP_NOP_LENGTH;
                LOCAL_ASSERT(pc2 < endpc ||
                             endpc < outer->code + outer->length);
                LOCAL_ASSERT(ss2.top == 1);
js/src/jsops.cpp: 7258 lines changed (diff suppressed because it is too large).
js/src/jsrecursion.cpp: new file, 711 lines.

@@ -0,0 +1,711 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99 ft=cpp:
 *
 * ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is Mozilla SpiderMonkey JavaScript 1.9 code, released
 * June 12, 2009.
 *
 * The Initial Developer of the Original Code is
 * the Mozilla Corporation.
 *
 * Contributor(s):
 *   David Anderson <danderson@mozilla.com>
 *   Andreas Gal <gal@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either of the GNU General Public License Version 2 or later (the "GPL"),
 * or the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

class RecursiveSlotMap : public SlotMap
{
  public:
    RecursiveSlotMap(TraceRecorder& rec)
      : SlotMap(rec)
    {
    }

    JS_REQUIRES_STACK void
    adjustTypes()
    {
    }
};

#if defined DEBUG
static JS_REQUIRES_STACK void
AssertDownFrameIsConsistent(JSContext* cx, VMSideExit* anchor, FrameInfo* fi)
{
    JS_ASSERT(anchor->recursive_down);
    JS_ASSERT(anchor->recursive_down->callerHeight == fi->callerHeight);

    unsigned downPostSlots = fi->callerHeight;
    JSTraceType* typeMap = fi->get_typemap();

    js_CaptureStackTypes(cx, 1, typeMap);
    const JSTraceType* m1 = anchor->recursive_down->get_typemap();
    for (unsigned i = 0; i < downPostSlots; i++) {
        if (m1[i] == typeMap[i])
            continue;
        if (typeMap[i] == TT_INT32 && m1[i] == TT_DOUBLE)
            continue;
        JS_NOT_REACHED("invalid RECURSIVE_MISMATCH exit");
    }
    JS_ASSERT(memcmp(anchor->recursive_down, fi, sizeof(FrameInfo)) == 0);
}
#endif

JS_REQUIRES_STACK VMSideExit*
TraceRecorder::downSnapshot(FrameInfo* downFrame)
{
    JS_ASSERT(!pendingSpecializedNative);

    /* Build the typemap the exit will have. Note extra stack slot for return value. */
    unsigned downPostSlots = downFrame->callerHeight;
    unsigned ngslots = treeInfo->globalSlots->length();
    unsigned exitTypeMapLen = downPostSlots + 1 + ngslots;
    JSTraceType* exitTypeMap = (JSTraceType*)alloca(sizeof(JSTraceType) * exitTypeMapLen);
    JSTraceType* typeMap = downFrame->get_typemap();
    for (unsigned i = 0; i < downPostSlots; i++)
        exitTypeMap[i] = typeMap[i];
    exitTypeMap[downPostSlots] = determineSlotType(&stackval(-1));
    determineGlobalTypes(&exitTypeMap[downPostSlots + 1]);

    VMSideExit* exit = (VMSideExit*)
        traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(JSTraceType) * exitTypeMapLen);

    memset(exit, 0, sizeof(VMSideExit));
    exit->from = fragment;
    exit->calldepth = 0;
    JS_ASSERT(unsigned(exit->calldepth) == getCallDepth());
    exit->numGlobalSlots = ngslots;
    exit->numStackSlots = downPostSlots + 1;
    exit->numStackSlotsBelowCurrentFrame = cx->fp->down->argv ?
        nativeStackOffset(&cx->fp->argv[-2]) / sizeof(double) : 0;
    exit->exitType = UNSTABLE_LOOP_EXIT;
    exit->block = cx->fp->down->blockChain;
    exit->pc = downFrame->pc + JSOP_CALL_LENGTH;
    exit->imacpc = NULL;
    exit->sp_adj = ((downPostSlots + 1) * sizeof(double)) - treeInfo->nativeStackBase;
    exit->rp_adj = exit->calldepth * sizeof(FrameInfo*);
    exit->nativeCalleeWord = 0;
    exit->lookupFlags = js_InferFlags(cx, 0);
    memcpy(exit->fullTypeMap(), exitTypeMap, sizeof(JSTraceType) * exitTypeMapLen);
#if defined JS_JIT_SPEW
    TreevisLogExit(cx, exit);
#endif
    return exit;
}

JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::upRecursion()
{
    JS_ASSERT((JSOp)*cx->fp->down->regs->pc == JSOP_CALL);
    JS_ASSERT(js_CodeSpec[js_GetOpcode(cx, cx->fp->down->script,
              cx->fp->down->regs->pc)].length == JSOP_CALL_LENGTH);

    JS_ASSERT(callDepth == 0);

    /*
     * If some operation involving interpreter frame slurping failed, go to
     * that code right away, and don't bother with emitting the up-recursive
     * guards again.
     */
    if (anchor && (anchor->exitType == RECURSIVE_EMPTY_RP_EXIT ||
        anchor->exitType == RECURSIVE_SLURP_MISMATCH_EXIT ||
        anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT)) {
        return InjectStatus(slurpDownFrames(cx->fp->down->regs->pc));
    }

    jsbytecode* return_pc = cx->fp->down->regs->pc;
    jsbytecode* recursive_pc = return_pc + JSOP_CALL_LENGTH;

    /*
     * It is possible that the down frame isn't the same at runtime. It's not
     * enough to guard on the PC, since the typemap could be different as well.
     * To deal with this, guard that the FrameInfo on the callstack is 100%
     * identical.
     *
     * Note that though the counted slots is called "downPostSlots", this is
     * the number of slots after the CALL instruction has theoretically popped
     * callee/this/argv, but before the return value is pushed. This is
     * intended since the FrameInfo pushed by down recursion would not have
     * the return value yet. Instead, when closing the loop, the return value
     * becomes the sole stack type that deduces type stability.
     */
    unsigned totalSlots = NativeStackSlots(cx, 1);
    unsigned downPostSlots = totalSlots - NativeStackSlots(cx, 0);
    FrameInfo* fi = (FrameInfo*)alloca(sizeof(FrameInfo) + totalSlots * sizeof(JSTraceType));
    fi->block = cx->fp->blockChain;
    fi->pc = (jsbytecode*)return_pc;
    fi->imacpc = NULL;

    /*
     * Need to compute this from the down frame, since the stack could have
     * moved on this one.
     */
    fi->spdist = cx->fp->down->regs->sp - cx->fp->down->slots;
    JS_ASSERT(cx->fp->argc == cx->fp->down->argc);
    fi->set_argc(cx->fp->argc, false);
    fi->callerHeight = downPostSlots;
    fi->callerArgc = cx->fp->down->argc;

    if (anchor && anchor->exitType == RECURSIVE_MISMATCH_EXIT) {
        /*
         * Case 0: Anchoring off a RECURSIVE_MISMATCH guard. Guard on this FrameInfo.
         * This is always safe because this point is only reached on simple "call myself"
         * recursive functions.
         */
#if defined DEBUG
        AssertDownFrameIsConsistent(cx, anchor, fi);
#endif
        fi = anchor->recursive_down;
    } else if (recursive_pc != fragment->root->ip) {
        /*
         * Case 1: Guess that down-recursion has to started back out, infer types
         * from the down frame.
         */
        js_CaptureStackTypes(cx, 1, fi->get_typemap());
    } else {
        /* Case 2: Guess that up-recursion is backing out, infer types from our TreeInfo. */
        JS_ASSERT(treeInfo->nStackTypes == downPostSlots + 1);
        JSTraceType* typeMap = fi->get_typemap();
        for (unsigned i = 0; i < downPostSlots; i++)
            typeMap[i] = treeInfo->typeMap[i];
    }

    fi = traceMonitor->frameCache->memoize(fi);

    /*
     * Guard that there are more recursive frames. If coming from an anchor
     * where this was already computed, don't bother doing it again.
     */
    if (!anchor || anchor->exitType != RECURSIVE_MISMATCH_EXIT) {
        VMSideExit* exit = snapshot(RECURSIVE_EMPTY_RP_EXIT);

        /* Guard that rp >= sr + 1 */
        guard(true,
              lir->ins2(LIR_pge, lirbuf->rp,
                        lir->ins2(LIR_piadd,
                                  lir->insLoad(LIR_ldp, lirbuf->state,
                                               offsetof(InterpState, sor)),
                                  INS_CONSTWORD(sizeof(FrameInfo*)))),
              exit);
    }

    debug_only_printf(LC_TMRecorder, "guardUpRecursive fragment->root=%p fi=%p\n", (void*)fragment->root, (void*)fi);

    /* Guard that the FrameInfo above is the same FrameInfo pointer. */
    VMSideExit* exit = snapshot(RECURSIVE_MISMATCH_EXIT);
    LIns* prev_rp = lir->insLoad(LIR_ldp, lirbuf->rp, -int32_t(sizeof(FrameInfo*)));
    guard(true, lir->ins2(LIR_peq, prev_rp, INS_CONSTPTR(fi)), exit);

    /*
     * Now it's time to try and close the loop. Get a special exit that points
     * at the down frame, after the return has been propagated up.
     */
    exit = downSnapshot(fi);

    /* Move the return value down from this frame to the one below it. */
    rval_ins = get(&stackval(-1));
    if (isPromoteInt(rval_ins))
        rval_ins = demoteIns(rval_ins);

    /*
     * The native stack offset of the return value once this frame has returned, is:
     *      -treeInfo->nativeStackBase + downPostSlots * sizeof(double)
     *
     * Note, not +1, since the offset is 0-based.
     *
     * This needs to be adjusted down one frame. The amount to adjust must be
     * the amount down recursion added, which was just guarded as |downPostSlots|.
     *
     * So the offset is:
     *      -treeInfo->nativeStackBase + downPostSlots * sizeof(double) -
     *      downPostSlots * sizeof(double)
     * Or:
     *      -treeInfo->nativeStackBase
     *
     * This makes sense because this slot is just above the highest sp for the
     * down frame.
     */
    lir->insStorei(rval_ins, lirbuf->sp, -treeInfo->nativeStackBase);

    /* Adjust stacks. See above for |downPostSlots| reasoning. */
    lirbuf->sp = lir->ins2(LIR_piadd, lirbuf->sp,
                           lir->insImmWord(-int(downPostSlots) * sizeof(double)));
    lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp));
    lirbuf->rp = lir->ins2(LIR_piadd, lirbuf->rp,
                           lir->insImmWord(-int(sizeof(FrameInfo*))));
    lir->insStorei(lirbuf->rp, lirbuf->state, offsetof(InterpState, rp));

    RecursiveSlotMap slotMap(*this);
    for (unsigned i = 0; i < downPostSlots; i++)
        slotMap.addSlot(exit->stackType(i));
    slotMap.addSlot(&stackval(-1));
    VisitGlobalSlots(slotMap, cx, *treeInfo->globalSlots);
    if (recursive_pc == (jsbytecode*)fragment->root->ip) {
        debug_only_print0(LC_TMTracer, "Compiling up-recursive loop...\n");
    } else {
        debug_only_print0(LC_TMTracer, "Compiling up-recursive branch...\n");
        exit->exitType = RECURSIVE_UNLINKED_EXIT;
        exit->recursive_pc = recursive_pc;
    }
    JS_ASSERT(treeInfo->recursion != Recursion_Disallowed);
    if (treeInfo->recursion != Recursion_Detected)
        treeInfo->recursion = Recursion_Unwinds;
    return closeLoop(slotMap, exit);
}

class SlurpInfo
{
  public:
    unsigned curSlot;
    JSTraceType* typeMap;
    VMSideExit* exit;
    unsigned slurpFailSlot;
};

JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
{
    /* Missing - no go */
    if (cx->fp->argc != cx->fp->fun->nargs)
        RETURN_STOP_A("argc != nargs");

    LIns* argv_ins;
    unsigned frameDepth;
    unsigned downPostSlots;

    JSStackFrame* fp = cx->fp;
    LIns* fp_ins = addName(lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)), "fp");

    /*
     * When first emitting slurp code, do so against the down frame. After
     * popping the interpreter frame, it is illegal to resume here, as the
     * down frame has been moved up. So all this code should be skipped if
     * anchoring off such an exit.
     */
    if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
        fp_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, down)), "downFp");
        fp = fp->down;

        argv_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argv)), "argv");

        /* If recovering from a SLURP_MISMATCH, all of this is unnecessary. */
        if (!anchor || anchor->exitType != RECURSIVE_SLURP_MISMATCH_EXIT) {
            /* fp->down should not be NULL. */
            guard(false, lir->ins_peq0(fp_ins), RECURSIVE_LOOP_EXIT);

            /* fp->down->argv should not be NULL. */
            guard(false, lir->ins_peq0(argv_ins), RECURSIVE_LOOP_EXIT);

            /*
             * Guard on the script being the same. This might seem unnecessary,
             * but it lets the recursive loop end cleanly if it doesn't match.
             * With only the pc check, it is harder to differentiate between
             * end-of-recursion and recursion-returns-to-different-pc.
             */
            guard(true,
                  lir->ins2(LIR_peq,
                            addName(lir->insLoad(LIR_ldp,
                                                 fp_ins,
                                                 offsetof(JSStackFrame, script)),
                                    "script"),
                            INS_CONSTPTR(cx->fp->down->script)),
                  RECURSIVE_LOOP_EXIT);
        }

        /* fp->down->regs->pc should be == pc. */
        guard(true,
              lir->ins2(LIR_peq,
                        lir->insLoad(LIR_ldp,
                                     addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, regs)),
                                             "regs"),
                                     offsetof(JSFrameRegs, pc)),
                        INS_CONSTPTR(return_pc)),
              RECURSIVE_SLURP_MISMATCH_EXIT);

        /* fp->down->argc should be == argc. */
        guard(true,
              lir->ins2(LIR_eq,
                        addName(lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc)),
                                "argc"),
                        INS_CONST(cx->fp->argc)),
              MISMATCH_EXIT);

        /* Pop the interpreter frame. */
        LIns* args[] = { lirbuf->state, cx_ins };
        guard(false, lir->ins_eq0(lir->insCall(&js_PopInterpFrame_ci, args)), MISMATCH_EXIT);

        /* Compute slots for the down frame. */
        downPostSlots = NativeStackSlots(cx, 1) - NativeStackSlots(cx, 0);
        frameDepth = 1;
    } else {
        /* Note: loading argv from fp, not fp->down. */
        argv_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argv)), "argv");

        /* Slots for this frame, minus the return value. */
        downPostSlots = NativeStackSlots(cx, 0) - 1;
        frameDepth = 0;
    }

    /*
     * This is a special exit used as a template for the stack-slurping code.
     * LeaveTree will ignore all but the final slot, which contains the return
     * value. The slurpSlot variable keeps track of the last slot that has been
     * unboxed, as to avoid re-unboxing when taking a SLURP_FAIL exit.
     */
    unsigned numGlobalSlots = treeInfo->globalSlots->length();
    unsigned safeSlots = NativeStackSlots(cx, frameDepth) + 1 + numGlobalSlots;
    jsbytecode* recursive_pc = return_pc + JSOP_CALL_LENGTH;
    LIns* data = lir->insSkip(sizeof(VMSideExit) + sizeof(JSTraceType) * safeSlots);
    VMSideExit* exit = (VMSideExit*)data->payload();
    memset(exit, 0, sizeof(VMSideExit));
    exit->pc = (jsbytecode*)recursive_pc;
    exit->from = fragment;
    exit->exitType = RECURSIVE_SLURP_FAIL_EXIT;
    exit->numStackSlots = downPostSlots + 1;
    exit->numGlobalSlots = numGlobalSlots;
    exit->sp_adj = ((downPostSlots + 1) * sizeof(double)) - treeInfo->nativeStackBase;
    exit->recursive_pc = recursive_pc;

    /*
     * Build the exit typemap. This may capture extra types, but they are
     * thrown away.
     */
    JSTraceType* typeMap = exit->stackTypeMap();
    jsbytecode* oldpc = cx->fp->regs->pc;
    cx->fp->regs->pc = exit->pc;
    js_CaptureStackTypes(cx, frameDepth, typeMap);
    cx->fp->regs->pc = oldpc;
    typeMap[downPostSlots] = determineSlotType(&stackval(-1));
    if (typeMap[downPostSlots] == TT_INT32 &&
        oracle.isStackSlotUndemotable(cx, downPostSlots, recursive_pc)) {
        typeMap[downPostSlots] = TT_DOUBLE;
    }
    determineGlobalTypes(&typeMap[exit->numStackSlots]);
#if defined JS_JIT_SPEW
    TreevisLogExit(cx, exit);
#endif

    /*
     * Move return value to the right place, if necessary. The previous store
     * could have been killed so it is necessary to write it again.
     */
    if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
        JS_ASSERT(exit->sp_adj >= int(sizeof(double)));
        ptrdiff_t actRetOffset = exit->sp_adj - sizeof(double);
        LIns* rval = get(&stackval(-1));
        if (typeMap[downPostSlots] == TT_INT32)
            rval = demoteIns(rval);
        lir->insStorei(addName(rval, "rval"), lirbuf->sp, actRetOffset);
    }

    /* Slurp */
    SlurpInfo info;
    info.curSlot = 0;
    info.exit = exit;
    info.typeMap = typeMap;
    info.slurpFailSlot = (anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT) ?
                         anchor->slurpFailSlot : 0;

    /* callee */
    slurpSlot(lir->insLoad(LIR_ldp, argv_ins, -2 * ptrdiff_t(sizeof(jsval))),
              &fp->argv[-2],
              &info);
    /* this */
    slurpSlot(lir->insLoad(LIR_ldp, argv_ins, -1 * ptrdiff_t(sizeof(jsval))),
              &fp->argv[-1],
              &info);
    /* args[0..n] */
    for (unsigned i = 0; i < JS_MAX(fp->argc, fp->fun->nargs); i++)
        slurpSlot(lir->insLoad(LIR_ldp, argv_ins, i * sizeof(jsval)), &fp->argv[i], &info);
    /* argsobj */
    slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argsobj)), "argsobj"),
              &fp->argsobj,
              &info);
    /* vars */
    LIns* slots_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, slots)),
                              "slots");
    for (unsigned i = 0; i < fp->script->nfixed; i++)
        slurpSlot(lir->insLoad(LIR_ldp, slots_ins, i * sizeof(jsval)), &fp->slots[i], &info);
    /* stack vals */
    unsigned nfixed = fp->script->nfixed;
    jsval* stack = StackBase(fp);
    LIns* stack_ins = addName(lir->ins2(LIR_piadd,
                                        slots_ins,
                                        INS_CONSTWORD(nfixed * sizeof(jsval))),
                              "stackBase");
    size_t limit = size_t(fp->regs->sp - StackBase(fp));
    if (anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT)
        limit--;
    else
        limit -= fp->fun->nargs + 2;
    for (size_t i = 0; i < limit; i++)
        slurpSlot(lir->insLoad(LIR_ldp, stack_ins, i * sizeof(jsval)), &stack[i], &info);
||||
|
||||
JS_ASSERT(info.curSlot == downPostSlots);
|
||||
|
||||
/* Jump back to the start */
|
||||
exit = copy(exit);
|
||||
exit->exitType = UNSTABLE_LOOP_EXIT;
|
||||
#if defined JS_JIT_SPEW
|
||||
TreevisLogExit(cx, exit);
|
||||
#endif
|
||||
|
||||
/* Finally, close the loop. */
|
||||
RecursiveSlotMap slotMap(*this);
|
||||
for (unsigned i = 0; i < downPostSlots; i++)
|
||||
slotMap.addSlot(typeMap[i]);
|
||||
slotMap.addSlot(&stackval(-1));
|
||||
VisitGlobalSlots(slotMap, cx, *treeInfo->globalSlots);
|
||||
debug_only_print0(LC_TMTracer, "Compiling up-recursive slurp...\n");
|
||||
exit = copy(exit);
|
||||
if (exit->recursive_pc == fragment->root->ip)
|
||||
exit->exitType = UNSTABLE_LOOP_EXIT;
|
||||
else
|
||||
exit->exitType = RECURSIVE_UNLINKED_EXIT;
|
||||
debug_only_printf(LC_TMTreeVis, "TREEVIS CHANGEEXIT EXIT=%p TYPE=%s\n", (void*)exit,
|
||||
getExitName(exit->exitType));
|
||||
JS_ASSERT(treeInfo->recursion >= Recursion_Unwinds);
|
||||
return closeLoop(slotMap, exit);
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus
|
||||
TraceRecorder::downRecursion()
|
||||
{
|
||||
JSStackFrame* fp = cx->fp;
|
||||
if ((jsbytecode*)fragment->ip < fp->script->code ||
|
||||
(jsbytecode*)fragment->ip >= fp->script->code + fp->script->length) {
|
||||
RETURN_STOP_A("inner recursive call must compile first");
|
||||
}
|
||||
|
||||
/* Adjust the stack by the budget the down-frame needs. */
|
||||
int slots = NativeStackSlots(cx, 1) - NativeStackSlots(cx, 0);
|
||||
JS_ASSERT(unsigned(slots) == NativeStackSlots(cx, 1) - fp->argc - 2 - fp->script->nfixed - 1);
|
||||
|
||||
/* Guard that there is enough stack space. */
|
||||
JS_ASSERT(treeInfo->maxNativeStackSlots >= treeInfo->nativeStackBase / sizeof(double));
|
||||
int guardSlots = slots + treeInfo->maxNativeStackSlots -
|
||||
treeInfo->nativeStackBase / sizeof(double);
|
||||
LIns* sp_top = lir->ins2(LIR_piadd, lirbuf->sp, lir->insImmWord(guardSlots * sizeof(double)));
|
||||
guard(true, lir->ins2(LIR_plt, sp_top, eos_ins), OOM_EXIT);
|
||||
|
||||
/* Guard that there is enough call stack space. */
|
||||
LIns* rp_top = lir->ins2(LIR_piadd, lirbuf->rp, lir->insImmWord(sizeof(FrameInfo*)));
|
||||
guard(true, lir->ins2(LIR_plt, rp_top, eor_ins), OOM_EXIT);
|
||||
|
||||
/* Add space for a new JIT frame. */
|
||||
lirbuf->sp = lir->ins2(LIR_piadd, lirbuf->sp, lir->insImmWord(slots * sizeof(double)));
|
||||
lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp));
|
||||
lirbuf->rp = lir->ins2(LIR_piadd, lirbuf->rp, lir->insImmWord(sizeof(FrameInfo*)));
|
||||
lir->insStorei(lirbuf->rp, lirbuf->state, offsetof(InterpState, rp));
|
||||
--callDepth;
|
||||
clearFrameSlotsFromCache();
|
||||
|
||||
/*
|
||||
* If the callee and caller have identical call sites, this is a down-
|
||||
* recursive loop. Otherwise something special happened. For example, a
|
||||
* recursive call that is unwinding could nest back down recursively again.
|
||||
* In this case, we build a fragment that ideally we'll never invoke
|
||||
* directly, but link from a down-recursive branch. The UNLINKED_EXIT tells
|
||||
* closeLoop() that the peer trees should match the recursive pc, not the
|
||||
* tree pc.
|
||||
*/
|
||||
VMSideExit* exit;
|
||||
if ((jsbytecode*)fragment->root->ip == fp->script->code)
|
||||
exit = snapshot(UNSTABLE_LOOP_EXIT);
|
||||
else
|
||||
exit = snapshot(RECURSIVE_UNLINKED_EXIT);
|
||||
exit->recursive_pc = fp->script->code;
|
||||
debug_only_print0(LC_TMTracer, "Compiling down-recursive function call.\n");
|
||||
JS_ASSERT(treeInfo->recursion != Recursion_Disallowed);
|
||||
treeInfo->recursion = Recursion_Detected;
|
||||
return closeLoop(exit);
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpInt32Slot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
guard(true,
|
||||
lir->ins2(LIR_or,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
|
||||
INS_CONSTWORD(JSVAL_DOUBLE)),
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(1)),
|
||||
INS_CONSTWORD(1))),
|
||||
exit);
|
||||
LIns* space = lir->insAlloc(sizeof(int32));
|
||||
LIns* args[] = { space, val_ins };
|
||||
LIns* result = lir->insCall(&js_TryUnboxInt32_ci, args);
|
||||
guard(false, lir->ins_eq0(result), exit);
|
||||
LIns* int32_ins = lir->insLoad(LIR_ld, space, 0);
|
||||
return int32_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpDoubleSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
guard(true,
|
||||
lir->ins2(LIR_or,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
|
||||
INS_CONSTWORD(JSVAL_DOUBLE)),
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(1)),
|
||||
INS_CONSTWORD(1))),
|
||||
exit);
|
||||
LIns* args[] = { val_ins };
|
||||
LIns* dbl_ins = lir->insCall(&js_UnboxDouble_ci, args);
|
||||
return dbl_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpBoolSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
guard(true,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
|
||||
INS_CONSTWORD(JSVAL_SPECIAL)),
|
||||
exit);
|
||||
LIns* bool_ins = lir->ins2(LIR_pirsh, val_ins, INS_CONST(JSVAL_TAGBITS));
|
||||
bool_ins = p2i(bool_ins);
|
||||
return bool_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpStringSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
guard(true,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK)),
|
||||
INS_CONSTWORD(JSVAL_STRING)),
|
||||
exit);
|
||||
LIns* str_ins = lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(~JSVAL_TAGMASK));
|
||||
return str_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpNullSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
guard(true, lir->ins_peq0(val_ins), exit);
|
||||
return val_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpObjectSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
/* Must not be NULL */
|
||||
guard(false, lir->ins_peq0(val_ins), exit);
|
||||
|
||||
/* Must be an object */
|
||||
guard(true,
|
||||
lir->ins_peq0(lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK))),
|
||||
exit);
|
||||
|
||||
/* Must NOT have a function class */
|
||||
guard(false,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand,
|
||||
lir->insLoad(LIR_ldp, val_ins, offsetof(JSObject, classword)),
|
||||
INS_CONSTWORD(~JSSLOT_CLASS_MASK_BITS)),
|
||||
INS_CONSTPTR(&js_FunctionClass)),
|
||||
exit);
|
||||
return val_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpFunctionSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
/* Must not be NULL */
|
||||
guard(false, lir->ins_peq0(val_ins), exit);
|
||||
|
||||
/* Must be an object */
|
||||
guard(true,
|
||||
lir->ins_peq0(lir->ins2(LIR_piand, val_ins, INS_CONSTWORD(JSVAL_TAGMASK))),
|
||||
exit);
|
||||
|
||||
/* Must have a function class */
|
||||
guard(true,
|
||||
lir->ins2(LIR_peq,
|
||||
lir->ins2(LIR_piand,
|
||||
lir->insLoad(LIR_ldp, val_ins, offsetof(JSObject, classword)),
|
||||
INS_CONSTWORD(~JSSLOT_CLASS_MASK_BITS)),
|
||||
INS_CONSTPTR(&js_FunctionClass)),
|
||||
exit);
|
||||
return val_ins;
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK LIns*
|
||||
TraceRecorder::slurpSlot(LIns* val_ins, jsval* vp, VMSideExit* exit)
|
||||
{
|
||||
switch (exit->slurpType)
|
||||
{
|
||||
case TT_PSEUDOBOOLEAN:
|
||||
return slurpBoolSlot(val_ins, vp, exit);
|
||||
case TT_INT32:
|
||||
return slurpInt32Slot(val_ins, vp, exit);
|
||||
case TT_DOUBLE:
|
||||
return slurpDoubleSlot(val_ins, vp, exit);
|
||||
case TT_STRING:
|
||||
return slurpStringSlot(val_ins, vp, exit);
|
||||
case TT_NULL:
|
||||
return slurpNullSlot(val_ins, vp, exit);
|
||||
case TT_OBJECT:
|
||||
return slurpObjectSlot(val_ins, vp, exit);
|
||||
case TT_FUNCTION:
|
||||
return slurpFunctionSlot(val_ins, vp, exit);
|
||||
default:
|
||||
JS_NOT_REACHED("invalid type in typemap");
|
||||
return NULL;
|
||||
}
|
||||
}
|
||||
|
||||
JS_REQUIRES_STACK void
|
||||
TraceRecorder::slurpSlot(LIns* val_ins, jsval* vp, SlurpInfo* info)
|
||||
{
|
||||
/* Don't re-read slots that aren't needed. */
|
||||
if (info->curSlot < info->slurpFailSlot) {
|
||||
info->curSlot++;
|
||||
return;
|
||||
}
|
||||
VMSideExit* exit = copy(info->exit);
|
||||
exit->slurpFailSlot = info->curSlot;
|
||||
exit->slurpType = info->typeMap[info->curSlot];
|
||||
|
||||
#if defined DEBUG
|
||||
/* Make sure that we don't try and record infinity branches */
|
||||
JS_ASSERT_IF(anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT &&
|
||||
info->curSlot == info->slurpFailSlot,
|
||||
anchor->slurpType != exit->slurpType);
|
||||
#endif
|
||||
|
||||
LIns* val = slurpSlot(val_ins, vp, exit);
|
||||
lir->insStorei(val,
|
||||
lirbuf->sp,
|
||||
-treeInfo->nativeStackBase + ptrdiff_t(info->curSlot) * sizeof(double));
|
||||
info->curSlot++;
|
||||
}
|
||||
|
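The paired tag guards in slurpInt32Slot and slurpDoubleSlot above both encode the same predicate: the boxed jsval must already hold a number. A minimal standalone sketch of that predicate, assuming the era's 3-bit jsval tagging (JSVAL_TAGMASK == 7, JSVAL_DOUBLE == 2, low bit set on tagged ints); these constants are assumptions for illustration, not values quoted from this patch:

    #include <stdint.h>

    static const uintptr_t TAGMASK    = 7;  /* stand-in for JSVAL_TAGMASK */
    static const uintptr_t DOUBLE_TAG = 2;  /* stand-in for JSVAL_DOUBLE  */

    /* True when a boxed jsval holds a number: a heap double or a tagged int. */
    static bool IsNumberJsval(uintptr_t v)
    {
        return (v & TAGMASK) == DOUBLE_TAG || (v & 1) == 1;
    }

The int32 path then additionally calls js_TryUnboxInt32 and deoptimizes to the side exit when the double does not fit an int32.
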
@ -3083,6 +3083,7 @@ class RegExpNativeCompiler {

        Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;

        /* Must only create a VMSideExit; see StackFilter::getTops. */
        size_t len = (sizeof(GuardRecord) +
                      sizeof(VMSideExit) +
                      (re_length-1) * sizeof(jschar));
@ -3196,6 +3197,15 @@ class RegExpNativeCompiler {

        if (outOfMemory())
            goto fail;

        /*
         * Deep in the nanojit compiler, the StackFilter is trying to throw
         * away stores above the VM interpreter/native stacks. We have no such
         * stacks, so rely on the fact that lirbuf->sp and lirbuf->rp are null
         * to ensure our stores are ignored.
         */
        JS_ASSERT(!lirbuf->sp && !lirbuf->rp);

        ::compile(assm, fragment verbose_only(, tempAlloc, tm->labels));
        if (assm->error() != nanojit::None)
            goto fail;

@ -844,7 +844,7 @@ NewToken(JSTokenStream *ts, ptrdiff_t adjust)
    tp->pos.begin.index = ts->linepos +
                          (tp->ptr - ts->linebuf.base) -
                          ts->ungetpos;
    tp->pos.begin.lineno = tp->pos.end.lineno = (uint16)ts->lineno;
    tp->pos.begin.lineno = tp->pos.end.lineno = ts->lineno;
    return tp;
}

@ -923,7 +923,7 @@ js_GetToken(JSContext *cx, JSTokenStream *ts)
            if (!atom)
                goto error;
        }
        tp->pos.end.lineno = (uint16)ts->lineno;
        tp->pos.end.lineno = ts->lineno;
        tp->t_op = JSOP_STRING;
        tp->t_atom = atom;
        goto out;
@ -1028,7 +1028,7 @@ js_GetToken(JSContext *cx, JSTokenStream *ts)
            atom = atomize(cx, tb);
            if (!atom)
                goto error;
            tp->pos.end.lineno = (uint16)ts->lineno;
            tp->pos.end.lineno = ts->lineno;
            tp->t_op = JSOP_STRING;
            tp->t_atom = atom;
            tt = TOK_XMLATTR;
@ -1298,7 +1298,7 @@ retry:
        atom = atomize(cx, tb);
        if (!atom)
            goto error;
        tp->pos.end.lineno = (uint16)ts->lineno;
        tp->pos.end.lineno = ts->lineno;
        tp->t_op = JSOP_STRING;
        tp->t_atom = atom;
        tt = TOK_STRING;
@ -1532,7 +1532,7 @@ retry:
        if (!atom)
            goto error;
        tp->t_atom = atom;
        tp->pos.end.lineno = (uint16)ts->lineno;
        tp->pos.end.lineno = ts->lineno;
        goto out;
    }

@ -287,7 +287,7 @@ struct JSTokenStream {
};

#define CURRENT_TOKEN(ts)       ((ts)->tokens[(ts)->cursor])
#define ON_CURRENT_LINE(ts,pos) ((uint16)(ts)->lineno == (pos).end.lineno)
#define ON_CURRENT_LINE(ts,pos) ((ts)->lineno == (pos).end.lineno)

/* JSTokenStream flags */
#define TSF_ERROR       0x01            /* fatal error while compiling */

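Every hunk above deletes the same (uint16) cast: once a script's line count can pass 65535, the truncation silently reports wrong positions, so token positions now take ts->lineno unclipped. A two-line illustration of the failure mode (uint16/uint32 are the engine typedefs; the value is hypothetical):

    uint32 lineno  = 70000;           /* large generated scripts can exceed 2^16 - 1 */
    uint16 clipped = (uint16)lineno;  /* 70000 % 65536 == 4464: wrong line reported  */
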
@ -1022,8 +1022,27 @@ JSScope::reportReadOnlyScope(JSContext *cx)
void
JSScope::generateOwnShape(JSContext *cx)
{
    if (object)
        js_LeaveTraceIfGlobalObject(cx, object);
#ifdef JS_TRACER
    if (object) {
        js_LeaveTraceIfGlobalObject(cx, object);

        /*
         * The JIT must have arranged to re-guard after any unpredictable shape
         * change, so if we are on trace here, we should already be prepared to
         * bail off trace.
         */
        JS_ASSERT_IF(JS_ON_TRACE(cx), cx->bailExit);

        /*
         * If we are recording, here is where we forget already-guarded shapes.
         * Any subsequent property operation upon object on the trace currently
         * being recorded will re-guard (and re-memoize).
         */
        JSTraceMonitor *tm = &JS_TRACE_MONITOR(cx);
        if (TraceRecorder *tr = tm->recorder)
            tr->forgetGuardedShapesForObject(object);
    }
#endif

    shape = js_GenerateShape(cx, false);
    setOwnShape();

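The new JS_TRACER block relies on the recorder forgetting any shape guard it has memoized for the object, so that the next property operation on the recorded trace re-guards against the fresh shape. A hedged, self-contained sketch of that cache contract; std::map stands in for the patch's guardedShapeTable (a JSDHashTable), and the names are illustrative:

    #include <map>

    struct JSObject;                                     /* stand-in forward decl   */
    typedef std::map<JSObject*, unsigned> GuardedShapes; /* object -> guarded shape */

    /* On an unpredictable shape change, evict the memo; the next property op
       on the trace being recorded will re-guard (and re-memoize). */
    static void ForgetGuardedShape(GuardedShapes& memo, JSObject* obj)
    {
        memo.erase(obj);
    }
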
@ -1021,6 +1021,21 @@ static JSHashAllocOps sftbl_alloc_ops = {
    js_alloc_sftbl_entry, js_free_sftbl_entry
};

static void
FinishRuntimeScriptState(JSRuntime *rt)
{
    if (rt->scriptFilenameTable) {
        JS_HashTableDestroy(rt->scriptFilenameTable);
        rt->scriptFilenameTable = NULL;
    }
#ifdef JS_THREADSAFE
    if (rt->scriptFilenameTableLock) {
        JS_DESTROY_LOCK(rt->scriptFilenameTableLock);
        rt->scriptFilenameTableLock = NULL;
    }
#endif
}

JSBool
js_InitRuntimeScriptState(JSRuntime *rt)
{
@ -1035,7 +1050,7 @@ js_InitRuntimeScriptState(JSRuntime *rt)
        JS_NewHashTable(16, JS_HashString, js_compare_strings, NULL,
                        &sftbl_alloc_ops, NULL);
    if (!rt->scriptFilenameTable) {
        js_FinishRuntimeScriptState(rt);    /* free lock if threadsafe */
        FinishRuntimeScriptState(rt);       /* free lock if threadsafe */
        return JS_FALSE;
    }
    JS_INIT_CLIST(&rt->scriptFilenamePrefixes);
@ -1049,35 +1064,19 @@ typedef struct ScriptFilenamePrefix {
    uint32 flags;           /* user-defined flags to inherit from this prefix */
} ScriptFilenamePrefix;

void
js_FinishRuntimeScriptState(JSRuntime *rt)
{
    if (rt->scriptFilenameTable) {
        JS_HashTableDestroy(rt->scriptFilenameTable);
        rt->scriptFilenameTable = NULL;
    }
#ifdef JS_THREADSAFE
    if (rt->scriptFilenameTableLock) {
        JS_DESTROY_LOCK(rt->scriptFilenameTableLock);
        rt->scriptFilenameTableLock = NULL;
    }
#endif
}

void
js_FreeRuntimeScriptState(JSRuntime *rt)
{
    ScriptFilenamePrefix *sfp;

    if (!rt->scriptFilenameTable)
        return;

    while (!JS_CLIST_IS_EMPTY(&rt->scriptFilenamePrefixes)) {
        sfp = (ScriptFilenamePrefix *) rt->scriptFilenamePrefixes.next;
        ScriptFilenamePrefix *sfp = (ScriptFilenamePrefix *)
                                    rt->scriptFilenamePrefixes.next;
        JS_REMOVE_LINK(&sfp->links);
        js_free(sfp);
    }
    js_FinishRuntimeScriptState(rt);
    FinishRuntimeScriptState(rt);
}

#ifdef DEBUG_brendan
@ -1302,6 +1301,10 @@ js_SweepScriptFilenames(JSRuntime *rt)
    if (!rt->scriptFilenameTable)
        return;

    /*
     * JS_HashTableEnumerateEntries shrinks the table if many entries are
     * removed, preventing wasted memory on a too-sparse table.
     */
    JS_HashTableEnumerateEntries(rt->scriptFilenameTable,
                                 js_script_filename_sweeper,
                                 rt);

@ -212,17 +212,9 @@ js_InitScriptClass(JSContext *cx, JSObject *obj);
extern JSBool
js_InitRuntimeScriptState(JSRuntime *rt);

/*
 * On last context destroy for rt, if script filenames are all GC'd, free the
 * script filename table and its lock.
 */
extern void
js_FinishRuntimeScriptState(JSRuntime *rt);

/*
 * On JS_DestroyRuntime(rt), forcibly free script filename prefixes and any
 * script filename table entries that have not been GC'd, the latter using
 * js_FinishRuntimeScriptState.
 * script filename table entries that have not been GC'd.
 *
 * This allows script filename prefixes to outlive any context in rt.
 */

@ -70,6 +70,9 @@ typedef JSUint16 uint16_t;
typedef JSUint32 uint32_t;
typedef JSUint64 uint64_t;

/* Suppress other, conflicting attempts to define stdint-bits. */
#define _STDINT_H

/* If JS_STDDEF_H_HAS_INTPTR_T or JS_CRTDEFS_H_HAS_INTPTR_T are
   defined, then jsinttypes.h included the given header, which
   introduced definitions for intptr_t and uintptr_t. Otherwise,

@ -48,6 +48,8 @@
 * of rooting things that might lose their newborn root due to subsequent GC
 * allocations in the same native method.
 */
#define __STDC_LIMIT_MACROS

#include <stdlib.h>
#include <string.h>
#include "jstypes.h"
@ -297,6 +299,8 @@ str_encodeURI(JSContext *cx, uintN argc, jsval *vp);
static JSBool
str_encodeURI_Component(JSContext *cx, uintN argc, jsval *vp);

static const uint32 OVERLONG_UTF8 = UINT32_MAX;

static uint32
Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length);

@ -3025,7 +3029,7 @@ js_NewString(JSContext *cx, jschar *chars, size_t length)
        return NULL;
    }

    str = js_NewGCString(cx, GCX_STRING);
    str = js_NewGCString(cx);
    if (!str)
        return NULL;
    str->initFlat(chars, length);
@ -3095,7 +3099,7 @@ js_NewDependentString(JSContext *cx, JSString *base, size_t start,
        return js_NewStringCopyN(cx, base->chars() + start, length);
    }

    ds = js_NewGCString(cx, GCX_STRING);
    ds = js_NewGCString(cx);
    if (!ds)
        return NULL;
    if (start == 0)
@ -3643,7 +3647,7 @@ js_InflateStringToBuffer(JSContext *cx, const char *src, size_t srclen,
                n++;
            if (n > srclen)
                goto bufferTooSmall;
            if (n == 1 || n > 6)
            if (n == 1 || n > 4)
                goto badCharacter;
            for (j = 1; j < n; j++) {
                if ((src[j] & 0xC0) != 0x80)
@ -5162,7 +5166,7 @@ Encode(JSContext *cx, JSString *str, const jschar *unescapedSet,
    const jschar *chars;
    jschar c, c2;
    uint32 v;
    uint8 utf8buf[6];
    uint8 utf8buf[4];
    jschar hexBuf[4];
    static const char HexDigits[] = "0123456789ABCDEF"; /* NB: uppercase */

@ -5226,7 +5230,7 @@ Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval)
    jschar c, H;
    uint32 v;
    jsuint B;
    uint8 octets[6];
    uint8 octets[4];
    intN j, n;

    str->getCharsAndLength(chars, length);
@ -5252,7 +5256,7 @@ Decode(JSContext *cx, JSString *str, const jschar *reservedSet, jsval *rval)
            n = 1;
            while (B & (0x80 >> n))
                n++;
            if (n == 1 || n > 6)
            if (n == 1 || n > 4)
                goto report_bad_uri;
            octets[0] = (uint8)B;
            if (k + 3 * (n - 1) >= length)
@ -5351,14 +5355,14 @@ str_encodeURI_Component(JSContext *cx, uintN argc, jsval *vp)

/*
 * Convert one UCS-4 char and write it into a UTF-8 buffer, which must be at
 * least 6 bytes long. Return the number of UTF-8 bytes of data written.
 * least 4 bytes long. Return the number of UTF-8 bytes of data written.
 */
int
js_OneUcs4ToUtf8Char(uint8 *utf8Buffer, uint32 ucs4Char)
{
    int utf8Length = 1;

    JS_ASSERT(ucs4Char <= 0x7FFFFFFF);
    JS_ASSERT(ucs4Char <= 0x10FFFF);
    if (ucs4Char < 0x80) {
        *utf8Buffer = (uint8)ucs4Char;
    } else {
@ -5391,10 +5395,10 @@ Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length)
    uint32 minucs4Char;
    /* from Unicode 3.1, non-shortest form is illegal */
    static const uint32 minucs4Table[] = {
        0x00000080, 0x00000800, 0x0001000, 0x0020000, 0x0400000
        0x00000080, 0x00000800, 0x00010000
    };

    JS_ASSERT(utf8Length >= 1 && utf8Length <= 6);
    JS_ASSERT(utf8Length >= 1 && utf8Length <= 4);
    if (utf8Length == 1) {
        ucs4Char = *utf8Buffer;
        JS_ASSERT(!(ucs4Char & 0x80));
@ -5407,8 +5411,9 @@ Utf8ToOneUcs4Char(const uint8 *utf8Buffer, int utf8Length)
        JS_ASSERT((*utf8Buffer & 0xC0) == 0x80);
        ucs4Char = ucs4Char<<6 | (*utf8Buffer++ & 0x3F);
    }
    if (ucs4Char < minucs4Char ||
        ucs4Char == 0xFFFE || ucs4Char == 0xFFFF) {
    if (JS_UNLIKELY(ucs4Char < minucs4Char)) {
        ucs4Char = OVERLONG_UTF8;
    } else if (ucs4Char == 0xFFFE || ucs4Char == 0xFFFF) {
        ucs4Char = 0xFFFD;
    }
}

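Every 6-to-4 change in the jsstr.cpp hunks above follows from capping code points at U+10FFFF: past that limit the legacy 5- and 6-byte UTF-8 forms can never occur, so the buffers and length checks shrink accordingly. A self-contained restatement of the byte-length rule (this is standard UTF-8, not code from the patch):

    /* Bytes needed to encode one code point; valid input is <= 0x10FFFF. */
    static int Utf8Length(unsigned ucs4)
    {
        if (ucs4 < 0x80)    return 1;
        if (ucs4 < 0x800)   return 2;
        if (ucs4 < 0x10000) return 3;
        return 4;           /* 0x10000 .. 0x10FFFF */
    }
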
2833  js/src/jstracer.cpp  (Executable file → Normal file)
File diff suppressed because it is too large
@ -47,6 +47,7 @@
#include "jstypes.h"
#include "jsbuiltins.h"
#include "jscntxt.h"
#include "jsdhash.h"
#include "jsinterp.h"
#include "jslock.h"
#include "jsnum.h"
@ -157,18 +158,37 @@ public:

/*
 * Tracker is used to keep track of values being manipulated by the interpreter
 * during trace recording. Note that tracker pages aren't necessarily the
 * same size as OS pages, they just are a moderate-sized chunk of memory.
 * during trace recording. It maps opaque, 4-byte aligned addresses to LIns
 * pointers. To do this efficiently, we observe that the addresses of jsvals
 * living in the interpreter tend to be aggregated close to each other -
 * usually on the same page (where a tracker page doesn't have to be the same
 * size as the OS page size, but it's typically similar). The Tracker
 * consists of a linked-list of structures representing a memory page, which
 * are created on-demand as memory locations are used.
 *
 * For every address, first we split it into two parts: upper bits which
 * represent the "base", and lower bits which represent an offset against the
 * base. For the offset, we then right-shift it by two because the bottom two
 * bits of a 4-byte aligned address are always zero. The mapping then
 * becomes:
 *
 *   page = page in pagelist such that Base(address) == page->base,
 *   page->map[Offset(address)]
 */
class Tracker {
    #define TRACKER_PAGE_SZB        4096
    #define TRACKER_PAGE_ENTRIES    (TRACKER_PAGE_SZB >> 2)    // each slot is 4 bytes
    #define TRACKER_PAGE_MASK       jsuword(TRACKER_PAGE_SZB - 1)

    struct TrackerPage {
        struct TrackerPage* next;
        jsuword             base;
        nanojit::LIns*      map[1];
        nanojit::LIns*      map[TRACKER_PAGE_ENTRIES];
    };
    struct TrackerPage* pagelist;

    jsuword             getTrackerPageBase(const void* v) const;
    jsuword             getTrackerPageOffset(const void* v) const;
    struct TrackerPage* findTrackerPage(const void* v) const;
    struct TrackerPage* addTrackerPage(const void* v);
public:
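A minimal sketch of the base/offset split the rewritten Tracker comment describes, assuming TRACKER_PAGE_SZB == 4096 from the hunk and 4-byte aligned input addresses (the typedef is a stand-in for the engine's jsuword):

    typedef unsigned long jsuword_t;                  /* stand-in for jsuword */

    static const jsuword_t PAGE_SZB  = 4096;
    static const jsuword_t PAGE_MASK = PAGE_SZB - 1;

    static jsuword_t TrackerPageBase(const void* v)
    {
        return (jsuword_t)v & ~PAGE_MASK;             /* upper bits: which page   */
    }
    static jsuword_t TrackerPageOffset(const void* v)
    {
        return ((jsuword_t)v & PAGE_MASK) >> 2;       /* low two bits always zero */
    }
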
@ -260,7 +280,9 @@ public:
    JS_REQUIRES_STACK void markGlobalSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK bool isGlobalSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot);
    JS_REQUIRES_STACK void markStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc);
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot) const;
    JS_REQUIRES_STACK bool isStackSlotUndemotable(JSContext* cx, unsigned slot, const void* pc) const;
    void markInstructionUndemotable(jsbytecode* pc);
    bool isInstructionUndemotable(jsbytecode* pc) const;

@ -365,7 +387,19 @@ public:
    _(UNSTABLE_LOOP)                                                        \
    _(TIMEOUT)                                                              \
    _(DEEP_BAIL)                                                            \
    _(STATUS)
    _(STATUS)                                                               \
    /* Exit is almost recursive and wants a peer at recursive_pc */         \
    _(RECURSIVE_UNLINKED)                                                   \
    /* Exit is recursive, and there are no more frames */                   \
    _(RECURSIVE_LOOP)                                                       \
    /* Exit is recursive, but type-mismatched guarding on a down frame */   \
    _(RECURSIVE_MISMATCH)                                                   \
    /* Exit is recursive, and the JIT wants to try slurping interp frames */\
    _(RECURSIVE_EMPTY_RP)                                                   \
    /* Slurping interp frames in up-recursion failed */                     \
    _(RECURSIVE_SLURP_FAIL)                                                 \
    /* Tried to slurp an interp frame, but the pc or argc mismatched */     \
    _(RECURSIVE_SLURP_MISMATCH)

enum ExitType {
    #define MAKE_EXIT_CODE(x) x##_EXIT,
@ -374,6 +408,8 @@ enum ExitType {
    TOTAL_EXIT_TYPES
};

struct FrameInfo;

struct VMSideExit : public nanojit::SideExit
{
    JSObject* block;
@ -387,6 +423,11 @@ struct VMSideExit : public nanojit::SideExit
    uint32 numStackSlotsBelowCurrentFrame;
    ExitType exitType;
    uintN lookupFlags;
    void* recursive_pc;
    FrameInfo* recursive_down;
    unsigned hitcount;
    unsigned slurpFailSlot;
    JSTraceType slurpType;

    /*
     * Ordinarily 0. If a slow native function is atop the stack, the 1 bit is
@ -410,6 +451,11 @@ struct VMSideExit : public nanojit::SideExit
        return (JSTraceType*)(this + 1);
    }

    inline JSTraceType& stackType(unsigned i) {
        JS_ASSERT(i < numStackSlots);
        return stackTypeMap()[i];
    }

    inline JSTraceType* globalTypeMap() {
        return (JSTraceType*)(this + 1) + this->numStackSlots;
    }
@ -438,6 +484,45 @@ public:
        return mOutOfMemory;
    }

    struct Mark
    {
        VMAllocator& vma;
        bool committed;
        nanojit::Allocator::Chunk* saved_chunk;
        char* saved_top;
        char* saved_limit;
        size_t saved_size;

        Mark(VMAllocator& vma) :
            vma(vma),
            committed(false),
            saved_chunk(vma.current_chunk),
            saved_top(vma.current_top),
            saved_limit(vma.current_limit),
            saved_size(vma.mSize)
        {}

        ~Mark()
        {
            if (!committed)
                vma.rewind(*this);
        }

        void commit() { committed = true; }
    };

    void rewind(const Mark& m) {
        while (current_chunk != m.saved_chunk) {
            Chunk *prev = current_chunk->prev;
            freeChunk(current_chunk);
            current_chunk = prev;
        }
        current_top = m.saved_top;
        current_limit = m.saved_limit;
        mSize = m.saved_size;
        memset(current_top, 0, current_limit - current_top);
    }

    bool mOutOfMemory;
    size_t mSize;

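A hedged usage sketch of the Mark/rewind idiom added above: everything allocated after the mark is discarded by ~Mark() unless commit() runs first, which lets a failed compile release its LIR without resetting the whole arena. The surrounding names (tempAlloc, compiledOk) are illustrative, not from the patch:

    {
        VMAllocator::Mark mark(tempAlloc);
        /* ... allocate LIR, side exits, typemaps ... */
        if (compiledOk)
            mark.commit();    /* keep the allocations */
    }                         /* otherwise ~Mark() rewinds to the saved state */
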
@ -481,6 +566,8 @@ struct REHashFn {
|
||||
}
|
||||
};
|
||||
|
||||
class TreeInfo;
|
||||
|
||||
struct FrameInfo {
|
||||
JSObject* block; // caller block chain head
|
||||
jsbytecode* pc; // caller fp->regs->pc
|
||||
@ -516,6 +603,7 @@ struct FrameInfo {
|
||||
|
||||
// The typemap just before the callee is called.
|
||||
JSTraceType* get_typemap() { return (JSTraceType*) (this+1); }
|
||||
const JSTraceType* get_typemap() const { return (JSTraceType*) (this+1); }
|
||||
};
|
||||
|
||||
struct UnstableExit
|
||||
@ -525,6 +613,21 @@ struct UnstableExit
|
||||
UnstableExit* next;
|
||||
};
|
||||
|
||||
enum MonitorReason
|
||||
{
|
||||
Monitor_Branch,
|
||||
Monitor_EnterFrame,
|
||||
Monitor_LeaveFrame
|
||||
};
|
||||
|
||||
enum RecursionStatus
|
||||
{
|
||||
Recursion_None, /* No recursion has been compiled yet. */
|
||||
Recursion_Disallowed, /* This tree cannot be recursive. */
|
||||
Recursion_Unwinds, /* Tree is up-recursive only. */
|
||||
Recursion_Detected /* Tree has down recursion and maybe up recursion. */
|
||||
};
|
||||
|
||||
class TreeInfo {
|
||||
public:
|
||||
nanojit::Fragment* const fragment;
|
||||
@ -550,6 +653,7 @@ public:
|
||||
uintN treeLineNumber;
|
||||
uintN treePCOffset;
|
||||
#endif
|
||||
RecursionStatus recursion;
|
||||
|
||||
TreeInfo(nanojit::Allocator* alloc,
|
||||
nanojit::Fragment* _fragment,
|
||||
@ -568,7 +672,8 @@ public:
|
||||
sideExits(alloc),
|
||||
unstableExits(NULL),
|
||||
gcthings(alloc),
|
||||
sprops(alloc)
|
||||
sprops(alloc),
|
||||
recursion(Recursion_None)
|
||||
{}
|
||||
|
||||
inline unsigned nGlobalTypes() {
|
||||
@ -584,7 +689,7 @@ public:
|
||||
UnstableExit* removeUnstableExit(VMSideExit* exit);
|
||||
};
|
||||
|
||||
#if defined(JS_JIT_SPEW) && (defined(NANOJIT_IA32) || (defined(NANOJIT_AMD64) && defined(__GNUC__)))
|
||||
#if defined(JS_JIT_SPEW) && (defined(NANOJIT_IA32) || defined(NANOJIT_X64))
|
||||
# define EXECUTE_TREE_TIMER
|
||||
#endif
|
||||
|
||||
@ -600,6 +705,7 @@ struct InterpState
|
||||
JSContext *cx; // current VM context handle
|
||||
double *eos; // first unusable word after the native stack
|
||||
void *eor; // first unusable word after the call stack
|
||||
void *sor; // start of rp stack
|
||||
VMSideExit* lastTreeExitGuard; // guard we exited on during a tree call
|
||||
VMSideExit* lastTreeCallGuard; // guard we want to grow from if the tree
|
||||
// call exit guard mismatched
|
||||
@ -625,7 +731,7 @@ struct InterpState
|
||||
double* deepBailSp;
|
||||
|
||||
|
||||
// Used when calling natives from trace to root the vp vector. */
|
||||
// Used when calling natives from trace to root the vp vector.
|
||||
uintN nativeVpLen;
|
||||
jsval *nativeVp;
|
||||
};
|
||||
@ -653,35 +759,120 @@ js_SetBuiltinError(JSContext *cx)
|
||||
cx->interpState->builtinStatus |= JSBUILTIN_ERROR;
|
||||
}
|
||||
|
||||
#ifdef DEBUG_JSRS_NOT_BOOL
|
||||
struct JSRecordingStatus {
|
||||
#ifdef DEBUG_RECORDING_STATUS_NOT_BOOL
|
||||
/* #define DEBUG_RECORDING_STATUS_NOT_BOOL to detect misuses of RecordingStatus */
|
||||
struct RecordingStatus {
|
||||
int code;
|
||||
bool operator==(JSRecordingStatus &s) { return this->code == s.code; };
|
||||
bool operator!=(JSRecordingStatus &s) { return this->code != s.code; };
|
||||
bool operator==(RecordingStatus &s) { return this->code == s.code; };
|
||||
bool operator!=(RecordingStatus &s) { return this->code != s.code; };
|
||||
};
|
||||
enum JSRScodes {
|
||||
JSRS_ERROR_code,
|
||||
JSRS_STOP_code,
|
||||
JSRS_CONTINUE_code,
|
||||
JSRS_IMACRO_code
|
||||
enum RecordingStatusCodes {
|
||||
RECORD_ERROR_code = 0,
|
||||
RECORD_STOP_code = 1,
|
||||
|
||||
RECORD_CONTINUE_code = 3,
|
||||
RECORD_IMACRO_code = 4
|
||||
};
|
||||
struct JSRecordingStatus JSRS_CONTINUE = { JSRS_CONTINUE_code };
|
||||
struct JSRecordingStatus JSRS_STOP = { JSRS_STOP_code };
|
||||
struct JSRecordingStatus JSRS_IMACRO = { JSRS_IMACRO_code };
|
||||
struct JSRecordingStatus JSRS_ERROR = { JSRS_ERROR_code };
|
||||
#define STATUS_ABORTS_RECORDING(s) ((s) == JSRS_STOP || (s) == JSRS_ERROR)
|
||||
RecordingStatus RECORD_CONTINUE = { RECORD_CONTINUE_code };
|
||||
RecordingStatus RECORD_STOP = { RECORD_STOP_code };
|
||||
RecordingStatus RECORD_IMACRO = { RECORD_IMACRO_code };
|
||||
RecordingStatus RECORD_ERROR = { RECORD_ERROR_code };
|
||||
|
||||
struct AbortableRecordingStatus {
|
||||
int code;
|
||||
bool operator==(AbortableRecordingStatus &s) { return this->code == s.code; };
|
||||
bool operator!=(AbortableRecordingStatus &s) { return this->code != s.code; };
|
||||
};
|
||||
enum AbortableRecordingStatusCodes {
|
||||
ARECORD_ERROR_code = 0,
|
||||
ARECORD_STOP_code = 1,
|
||||
ARECORD_ABORTED_code = 2,
|
||||
ARECORD_CONTINUE_code = 3,
|
||||
ARECORD_IMACRO_code = 4
|
||||
};
|
||||
AbortableRecordingStatus ARECORD_ERROR = { ARECORD_ERROR_code };
|
||||
AbortableRecordingStatus ARECORD_STOP = { ARECORD_STOP_code };
|
||||
AbortableRecordingStatus ARECORD_CONTINUE = { ARECORD_CONTINUE_code };
|
||||
AbortableRecordingStatus ARECORD_IMACRO = { ARECORD_IMACRO_code };
|
||||
AbortableRecordingStatus ARECORD_ABORTED = { ARECORD_ABORTED_code };
|
||||
|
||||
static inline AbortableRecordingStatus
|
||||
InjectStatus(RecordingStatus rs)
|
||||
{
|
||||
AbortableRecordingStatus ars = { rs.code };
|
||||
return ars;
|
||||
}
|
||||
static inline AbortableRecordingStatus
|
||||
InjectStatus(AbortableRecordingStatus ars)
|
||||
{
|
||||
return ars;
|
||||
}
|
||||
|
||||
static inline bool
|
||||
StatusAbortsRecording(AbortableRecordingStatus ars)
|
||||
{
|
||||
return ars == ARECORD_ERROR || ars == ARECORD_STOP || ars == ARECORD_ABORTED;
|
||||
}
|
||||
#else
|
||||
enum JSRecordingStatus {
|
||||
JSRS_ERROR, // Error; propagate to interpreter.
|
||||
JSRS_STOP, // Abort recording.
|
||||
JSRS_CONTINUE, // Continue recording.
|
||||
JSRS_IMACRO // Entered imacro; continue recording.
|
||||
// Only JSOP_IS_IMACOP opcodes may return this.
|
||||
|
||||
/*
|
||||
* Normally, during recording, when the recorder cannot continue, it returns
|
||||
* ARECORD_STOP to indicate that recording should be aborted by the top-level
|
||||
* recording function. However, if the recorder reenters the interpreter (e.g.,
|
||||
* when executing an inner loop), there will be an immediate abort. This
|
||||
* condition must be carefully detected and propagated out of all nested
|
||||
* recorder calls lest the now-invalid TraceRecorder object be accessed
|
||||
* accidentally. This condition is indicated by the ARECORD_ABORTED value.
|
||||
*
|
||||
* The AbortableRecordingStatus enumeration represents the general set of
|
||||
* possible results of calling a recorder function. Functions that cannot
|
||||
* possibly return ARECORD_ABORTED may statically guarantee this to the caller
|
||||
* using the RecordingStatus enumeration. Ideally, C++ would allow subtyping
|
||||
* of enumerations, but it doesn't. To simulate subtype conversion manually,
|
||||
* code should call InjectStatus to inject a value of the restricted set into a
|
||||
* value of the general set.
|
||||
*/
|
||||
|
||||
enum RecordingStatus {
|
||||
RECORD_ERROR = 0, // Error; propagate to interpreter.
|
||||
RECORD_STOP = 1, // Recording should be aborted at the top-level
|
||||
// call to the recorder.
|
||||
// (value reserved for ARECORD_ABORTED)
|
||||
RECORD_CONTINUE = 3, // Continue recording.
|
||||
RECORD_IMACRO = 4 // Entered imacro; continue recording.
|
||||
// Only JSOP_IS_IMACOP opcodes may return this.
|
||||
};
|
||||
#define STATUS_ABORTS_RECORDING(s) ((s) <= JSRS_STOP)
|
||||
|
||||
enum AbortableRecordingStatus {
|
||||
ARECORD_ERROR = 0,
|
||||
ARECORD_STOP = 1,
|
||||
ARECORD_ABORTED = 2, // Recording has already been aborted; the recorder
|
||||
// has been deleted.
|
||||
ARECORD_CONTINUE = 3,
|
||||
ARECORD_IMACRO = 4
|
||||
};
|
||||
|
||||
static JS_ALWAYS_INLINE AbortableRecordingStatus
|
||||
InjectStatus(RecordingStatus rs)
|
||||
{
|
||||
return static_cast<AbortableRecordingStatus>(rs);
|
||||
}
|
||||
|
||||
static JS_ALWAYS_INLINE AbortableRecordingStatus
|
||||
InjectStatus(AbortableRecordingStatus ars)
|
||||
{
|
||||
return ars;
|
||||
}
|
||||
|
||||
static JS_ALWAYS_INLINE bool
|
||||
StatusAbortsRecording(AbortableRecordingStatus ars)
|
||||
{
|
||||
return ars <= ARECORD_ABORTED;
|
||||
}
|
||||
#endif
|
||||
|
||||
class SlotMap;
|
||||
class SlurpInfo;
|
||||
|
||||
/* Results of trying to compare two typemaps together */
|
||||
enum TypeConsensus
|
||||
@ -693,6 +884,7 @@ enum TypeConsensus
|
||||
|
||||
class TraceRecorder {
|
||||
VMAllocator& tempAlloc;
|
||||
VMAllocator::Mark mark;
|
||||
JSContext* cx;
|
||||
JSTraceMonitor* traceMonitor;
|
||||
JSObject* globalObj;
|
||||
@ -735,6 +927,7 @@ class TraceRecorder {
|
||||
uint32 outerArgc; /* outer trace deepest frame argc */
|
||||
bool loop;
|
||||
nanojit::LIns* loopLabel;
|
||||
MonitorReason monitorReason;
|
||||
|
||||
nanojit::LIns* insImmObj(JSObject* obj);
|
||||
nanojit::LIns* insImmFun(JSFunction* fun);
|
||||
@ -756,6 +949,24 @@ class TraceRecorder {
|
||||
|
||||
JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, ExitType exitType);
|
||||
JS_REQUIRES_STACK void guard(bool expected, nanojit::LIns* cond, VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpInt32Slot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpDoubleSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpStringSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpObjectSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpFunctionSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpNullSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpBoolSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK nanojit::LIns* slurpSlot(nanojit::LIns* val_ins, jsval* vp,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK void slurpSlot(nanojit::LIns* val_ins, jsval* vp, SlurpInfo* info);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus slurpDownFrames(jsbytecode* return_pc);
|
||||
|
||||
nanojit::LIns* addName(nanojit::LIns* ins, const char* name);
|
||||
|
||||
@ -770,7 +981,8 @@ class TraceRecorder {
|
||||
JS_REQUIRES_STACK void checkForGlobalObjectReallocation();
|
||||
|
||||
JS_REQUIRES_STACK TypeConsensus selfTypeStability(SlotMap& smap);
|
||||
JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, VMFragment** peer);
|
||||
JS_REQUIRES_STACK TypeConsensus peerTypeStability(SlotMap& smap, const void* ip,
|
||||
VMFragment** peer);
|
||||
|
||||
JS_REQUIRES_STACK jsval& argval(unsigned n) const;
|
||||
JS_REQUIRES_STACK jsval& varval(unsigned n) const;
|
||||
@ -789,9 +1001,9 @@ class TraceRecorder {
|
||||
|
||||
JS_REQUIRES_STACK nanojit::LIns* scopeChain() const;
|
||||
JS_REQUIRES_STACK JSStackFrame* frameIfInRange(JSObject* obj, unsigned* depthp = NULL) const;
|
||||
JS_REQUIRES_STACK JSRecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus scopeChainProp(JSObject* obj, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
JS_REQUIRES_STACK JSRecordingStatus callProp(JSObject* obj, JSProperty* sprop, jsid id, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
JS_REQUIRES_STACK RecordingStatus traverseScopeChain(JSObject *obj, nanojit::LIns *obj_ins, JSObject *obj2, nanojit::LIns *&obj2_ins);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus scopeChainProp(JSObject* obj, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
JS_REQUIRES_STACK RecordingStatus callProp(JSObject* obj, JSProperty* sprop, jsid id, jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
|
||||
JS_REQUIRES_STACK nanojit::LIns* arg(unsigned n);
|
||||
JS_REQUIRES_STACK void arg(unsigned n, nanojit::LIns* i);
|
||||
@ -810,45 +1022,53 @@ class TraceRecorder {
|
||||
|
||||
JS_REQUIRES_STACK nanojit::LIns* newArguments();
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus call_imacro(jsbytecode* imacro);
|
||||
JS_REQUIRES_STACK RecordingStatus call_imacro(jsbytecode* imacro);
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus ifop();
|
||||
JS_REQUIRES_STACK JSRecordingStatus switchop();
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus ifop();
|
||||
JS_REQUIRES_STACK RecordingStatus switchop();
|
||||
#ifdef NANOJIT_IA32
|
||||
JS_REQUIRES_STACK JSRecordingStatus tableswitch();
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus tableswitch();
|
||||
#endif
|
||||
JS_REQUIRES_STACK JSRecordingStatus inc(jsval& v, jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK JSRecordingStatus inc(jsval v, nanojit::LIns*& v_ins, jsint incr,
|
||||
JS_REQUIRES_STACK RecordingStatus inc(jsval& v, jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK RecordingStatus inc(jsval v, nanojit::LIns*& v_ins, jsint incr,
|
||||
bool pre = true);
|
||||
JS_REQUIRES_STACK JSRecordingStatus incHelper(jsval v, nanojit::LIns* v_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus incHelper(jsval v, nanojit::LIns* v_ins,
|
||||
nanojit::LIns*& v_after, jsint incr);
|
||||
JS_REQUIRES_STACK JSRecordingStatus incProp(jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK JSRecordingStatus incElem(jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK JSRecordingStatus incName(jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus incProp(jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK RecordingStatus incElem(jsint incr, bool pre = true);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus incName(jsint incr, bool pre = true);
|
||||
|
||||
JS_REQUIRES_STACK void strictEquality(bool equal, bool cmpCase);
|
||||
JS_REQUIRES_STACK JSRecordingStatus equality(bool negate, bool tryBranchAfterCond);
|
||||
JS_REQUIRES_STACK JSRecordingStatus equalityHelper(jsval l, jsval r,
|
||||
nanojit::LIns* l_ins, nanojit::LIns* r_ins,
|
||||
bool negate, bool tryBranchAfterCond,
|
||||
jsval& rval);
|
||||
JS_REQUIRES_STACK JSRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus equality(bool negate, bool tryBranchAfterCond);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus equalityHelper(jsval l, jsval r,
|
||||
nanojit::LIns* l_ins, nanojit::LIns* r_ins,
|
||||
bool negate, bool tryBranchAfterCond,
|
||||
jsval& rval);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus relational(nanojit::LOpcode op, bool tryBranchAfterCond);
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus unary(nanojit::LOpcode op);
|
||||
JS_REQUIRES_STACK JSRecordingStatus binary(nanojit::LOpcode op);
|
||||
JS_REQUIRES_STACK RecordingStatus unary(nanojit::LOpcode op);
|
||||
JS_REQUIRES_STACK RecordingStatus binary(nanojit::LOpcode op);
|
||||
|
||||
JS_REQUIRES_STACK void guardShape(nanojit::LIns* obj_ins, JSObject* obj,
|
||||
uint32 shape, const char* guardName,
|
||||
nanojit::LIns* map_ins, VMSideExit* exit);
|
||||
JS_REQUIRES_STACK RecordingStatus guardShape(nanojit::LIns* obj_ins, JSObject* obj,
|
||||
uint32 shape, const char* name,
|
||||
nanojit::LIns* map_ins, VMSideExit* exit);
|
||||
|
||||
JSDHashTable guardedShapeTable;
|
||||
|
||||
#if defined DEBUG_notme && defined XP_UNIX
|
||||
void dumpGuardedShapes(const char* prefix);
|
||||
#endif
|
||||
|
||||
void forgetGuardedShapes();
|
||||
|
||||
inline nanojit::LIns* map(nanojit::LIns *obj_ins);
|
||||
JS_REQUIRES_STACK bool map_is_native(JSObjectMap* map, nanojit::LIns* map_ins,
|
||||
nanojit::LIns*& ops_ins, size_t op_offset = 0);
|
||||
JS_REQUIRES_STACK JSRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
|
||||
JSObject*& obj2, jsuword& pcval);
|
||||
JS_REQUIRES_STACK JSRecordingStatus guardNativePropertyOp(JSObject* aobj,
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus test_property_cache(JSObject* obj, nanojit::LIns* obj_ins,
|
||||
JSObject*& obj2, jsuword& pcval);
|
||||
JS_REQUIRES_STACK RecordingStatus guardNativePropertyOp(JSObject* aobj,
|
||||
nanojit::LIns* map_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus guardPropertyCacheHit(nanojit::LIns* obj_ins,
|
||||
nanojit::LIns* map_ins,
|
||||
JSObject* aobj,
|
||||
JSObject* obj2,
|
||||
@ -882,44 +1102,44 @@ class TraceRecorder {
|
||||
|
||||
nanojit::LIns* getStringLength(nanojit::LIns* str_ins);
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus name(jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
JS_REQUIRES_STACK JSRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins, uint32 *slotp,
|
||||
nanojit::LIns** v_insp, jsval* outp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp,
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus name(jsval*& vp, nanojit::LIns*& ins, NameResult& nr);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus prop(JSObject* obj, nanojit::LIns* obj_ins, uint32 *slotp,
|
||||
nanojit::LIns** v_insp, jsval* outp);
|
||||
JS_REQUIRES_STACK RecordingStatus denseArrayElement(jsval& oval, jsval& idx, jsval*& vp,
|
||||
nanojit::LIns*& v_ins,
|
||||
nanojit::LIns*& addr_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getProp(jsval& v);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getThis(nanojit::LIns*& this_ins);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus getProp(JSObject* obj, nanojit::LIns* obj_ins);
|
||||
JS_REQUIRES_STACK AbortableRecordingStatus getProp(jsval& v);
|
||||
JS_REQUIRES_STACK RecordingStatus getThis(nanojit::LIns*& this_ins);
|
||||
|
||||
JS_REQUIRES_STACK VMSideExit* enterDeepBailCall();
|
||||
JS_REQUIRES_STACK void leaveDeepBailCall();
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus primitiveToStringInPlace(jsval* vp);
|
||||
JS_REQUIRES_STACK RecordingStatus primitiveToStringInPlace(jsval* vp);
|
||||
JS_REQUIRES_STACK void finishGetProp(nanojit::LIns* obj_ins, nanojit::LIns* vp_ins,
|
||||
nanojit::LIns* ok_ins, jsval* outp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getPropertyByName(nanojit::LIns* obj_ins, jsval* idvalp,
|
||||
JS_REQUIRES_STACK RecordingStatus getPropertyByName(nanojit::LIns* obj_ins, jsval* idvalp,
|
||||
jsval* outp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus getPropertyByIndex(nanojit::LIns* obj_ins,
|
||||
nanojit::LIns* index_ins, jsval* outp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getPropertyById(nanojit::LIns* obj_ins, jsval* outp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus getPropertyById(nanojit::LIns* obj_ins, jsval* outp);
|
||||
JS_REQUIRES_STACK RecordingStatus getPropertyWithNativeGetter(nanojit::LIns* obj_ins,
|
||||
JSScopeProperty* sprop,
|
||||
jsval* outp);
|
||||
|
||||
JS_REQUIRES_STACK JSRecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus nativeSet(JSObject* obj, nanojit::LIns* obj_ins,
|
||||
JSScopeProperty* sprop,
|
||||
jsval v, nanojit::LIns* v_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus setProp(jsval &l, JSPropCacheEntry* entry,
|
||||
JS_REQUIRES_STACK RecordingStatus setProp(jsval &l, JSPropCacheEntry* entry,
|
||||
JSScopeProperty* sprop,
|
||||
jsval &v, nanojit::LIns*& v_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus setCallProp(JSObject *callobj, nanojit::LIns *callobj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus setCallProp(JSObject *callobj, nanojit::LIns *callobj_ins,
|
||||
JSScopeProperty *sprop, nanojit::LIns *v_ins,
|
||||
jsval v);
|
||||
JS_REQUIRES_STACK JSRecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByName(nanojit::LIns* obj_ins,
|
||||
jsval* idvalp, jsval* rvalp,
|
||||
bool init);
|
||||
JS_REQUIRES_STACK JSRecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
|
||||
JS_REQUIRES_STACK RecordingStatus initOrSetPropertyByIndex(nanojit::LIns* obj_ins,
|
||||
nanojit::LIns* index_ins,
|
||||
jsval* rvalp, bool init);
|
||||
|
||||
@ -934,44 +1154,44 @@ class TraceRecorder {
|
||||
JS_REQUIRES_STACK bool guardHasPrototype(JSObject* obj, nanojit::LIns* obj_ins,
|
||||
JSObject** pobj, nanojit::LIns** pobj_ins,
|
||||
VMSideExit* exit);
|
||||
JS_REQUIRES_STACK JSRecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
|
||||
JS_REQUIRES_STACK RecordingStatus guardPrototypeHasNoIndexedProperties(JSObject* obj,
|
||||
nanojit::LIns* obj_ins,
|
||||
ExitType exitType);
|
||||
JS_REQUIRES_STACK JSRecordingStatus guardNotGlobalObject(JSObject* obj,
|
||||
JS_REQUIRES_STACK RecordingStatus guardNotGlobalObject(JSObject* obj,
|
||||
nanojit::LIns* obj_ins);
|
||||
void clearFrameSlotsFromCache();
|
||||
JS_REQUIRES_STACK void putArguments();
|
||||
JS_REQUIRES_STACK JSRecordingStatus guardCallee(jsval& callee);
|
||||
JS_REQUIRES_STACK RecordingStatus guardCallee(jsval& callee);
|
||||
JS_REQUIRES_STACK JSStackFrame *guardArguments(JSObject *obj, nanojit::LIns* obj_ins,
|
||||
unsigned *depthp);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getClassPrototype(JSObject* ctor,
|
||||
JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSObject* ctor,
|
||||
nanojit::LIns*& proto_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus getClassPrototype(JSProtoKey key,
|
||||
JS_REQUIRES_STACK RecordingStatus getClassPrototype(JSProtoKey key,
|
||||
nanojit::LIns*& proto_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus newArray(JSObject* ctor, uint32 argc, jsval* argv,
|
||||
JS_REQUIRES_STACK RecordingStatus newArray(JSObject* ctor, uint32 argc, jsval* argv,
|
||||
jsval* rval);
|
||||
JS_REQUIRES_STACK JSRecordingStatus newString(JSObject* ctor, uint32 argc, jsval* argv,
|
||||
JS_REQUIRES_STACK RecordingStatus newString(JSObject* ctor, uint32 argc, jsval* argv,
|
||||
jsval* rval);
|
||||
JS_REQUIRES_STACK JSRecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun,
|
||||
JS_REQUIRES_STACK RecordingStatus interpretedFunctionCall(jsval& fval, JSFunction* fun,
|
||||
uintN argc, bool constructing);
|
||||
JS_REQUIRES_STACK void propagateFailureToBuiltinStatus(nanojit::LIns *ok_ins,
|
||||
nanojit::LIns *&status_ins);
|
||||
JS_REQUIRES_STACK JSRecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
|
||||
JS_REQUIRES_STACK RecordingStatus emitNativeCall(JSSpecializedNative* sn, uintN argc,
|
||||
nanojit::LIns* args[], bool rooted);
|
||||
    JS_REQUIRES_STACK void emitNativePropertyOp(JSScope* scope,
                                                JSScopeProperty* sprop,
                                                nanojit::LIns* obj_ins,
                                                bool setflag,
                                                nanojit::LIns* boxed_ins);
    JS_REQUIRES_STACK JSRecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
    JS_REQUIRES_STACK RecordingStatus callSpecializedNative(JSNativeTraceInfo* trcinfo, uintN argc,
                                                             bool constructing);
    JS_REQUIRES_STACK JSRecordingStatus callNative(uintN argc, JSOp mode);
    JS_REQUIRES_STACK JSRecordingStatus functionCall(uintN argc, JSOp mode);
    JS_REQUIRES_STACK RecordingStatus callNative(uintN argc, JSOp mode);
    JS_REQUIRES_STACK RecordingStatus functionCall(uintN argc, JSOp mode);

    JS_REQUIRES_STACK void trackCfgMerges(jsbytecode* pc);
    JS_REQUIRES_STACK void emitIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK void fuseIf(jsbytecode* pc, bool cond, nanojit::LIns* x);
    JS_REQUIRES_STACK JSRecordingStatus checkTraceEnd(jsbytecode* pc);
    JS_REQUIRES_STACK AbortableRecordingStatus checkTraceEnd(jsbytecode* pc);

    bool hasMethod(JSObject* obj, jsid id);
    JS_REQUIRES_STACK bool hasIteratorMethod(JSObject* obj);
@ -996,13 +1216,13 @@ public:
    TraceRecorder(JSContext* cx, VMSideExit*, nanojit::Fragment*, TreeInfo*,
                  unsigned stackSlots, unsigned ngslots, JSTraceType* typeMap,
                  VMSideExit* expectedInnerExit, jsbytecode* outerTree,
                  uint32 outerArgc);
                  uint32 outerArgc, MonitorReason monitorReason);
    ~TraceRecorder();

    bool outOfMemory();

    static JS_REQUIRES_STACK JSRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr,
                                                                JSOp op);
    static JS_REQUIRES_STACK AbortableRecordingStatus monitorRecording(JSContext* cx, TraceRecorder* tr,
                                                                       JSOp op);

    JS_REQUIRES_STACK JSTraceType determineSlotType(jsval* vp);

@ -1028,26 +1248,35 @@ public:

    nanojit::Fragment* getFragment() const { return fragment; }
    TreeInfo* getTreeInfo() const { return treeInfo; }
    JS_REQUIRES_STACK bool compile(JSTraceMonitor* tm);
    JS_REQUIRES_STACK bool closeLoop(TypeConsensus &consensus);
    JS_REQUIRES_STACK bool closeLoop(SlotMap& slotMap, VMSideExit* exit, TypeConsensus &consensus);
    JS_REQUIRES_STACK void endLoop();
    JS_REQUIRES_STACK void endLoop(VMSideExit* exit);
    JS_REQUIRES_STACK AbortableRecordingStatus compile(JSTraceMonitor* tm);
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(VMSideExit* exit);
    JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(SlotMap& slotMap, VMSideExit* exit);
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop();
    JS_REQUIRES_STACK AbortableRecordingStatus endLoop(VMSideExit* exit);
    JS_REQUIRES_STACK void joinEdgesToEntry(VMFragment* peer_root);
    JS_REQUIRES_STACK void adjustCallerTypes(nanojit::Fragment* f);
    JS_REQUIRES_STACK VMFragment* findNestedCompatiblePeer(VMFragment* f);
    JS_REQUIRES_STACK void prepareTreeCall(VMFragment* inner);
    JS_REQUIRES_STACK void emitTreeCall(VMFragment* inner, VMSideExit* exit);
    JS_REQUIRES_STACK VMFragment* findNestedCompatiblePeer(VMFragment* f);
    JS_REQUIRES_STACK AbortableRecordingStatus attemptTreeCall(VMFragment* inner,
                                                               uintN& inlineCallCount);
    unsigned getCallDepth() const;

    JS_REQUIRES_STACK JSRecordingStatus record_EnterFrame();
    JS_REQUIRES_STACK JSRecordingStatus record_LeaveFrame();
    JS_REQUIRES_STACK JSRecordingStatus record_SetPropHit(JSPropCacheEntry* entry,
                                                          JSScopeProperty* sprop);
    JS_REQUIRES_STACK JSRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
    JS_REQUIRES_STACK JSRecordingStatus record_NativeCallComplete();
    JS_REQUIRES_STACK void determineGlobalTypes(JSTraceType* typeMap);
    nanojit::LIns* demoteIns(nanojit::LIns* ins);

    TreeInfo* getTreeInfo() { return treeInfo; }
    JS_REQUIRES_STACK VMSideExit* downSnapshot(FrameInfo* downFrame);
    JS_REQUIRES_STACK AbortableRecordingStatus upRecursion();
    JS_REQUIRES_STACK AbortableRecordingStatus downRecursion();
    JS_REQUIRES_STACK AbortableRecordingStatus record_EnterFrame(uintN& inlineCallCount);
    JS_REQUIRES_STACK AbortableRecordingStatus record_LeaveFrame();
    JS_REQUIRES_STACK AbortableRecordingStatus record_SetPropHit(JSPropCacheEntry* entry,
                                                                 JSScopeProperty* sprop);
    JS_REQUIRES_STACK AbortableRecordingStatus record_DefLocalFunSetSlot(uint32 slot, JSObject* obj);
    JS_REQUIRES_STACK AbortableRecordingStatus record_NativeCallComplete();

    void forgetGuardedShapesForObject(JSObject* obj);

#ifdef DEBUG
    JS_REQUIRES_STACK void tprint(const char *format, int count, nanojit::LIns *insa[]);
@ -1070,7 +1299,7 @@ public:
#endif

#define OPDEF(op,val,name,token,length,nuses,ndefs,prec,format) \
    JS_REQUIRES_STACK JSRecordingStatus record_##op();
    JS_REQUIRES_STACK AbortableRecordingStatus record_##op();
# include "jsopcode.tbl"
#undef OPDEF

@ -1082,6 +1311,7 @@ public:
    friend class TypeCompatibilityVisitor;
    friend class SlotMap;
    friend class DefaultSlotMap;
    friend class RecursiveSlotMap;
    friend jsval *js_ConcatPostImacroStackCleanup(uint32 argc, JSFrameRegs &regs,
                                                  TraceRecorder *recorder);
};
@ -1097,14 +1327,14 @@ public:
#define TRACE_ARGS_(x,args)                                                   \
    JS_BEGIN_MACRO                                                            \
        if (TraceRecorder* tr_ = TRACE_RECORDER(cx)) {                        \
            JSRecordingStatus status = tr_->record_##x args;                  \
            if (STATUS_ABORTS_RECORDING(status)) {                            \
            AbortableRecordingStatus status = tr_->record_##x args;           \
            if (StatusAbortsRecording(status)) {                              \
                if (TRACE_RECORDER(cx))                                       \
                    js_AbortRecording(cx, #x);                                \
                if (status == JSRS_ERROR)                                     \
                if (status == ARECORD_ERROR)                                  \
                    goto error;                                               \
            }                                                                 \
            JS_ASSERT(status != JSRS_IMACRO);                                 \
            JS_ASSERT(status != ARECORD_IMACRO);                              \
        }                                                                     \
    JS_END_MACRO

@ -1114,7 +1344,7 @@ public:
#define TRACE_2(x,a,b)  TRACE_ARGS(x, (a, b))

extern JS_REQUIRES_STACK bool
js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount);
js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, MonitorReason reason);

#ifdef DEBUG
# define js_AbortRecording(cx, reason)  js_AbortRecordingImpl(cx, reason)
@ -204,4 +204,97 @@ static JS_INLINE void js_free(void* p) {

JS_END_EXTERN_C

#ifdef __cplusplus

/**
 * The following classes are designed to cause assertions to detect
 * inadvertent use of guard objects as temporaries. In other words,
 * when we have a guard object whose only purpose is its constructor and
 * destructor (and is never otherwise referenced), the intended use
 * might be:
 *     JSAutoTempValueRooter tvr(cx, 1, &val);
 * but it is easy to accidentally write:
 *     JSAutoTempValueRooter(cx, 1, &val);
 * which compiles just fine, but runs the destructor well before the
 * intended time.
 *
 * They work by adding (#ifdef DEBUG) an additional parameter to the
 * guard object's constructor, with a default value, so that users of
 * the guard object's API do not need to do anything. The default value
 * of this parameter is a temporary object. C++ (ISO/IEC 14882:1998),
 * section 12.2 [class.temporary], clauses 4 and 5 seem to assume a
 * guarantee that temporaries are destroyed in the reverse of their
 * construction order, but I actually can't find a statement that that
 * is true in the general case (beyond the two specific cases mentioned
 * there). However, it seems to be true.
 *
 * These classes are intended to be used only via the macros immediately
 * below them:
 *   JS_DECL_USE_GUARD_OBJECT_NOTIFIER declares (ifdef DEBUG) a member
 *     variable, and should be put where a declaration of a private
 *     member variable would be placed.
 *   JS_GUARD_OBJECT_NOTIFIER_PARAM should be placed at the end of the
 *     parameters to each constructor of the guard object; it declares
 *     (ifdef DEBUG) an additional parameter.
 *   JS_GUARD_OBJECT_NOTIFIER_INIT is a statement that belongs in each
 *     constructor. It uses the parameter declared by
 *     JS_GUARD_OBJECT_NOTIFIER_PARAM.
 */
#ifdef DEBUG
class JSGuardObjectNotifier
{
private:
    bool* mStatementDone;
public:
    JSGuardObjectNotifier() : mStatementDone(NULL) {}

    ~JSGuardObjectNotifier() {
        *mStatementDone = true;
    }

    void SetStatementDone(bool *aStatementDone) {
        mStatementDone = aStatementDone;
    }
};

class JSGuardObjectNotificationReceiver
{
private:
    bool mStatementDone;
public:
    JSGuardObjectNotificationReceiver() : mStatementDone(false) {}

    ~JSGuardObjectNotificationReceiver() {
        // Assert that the guard object was not used as a temporary.
        // (Note that this assert might also fire if Init is not called
        // because the guard object's implementation is not using the
        // above macros correctly.)
        JS_ASSERT(mStatementDone);
    }

    void Init(const JSGuardObjectNotifier &aNotifier) {
        // aNotifier is passed as a const reference so that we can pass a
        // temporary, but we really intend it as non-const
        const_cast<JSGuardObjectNotifier&>(aNotifier).
            SetStatementDone(&mStatementDone);
    }
};

#define JS_DECL_USE_GUARD_OBJECT_NOTIFIER \
    JSGuardObjectNotificationReceiver _mCheckNotUsedAsTemporary;
#define JS_GUARD_OBJECT_NOTIFIER_PARAM \
    , const JSGuardObjectNotifier& _notifier = JSGuardObjectNotifier()
#define JS_GUARD_OBJECT_NOTIFIER_INIT \
    JS_BEGIN_MACRO _mCheckNotUsedAsTemporary.Init(_notifier); JS_END_MACRO

#else /* defined(DEBUG) */

#define JS_DECL_USE_GUARD_OBJECT_NOTIFIER
#define JS_GUARD_OBJECT_NOTIFIER_PARAM
#define JS_GUARD_OBJECT_NOTIFIER_INIT JS_BEGIN_MACRO JS_END_MACRO

#endif /* !defined(DEBUG) */
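
To make the intended usage concrete, here is a minimal sketch of a guard class wired up with these macros; the class name and body are illustrative, not part of the patch:

class JSAutoExampleGuard {
  public:
    JSAutoExampleGuard(JSContext *cx JS_GUARD_OBJECT_NOTIFIER_PARAM)
      : mContext(cx) {
        JS_GUARD_OBJECT_NOTIFIER_INIT;
        // acquire the guarded resource here
    }
    ~JSAutoExampleGuard() {
        // release the guarded resource here
    }
  protected:
    JSContext *mContext;
    JS_DECL_USE_GUARD_OBJECT_NOTIFIER
};

// JSAutoExampleGuard guard(cx);   correct: the named guard lives to the end of its scope
// JSAutoExampleGuard(cx, ...);    mistake: a temporary; DEBUG builds assert in
//                                 ~JSGuardObjectNotificationReceiver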

#endif /* defined(__cplusplus) */

#endif /* jsutil_h___ */

@ -204,7 +204,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
 * before deserialization of bytecode. If the saved version does not match
 * the current version, abort deserialization and invalidate the file.
 */
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 54)
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 55)
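
For clarity, a minimal sketch of the consumer-side check the comment above describes, assuming the version word is round-tripped with the existing JS_XDRUint32 primitive (variable names illustrative):

    uint32 version;
    if (!JS_XDRUint32(xdr, &version))
        return JS_FALSE;
    if (version != JSXDR_BYTECODE_VERSION)
        return JS_FALSE;  /* abort deserialization; the caller invalidates the cached file */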

/*
 * Library-private functions.

@ -3385,9 +3385,9 @@ Descendants(JSContext *cx, JSXML *xml, jsval id)

    /*
     * Protect nameqn's object and strings from GC by linking list to it
     * temporarily. The cx->newborn[GCX_OBJECT] GC root protects listobj,
     * which protects list. Any other object allocations occuring beneath
     * DescendantsHelper use local roots.
     * temporarily. The newborn GC root for the last allocated object
     * protects listobj, which protects list. Any other object allocations
     * occurring beneath DescendantsHelper use local roots.
     */
    list->name = nameqn;
    if (!js_EnterLocalRootScope(cx))
@ -7182,9 +7182,7 @@ uint32 xml_serial;
JSXML *
js_NewXML(JSContext *cx, JSXMLClass xml_class)
{
    JSXML *xml;

    xml = (JSXML *) js_NewGCXML(cx, GCX_XML);
    JSXML *xml = js_NewGCXML(cx);
    if (!xml)
        return NULL;

@ -122,7 +122,7 @@ struct JSXML {
    } u;
};

JS_STATIC_ASSERT(JS_ROUNDUP(sizeof(JSXML), sizeof(JSGCThing)) == sizeof(JSXML));
JS_STATIC_ASSERT(sizeof(JSXML) % JSVAL_ALIGN == 0);

/* union member shorthands */
#define xml_kids u.list.kids

@ -460,7 +460,9 @@ FragmentAssembler::sProfId = 0;
FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)
    : mParent(parent), mFragName(fragmentName)
{
    mFragment = new Fragment(NULL verbose_only(, sProfId++));
    mFragment = new Fragment(NULL verbose_only(, (mParent.mLogc.lcbits &
                                                  nanojit::LC_FragProfile) ?
                                                  sProfId++ : 0));
    mFragment->lirbuf = mParent.mLirbuf;
    mFragment->root = mFragment;
    mParent.mFragments[mFragName].fragptr = mFragment;
@ -483,6 +485,8 @@ FragmentAssembler::FragmentAssembler(Lirasm &parent, const string &fragmentName)

    mReturnTypeBits = 0;
    mLir->ins0(LIR_start);
    for (int i = 0; i < nanojit::NumSavedRegs; ++i)
        mLir->insParam(i, 1);

    mLineno = 0;
}
@ -732,8 +736,8 @@ FragmentAssembler::endFragment()
    mFragment->lastIns =
        mLir->insGuard(LIR_x, NULL, createGuardRecord(createSideExit()));

    ::compile(&mParent.mAssm, mFragment, mParent.mAlloc
              verbose_only(, mParent.mLabelMap));
    ::compile(&mParent.mAssm, mFragment
              verbose_only(, mParent.mAlloc, mParent.mLabelMap));

    if (mParent.mAssm.error() != nanojit::None) {
        cerr << "error during assembly: ";

@ -59,7 +59,7 @@ namespace nanojit
        Chunk *c = current_chunk;
        while (c) {
            Chunk *prev = c->prev;
            this->freeChunk(c);
            freeChunk(c);
            c = prev;
        }
        current_chunk = NULL;

@ -53,7 +53,7 @@ namespace nanojit
    class Allocator {
    public:
        Allocator();
        virtual ~Allocator();
        ~Allocator();
        void reset();

        /** alloc memory, never return null. */
@ -67,7 +67,7 @@ namespace nanojit
            return allocSlow(nbytes);
        }

    private:
    protected:
        void* allocSlow(size_t nbytes);
        void fill(size_t minbytes);

@ -81,8 +81,8 @@ namespace nanojit
        char* current_top;
        char* current_limit;

        // allocator SPI
    private:
        // allocator SPI

        /** allocate another block from a host provided allocator */
        void* allocChunk(size_t nbytes);

@ -41,6 +41,10 @@

#ifdef FEATURE_NANOJIT

#ifdef VTUNE
#include "../core/CodegenLIR.h"
#endif

namespace nanojit
{
#ifdef NJ_VERBOSE
@ -105,6 +109,12 @@ namespace nanojit
        , _labels(alloc)
        , _epilogue(NULL)
        , _err(None)
#if PEDANTIC
        , pedanticTop(NULL)
#endif
#ifdef VTUNE
        , cgen(NULL)
#endif
        , config(core->config)
    {
        VMPI_memset(&_stats, 0, sizeof(_stats));
@ -193,6 +203,14 @@ namespace nanojit
        verbose_only( nBytes += (end - start) * sizeof(NIns); )
        NanoAssert(uintptr_t(end) - uintptr_t(start) >= (size_t)LARGEST_UNDERRUN_PROT);
        eip = end;

#ifdef VTUNE
        if (_nIns && _nExitIns) {
            //cgen->jitAddRecord((uintptr_t)list->code, 0, 0, true); // add placeholder record for top of page
            cgen->jitCodePosUpdate((uintptr_t)list->code);
            cgen->jitPushInfo(); // new page requires new entry
        }
#endif
    }

    void Assembler::reset()
@ -337,10 +355,14 @@ namespace nanojit

    Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
    {
#if !PEDANTIC
        if (i->isop(LIR_alloc)) {
            d += findMemFor(i);
            return FP;
        }
#else
        (void) d;
#endif
        return findRegFor(i, allow);
    }

@ -1347,7 +1369,29 @@ namespace nanojit
                evictScratchRegs();

                asm_call(ins);
                break;
            }

#ifdef VTUNE
            case LIR_file:
            {
                // we traverse backwards so we are now hitting the file
                // that is associated with a bunch of LIR_lines we already have seen
                uintptr_t currentFile = ins->oprnd1()->imm32();
                cgen->jitFilenameUpdate(currentFile);
                break;
            }
            case LIR_line:
            {
                // add a new table entry, we don't yet know which file it belongs
                // to so we need to add it to the update table too
                // note the alloc, actual act is delayed; see above
                uint32_t currentLine = (uint32_t) ins->oprnd1()->imm32();
                cgen->jitLineNumUpdate(currentLine);
                cgen->jitAddRecord((uintptr_t)_nIns, 0, currentLine, true);
                break;
            }
#endif // VTUNE
        }

#ifdef NJ_VERBOSE
@ -1394,6 +1438,10 @@ namespace nanojit
        if (error())
            return;

#ifdef VTUNE
        cgen->jitCodePosUpdate((uintptr_t)_nIns);
#endif

        // check that all is well (don't check in exit paths since it's more complicated)
        debug_only( pageValidate(); )
        debug_only( resourceConsistencyCheck(); )

@ -107,6 +107,10 @@ namespace nanojit
    typedef SeqBuilder<NIns*> NInsList;
    typedef HashMap<NIns*, LIns*> NInsMap;

#ifdef VTUNE
    class avmplus::CodegenLIR;
#endif

    class LabelState
    {
    public:
@ -166,10 +170,13 @@ namespace nanojit
        LogControl* _logc;
        size_t codeBytes;
        size_t exitBytes;
#endif // NJ_VERBOSE

#ifdef VTUNE
        avmplus::CodegenLIR *cgen;
#endif

        Assembler(CodeAlloc& codeAlloc, Allocator& alloc, AvmCore* core, LogControl* logc);
        ~Assembler() {}

        void endAssembly(Fragment* frag);
        void assemble(Fragment* frag);
@ -253,6 +260,9 @@ namespace nanojit
        NIns* _nExitIns; // current instruction in exit fragment page
        NIns* _epilogue;
        AssmError _err;  // 0 = means assemble() appears ok, otherwise it failed
#if PEDANTIC
        NIns* pedanticTop;
#endif

        AR _activation;
        RegAlloc _allocator;

@ -1012,6 +1012,13 @@ namespace nanojit

    class LirWriter
    {
        LInsp insDisp(LInsp base, int32_t& d) {
            if (!isValidDisplacement(d)) {
                base = ins2i(LIR_piadd, base, d);
                d = 0;
            }
            return base;
        }
    public:
        LirWriter *out;

@ -1052,9 +1059,11 @@ namespace nanojit
            return out->insImmf(d);
        }
        virtual LInsp insLoad(LOpcode op, LIns* base, int32_t d) {
            base = insDisp(base, d);
            return out->insLoad(op, base, d);
        }
        virtual LInsp insStorei(LIns* value, LIns* base, int32_t d) {
            base = insDisp(base, d);
            return out->insStorei(value, base, d);
        }
        virtual LInsp insCall(const CallInfo *call, LInsp args[]) {
@ -1067,6 +1076,13 @@ namespace nanojit
        virtual LInsp insSkip(size_t size) {
            return out->insSkip(size);
        }
        void insAssert(LIns* expr) {
#if defined DEBUG
            LIns* branch = insBranch(LIR_jt, expr, NULL);
            ins0(LIR_dbreak);
            branch->setTarget(ins0(LIR_label));
#endif
        }

        // convenience functions

@ -1101,6 +1117,7 @@ namespace nanojit
        class Entry
        {
        public:
            Entry(int) : name(0), size(0), align(0) {}
            Entry(char *n, size_t s, size_t a) : name(n),size(s),align(a) {}
            char* name;
            size_t size:29, align:3;
@ -1139,6 +1156,7 @@ namespace nanojit
        class Entry
        {
        public:
            Entry(int) : name(0) {}
            Entry(char* n) : name(n) {}
            char* name;
        };
@ -1344,6 +1362,9 @@ namespace nanojit
        LInsp state,param1,sp,rp;
        LInsp savedRegs[NumSavedRegs];

    protected:
        friend class LirBufWriter;

        /** each chunk is just a raw area of LIns instances, with no header
            and no more than 8-byte alignment. The chunk size is somewhat arbitrary
            as long as it's well larger than 2*sizeof(LInsSk) */
@ -1359,9 +1380,6 @@ namespace nanojit
         * itself. */
        static const size_t MAX_SKIP_PAYLOAD_SZB = MAX_LINS_SZB - sizeof(LInsSk);

    protected:
        friend class LirBufWriter;

        /** get CHUNK_SZB more memory for LIR instructions */
        void chunkAlloc();
        void moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk);

@ -74,7 +74,7 @@
OPDEF(start, 0, 0, Op0) // start of a fragment
OPDEF(regfence, 1, 0, Op0) // register fence, no register allocation is allowed across this meta instruction
OPDEF(skip, 2, 1, Sk) // holds blobs ("payloads") of data; also links pages
OPDEF(unused3, 3,-1, None)
OPDEF(dbreak, 3, 0, Op0)
OPDEF(unused4, 4,-1, None)
OPDEF(unused5, 5,-1, None)
OPDEF(unused6, 6,-1, None)

@ -161,6 +161,13 @@ static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5
static const RegisterMask GpRegs = 0xFFFF;
static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;

#define isS12(offs) ((-(1<<12)) <= (offs) && (offs) < (1<<12))
#define isU12(offs) (((offs) & 0xfff) == (offs))

static inline bool isValidDisplacement(int32_t d) {
    return isS12(d);
}

#define IsFpReg(_r) ((rmask((Register)_r) & (FpRegs)) != 0)
#define IsGpReg(_r) ((rmask((Register)_r) & (GpRegs)) != 0)
#define FpRegNum(_fpr) ((_fpr) - FirstFloatReg)
@ -307,13 +314,14 @@ enum {
    NanoAssert(IsGpReg(rd) && IsGpReg(rl));\
    NanoAssert(isOp2Imm(op2imm));\
    *(--_nIns) = (NIns) ((cond)<<28 | OP_IMM | (ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (op2imm));\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn) { \
        asm_output("%s%s%s %s, #0x%X", #op, condNames[cond], (S)?"s":"", gpn(rd), decOp2Imm(op2imm));\
    else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
    } else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) { \
        NanoAssert(S==1);\
        asm_output("%s%s %s, #0x%X", #op, condNames[cond], gpn(rl), decOp2Imm(op2imm));\
    } else\
    } else { \
        asm_output("%s%s%s %s, %s, #0x%X", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), decOp2Imm(op2imm));\
    }\
} while (0)

// ALU operation with two register arguments
@ -329,13 +337,14 @@ enum {
    NanoAssert(((S)==0) || ((S)==1));\
    NanoAssert(IsGpReg(rd) && IsGpReg(rl) && IsGpReg(rr));\
    *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (rr));\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn) { \
        asm_output("%s%s%s %s, %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr));\
    else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
    } else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) { \
        NanoAssert(S==1);\
        asm_output("%s%s %s, %s", #op, condNames[cond], gpn(rl), gpn(rr));\
    } else\
    } else { \
        asm_output("%s%s%s %s, %s, %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr));\
    }\
} while (0)

// ALU operation with two register arguments, with rr operated on by a shift and shift immediate
@ -354,13 +363,14 @@ enum {
    NanoAssert(IsShift(sh));\
    NanoAssert((imm)>=0 && (imm)<32);\
    *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (imm)<<7 | (sh)<<4 | (rr));\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn) { \
        asm_output("%s%s%s %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr), shiftNames[sh], (imm));\
    else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
    } else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) { \
        NanoAssert(S==1);\
        asm_output("%s%s %s, %s, %s #%d", #op, condNames[cond], gpn(rl), gpn(rr), shiftNames[sh], (imm));\
    } else\
    } else { \
        asm_output("%s%s%s %s, %s, %s, %s #%d", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr), shiftNames[sh], (imm));\
    }\
} while (0)

// ALU operation with two register arguments, with rr operated on by a shift and shift register
@ -378,13 +388,14 @@ enum {
    NanoAssert(IsGpReg(rd) && IsGpReg(rl) && IsGpReg(rr) && IsGpReg(rs));\
    NanoAssert(IsShift(sh));\
    *(--_nIns) = (NIns) ((cond)<<28 |(ARM_##op)<<21 | (S)<<20 | (rl)<<16 | (rd)<<12 | (rs)<<8 | (sh)<<4 | (rr));\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn)\
    if (ARM_##op == ARM_mov || ARM_##op == ARM_mvn) { \
        asm_output("%s%s%s %s, %s, %s %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rr), shiftNames[sh], gpn(rs));\
    else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) {\
    } else if (ARM_##op >= ARM_tst && ARM_##op <= ARM_cmn) { \
        NanoAssert(S==1);\
        asm_output("%s%s %s, %s, %s %s", #op, condNames[cond], gpn(rl), gpn(rr), shiftNames[sh], gpn(rs));\
    } else\
    } else { \
        asm_output("%s%s%s %s, %s, %s, %s %s", #op, condNames[cond], (S)?"s":"", gpn(rd), gpn(rl), gpn(rr), shiftNames[sh], gpn(rs));\
    }\
} while (0)

// --------
@ -647,8 +658,6 @@ enum {
// PC always points to current instruction + 8, so when calculating pc-relative
// offsets, use PC+8.
#define PC_OFFSET_FROM(target,frompc) ((intptr_t)(target) - ((intptr_t)(frompc) + 8))
#define isS12(offs) ((-(1<<12)) <= (offs) && (offs) < (1<<12))
#define isU12(offs) (((offs) & 0xfff) == (offs))

#define B_cond(_c,_t) \
    B_cond_chk(_c,_t,1)

@ -254,6 +254,9 @@ namespace nanojit
    static const int NumSavedRegs = 18; // R13-R30
#endif

    static inline bool isValidDisplacement(int32_t d) {
        return true;
    }
    static inline bool IsFpReg(Register r) {
        return r >= F0;
    }

@ -181,6 +181,10 @@ namespace nanojit
        1<<F22;
    static const RegisterMask AllowableFlagRegs = GpRegs;

    static inline bool isValidDisplacement(int32_t d) {
        return true;
    }

#define nextreg(r) Register(r+1)

    verbose_only( extern const char* regNames[]; )

@ -70,7 +70,7 @@ tracing
namespace nanojit
{
    const Register Assembler::retRegs[] = { RAX };
#ifdef _MSC_VER
#ifdef _WIN64
    const Register Assembler::argRegs[] = { RCX, RDX, R8, R9 };
    const Register Assembler::savedRegs[] = { RBX, RSI, RDI, R12, R13, R14, R15 };
#else
@ -588,7 +588,7 @@ namespace nanojit
        emit(X64_callrax);
    }

#ifdef _MSC_VER
#ifdef _WIN64
    int stk_used = 32; // always reserve 32byte shadow area
#else
    int stk_used = 0;
@ -604,7 +604,7 @@ namespace nanojit
            asm_regarg(sz, arg, argRegs[arg_index]);
            arg_index++;
        }
#ifdef _MSC_VER
#ifdef _WIN64
        else if (sz == ARGSIZE_F && arg_index < NumArgRegs) {
            // double goes in XMM reg # based on overall arg_index
            asm_regarg(sz, arg, Register(XMM0+arg_index));
@ -1153,8 +1153,8 @@ namespace nanojit
        uint32_t kind = ins->paramKind();
        if (kind == 0) {
            // ordinary param
            // first six args always in registers for mac x64
            if (a < 6) {
            // first four or six args always in registers for x86_64 ABI
            if (a < (uint32_t)NumArgRegs) {
                // incoming arg in register
                prepResultReg(ins, rmask(argRegs[a]));
            } else {
@ -1281,7 +1281,7 @@ namespace nanojit
    void Assembler::nRegisterResetAll(RegAlloc &a) {
        // add scratch registers to our free list for the allocator
        a.clear();
#ifdef _MSC_VER
#ifdef _WIN64
        a.free = 0x001fffcf; // rax-rbx, rsi, rdi, r8-r15, xmm0-xmm5
#else
        a.free = 0xffffffff & ~(1<<RSP | 1<<RBP);
@ -1316,7 +1316,7 @@ namespace nanojit
    }

    Register Assembler::nRegisterAllocFromSet(RegisterMask set) {
#if defined _WIN64
#if defined _MSC_VER
        DWORD tr;
        _BitScanForward(&tr, set);
        _allocator.free &= ~rmask((Register)tr);

@ -320,6 +320,9 @@ namespace nanojit
    static const int NumArgRegs = 6;
#endif

    static inline bool isValidDisplacement(int32_t d) {
        return true;
    }
    static inline bool IsFpReg(Register r) {
        return ((1<<r) & FpRegs) != 0;
    }

@ -265,12 +265,6 @@ namespace nanojit
        btr RegAlloc::free[ecx], eax // free &= ~rmask(i)
        mov r, eax
    }
#elif defined WIN64
    unsigned long tr, fr;
    _BitScanForward(&tr, set);
    _bittestandreset(&fr, tr);
    regs.free = fr;
    r = tr;
#else
    asm(
        "bsf %1, %%eax\n\t"
@ -1231,6 +1225,24 @@ namespace nanojit
        }
    }

    // negateMask is used by asm_fneg.
#if defined __SUNPRO_CC
    // From Sun Studio C++ Readme: #pragma align inside namespace requires mangled names.
    // Initialize here to avoid multithreading contention issues during initialization.
    static uint32_t negateMask_temp[] = {0, 0, 0, 0, 0, 0, 0};

    static uint32_t* negateMaskInit()
    {
        uint32_t* negateMask = (uint32_t*)alignUp(negateMask_temp, 16);
        negateMask[1] = 0x80000000;
        return negateMask;
    }

    static uint32_t *negateMask = negateMaskInit();
#else
    static const AVMPLUS_ALIGN16(uint32_t) negateMask[] = {0,0x80000000,0,0};
#endif

    void Assembler::asm_fneg(LInsp ins)
    {
        if (config.sse2)
@ -1255,14 +1267,6 @@ namespace nanojit
            }
        }

#if defined __SUNPRO_CC
        // from Sun Studio C++ Readme: #pragma align inside namespace requires mangled names
        static uint32_t temp[] = {0, 0, 0, 0, 0, 0, 0};
        static uint32_t *negateMask = (uint32_t *)alignUp(temp, 16);
        negateMask[1] = 0x80000000;
#else
        static const AVMPLUS_ALIGN16(uint32_t) negateMask[] = {0,0x80000000,0,0};
#endif
        SSE_XORPD(rr, negateMask);

        if (rr != ra)

@ -152,6 +152,10 @@ namespace nanojit

    static const RegisterMask AllowableFlagRegs = 1<<EAX |1<<ECX | 1<<EDX | 1<<EBX;

    static inline bool isValidDisplacement(int32_t d) {
        return true;
    }

#define _rmask_(r) (1<<(r))
#define _is_xmm_reg_(r) ((_rmask_(r)&XmmRegs)!=0)
#define _is_x87_reg_(r) ((_rmask_(r)&x87Regs)!=0)

js/src/nanojit/VMPI.cpp (new file)
@ -0,0 +1,152 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version 1.1 (the
 * "License"); you may not use this file except in compliance with the License. You may obtain
 * a copy of the License at http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis, WITHOUT
 * WARRANTY OF ANY KIND, either express or implied. See the License for the specific
 * language governing rights and limitations under the License.
 *
 * The Original Code is [Open Source Virtual Machine.]
 *
 * The Initial Developer of the Original Code is Adobe System Incorporated. Portions created
 * by the Initial Developer are Copyright (C)[ 2004-2006 ] Adobe Systems Incorporated. All Rights
 * Reserved.
 *
 * Contributor(s): Adobe AS3 Team
 *                 Andreas Gal <gal@mozilla.com>
 *
 * Alternatively, the contents of this file may be used under the terms of either the GNU
 * General Public License Version 2 or later (the "GPL"), or the GNU Lesser General Public
 * License Version 2.1 or later (the "LGPL"), in which case the provisions of the GPL or the
 * LGPL are applicable instead of those above. If you wish to allow use of your version of this
 * file only under the terms of either the GPL or the LGPL, and not to allow others to use your
 * version of this file under the terms of the MPL, indicate your decision by deleting provisions
 * above and replace them with the notice and other provisions required by the GPL or the
 * LGPL. If you do not delete the provisions above, a recipient may use your version of this file
 * under the terms of any one of the MPL, the GPL or the LGPL.
 *
 ***** END LICENSE BLOCK ***** */

#include "nanojit.h"

#ifdef SOLARIS
#include <ucontext.h>
#include <dlfcn.h>
#include <procfs.h>
#include <sys/stat.h>
extern "C" caddr_t _getfp(void);
typedef caddr_t maddr_ptr;
#else
typedef void *maddr_ptr;
#endif

using namespace avmplus;

#ifdef WIN32
void
VMPI_setPageProtection(void *address,
                       size_t size,
                       bool executableFlag,
                       bool writeableFlag)
{
    DWORD oldProtectFlags = 0;
    DWORD newProtectFlags = 0;
    if ( executableFlag && writeableFlag ) {
        newProtectFlags = PAGE_EXECUTE_READWRITE;
    } else if ( executableFlag ) {
        newProtectFlags = PAGE_EXECUTE_READ;
    } else if ( writeableFlag ) {
        newProtectFlags = PAGE_READWRITE;
    } else {
        newProtectFlags = PAGE_READONLY;
    }

    BOOL retval;
    MEMORY_BASIC_INFORMATION mbi;
    do {
        VirtualQuery(address, &mbi, sizeof(MEMORY_BASIC_INFORMATION));
        size_t markSize = size > mbi.RegionSize ? mbi.RegionSize : size;

        retval = VirtualProtect(address, markSize, newProtectFlags, &oldProtectFlags);
        NanoAssert(retval);

        address = (char*) address + markSize;
        size -= markSize;
    } while(size > 0 && retval);

    // We should not be clobbering PAGE_GUARD protections
    NanoAssert((oldProtectFlags & PAGE_GUARD) == 0);
}

#elif defined(AVMPLUS_OS2)

void
VMPI_setPageProtection(void *address,
                       size_t size,
                       bool executableFlag,
                       bool writeableFlag)
{
    ULONG flags = PAG_READ;
    if (executableFlag) {
        flags |= PAG_EXECUTE;
    }
    if (writeableFlag) {
        flags |= PAG_WRITE;
    }
    address = (void*)((size_t)address & ~(0xfff));
    size = (size + 0xfff) & ~(0xfff);

    ULONG attribFlags = PAG_FREE;
    while (size) {
        ULONG attrib;
        ULONG range = size;
        ULONG retval = DosQueryMem(address, &range, &attrib);
        AvmAssert(retval == 0);

        // exit if this is the start of the next memory object
        if (attrib & attribFlags) {
            break;
        }
        attribFlags |= PAG_BASE;

        range = size > range ? range : size;
        retval = DosSetMem(address, range, flags);
        AvmAssert(retval == 0);

        address = (char*)address + range;
        size -= range;
    }
}

#else // !WIN32 && !AVMPLUS_OS2

void VMPI_setPageProtection(void *address,
                            size_t size,
                            bool executableFlag,
                            bool writeableFlag)
{
    int bitmask = sysconf(_SC_PAGESIZE) - 1;
    // mprotect requires that the addresses be aligned on page boundaries
    void *endAddress = (void*) ((char*)address + size);
    void *beginPage = (void*) ((size_t)address & ~bitmask);
    void *endPage = (void*) (((size_t)endAddress + bitmask) & ~bitmask);
    size_t sizePaged = (size_t)endPage - (size_t)beginPage;

    int flags = PROT_READ;
    if (executableFlag) {
        flags |= PROT_EXEC;
    }
    if (writeableFlag) {
        flags |= PROT_WRITE;
    }
    int retval = mprotect((maddr_ptr)beginPage, (unsigned int)sizePaged, flags);
    AvmAssert(retval == 0);
    (void)retval;
}

#endif // WIN32
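
As a worked instance of the page-rounding arithmetic above, assume a 4096-byte page (so bitmask = 0xfff) and the illustrative inputs address = 0x1234, size = 10:

    // endAddress = 0x1234 + 10               = 0x123e
    // beginPage  = 0x1234 & ~0xfff           = 0x1000
    // endPage    = (0x123e + 0xfff) & ~0xfff = 0x2000
    // sizePaged  = 0x2000 - 0x1000           = 0x1000, i.e. one whole page passed to mprotect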

js/src/nanojit/VMPI.h (new file)
@ -0,0 +1,92 @@
/* -*- Mode: C++; c-basic-offset: 4; indent-tabs-mode: nil; tab-width: 4 -*- */
/* vi: set ts=4 sw=4 expandtab: (add to ~/.vimrc: set modeline modelines=5) */
/* ***** BEGIN LICENSE BLOCK *****
 * Version: MPL 1.1/GPL 2.0/LGPL 2.1
 *
 * The contents of this file are subject to the Mozilla Public License Version
 * 1.1 (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 * http://www.mozilla.org/MPL/
 *
 * Software distributed under the License is distributed on an "AS IS" basis,
 * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
 * for the specific language governing rights and limitations under the
 * License.
 *
 * The Original Code is [Open Source Virtual Machine].
 *
 * The Initial Developer of the Original Code is
 * Adobe System Incorporated.
 * Portions created by the Initial Developer are Copyright (C) 2004-2007
 * the Initial Developer. All Rights Reserved.
 *
 * Contributor(s):
 *   Adobe AS3 Team
 *
 * Alternatively, the contents of this file may be used under the terms of
 * either the GNU General Public License Version 2 or later (the "GPL"), or
 * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
 * in which case the provisions of the GPL or the LGPL are applicable instead
 * of those above. If you wish to allow use of your version of this file only
 * under the terms of either the GPL or the LGPL, and not to allow others to
 * use your version of this file under the terms of the MPL, indicate your
 * decision by deleting the provisions above and replace them with the notice
 * and other provisions required by the GPL or the LGPL. If you do not delete
 * the provisions above, a recipient may use your version of this file under
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

/*
 * Stub VMPI implementation to support standalone nanojit repository.
 *
 * Really only works if you *don't* have a busted-up C library.
 */

#ifndef __VMPI_h__
#define __VMPI_h__

#include <assert.h>
#include <string.h>
#include <stdio.h>
#include <ctype.h>
#include <stdlib.h>
#include <stddef.h>

#if defined(AVMPLUS_UNIX) || defined(AVMPLUS_OS2)
#include <unistd.h>
#include <sys/mman.h>
#endif

#ifdef AVMPLUS_WIN32
#if ! defined(_STDINT_H)
typedef signed char int8_t;
typedef signed short int16_t;
typedef signed int int32_t;
typedef signed __int64 int64_t;
typedef unsigned char uint8_t;
typedef unsigned short uint16_t;
typedef unsigned int uint32_t;
typedef unsigned __int64 uint64_t;
#endif
#else
#include <stdint.h>
#include <inttypes.h>
#endif

#define VMPI_strlen strlen
#define VMPI_strcat strcat
#define VMPI_strcmp strcmp
#define VMPI_strncat strncat
#define VMPI_strcpy strcpy
#define VMPI_sprintf sprintf
#define VMPI_memset memset
#define VMPI_isdigit isdigit
#define VMPI_getDate()

extern void VMPI_setPageProtection(void *address,
                                   size_t size,
                                   bool executableFlag,
                                   bool writeableFlag);

#endif
@ -54,7 +54,6 @@ avmplus::AvmLog(char const *msg, ...) {
|
||||
}
|
||||
|
||||
#ifdef _DEBUG
|
||||
// NanoAssertFail matches JS_Assert in jsutil.cpp.
|
||||
void NanoAssertFail()
|
||||
{
|
||||
#if defined(WIN32)
|
||||
@ -68,111 +67,6 @@ void NanoAssertFail()
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifdef WIN32
|
||||
void
|
||||
VMPI_setPageProtection(void *address,
|
||||
size_t size,
|
||||
bool executableFlag,
|
||||
bool writeableFlag)
|
||||
{
|
||||
DWORD oldProtectFlags = 0;
|
||||
DWORD newProtectFlags = 0;
|
||||
if ( executableFlag && writeableFlag ) {
|
||||
newProtectFlags = PAGE_EXECUTE_READWRITE;
|
||||
} else if ( executableFlag ) {
|
||||
newProtectFlags = PAGE_EXECUTE_READ;
|
||||
} else if ( writeableFlag ) {
|
||||
newProtectFlags = PAGE_READWRITE;
|
||||
} else {
|
||||
newProtectFlags = PAGE_READONLY;
|
||||
}
|
||||
|
||||
BOOL retval;
|
||||
MEMORY_BASIC_INFORMATION mbi;
|
||||
do {
|
||||
VirtualQuery(address, &mbi, sizeof(MEMORY_BASIC_INFORMATION));
|
||||
size_t markSize = size > mbi.RegionSize ? mbi.RegionSize : size;
|
||||
|
||||
retval = VirtualProtect(address, markSize, newProtectFlags, &oldProtectFlags);
|
||||
NanoAssert(retval);
|
||||
|
||||
address = (char*) address + markSize;
|
||||
size -= markSize;
|
||||
} while(size > 0 && retval);
|
||||
|
||||
// We should not be clobbering PAGE_GUARD protections
|
||||
NanoAssert((oldProtectFlags & PAGE_GUARD) == 0);
|
||||
}
|
||||
|
||||
#elif defined(AVMPLUS_OS2)
|
||||
|
||||
void
|
||||
VMPI_setPageProtection(void *address,
|
||||
size_t size,
|
||||
bool executableFlag,
|
||||
bool writeableFlag)
|
||||
{
|
||||
ULONG flags = PAG_READ;
|
||||
if (executableFlag) {
|
||||
flags |= PAG_EXECUTE;
|
||||
}
|
||||
if (writeableFlag) {
|
||||
flags |= PAG_WRITE;
|
||||
}
|
||||
address = (void*)((size_t)address & ~(0xfff));
|
||||
size = (size + 0xfff) & ~(0xfff);
|
||||
|
||||
ULONG attribFlags = PAG_FREE;
|
||||
while (size) {
|
||||
ULONG attrib;
|
||||
ULONG range = size;
|
||||
ULONG retval = DosQueryMem(address, &range, &attrib);
|
||||
AvmAssert(retval == 0);
|
||||
|
||||
// exit if this is the start of the next memory object
|
||||
if (attrib & attribFlags) {
|
||||
break;
|
||||
}
|
||||
attribFlags |= PAG_BASE;
|
||||
|
||||
range = size > range ? range : size;
|
||||
retval = DosSetMem(address, range, flags);
|
||||
AvmAssert(retval == 0);
|
||||
|
||||
address = (char*)address + range;
|
||||
size -= range;
|
||||
}
|
||||
}
|
||||
|
||||
#else // !WIN32 && !AVMPLUS_OS2
|
||||
|
||||
void VMPI_setPageProtection(void *address,
|
||||
size_t size,
|
||||
bool executableFlag,
|
||||
bool writeableFlag)
|
||||
{
|
||||
int bitmask = sysconf(_SC_PAGESIZE) - 1;
|
||||
// mprotect requires that the addresses be aligned on page boundaries
|
||||
void *endAddress = (void*) ((char*)address + size);
|
||||
void *beginPage = (void*) ((size_t)address & ~bitmask);
|
||||
void *endPage = (void*) (((size_t)endAddress + bitmask) & ~bitmask);
|
||||
size_t sizePaged = (size_t)endPage - (size_t)beginPage;
|
||||
|
||||
int flags = PROT_READ;
|
||||
if (executableFlag) {
|
||||
flags |= PROT_EXEC;
|
||||
}
|
||||
if (writeableFlag) {
|
||||
flags |= PROT_WRITE;
|
||||
}
|
||||
int retval = mprotect((maddr_ptr)beginPage, (unsigned int)sizePaged, flags);
|
||||
AvmAssert(retval == 0);
|
||||
(void)retval;
|
||||
}
|
||||
|
||||
#endif // WIN32
|
||||
|
||||
|
||||
#ifdef WINCE
|
||||
|
||||
// Due to the per-process heap slots on Windows Mobile, we can often run in to OOM
|
||||
|
@ -36,19 +36,11 @@
|
||||
#ifndef avm_h___
|
||||
#define avm_h___
|
||||
|
||||
#include <assert.h>
|
||||
#include <string.h>
|
||||
#include <stdio.h>
|
||||
#include <ctype.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#if defined(AVMPLUS_UNIX) || defined(AVMPLUS_OS2)
|
||||
#include <unistd.h>
|
||||
#include <sys/mman.h>
|
||||
#if defined(HAVE_CONFIG_H) && defined(NANOJIT_CENTRAL)
|
||||
#include "config.h"
|
||||
#endif
|
||||
|
||||
#include "jstypes.h"
|
||||
#include "jsstdint.h"
|
||||
#include "VMPI.h"
|
||||
|
||||
#if !defined(AVMPLUS_LITTLE_ENDIAN) && !defined(AVMPLUS_BIG_ENDIAN)
|
||||
#ifdef IS_BIG_ENDIAN
|
||||
@ -104,17 +96,15 @@ void NanoAssertFail();
|
||||
#define AvmAssertMsg(x, y)
|
||||
#define AvmDebugLog(x) printf x
|
||||
|
||||
#if defined(AVMPLUS_IA32)
|
||||
#if defined(_MSC_VER)
|
||||
__declspec(naked) static inline __int64 rdtsc()
|
||||
#if defined(_M_IX86) || defined(_M_AMD64)
|
||||
// Visual C++ for x86 and x64 uses compiler intrinsics
|
||||
static inline unsigned __int64 rdtsc(void)
|
||||
{
|
||||
__asm
|
||||
{
|
||||
rdtsc;
|
||||
ret;
|
||||
}
|
||||
return __rdtsc();
|
||||
}
|
||||
#elif defined(SOLARIS)
|
||||
|
||||
#elif defined(AVMPLUS_IA32)
|
||||
#if defined(SOLARIS)
|
||||
static inline unsigned long long rdtsc(void)
|
||||
{
|
||||
unsigned long long int x;
|
||||
@ -177,20 +167,6 @@ struct JSContext;
|
||||
# define PERFM_TPROF_END()
|
||||
#endif
|
||||
|
||||
#define VMPI_strlen strlen
|
||||
#define VMPI_strcat strcat
|
||||
#define VMPI_strncat strncat
|
||||
#define VMPI_strcpy strcpy
|
||||
#define VMPI_sprintf sprintf
|
||||
#define VMPI_memset memset
|
||||
#define VMPI_isdigit isdigit
|
||||
#define VMPI_getDate()
|
||||
|
||||
extern void VMPI_setPageProtection(void *address,
|
||||
size_t size,
|
||||
bool executableFlag,
|
||||
bool writeableFlag);
|
||||
|
||||
namespace avmplus {
|
||||
|
||||
typedef int FunctionID;
|
||||
|

js/src/tests/Makefile.in (new file)
@ -0,0 +1,78 @@
# vim: set shiftwidth=8 tabstop=8 autoindent noexpandtab copyindent:
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License Version
# 1.1 (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS" basis,
# WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
# for the specific language governing rights and limitations under the
# License.
#
# The Original Code is Mozilla's javascript tests.
#
# The Initial Developer of the Original Code is the Mozilla Foundation.
# Portions created by the Initial Developer are Copyright (C) 2009
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****

DEPTH     = ..
topsrcdir = @top_srcdir@
srcdir    = @srcdir@
VPATH     = @srcdir@

include $(DEPTH)/config/autoconf.mk

MODULE = jsreftest

include $(topsrcdir)/config/rules.mk

# test files to be packaged.
TEST_FILES = \
	jsreftest.html \
	shell.js \
	browser.js \
	js-test-driver-end.js \
	user.js \
	jstests.list \
	e4x/ \
	ecma/ \
	ecma_2/ \
	ecma_3/ \
	ecma_3_1/ \
	ecma_5/ \
	js1_1/ \
	js1_2/ \
	js1_3/ \
	js1_4/ \
	js1_5/ \
	js1_6/ \
	js1_7/ \
	js1_8/ \
	js1_8_1/ \
	$(NULL)

PKG_STAGE = $(DIST)/test-package-stage

# stage tests for packaging
stage-package:
	$(NSINSTALL) -D $(PKG_STAGE)/jsreftest/tests
	@(cd $(srcdir) && tar $(TAR_CREATE_FLAGS) - $(TEST_FILES)) | (cd $(PKG_STAGE)/jsreftest/tests && tar -xf -)
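
For reference, a hypothetical invocation of the staging target from a built tree; the object-directory path is an assumption, not part of the patch:

	make -C $(OBJDIR)/js/src/tests stage-package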

@ -34,7 +34,6 @@
 * the terms of any one of the MPL, the GPL or the LGPL.
 *
 * ***** END LICENSE BLOCK ***** */

var gPageCompleted;
var GLOBAL = this + '';

@ -164,7 +163,7 @@ function jsdgc()
  }
  catch(ex)
  {
    print('gc: ' + ex);
    print('jsdgc: ' + ex);
  }
}

@ -200,6 +199,7 @@ function Preferences(aPrefRoot)
  }
  catch(ex)
  {
    print('Preferences: ' + ex);
  }

}
@ -222,6 +222,7 @@ function Preferences_getPrefRoot()
  }
  catch(ex)
  {
    print('Preferences_getPrefRoot: ' + ex);
  }
  return root;
}
@ -242,6 +243,7 @@ function Preferences_getPref(aPrefName)
  }
  catch(ex)
  {
    //print('Preferences_getPref: ' + ex);
  }
  return value;
}
@ -262,6 +264,7 @@ function Preferences_getBoolPref(aPrefName)
  }
  catch(ex)
  {
    //print('Preferences_getBoolPref: ' + ex);
  }
  return value;
}
@ -282,6 +285,7 @@ function Preferences_getIntPref(aPrefName)
  }
  catch(ex)
  {
    //print('Preferences_getIntPref: ' + ex);
  }
  return value;
}
@ -302,6 +306,7 @@ function Preferences_getCharPref(aPrefName)
  }
  catch(ex)
  {
    //print('Preferences_getCharPref: ' + ex);
  }
  return value;
}
@ -329,6 +334,7 @@ function Preferences_setPref(aPrefName, aPrefValue)
  }
  catch(ex)
  {
    print('Preferences_setCharPref: ' + ex);
  }
}

@ -355,6 +361,7 @@ function Preferences_setBoolPref(aPrefName, aPrefValue)
  }
  catch(ex)
  {
    print('Preferences_setBoolPref: ' + ex);
  }
}

@ -381,6 +388,7 @@ function Preferences_setIntPref(aPrefName, aPrefValue)
  }
  catch(ex)
  {
    print('Preferences_setIntPref: ' + ex);
  }
}

@ -407,6 +415,7 @@ function Preferences_setCharPref(aPrefName, aPrefValue)
  }
  catch(ex)
  {
    print('Preferences_setCharPref: ' + ex);
  }
}

@ -436,6 +445,7 @@ function Preferences_resetPref(aPrefName)
  }
  catch(ex)
  {
    print('Preferences_resetPref: ' + ex);
  }
}

@ -460,6 +470,7 @@ function Preferences_resetAllPrefs()
  }
  catch(ex)
  {
    print('Preferences_resetAllPrefs: ' + ex);
  }
}

@ -478,6 +489,7 @@ function Preferences_clearPref(aPrefName)
  }
  catch(ex)
  {
    print('Preferences_clearPref: ' + ex);
  }
}

@ -596,6 +608,7 @@ var gVersion = 150;

function jsTestDriverBrowserInit()
{

  if (typeof dump != 'function')
  {
    dump = print;
@ -667,6 +680,14 @@ function jsTestDriverBrowserInit()
    }
  }

  // default to language=type;text/javascript. required for
  // reftest style manifests.
  if (!properties.language)
  {
    properties.language = 'type';
    properties.mimetype = 'text/javascript';
  }

  gTestPath = properties.test;

  gVersion = 10*parseInt(properties.version.replace(/\./g, ''));
@ -680,12 +701,13 @@ function jsTestDriverBrowserInit()
   * since the default setting of jit changed from false to true
   * in http://hg.mozilla.org/tracemonkey/rev/685e00e68be9
   * bisections which depend upon jit settings can be thrown off.
   * default jit(false) to make bisections depending upon jit settings
   * consistent over time. This is not needed in shell tests as the default
   * jit setting has not changed there.
   * default jit(false) when not running jsreftests to make bisections
   * depending upon jit settings consistent over time. This is not needed
   * in shell tests as the default jit setting has not changed there.
   */

  jit(properties.jit);
  if (properties.jit || !document.location.href.match(/jsreftest.html/))
    jit(properties.jit);

  var testpathparts = properties.test.split(/\//);

@ -809,9 +831,134 @@ function jsTestDriverEnd()
      gTestcases[i].dump();
    }

    // tell reftest the test is complete.
    document.documentElement.className = '';
    // tell Spider the page is complete
    gPageCompleted = true;
  }
}

//var dlog = (function (s) { print('debug: ' + s); });
var dlog = (function (s) {});

// dialog closer from http://bclary.com/projects/spider/spider/chrome/content/spider/dialog-closer.js

var gDialogCloser;
var gDialogCloserObserver;

function registerDialogCloser()
{
  dlog('registerDialogCloser: start');
  try
  {
    netscape.security.PrivilegeManager.
      enablePrivilege('UniversalXPConnect');
  }
  catch(excp)
  {
    print('registerDialogCloser: ' + excp);
    return;
  }

  gDialogCloser = Components.
    classes['@mozilla.org/embedcomp/window-watcher;1'].
    getService(Components.interfaces.nsIWindowWatcher);

  gDialogCloserObserver = {observe: dialogCloser_observe};

  gDialogCloser.registerNotification(gDialogCloserObserver);

  dlog('registerDialogCloser: complete');
}

function unregisterDialogCloser()
{
  dlog('unregisterDialogCloser: start');

  if (!gDialogCloserObserver || !gDialogCloser)
  {
    return;
  }
  try
  {
    netscape.security.PrivilegeManager.
      enablePrivilege('UniversalXPConnect');
  }
  catch(excp)
  {
    print('unregisterDialogCloser: ' + excp);
    return;
  }

  gDialogCloser.unregisterNotification(gDialogCloserObserver);

  gDialogCloserObserver = null;
  gDialogCloser = null;

  dlog('unregisterDialogCloser: stop');
}

// use an array to handle the case where multiple dialogs
// appear at one time
var gDialogCloserSubjects = [];

function dialogCloser_observe(subject, topic, data)
{
  try
  {
    netscape.security.PrivilegeManager.
      enablePrivilege('UniversalXPConnect');

    dlog('dialogCloser_observe: ' +
         'subject: ' + subject +
         ', topic=' + topic +
         ', data=' + data +
         ', subject.document.documentURI=' + subject.document.documentURI +
         ', subjects pending=' + gDialogCloserSubjects.length);
  }
  catch(excp)
  {
    print('dialogCloser_observe: ' + excp);
    return;
  }

  if (subject instanceof ChromeWindow && topic == 'domwindowopened' )
  {
    gDialogCloserSubjects.push(subject);
    // timeout of 0 needed when running under reftest framework.
    subject.setTimeout(closeDialog, 0);
  }
  dlog('dialogCloser_observe: subjects pending: ' + gDialogCloserSubjects.length);
}

function closeDialog()
{
  var subject;
  dlog('closeDialog: subjects pending: ' + gDialogCloserSubjects.length);

  while ( (subject = gDialogCloserSubjects.pop()) != null)
  {
    dlog('closeDialog: subject=' + subject);

    dlog('closeDialog: subject.document instanceof XULDocument: ' + (subject.document instanceof XULDocument));
    dlog('closeDialog: subject.document.documentURI: ' + subject.document.documentURI);

    if (subject.document instanceof XULDocument &&
        subject.document.documentURI == 'chrome://global/content/commonDialog.xul')
    {
      dlog('closeDialog: close XULDocument dialog?');
      subject.close();
    }
    else
    {
      // alerts inside of reftest framework are not XULDocument dialogs.
      dlog('closeDialog: close chrome dialog?');
      subject.close();
    }
  }
}

registerDialogCloser();
window.addEventListener('unload', unregisterDialogCloser, true);

jsTestDriverBrowserInit();

js/src/tests/e4x/Expressions/jstests.list (new file)
@ -0,0 +1,30 @@
url-prefix ../../jsreftest.html?test=e4x/Expressions/
script 11.1.1.js
script 11.1.2.js
script 11.1.3.js
script 11.1.4-01.js
script 11.1.4-02.js
script 11.1.4-03.js
fails script 11.1.4-04.js
script 11.1.4-05.js
script 11.1.4-06.js
script 11.1.4-07.js
fails script 11.1.4-08.js
script 11.1.4.js
script 11.1.5.js
script 11.2.1.js
script 11.2.2.js
script 11.2.3.js
script 11.2.4.js
script 11.3.1.js
script 11.3.2.js
script 11.4.1.js
script 11.5.1.js
script 11.6.1.js
script 11.6.2.js
script 11.6.3.js
script regress-301545.js
script regress-302531.js
script regress-340024.js
script regress-366123.js
script regress-496113.js

js/src/tests/e4x/GC/jstests.list (new file)
@ -0,0 +1,11 @@
url-prefix ../../jsreftest.html?test=e4x/GC/
script regress-280844-1.js
script regress-280844-2.js
skip script regress-292455.js # does not always dismiss alert
script regress-313952-01.js
script regress-313952-02.js
script regress-324117.js
skip script regress-324278.js # slow
script regress-339785.js
script regress-357063-01.js
script regress-357063-02.js
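
For readers new to these manifests: a url-prefix line sets the base URL for the entries that follow, and each script entry names one test, optionally preceded by an annotation such as fails or skip (usually with a # comment giving the reason) that tells the reftest harness what to expect. An illustrative entry, not from this patch:

    skip script regress-999999.js # hypothetical: hangs the harness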
(Some files were not shown because too many files have changed in this diff.)