Merge tracemonkey to mozilla-central.

Robert Sayre 2010-04-21 13:27:52 -04:00
commit d14cfe5463
55 changed files with 1513 additions and 1440 deletions

View File

@ -1974,11 +1974,12 @@ jsdStackFrame::Eval (const nsAString &bytes, const nsACString &fileName,
nsresult rv;
nsCOMPtr<nsIJSContextStack> stack = do_GetService("@mozilla.org/js/xpc/ContextStack;1", &rv);
if (NS_FAILED(rv))
return rv;
rv = stack->Push(cx);
if (NS_FAILED(rv))
if (NS_SUCCEEDED(rv))
rv = stack->Push(cx);
if (NS_FAILED(rv)) {
JS_RestoreExceptionState (cx, estate);
return rv;
}
*_rval = JSD_AttemptUCScriptInStackFrame (mCx, mThreadState,
mStackFrameInfo,
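
The hunk above stops pushing the JS context when the context-stack service could not be obtained, and makes sure the saved exception state is restored on either failure before returning. A minimal stand-alone sketch of that error-handling shape follows; the helper names are hypothetical stand-ins, not the jsd/XPCOM API.

#include <cstdio>

typedef int nsresult;                  // stand-in for the XPCOM result type
static const nsresult NS_OK = 0;
static bool NS_FAILED(nsresult rv)    { return rv != NS_OK; }
static bool NS_SUCCEEDED(nsresult rv) { return rv == NS_OK; }

static nsresult GetContextStack() { return NS_OK; }  // hypothetical step 1
static nsresult PushContext()     { return NS_OK; }  // hypothetical step 2
static void RestoreExceptionState() { std::puts("exception state restored"); }

static nsresult EvalLike() {
    nsresult rv = GetContextStack();
    if (NS_SUCCEEDED(rv))
        rv = PushContext();            // only attempted when step 1 succeeded
    if (NS_FAILED(rv)) {
        RestoreExceptionState();       // one cleanup path covers both failures
        return rv;
    }
    // ... evaluation would proceed here ...
    return NS_OK;
}

int main() { return EvalLike() == NS_OK ? 0 : 1; }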

View File

@ -687,6 +687,7 @@ if test -n "$_WIN32_MSVC"; then
# Common to all MSVC environments:
# Windows lacks <stdint.h>, but has __int8, and so on.
AC_DEFINE(JS_HAVE___INTN)
AC_DEFINE(HAVE_LOCALECONV)
case "$target" in
*-wince|*-winmo)
@ -5293,8 +5294,6 @@ MAKEFILES="
Makefile
shell/Makefile
lirasm/Makefile
ctypes/Makefile
ctypes/tests/Makefile
jsapi-tests/Makefile
tests/Makefile
config/Makefile
@ -5390,8 +5389,9 @@ if test "$JS_HAS_CTYPES"; then
# Use a separate cache file for libffi, since it does things differently
# from our configure.
mkdir -p $_objdir/ctypes/libffi
old_cache_file=$cache_file
cache_file=ctypes/libffi/config.cache
cache_file=$_objdir/ctypes/libffi/config.cache
old_config_files=$CONFIG_FILES
unset CONFIG_FILES
AC_OUTPUT_SUBDIRS(ctypes/libffi)

View File

@ -146,8 +146,8 @@ namespace StructType {
namespace FunctionType {
static JSBool Create(JSContext* cx, uintN argc, jsval* vp);
static JSBool ConstructData(JSContext* cx, JSObject* obj, uintN argc,
jsval* argv, jsval* rval);
static JSBool ConstructData(JSContext* cx, JSObject* typeObj,
JSObject* dataObj, JSObject* fnObj, JSObject* thisObj);
static JSBool Call(JSContext* cx, JSObject* obj, uintN argc, jsval* argv,
jsval* rval);
@ -1579,7 +1579,6 @@ ConvertToJS(JSContext* cx,
break;
}
case TYPE_pointer:
case TYPE_function:
case TYPE_array:
case TYPE_struct: {
// We're about to create a new CData object to return. If the caller doesn't
@ -1596,6 +1595,8 @@ ConvertToJS(JSContext* cx,
*result = OBJECT_TO_JSVAL(obj);
break;
}
case TYPE_function:
JS_NOT_REACHED("cannot return a FunctionType");
}
return true;
@ -1774,14 +1775,6 @@ ImplicitConvert(JSContext* cx,
}
return TypeError(cx, "pointer", val);
}
case TYPE_function: {
if (JSVAL_IS_NULL(val)) {
// Convert to a null function pointer.
*static_cast<void**>(buffer) = NULL;
break;
}
return TypeError(cx, "function", val);
}
case TYPE_array: {
JSObject* baseType = ArrayType::GetBaseType(cx, targetType);
size_t targetLength = ArrayType::GetLength(cx, targetType);
@ -1937,6 +1930,7 @@ ImplicitConvert(JSContext* cx,
return TypeError(cx, "struct", val);
}
case TYPE_void_t:
case TYPE_function:
JS_NOT_REACHED("invalid type");
return false;
}
@ -1999,14 +1993,6 @@ ExplicitConvert(JSContext* cx, jsval val, JSObject* targetType, void* buffer)
*static_cast<uintptr_t*>(buffer) = result;
break;
}
case TYPE_function: {
// Convert a number, Int64 object, or UInt64 object to a function pointer.
uintptr_t result;
if (!jsvalToPtrExplicit(cx, val, &result))
return TypeError(cx, "function", val);
*static_cast<uintptr_t*>(buffer) = result;
break;
}
case TYPE_float32_t:
case TYPE_float64_t:
case TYPE_float:
@ -2017,6 +2003,7 @@ ExplicitConvert(JSContext* cx, jsval val, JSObject* targetType, void* buffer)
JS_SetPendingException(cx, ex.value());
return false;
case TYPE_void_t:
case TYPE_function:
JS_NOT_REACHED("invalid type");
return false;
}
@ -2080,9 +2067,6 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
case TYPE_function: {
FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, currentType);
// Function pointer goes on the left.
PrependString(result, "*");
// Add in the calling convention, if it's not cdecl.
if (GetABICode(cx, fninfo->mABI) == ABI_STDCALL)
PrependString(result, "__stdcall ");
@ -2105,8 +2089,9 @@ BuildTypeName(JSContext* cx, JSObject* typeObj)
AppendString(result, ")");
// Set 'currentType' to the return type, and let the loop process it.
// 'prevGrouping' doesn't matter here, because functions cannot return
// arrays -- thus the parenthetical rules don't get tickled.
currentType = fninfo->mReturnType;
prevGrouping = currentGrouping;
continue;
}
default:
@ -2452,10 +2437,11 @@ CType::ConstructData(JSContext* cx,
case TYPE_void_t:
JS_ReportError(cx, "cannot construct from void_t");
return JS_FALSE;
case TYPE_function:
JS_ReportError(cx, "cannot construct from FunctionType; use FunctionType.ptr instead");
return JS_FALSE;
case TYPE_pointer:
return PointerType::ConstructData(cx, obj, argc, argv, rval);
case TYPE_function:
return FunctionType::ConstructData(cx, obj, argc, argv, rval);
case TYPE_array:
return ArrayType::ConstructData(cx, obj, argc, argv, rval);
case TYPE_struct:
@ -3179,8 +3165,8 @@ PointerType::ConstructData(JSContext* cx,
return JS_FALSE;
}
if (argc > 1) {
JS_ReportError(cx, "constructor takes zero or one argument");
if (argc > 2) {
JS_ReportError(cx, "constructor takes 0, 1, or 2 arguments");
return JS_FALSE;
}
@ -3190,12 +3176,39 @@ PointerType::ConstructData(JSContext* cx,
*rval = OBJECT_TO_JSVAL(result);
if (argc == 1) {
if (!ExplicitConvert(cx, argv[0], obj, CData::GetData(cx, result)))
return JS_FALSE;
if (argc == 0) {
// Construct a null pointer.
return JS_TRUE;
}
return JS_TRUE;
if (argc >= 1) {
JSObject* baseObj = PointerType::GetBaseType(cx, obj);
if (baseObj && CType::GetTypeCode(cx, baseObj) == TYPE_function &&
JSVAL_IS_OBJECT(argv[0]) &&
JS_ObjectIsFunction(cx, JSVAL_TO_OBJECT(argv[0]))) {
// Construct a FunctionType.ptr from a JS function, and allow an
// optional 'this' argument.
JSObject* thisObj = NULL;
if (argc == 2) {
if (JSVAL_IS_OBJECT(argv[1])) {
thisObj = JSVAL_TO_OBJECT(argv[1]);
} else if (!JS_ValueToObject(cx, argv[1], &thisObj)) {
return JS_FALSE;
}
}
JSObject* fnObj = JSVAL_TO_OBJECT(argv[0]);
return FunctionType::ConstructData(cx, baseObj, result, fnObj, thisObj);
}
if (argc == 2) {
JS_ReportError(cx, "first argument must be a function");
return JS_FALSE;
}
}
// Construct from a raw pointer value.
return ExplicitConvert(cx, argv[0], obj, CData::GetData(cx, result));
}
JSObject*
@ -4336,9 +4349,9 @@ PrepareType(JSContext* cx, jsval type)
if (!result)
return NULL;
} else if (typeCode == TYPE_void_t) {
// disallow void argument types
JS_ReportError(cx, "Cannot have void argument type");
} else if (typeCode == TYPE_void_t || typeCode == TYPE_function) {
// disallow void or function argument types
JS_ReportError(cx, "Cannot have void or function argument type");
return NULL;
}
@ -4360,9 +4373,9 @@ PrepareReturnType(JSContext* cx, jsval type)
JSObject* result = JSVAL_TO_OBJECT(type);
TypeCode typeCode = CType::GetTypeCode(cx, result);
// Arrays can never be return types.
if (typeCode == TYPE_array) {
JS_ReportError(cx, "Result type cannot be an array");
// Arrays and functions can never be return types.
if (typeCode == TYPE_array || typeCode == TYPE_function) {
JS_ReportError(cx, "Return type cannot be an array or function");
return NULL;
}
@ -4431,6 +4444,11 @@ NewFunctionInfo(JSContext* cx,
return NULL;
}
ffi_abi abi;
if (!GetABI(cx, abiType, &abi)) {
JS_ReportError(cx, "Invalid ABI specification");
return NULL;
}
fninfo->mABI = JSVAL_TO_OBJECT(abiType);
// prepare the result type
@ -4557,10 +4575,9 @@ FunctionType::CreateInternal(JSContext* cx,
SLOT_FUNCTIONDATAPROTO);
// Create a new CType object with the common properties and slots.
// We use ffi_type_void here in its capacity as "a type of undefined size".
JSObject* typeObj = CType::Create(cx, typeProto, dataProto, TYPE_function,
NULL, INT_TO_JSVAL(sizeof(void*)),
INT_TO_JSVAL(ffi_type_pointer.alignment),
&ffi_type_pointer, NULL);
NULL, JSVAL_VOID, JSVAL_VOID, &ffi_type_void, NULL);
if (!typeObj)
return NULL;
js::AutoValueRooter root(cx, typeObj);
@ -4582,75 +4599,38 @@ FunctionType::CreateInternal(JSContext* cx,
JSBool
FunctionType::ConstructData(JSContext* cx,
JSObject* obj,
uintN argc,
jsval* argv,
jsval* rval)
JSObject* typeObj,
JSObject* dataObj,
JSObject* fnObj,
JSObject* thisObj)
{
if (!CType::IsCType(cx, obj) || CType::GetTypeCode(cx, obj) != TYPE_function) {
JS_ReportError(cx, "not a FunctionType");
JS_ASSERT(CType::GetTypeCode(cx, typeObj) == TYPE_function);
PRFuncPtr* data = static_cast<PRFuncPtr*>(CData::GetData(cx, dataObj));
FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, typeObj);
if (fninfo->mIsVariadic) {
JS_ReportError(cx, "Can't declare a variadic callback function");
return JS_FALSE;
}
JSObject* result = CData::Create(cx, obj, NULL, NULL, true);
if (!result)
JSObject* closureObj = CClosure::Create(cx, typeObj, fnObj, thisObj, data);
if (!closureObj)
return JS_FALSE;
js::AutoValueRooter root(cx, closureObj);
// Set the closure object as the referent of the new CData object.
if (!JS_SetReservedSlot(cx, dataObj, SLOT_REFERENT,
OBJECT_TO_JSVAL(closureObj)))
return JS_FALSE;
*rval = OBJECT_TO_JSVAL(result);
if (argc == 0) {
// Construct a null pointer.
return JS_TRUE;
}
if (argc == 1 || argc == 2) {
jsval arg = argv[0];
PRFuncPtr* data = static_cast<PRFuncPtr*>(CData::GetData(cx, result));
if (JSVAL_IS_OBJECT(arg) && JS_ObjectIsFunction(cx, JSVAL_TO_OBJECT(arg))) {
FunctionInfo* fninfo = FunctionType::GetFunctionInfo(cx, obj);
if (fninfo->mIsVariadic) {
JS_ReportError(cx, "Can't declare a variadic callback function");
return JS_FALSE;
}
// Construct from a JS function, and allow an optional 'this' argument.
JSObject* thisObj = NULL;
if (argc == 2) {
if (JSVAL_IS_OBJECT(argv[1])) {
thisObj = JSVAL_TO_OBJECT(argv[1]);
} else if (!JS_ValueToObject(cx, argv[1], &thisObj)) {
return JS_FALSE;
}
}
JSObject* fnObj = JSVAL_TO_OBJECT(arg);
JSObject* closureObj = CClosure::Create(cx, obj, fnObj, thisObj, data);
if (!closureObj)
return JS_FALSE;
js::AutoValueRooter root(cx, closureObj);
// Set the closure object as the referent of the new CData object.
if (!JS_SetReservedSlot(cx, result, SLOT_REFERENT,
OBJECT_TO_JSVAL(closureObj)))
return JS_FALSE;
// Seal the CData object, to prevent modification of the function pointer.
// This permanently associates this object with the closure, and avoids
// having to do things like reset SLOT_REFERENT when someone tries to
// change the pointer value.
// XXX This will need to change when bug 541212 is fixed -- CData::ValueSetter
// could be called on a sealed object.
return JS_SealObject(cx, result, JS_FALSE);
}
if (argc == 1) {
// Construct from a raw pointer value.
return ExplicitConvert(cx, arg, obj, data);
}
}
JS_ReportError(cx, "constructor takes 0, 1, or 2 arguments");
return JS_FALSE;
// Seal the CData object, to prevent modification of the function pointer.
// This permanently associates this object with the closure, and avoids
// having to do things like reset SLOT_REFERENT when someone tries to
// change the pointer value.
// XXX This will need to change when bug 541212 is fixed -- CData::ValueSetter
// could be called on a sealed object.
return JS_SealObject(cx, dataObj, JS_FALSE);
}
typedef Array<AutoValue, 16> AutoValueAutoArray;
@ -4699,8 +4679,10 @@ FunctionType::Call(JSContext* cx,
}
JSObject* typeObj = CData::GetCType(cx, obj);
if (CType::GetTypeCode(cx, typeObj) != TYPE_function) {
JS_ReportError(cx, "not a FunctionType");
if (CType::GetTypeCode(cx, typeObj) != TYPE_pointer ||
!(typeObj = PointerType::GetBaseType(cx, typeObj)) ||
CType::GetTypeCode(cx, typeObj) != TYPE_function) {
JS_ReportError(cx, "not a FunctionType.ptr");
return false;
}
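
Taken together, the CTypes.cpp and Library.cpp hunks in this merge make a bare FunctionType an opaque, sizeless type: it can no longer be constructed, converted, or used as an argument or return type, and callable data now lives only behind FunctionType.ptr (a PointerType whose base is a function type), which Library::Declare and FunctionType::Call detect by looking through the pointer's base type. The stand-alone sketch below (simplified assumptions, not the real js-ctypes code) shows the resulting shape of a type-code switch, where the function arm is asserted unreachable for value conversion.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

enum TypeCode { TYPE_void_t, TYPE_int32_t, TYPE_pointer, TYPE_function };

// Hypothetical descriptor: a function type reports no size of its own.
struct TypeDesc { TypeCode code; size_t size; };

static bool HasDefinedSize(const TypeDesc& t) {
    return t.code != TYPE_void_t && t.code != TYPE_function;
}

static bool ConvertToValue(const TypeDesc& t, const void* buffer, int64_t* out) {
    switch (t.code) {
      case TYPE_int32_t:
        *out = *static_cast<const int32_t*>(buffer);
        return true;
      case TYPE_pointer:            // a pointer-to-function lands here
        *out = reinterpret_cast<intptr_t>(*static_cast<void* const*>(buffer));
        return true;
      case TYPE_void_t:
      case TYPE_function:           // a bare function type never holds a value
        assert(!"cannot convert a sizeless type");
        return false;
    }
    return false;
}

int main() {
    int32_t v = 7;
    int64_t out = 0;
    TypeDesc i32 = { TYPE_int32_t, sizeof(int32_t) };
    TypeDesc fn  = { TYPE_function, 0 };
    bool ok = ConvertToValue(i32, &v, &out);
    std::printf("i32: defined=%d ok=%d value=%lld\n",
                HasDefinedSize(i32), ok, (long long)out);
    std::printf("fn:  defined=%d\n", HasDefinedSize(fn));
    return 0;
}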

View File

@ -252,16 +252,26 @@ Library::Declare(JSContext* cx, uintN argc, jsval* vp)
return JS_FALSE;
root.setObject(typeObj);
// Make a function pointer type.
typeObj = PointerType::CreateInternal(cx, NULL, typeObj, NULL);
if (!typeObj)
return JS_FALSE;
root.setObject(typeObj);
} else {
// Case 2).
if (JSVAL_IS_PRIMITIVE(argv[1]) ||
!CType::IsCType(cx, JSVAL_TO_OBJECT(argv[1]))) {
JS_ReportError(cx, "second argument must be a type");
!CType::IsCType(cx, JSVAL_TO_OBJECT(argv[1])) ||
!CType::IsSizeDefined(cx, JSVAL_TO_OBJECT(argv[1]))) {
JS_ReportError(cx, "second argument must be a type of defined size");
return JS_FALSE;
}
typeObj = JSVAL_TO_OBJECT(argv[1]);
isFunction = CType::GetTypeCode(cx, typeObj) == TYPE_function;
if (CType::GetTypeCode(cx, typeObj) == TYPE_pointer) {
JSObject* baseType = PointerType::GetBaseType(cx, typeObj);
isFunction = baseType && CType::GetTypeCode(cx, baseType) == TYPE_function;
}
}
void* data;

View File

@ -1467,7 +1467,7 @@ struct JSExtendedClass {
#define JSCLASS_NEW_ENUMERATE (1<<1) /* has JSNewEnumerateOp hook */
#define JSCLASS_NEW_RESOLVE (1<<2) /* has JSNewResolveOp hook */
#define JSCLASS_PRIVATE_IS_NSISUPPORTS (1<<3) /* private is (nsISupports *) */
#define JSCLASS_SHARE_ALL_PROPERTIES (1<<4) /* all properties are SHARED */
/* (1<<4) was JSCLASS_SHARE_ALL_PROPERTIES, now obsolete. See bug 527805. */
#define JSCLASS_NEW_RESOLVE_GETS_START (1<<5) /* JSNewResolveOp gets starting
object in prototype chain
passed in via *objp in/out

View File

@ -200,7 +200,7 @@ struct JSArenaPool {
if ((pool)->current == (a)) (pool)->current = &(pool)->first; \
*(pnext) = (a)->next; \
JS_CLEAR_ARENA(a); \
free(a); \
js_free(a); \
(a) = NULL; \
JS_END_MACRO

View File

@ -893,7 +893,7 @@ js_ContextIterator(JSRuntime *rt, JSBool unlocked, JSContext **iterp)
{
JSContext *cx = *iterp;
Conditionally<AutoLockGC> lockIf(unlocked, rt);
Conditionally<AutoLockGC> lockIf(!!unlocked, rt);
cx = js_ContextFromLinkField(cx ? cx->link.next : rt->contextList.next);
if (&cx->link == &rt->contextList)
cx = NULL;
@ -1844,11 +1844,27 @@ js_InvokeOperationCallback(JSContext *cx)
* not yield. Operation callbacks are supposed to happen rarely (seconds,
* not milliseconds) so it is acceptable to yield at every callback.
*/
if (cx->runtime->gcIsNeeded)
JSRuntime *rt = cx->runtime;
if (rt->gcIsNeeded) {
js_GC(cx, GC_NORMAL);
/*
* On trace we can exceed the GC quota, see comments in NewGCArena. So
* we check the quota and report OOM here when we are off trace.
*/
bool delayedOutOfMemory;
JS_LOCK_GC(rt);
delayedOutOfMemory = (rt->gcBytes > rt->gcMaxBytes);
JS_UNLOCK_GC(rt);
if (delayedOutOfMemory) {
js_ReportOutOfMemory(cx);
return false;
}
}
#ifdef JS_THREADSAFE
else
else {
JS_YieldRequest(cx);
}
#endif
JSOperationCallback cb = cx->operationCallback;
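
The operation-callback change above runs the pending GC, then samples the heap byte count under the GC lock and reports the "delayed" out-of-memory (per the comment, the quota can be exceeded while on trace) after the lock is released. A small stand-alone sketch of that shape, with made-up names rather than the SpiderMonkey runtime API:

#include <cstddef>
#include <cstdio>
#include <mutex>

struct Runtime {
    std::mutex gcLock;
    size_t gcBytes;
    size_t gcMaxBytes;
};

static void RunGC(Runtime&) { /* a real collector would shrink gcBytes */ }

static bool OperationCallback(Runtime& rt, bool gcIsNeeded) {
    if (gcIsNeeded) {
        RunGC(rt);
        bool delayedOutOfMemory;
        {
            std::lock_guard<std::mutex> lock(rt.gcLock);  // sample under the lock
            delayedOutOfMemory = rt.gcBytes > rt.gcMaxBytes;
        }
        if (delayedOutOfMemory) {
            std::fputs("out of memory\n", stderr);        // report outside the lock
            return false;                                 // abort the script
        }
    }
    return true;
}

int main() {
    Runtime rt;
    rt.gcBytes = 2048;
    rt.gcMaxBytes = 1024;
    return OperationCallback(rt, true) ? 0 : 1;
}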

View File

@ -1663,6 +1663,11 @@ class AutoGCRooter {
ID = -12, /* js::AutoIdRooter */
VECTOR = -13 /* js::AutoValueVector */
};
private:
/* No copy or assignment semantics. */
AutoGCRooter(AutoGCRooter &ida);
void operator=(AutoGCRooter &ida);
};
class AutoSaveWeakRoots : private AutoGCRooter
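
The two private declarations added to AutoGCRooter are the classic pre-C++11 way to forbid copying: declare the copy constructor and assignment operator private and never define them, so any accidental copy fails to compile (or to link, if attempted from inside the class). A stand-alone illustration with a made-up class name:

#include <cstdio>

class StackRooter {
  public:
    explicit StackRooter(int tag) : tag_(tag) {}
    int tag() const { return tag_; }

  private:
    int tag_;

    /* No copy or assignment semantics. */
    StackRooter(const StackRooter&);        // declared, intentionally undefined
    void operator=(const StackRooter&);     // declared, intentionally undefined
};

int main() {
    StackRooter r(42);
    // StackRooter copy(r);              // would not compile: copy ctor is private
    // StackRooter s(0); s = r;          // likewise for assignment
    std::printf("%d\n", r.tag());
    return 0;
}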

View File

@ -801,9 +801,9 @@ js_WrapWatchedSetter(JSContext *cx, jsid id, uintN attrs, JSPropertyOp setter)
} else {
atom = NULL;
}
wrapper = js_NewFunction(cx, NULL, js_watch_set_wrapper, 1, 0,
js_CastAsObject(setter)->getParent(),
atom);
setter ? js_CastAsObject(setter)->getParent() : NULL, atom);
if (!wrapper)
return NULL;
return js_CastAsPropertyOp(FUN_OBJECT(wrapper));

View File

@ -2843,11 +2843,11 @@ void dumpGCTimer(GCTimer *gcT, uint64 firstEnter, bool lastGC)
if (!gcFile) {
gcFile = fopen("gcTimer.dat", "w");
JS_ASSERT(gcFile);
fprintf(gcFile, " AppTime, Total, Mark, Sweep, FinObj, ");
fprintf(gcFile, "FinStr, FinDbl, Destroy, newChunks, destoyChunks\n");
}
JS_ASSERT(gcFile);
fprintf(gcFile, "%12.1f, %6.1f, %6.1f, %6.1f, %6.1f, %6.1f, %6.1f, %7.1f, ",
(double)(gcT->enter - firstEnter) / 1E6,
(double)(gcT->end-gcT->enter) / 1E6,
@ -2860,8 +2860,10 @@ void dumpGCTimer(GCTimer *gcT, uint64 firstEnter, bool lastGC)
fprintf(gcFile, "%10d, %10d \n", newChunkCount, destroyChunkCount);
fflush(gcFile);
if (lastGC)
if (lastGC) {
fclose(gcFile);
gcFile = NULL;
}
}
# define GCTIMER_PARAM , GCTimer &gcTimer
@ -3212,7 +3214,7 @@ FireGCBegin(JSContext *cx, JSGCInvocationKind gckind)
* another thread.
*/
if (gckind != GC_SET_SLOT_REQUEST && callback) {
Conditionally<AutoUnlockGC> unlockIf(gckind & GC_LOCK_HELD, rt);
Conditionally<AutoUnlockGC> unlockIf(!!(gckind & GC_LOCK_HELD), rt);
return callback(cx, JSGC_BEGIN) || gckind == GC_LAST_CONTEXT;
}
return true;
@ -3369,7 +3371,7 @@ js_GC(JSContext *cx, JSGCInvocationKind gckind)
* Make sure that the GC from another thread respects
* GC_KEEP_ATOMS.
*/
Conditionally<AutoKeepAtoms> keepIf(gckind & GC_KEEP_ATOMS, rt);
Conditionally<AutoKeepAtoms> keepIf(!!(gckind & GC_KEEP_ATOMS), rt);
/*
* Check that we did not release the GC lock above and let the

View File

@ -472,7 +472,11 @@ void
TraceObjectVector(JSTracer *trc, JSObject **vec, uint32 len);
inline void
#ifdef DEBUG
TraceValues(JSTracer *trc, size_t len, jsval *vec, const char *name)
#else
TraceValues(JSTracer *trc, size_t len, jsval *vec, const char *) /* last arg unused in release. kill unreferenced formal param warnings */
#endif
{
for (jsval *vp = vec, *end = vp + len; vp < end; vp++) {
jsval v = *vp;
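
The TraceValues change wraps the signature in #ifdef DEBUG so the 'name' parameter is only named in builds that actually use it; release builds leave it unnamed, which is what silences "unreferenced formal parameter" warnings. A stand-alone sketch of the same trick (illustrative names, not the jsgc API):

#include <cstddef>
#include <cstdio>

#ifdef DEBUG
static void TraceSlots(const int* vec, size_t len, const char* name)
#else
static void TraceSlots(const int* vec, size_t len, const char*)  // unnamed in release
#endif
{
    for (size_t i = 0; i < len; i++) {
#ifdef DEBUG
        std::printf("%s[%zu] = %d\n", name, i, vec[i]);   // debug-only use of 'name'
#else
        (void)vec[i];                                     // release: just touch the slot
#endif
    }
}

int main() {
    int slots[3] = { 1, 2, 3 };
    TraceSlots(slots, 3, "slots");
    return 0;
}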

View File

@ -67,7 +67,7 @@
static void *
DefaultAllocTable(void *pool, size_t size)
{
return malloc(size);
return js_malloc(size);
}
static void
@ -79,7 +79,7 @@ DefaultFreeTable(void *pool, void *item, size_t size)
static JSHashEntry *
DefaultAllocEntry(void *pool, const void *key)
{
return (JSHashEntry*) malloc(sizeof(JSHashEntry));
return (JSHashEntry*) js_malloc(sizeof(JSHashEntry));
}
static void

View File

@ -836,7 +836,7 @@ js_SetSlotThreadSafe(JSContext *cx, JSObject *obj, uint32 slot, jsval v)
static JSFatLock *
NewFatlock()
{
JSFatLock *fl = (JSFatLock *)malloc(sizeof(JSFatLock)); /* for now */
JSFatLock *fl = (JSFatLock *)js_malloc(sizeof(JSFatLock)); /* for now */
if (!fl) return NULL;
fl->susp = 0;
fl->next = NULL;

View File

@ -3587,7 +3587,7 @@ js_AllocSlots(JSContext *cx, JSObject *obj, size_t nslots)
jsval* slots;
slots = (jsval*) cx->malloc(SLOTS_TO_DYNAMIC_WORDS(nslots) * sizeof(jsval));
if (!slots)
return true;
return false;
*slots++ = nslots;
/* clear the newly allocated cells. */
@ -3644,6 +3644,9 @@ js_GrowSlots(JSContext *cx, JSObject *obj, size_t nslots)
size_t oslots = size_t(slots[-1]);
slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval));
if (!slots)
return false;
*slots++ = nslots;
obj->dslots = slots;
@ -3673,6 +3676,8 @@ js_ShrinkSlots(JSContext *cx, JSObject *obj, size_t nslots)
} else {
size_t nwords = SLOTS_TO_DYNAMIC_WORDS(nslots);
slots = (jsval*) cx->realloc(slots - 1, nwords * sizeof(jsval));
if (!slots)
return; /* Leave obj->dslots at its old size. */
*slots++ = nslots;
obj->dslots = slots;
}
@ -4259,9 +4264,6 @@ js_DefineNativeProperty(JSContext *cx, JSObject *obj, jsid id, jsval value,
added = false;
if (!sprop) {
/* Add a new property, or replace an existing one of the same id. */
if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
attrs |= JSPROP_SHARED;
if (defineHow & JSDNP_SET_METHOD) {
JS_ASSERT(clasp == &js_ObjectClass);
JS_ASSERT(VALUE_IS_FUNCTION(cx, value));
@ -5126,9 +5128,6 @@ js_SetPropertyHelper(JSContext *cx, JSObject *obj, jsid id, uintN defineHow,
return JS_FALSE;
}
if (clasp->flags & JSCLASS_SHARE_ALL_PROPERTIES)
attrs |= JSPROP_SHARED;
/*
* Check for Object class here to avoid defining a method on a class
* with magic resolve, addProperty, getProperty, etc. hooks.
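
The jsobj.cpp hunks above tighten the slot-allocation error paths: js_AllocSlots now reports failure instead of returning true, js_GrowSlots bails out when realloc fails, and js_ShrinkSlots leaves obj->dslots at its old size rather than committing a bad pointer. A minimal stand-alone version of that realloc discipline (not the real slot layout, which stores the count in slot[-1]):

#include <cstdio>
#include <cstdlib>

struct Slots {
    size_t count;
    int*   data;
};

static bool GrowSlots(Slots& s, size_t ncount) {
    int* grown = static_cast<int*>(std::realloc(s.data, ncount * sizeof(int)));
    if (!grown)
        return false;       // old buffer and old count are still intact
    s.data  = grown;        // commit only after the allocation succeeded
    s.count = ncount;
    return true;
}

int main() {
    Slots s = { 0, NULL };
    if (!GrowSlots(s, 8))
        return 1;
    s.data[7] = 42;
    std::printf("count=%zu last=%d\n", s.count, s.data[7]);
    std::free(s.data);
    return 0;
}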

View File

@ -1120,12 +1120,8 @@ BEGIN_CASE(JSOP_NEG)
END_CASE(JSOP_NEG)
BEGIN_CASE(JSOP_POS)
{
rval = FETCH_OPND(-1);
if (!ValueToNumberValue(cx, &regs.sp[-1]))
goto error;
rval = regs.sp[-1];
}
END_CASE(JSOP_POS)
BEGIN_CASE(JSOP_DELNAME)
@ -1739,9 +1735,6 @@ BEGIN_CASE(JSOP_SETMETHOD)
* inline compensation code here, depending on
* real-world workloads.
*/
JS_ASSERT(!(obj->getClass()->flags &
JSCLASS_SHARE_ALL_PROPERTIES));
PCMETER(cache->pchits++);
PCMETER(cache->addpchits++);
@ -3346,7 +3339,6 @@ BEGIN_CASE(JSOP_INITMETHOD)
obj = JSVAL_TO_OBJECT(lval);
JS_ASSERT(obj->isNative());
JS_ASSERT(!obj->getClass()->reserveSlots);
JS_ASSERT(!(obj->getClass()->flags & JSCLASS_SHARE_ALL_PROPERTIES));
JSScope *scope = obj->scope();
PropertyCacheEntry *entry;

View File

@ -343,12 +343,12 @@ class TokenStream
void setXMLTagMode(bool enabled = true) { setFlag(enabled, TSF_XMLTAGMODE); }
void setXMLOnlyMode(bool enabled = true) { setFlag(enabled, TSF_XMLONLYMODE); }
void setUnexpectedEOF(bool enabled = true) { setFlag(enabled, TSF_UNEXPECTED_EOF); }
bool isStrictMode() { return flags & TSF_STRICT_MODE_CODE; }
bool isXMLTagMode() { return flags & TSF_XMLTAGMODE; }
bool isXMLOnlyMode() { return flags & TSF_XMLONLYMODE; }
bool isUnexpectedEOF() { return flags & TSF_UNEXPECTED_EOF; }
bool isEOF() const { return flags & TSF_EOF; }
bool isError() const { return flags & TSF_ERROR; }
bool isStrictMode() { return !!(flags & TSF_STRICT_MODE_CODE); }
bool isXMLTagMode() { return !!(flags & TSF_XMLTAGMODE); }
bool isXMLOnlyMode() { return !!(flags & TSF_XMLONLYMODE); }
bool isUnexpectedEOF() { return !!(flags & TSF_UNEXPECTED_EOF); }
bool isEOF() const { return !!(flags & TSF_EOF); }
bool isError() const { return !!(flags & TSF_ERROR); }
/* Mutators. */
bool reportCompileErrorNumberVA(JSParseNode *pn, uintN flags, uintN errorNumber, va_list ap);
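
The accessor change above is purely about the conversion: flags & TSF_XMLTAGMODE yields a nonzero unsigned value (say 0x40), and returning that from a bool function relies on an implicit narrowing that some compilers flag (MSVC's C4800 "forcing value to bool" performance warning, for example). Prefixing "!!" collapses any nonzero result to true explicitly. A tiny stand-alone illustration with made-up flag values:

#include <cstdio>

enum { TSF_EOF = 0x10, TSF_ERROR = 0x20, TSF_STRICT_MODE_CODE = 0x40 };

struct TokenStreamish {
    unsigned flags;

    // Without "!!" the return converts 0x40 -> true implicitly; with it, the
    // normalization to bool is explicit and warning-free.
    bool isStrictMode() const { return !!(flags & TSF_STRICT_MODE_CODE); }
    bool isEOF()        const { return !!(flags & TSF_EOF); }
};

int main() {
    TokenStreamish ts = { TSF_STRICT_MODE_CODE };
    std::printf("strict=%d eof=%d\n", ts.isStrictMode(), ts.isEOF());
    return 0;
}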

View File

@ -181,9 +181,11 @@ class ReentrancyGuard
#endif
public:
template <class T>
ReentrancyGuard(T &obj)
#ifdef DEBUG
ReentrancyGuard(T &obj)
: entered(obj.entered)
#else
ReentrancyGuard(T &/*obj*/)
#endif
{
#ifdef DEBUG
@ -241,12 +243,34 @@ PointerRangeSize(T *begin, T *end)
class SystemAllocPolicy
{
public:
void *malloc(size_t bytes) { return ::malloc(bytes); }
void *realloc(void *p, size_t bytes) { return ::realloc(p, bytes); }
void free(void *p) { ::free(p); }
void *malloc(size_t bytes) { return js_malloc(bytes); }
void *realloc(void *p, size_t bytes) { return js_realloc(p, bytes); }
void free(void *p) { js_free(p); }
void reportAllocOverflow() const {}
};
/*
* This utility pales in comparison to Boost's aligned_storage. The utility
* simply assumes that JSUint64 is enough alignment for anyone. This may need
* to be extended one day...
*
* As an important side effect, pulling the storage into this template is
* enough obfuscation to confuse gcc's strict-aliasing analysis into not giving
* false negatives when we cast from the char buffer to whatever type we've
* constructed using the bytes.
*/
template <size_t nbytes>
struct AlignedStorage
{
union U {
char bytes[nbytes];
uint64 _;
} u;
const void *addr() const { return u.bytes; }
void *addr() { return u.bytes; }
};
/*
* Small utility for lazily constructing objects without using dynamic storage.
* When a LazilyConstructed<T> is constructed, it is |empty()|, i.e., no value
@ -258,45 +282,52 @@ class SystemAllocPolicy
template <class T>
class LazilyConstructed
{
union {
uint64 align;
char bytes[sizeof(T) + 1];
};
AlignedStorage<sizeof(T)> storage;
bool constructed;
T &asT() { return *reinterpret_cast<T *>(bytes); }
char & constructed() { return bytes[sizeof(T)]; }
T &asT() { return *reinterpret_cast<T *>(storage.addr()); }
public:
LazilyConstructed() { constructed() = false; }
~LazilyConstructed() { if (constructed()) asT().~T(); }
LazilyConstructed() { constructed = false; }
~LazilyConstructed() { if (constructed) asT().~T(); }
bool empty() const { return !constructed(); }
bool empty() const { return !constructed; }
void construct() {
JS_ASSERT(!constructed());
new(bytes) T();
constructed() = true;
JS_ASSERT(!constructed);
new(storage.addr()) T();
constructed = true;
}
template <class T1>
void construct(const T1 &t1) {
JS_ASSERT(!constructed());
new(bytes) T(t1);
constructed() = true;
JS_ASSERT(!constructed);
new(storage.addr()) T(t1);
constructed = true;
}
template <class T1, class T2>
void construct(const T1 &t1, const T2 &t2) {
JS_ASSERT(!constructed());
new(bytes) T(t1, t2);
constructed() = true;
JS_ASSERT(!constructed);
new(storage.addr()) T(t1, t2);
constructed = true;
}
template <class T1, class T2, class T3>
void construct(const T1 &t1, const T2 &t2, const T3 &t3) {
JS_ASSERT(!constructed());
new(bytes) T(t1, t2, t3);
constructed() = true;
JS_ASSERT(!constructed);
new(storage.addr()) T(t1, t2, t3);
constructed = true;
}
T *addr() {
JS_ASSERT(constructed);
return &asT();
}
T &ref() {
JS_ASSERT(constructed);
return asT();
}
};
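
The jstl.h hunk pulls the raw buffer out of LazilyConstructed into a reusable AlignedStorage<nbytes> (a char array unioned with a uint64 so it gets 8-byte alignment, which, per the comment, also keeps GCC's strict-aliasing analysis quiet), and swaps the old "extra byte after the buffer" flag for a plain bool. The same AlignedStorage then replaces Vector's inline mBuf in the jsvector.h hunk further down. Below is a compressed stand-alone sketch of the idea; the real header offers construct() overloads for zero to three arguments.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>
#include <string>

template <size_t nbytes>
struct AlignedStorage {
    union U {
        char bytes[nbytes];
        uint64_t align_;            // forces 8-byte alignment for the buffer
    } u;
    void*       addr()       { return u.bytes; }
    const void* addr() const { return u.bytes; }
};

template <class T>
class LazilyConstructed {
    AlignedStorage<sizeof(T)> storage;
    bool constructed;

    T& asT() { return *static_cast<T*>(storage.addr()); }

  public:
    LazilyConstructed() : constructed(false) {}
    ~LazilyConstructed() { if (constructed) asT().~T(); }

    bool empty() const { return !constructed; }

    template <class A>
    void construct(const A& a) {
        new (storage.addr()) T(a);  // build in place, no heap allocation
        constructed = true;
    }

    T& ref() { return asT(); }
};

int main() {
    LazilyConstructed<std::string> s;
    std::printf("empty=%d\n", s.empty());
    s.construct("hello");
    std::printf("value=%s\n", s.ref().c_str());
    return 0;
}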

View File

@ -2805,6 +2805,7 @@ NativeToValue(JSContext* cx, jsval& v, TraceType type, double* slot)
break;
case TT_SPECIAL:
JS_ASSERT(*(JSBool*)slot != JSVAL_TO_SPECIAL(JSVAL_VOID));
v = SPECIAL_TO_JSVAL(*(JSBool*)slot);
debug_only_printf(LC_TMTracer, "special<%d> ", *(JSBool*)slot);
break;
@ -6456,11 +6457,6 @@ ScopeChainCheck(JSContext* cx, TreeFragment* f)
return false;
}
/* Make sure the global object is sane. */
JS_ASSERT(f->globalObj->numSlots() <= MAX_GLOBAL_SLOTS);
JS_ASSERT(f->nGlobalTypes() == f->globalSlots->length());
JS_ASSERT_IF(f->globalSlots->length() != 0,
f->globalObj->shape() == f->globalShape);
return true;
}
@ -6480,6 +6476,12 @@ ExecuteTree(JSContext* cx, TreeFragment* f, uintN& inlineCallCount,
if (!ScopeChainCheck(cx, f))
return NULL;
/* Make sure the global object is sane. */
JS_ASSERT(f->globalObj->numSlots() <= MAX_GLOBAL_SLOTS);
JS_ASSERT(f->nGlobalTypes() == f->globalSlots->length());
JS_ASSERT_IF(f->globalSlots->length() != 0,
f->globalObj->shape() == f->globalShape);
/* Initialize trace state. */
InterpState state(cx, tm, f, inlineCallCount, innermostNestedGuardp);
double* stack = tm->storage->stack();
@ -6953,6 +6955,13 @@ MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount, RecordReason reason)
return false;
}
if (!ScopeChainCheck(cx, f)) {
#ifdef MOZ_TRACEVIS
tvso.r = R_FAIL_SCOPE_CHAIN_CHECK;
#endif
return false;
}
/*
* We can give RecordTree the root peer. If that peer is already taken,
* it will walk the peer list and find us a free slot or allocate a new
@ -11343,6 +11352,26 @@ TraceRecorder::setProp(jsval &l, PropertyCacheEntry* entry, JSScopeProperty* spr
return nativeSet(obj, obj_ins, sprop, v, v_ins);
}
JS_REQUIRES_STACK RecordingStatus
TraceRecorder::setUpwardTrackedVar(jsval* stackVp, jsval v, LIns* v_ins)
{
TraceType stackT = determineSlotType(stackVp);
TraceType otherT = getCoercedType(v);
bool promote = true;
if (stackT != otherT) {
if (stackT == TT_DOUBLE && otherT == TT_INT32 && isPromoteInt(v_ins))
promote = false;
else
RETURN_STOP("can't trace this upvar mutation");
}
set(stackVp, v_ins, promote);
return RECORD_CONTINUE;
}
JS_REQUIRES_STACK RecordingStatus
TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty *sprop,
LIns *v_ins, jsval v)
@ -11354,14 +11383,14 @@ TraceRecorder::setCallProp(JSObject *callobj, LIns *callobj_ins, JSScopeProperty
JS_ASSERT(sprop->hasShortID());
uintN slot = uint16(sprop->shortid);
jsval *vp2 = &fp->argv[slot];
set(vp2, v_ins);
CHECK_STATUS(setUpwardTrackedVar(vp2, v, v_ins));
return RECORD_CONTINUE;
}
if (sprop->setterOp() == SetCallVar) {
JS_ASSERT(sprop->hasShortID());
uintN slot = uint16(sprop->shortid);
jsval *vp2 = &fp->slots[slot];
set(vp2, v_ins);
CHECK_STATUS(setUpwardTrackedVar(vp2, v, v_ins));
return RECORD_CONTINUE;
}
RETURN_STOP("can't trace special CallClass setter");
@ -13017,29 +13046,31 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
}
/* If not idx < length, stay on trace (and read value as undefined). */
LIns* br2 = lir->insBranch(LIR_jf,
lir->ins2(LIR_pult,
pidx_ins,
stobj_get_fslot(obj_ins, JSObject::JSSLOT_ARRAY_LENGTH)),
NULL);
LIns* length = stobj_get_fslot(obj_ins, JSObject::JSSLOT_ARRAY_LENGTH);
if (pidx_ins != length) {
LIns* br2 = lir->insBranch(LIR_jf,
lir->ins2(LIR_pult, pidx_ins, length),
NULL);
/* If dslots is NULL, stay on trace (and read value as undefined). */
LIns* br3 = lir->insBranch(LIR_jt, lir->ins_peq0(dslots_ins), NULL);
/* If dslots is NULL, stay on trace (and read value as undefined). */
LIns* br3 = lir->insBranch(LIR_jt, lir->ins_peq0(dslots_ins), NULL);
/* If not idx < capacity, stay on trace (and read value as undefined). */
LIns* br4 = lir->insBranch(LIR_jf,
lir->ins2(LIR_pult,
pidx_ins,
lir->insLoad(LIR_ldp, dslots_ins,
-(int)sizeof(jsval), ACC_OTHER)),
NULL);
lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
LIns* label = lir->ins0(LIR_label);
if (br1)
br1->setTarget(label);
br2->setTarget(label);
br3->setTarget(label);
br4->setTarget(label);
/* If not idx < capacity, stay on trace (and read value as undefined). */
LIns* br4 = lir->insBranch(LIR_jf,
lir->ins2(LIR_pult,
pidx_ins,
lir->insLoad(LIR_ldp, dslots_ins,
-(int)sizeof(jsval), ACC_OTHER)),
NULL);
lir->insGuard(LIR_x, NULL, createGuardRecord(exit));
LIns* label = lir->ins0(LIR_label);
if (br1)
br1->setTarget(label);
br2->setTarget(label);
br3->setTarget(label);
br4->setTarget(label);
}
CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
@ -13082,18 +13113,14 @@ TraceRecorder::denseArrayElement(jsval& oval, jsval& ival, jsval*& vp, LIns*& v_
v_ins = unbox_jsval(*vp, lir->insLoad(LIR_ldp, addr_ins, 0, ACC_OTHER), exit);
if (JSVAL_IS_SPECIAL(*vp) && !JSVAL_IS_VOID(*vp)) {
/*
* If we read a hole from the array, convert it to undefined and guard
* that there are no indexed properties along the prototype chain.
*/
LIns* br = lir->insBranch(LIR_jf,
lir->ins2i(LIR_eq, v_ins, JSVAL_TO_SPECIAL(JSVAL_HOLE)),
NULL);
CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
br->setTarget(lir->ins0(LIR_label));
JS_ASSERT_IF(!JSVAL_IS_BOOLEAN(*vp), *vp == JSVAL_HOLE);
guard(*vp == JSVAL_HOLE, lir->ins2(LIR_eq, v_ins, INS_CONST(JSVAL_TO_SPECIAL(JSVAL_HOLE))), exit);
/* Don't let the hole value escape. Turn it into an undefined. */
v_ins = lir->ins2i(LIR_and, v_ins, ~(JSVAL_HOLE_FLAG >> JSVAL_TAGBITS));
if (*vp == JSVAL_HOLE) {
CHECK_STATUS(guardPrototypeHasNoIndexedProperties(obj, obj_ins, MISMATCH_EXIT));
v_ins = INS_CONST(JSVAL_TO_SPECIAL(JSVAL_VOID));
}
}
return RECORD_CONTINUE;
}
@ -13708,13 +13735,10 @@ TraceRecorder::record_JSOP_BINDNAME()
JS_ASSERT(obj);
}
if (obj != globalObj) {
// If anything other than Block, Call, DeclEnv, and the global
// object is on the scope chain, we shouldn't be recording. Of
// those, only Block and global can be present in global code.
JS_NOT_REACHED("BINDNAME in global code resolved to non-global object");
RETURN_STOP_A("BINDNAME in global code resolved to non-global object");
}
// If anything other than Block, Call, DeclEnv, and the global object
// is on the scope chain, we shouldn't be recording. Of those, only
// Block and global can be present in global code.
JS_ASSERT(obj == globalObj);
/*
* The trace is specialized to this global object. Furthermore, we know it
@ -14129,10 +14153,9 @@ TraceRecorder::record_JSOP_ARGSUB()
JSStackFrame* fp = cx->fp;
if (!(fp->fun->flags & JSFUN_HEAVYWEIGHT)) {
uintN slot = GET_ARGNO(fp->regs->pc);
if (slot < fp->argc)
stack(0, get(&cx->fp->argv[slot]));
else
stack(0, INS_VOID());
if (slot >= fp->argc)
RETURN_STOP_A("can't trace out-of-range arguments");
stack(0, get(&cx->fp->argv[slot]));
return ARECORD_CONTINUE;
}
RETURN_STOP_A("can't trace JSOP_ARGSUB hard case");
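
setUpwardTrackedVar, added above, centralizes the rule for writes to upward-tracked slots: the store is only recorded when the slot's tracked type and the incoming value's type agree, with one exception, namely that an int32 whose instruction is known promotable may be written into a slot tracked as double without demoting the slot; anything else aborts recording. A simplified stand-alone restatement of that decision (the enum and names are stand-ins for the tracer's real types):

#include <cstdio>

enum TraceType { TT_INT32, TT_DOUBLE, TT_OBJECT };

struct WriteDecision { bool recordable; bool keepSlotAsDouble; };

static WriteDecision ClassifyUpwardWrite(TraceType slotType, TraceType valueType,
                                         bool valueIsPromotableInt) {
    WriteDecision d = { true, false };
    if (slotType == valueType)
        return d;                               // same type: plain tracked store
    if (slotType == TT_DOUBLE && valueType == TT_INT32 && valueIsPromotableInt) {
        d.keepSlotAsDouble = true;              // store the int without demoting the slot
        return d;
    }
    d.recordable = false;                       // any other mismatch aborts recording
    return d;
}

int main() {
    WriteDecision a = ClassifyUpwardWrite(TT_DOUBLE, TT_INT32, true);
    WriteDecision b = ClassifyUpwardWrite(TT_DOUBLE, TT_OBJECT, false);
    std::printf("int-into-double: recordable=%d keepDouble=%d\n",
                a.recordable, a.keepSlotAsDouble);
    std::printf("object-into-double: recordable=%d\n", b.recordable);
    return 0;
}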

View File

@ -1351,6 +1351,9 @@ class TraceRecorder
JS_REQUIRES_STACK TraceType determineSlotType(jsval* vp);
JS_REQUIRES_STACK RecordingStatus setUpwardTrackedVar(jsval* stackVp, jsval v,
nanojit::LIns* v_ins);
JS_REQUIRES_STACK AbortableRecordingStatus compile();
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop();
JS_REQUIRES_STACK AbortableRecordingStatus closeLoop(VMSideExit* exit);
@ -1565,6 +1568,7 @@ enum TraceVisExitReason {
R_FAIL_EXTEND_MAX_BRANCHES,
R_FAIL_EXTEND_START,
R_FAIL_EXTEND_COLD,
R_FAIL_SCOPE_CHAIN_CHECK,
R_NO_EXTEND_OUTER,
R_MISMATCH_EXIT,
R_OOM_EXIT,

View File

@ -44,6 +44,7 @@
#ifndef jsutil_h___
#define jsutil_h___
#include "jstypes.h"
#include <stdlib.h>
JS_BEGIN_EXTERN_C
@ -182,6 +183,12 @@ extern JS_FRIEND_API(void)
JS_DumpBacktrace(JSCallsite *trace);
#endif
#if defined JS_USE_CUSTOM_ALLOCATOR
#include "jscustomallocator.h"
#else
static JS_INLINE void* js_malloc(size_t bytes) {
if (bytes < sizeof(void*)) /* for asyncFree */
bytes = sizeof(void*);
@ -203,6 +210,7 @@ static JS_INLINE void* js_realloc(void* p, size_t bytes) {
static JS_INLINE void js_free(void* p) {
free(p);
}
#endif/* JS_USE_CUSTOM_ALLOCATOR */
JS_END_EXTERN_C
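
The jsutil.h change adds the <stdlib.h> include these wrappers need and a JS_USE_CUSTOM_ALLOCATOR escape hatch, so an embedder can supply its own jscustomallocator.h while the default path keeps rounding tiny requests up to pointer size (the existing comment ties that to asyncFree). A stand-alone approximation of the resulting header shape, trimmed down and not the verbatim jsutil.h:

#include <stdio.h>
#include <stdlib.h>

#if defined JS_USE_CUSTOM_ALLOCATOR
#include "jscustomallocator.h"     /* embedder-provided js_malloc & friends */
#else
static inline void* js_malloc(size_t bytes) {
    if (bytes < sizeof(void*))     /* keep every block big enough for a pointer */
        bytes = sizeof(void*);
    return malloc(bytes);
}
static inline void* js_realloc(void* p, size_t bytes) {
    if (bytes < sizeof(void*))
        bytes = sizeof(void*);
    return realloc(p, bytes);
}
static inline void js_free(void* p) {
    free(p);
}
#endif /* JS_USE_CUSTOM_ALLOCATOR */

int main() {
    void* p = js_malloc(1);        /* rounded up to sizeof(void*) internally */
    p = js_realloc(p, 128);
    js_free(p);
    puts("ok");
    return 0;
}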

View File

@ -208,7 +208,7 @@ class Vector : AllocPolicy
/*
* Since a vector either stores elements inline or in a heap-allocated
* buffer, reuse the storage. mLengthOrCapacity serves as the union
* discriminator. In inline mode (when elements are stored in u.mBuf),
* discriminator. In inline mode (when elements are stored in u.storage),
* mLengthOrCapacity holds the vector's length. In heap mode (when elements
* are stored in [u.ptrs.mBegin, u.ptrs.mEnd)), mLengthOrCapacity holds the
* vector's capacity.
@ -228,22 +228,7 @@ class Vector : AllocPolicy
union {
BufferPtrs ptrs;
char mBuf[sInlineBytes];
#if __GNUC__
/*
* GCC thinks there is a strict aliasing warning since mBuf is a char
* array but we read and write to it as a T array. This is not an error
* since there are no reads and writes to the mBuf memory except those
* that treat it as a T array. Sadly,
* #pragma GCC diagnostic ignore "-Wstrict-aliasing"
* doesn't silence the warning. Type punning is allowed through a union
* of the involved types, so, for now, this error can be silenced by
* adding each offending T to this union. (This won't work for non-POD
* T's, but there don't seem to be any with warnings yet...)
*/
jschar unused1_;
#endif
AlignedStorage<sInlineBytes> storage;
} u;
/* Only valid when usingInlineStorage() */
@ -259,12 +244,12 @@ class Vector : AllocPolicy
T *inlineBegin() const {
JS_ASSERT(usingInlineStorage());
return (T *)u.mBuf;
return (T *)u.storage.addr();
}
T *inlineEnd() const {
JS_ASSERT(usingInlineStorage());
return (T *)u.mBuf + mLengthOrCapacity;
return (T *)u.storage.addr() + mLengthOrCapacity;
}
/* Only valid when !usingInlineStorage() */

View File

@ -154,16 +154,6 @@ enum ReturnType {
#define FN(name, args) \
{#name, CI(name, args)}
const ArgType I32 = nanojit::ARGTYPE_LO;
#ifdef NANOJIT_64BIT
const ArgType I64 = nanojit::ARGTYPE_Q;
#endif
const ArgType F64 = nanojit::ARGTYPE_F;
const ArgType PTR = nanojit::ARGTYPE_P;
const ArgType WRD = nanojit::ARGTYPE_P;
const ArgType VD = nanojit::ARGTYPE_V; // "VOID" causes problems on Windows!
enum LirTokenType {
NAME, NUMBER, PUNCT, NEWLINE
};
@ -372,10 +362,10 @@ double sinFn(double d) {
#define sin sinFn
Function functions[] = {
FN(puts, argMask(PTR, 1, 1) | retMask(I32)),
FN(sin, argMask(F64, 1, 1) | retMask(F64)),
FN(malloc, argMask(WRD, 1, 1) | retMask(PTR)),
FN(free, argMask(PTR, 1, 1) | retMask(VD))
FN(puts, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_I)),
FN(sin, argMask(ARGTYPE_F, 1, 1) | retMask(ARGTYPE_F)),
FN(malloc, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_P)),
FN(free, argMask(ARGTYPE_P, 1, 1) | retMask(ARGTYPE_V))
};
template<typename out, typename in> out
@ -1249,51 +1239,51 @@ static void f_V_IQF(int32_t, uint64_t, double)
}
#endif
const CallInfo ci_I_I1 = CI(f_I_I1, argMask(I32, 1, 1) |
retMask(I32));
const CallInfo ci_I_I1 = CI(f_I_I1, argMask(ARGTYPE_I, 1, 1) |
retMask(ARGTYPE_I));
const CallInfo ci_I_I6 = CI(f_I_I6, argMask(I32, 1, 6) |
argMask(I32, 2, 6) |
argMask(I32, 3, 6) |
argMask(I32, 4, 6) |
argMask(I32, 5, 6) |
argMask(I32, 6, 6) |
retMask(I32));
const CallInfo ci_I_I6 = CI(f_I_I6, argMask(ARGTYPE_I, 1, 6) |
argMask(ARGTYPE_I, 2, 6) |
argMask(ARGTYPE_I, 3, 6) |
argMask(ARGTYPE_I, 4, 6) |
argMask(ARGTYPE_I, 5, 6) |
argMask(ARGTYPE_I, 6, 6) |
retMask(ARGTYPE_I));
#ifdef NANOJIT_64BIT
const CallInfo ci_Q_Q2 = CI(f_Q_Q2, argMask(I64, 1, 2) |
argMask(I64, 2, 2) |
retMask(I64));
const CallInfo ci_Q_Q2 = CI(f_Q_Q2, argMask(ARGTYPE_Q, 1, 2) |
argMask(ARGTYPE_Q, 2, 2) |
retMask(ARGTYPE_Q));
const CallInfo ci_Q_Q7 = CI(f_Q_Q7, argMask(I64, 1, 7) |
argMask(I64, 2, 7) |
argMask(I64, 3, 7) |
argMask(I64, 4, 7) |
argMask(I64, 5, 7) |
argMask(I64, 6, 7) |
argMask(I64, 7, 7) |
retMask(I64));
const CallInfo ci_Q_Q7 = CI(f_Q_Q7, argMask(ARGTYPE_Q, 1, 7) |
argMask(ARGTYPE_Q, 2, 7) |
argMask(ARGTYPE_Q, 3, 7) |
argMask(ARGTYPE_Q, 4, 7) |
argMask(ARGTYPE_Q, 5, 7) |
argMask(ARGTYPE_Q, 6, 7) |
argMask(ARGTYPE_Q, 7, 7) |
retMask(ARGTYPE_Q));
#endif
const CallInfo ci_F_F3 = CI(f_F_F3, argMask(F64, 1, 3) |
argMask(F64, 2, 3) |
argMask(F64, 3, 3) |
retMask(F64));
const CallInfo ci_F_F3 = CI(f_F_F3, argMask(ARGTYPE_F, 1, 3) |
argMask(ARGTYPE_F, 2, 3) |
argMask(ARGTYPE_F, 3, 3) |
retMask(ARGTYPE_F));
const CallInfo ci_F_F8 = CI(f_F_F8, argMask(F64, 1, 8) |
argMask(F64, 2, 8) |
argMask(F64, 3, 8) |
argMask(F64, 4, 8) |
argMask(F64, 5, 8) |
argMask(F64, 6, 8) |
argMask(F64, 7, 8) |
argMask(F64, 8, 8) |
retMask(F64));
const CallInfo ci_F_F8 = CI(f_F_F8, argMask(ARGTYPE_F, 1, 8) |
argMask(ARGTYPE_F, 2, 8) |
argMask(ARGTYPE_F, 3, 8) |
argMask(ARGTYPE_F, 4, 8) |
argMask(ARGTYPE_F, 5, 8) |
argMask(ARGTYPE_F, 6, 8) |
argMask(ARGTYPE_F, 7, 8) |
argMask(ARGTYPE_F, 8, 8) |
retMask(ARGTYPE_F));
#ifdef NANOJIT_64BIT
const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(I32, 1, 3) |
argMask(I64, 2, 3) |
argMask(F64, 3, 3) |
const CallInfo ci_V_IQF = CI(f_V_IQF, argMask(ARGTYPE_I, 1, 3) |
argMask(ARGTYPE_Q, 2, 3) |
argMask(ARGTYPE_F, 3, 3) |
retMask(ARGTYPE_V));
#endif

View File

@ -1 +1 @@
ec93bc283385c4e19bfbfda1aa1ecf94aef932ec
ccc892912055b7da35ff7a8bafa7c3cd0bc060d5

View File

@ -79,7 +79,6 @@ namespace nanojit
#endif
, _config(config)
{
VMPI_memset(&_stats, 0, sizeof(_stats));
VMPI_memset(lookahead, 0, N_LOOKAHEAD * sizeof(LInsp));
nInit(core);
(void)logc;
@ -219,8 +218,6 @@ namespace nanojit
_allocator.addActive(r, ins);
ins->setReg(r);
} else {
counter_increment(steals);
// Nothing free, steal one.
// LSRA says pick the one with the furthest use.
LIns* vic = findVictim(setA___);
@ -252,14 +249,6 @@ namespace nanojit
_allocator.removeActive(r);
_allocator.addFree(r);
return r;
}
/**
* these instructions don't have to be saved & reloaded to spill,
* they can just be recalculated w/out any inputs.
*/
bool Assembler::canRemat(LIns *i) {
return i->isImmAny() || i->isop(LIR_alloc);
}
void Assembler::codeAlloc(NIns *&start, NIns *&end, NIns *&eip
@ -290,7 +279,6 @@ namespace nanojit
_nExitIns = 0;
codeStart = codeEnd = 0;
exitStart = exitEnd = 0;
_stats.pages = 0;
codeList = 0;
nativePageReset();
@ -303,7 +291,7 @@ namespace nanojit
{
if (error()) return;
// This may be a normal code chunk or an exit code chunk.
NanoAssertMsg(containsPtr(codeStart, codeEnd, _nIns),
NanoAssertMsg(codeStart <= _nIns && _nIns <= codeEnd,
"Native instruction pointer overstep paging bounds; check overrideProtect for last instruction");
}
#endif
@ -325,7 +313,7 @@ namespace nanojit
continue;
uint32_t arIndex = ins->getArIndex();
NanoAssert(arIndex != 0);
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
int const n = i + (ins->size()>>2);
for (int j=i+1; j < n; j++) {
NanoAssert(_entries[j]==ins);
@ -415,18 +403,18 @@ namespace nanojit
// Like findRegFor(), but called when the LIns is used as a pointer. It
// doesn't have to be called, findRegFor() can still be used, but it can
// optimize the LIR_alloc case by indexing off FP, thus saving the use of
// optimize the LIR_allocp case by indexing off FP, thus saving the use of
// a GpReg.
//
Register Assembler::getBaseReg(LInsp base, int &d, RegisterMask allow)
{
#if !PEDANTIC
if (base->isop(LIR_alloc)) {
// The value of a LIR_alloc is a pointer to its stack memory,
if (base->isop(LIR_allocp)) {
// The value of a LIR_allocp is a pointer to its stack memory,
// which is always relative to FP. So we can just return FP if we
// also adjust 'd' (and can do so in a valid manner). Or, in the
// PEDANTIC case, we can just assign a register as normal;
// findRegFor() will allocate the stack memory for LIR_alloc if
// findRegFor() will allocate the stack memory for LIR_allocp if
// necessary.
d += findMemFor(base);
return FP;
@ -441,12 +429,12 @@ namespace nanojit
// same type as the stored value, eg. in asm_store32() on 32-bit platforms
// and asm_store64() on 64-bit platforms. Similar to getBaseReg(),
// findRegFor2() can be called instead, but this function can optimize the
// case where the base value is a LIR_alloc.
// case where the base value is a LIR_allocp.
void Assembler::getBaseReg2(RegisterMask allowValue, LIns* value, Register& rv,
RegisterMask allowBase, LIns* base, Register& rb, int &d)
{
#if !PEDANTIC
if (base->isop(LIR_alloc)) {
if (base->isop(LIR_allocp)) {
rb = FP;
d += findMemFor(base);
rv = findRegFor(value, allowValue);
@ -469,7 +457,7 @@ namespace nanojit
//
Register Assembler::findRegFor(LIns* ins, RegisterMask allow)
{
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
// Never allocate a reg for this without stack space too.
findMemFor(ins);
}
@ -536,7 +524,7 @@ namespace nanojit
// sometimes useful to have it there for assignments.
Register Assembler::findSpecificRegForUnallocated(LIns* ins, Register r)
{
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
// never allocate a reg for this w/out stack space too
findMemFor(ins);
}
@ -641,7 +629,7 @@ namespace nanojit
asm_maybe_spill(ins, pop);
#ifdef NANOJIT_IA32
if (!ins->isInAr() && pop && r == FST0) {
// This can only happen with a LIR_fcall to an impure function
// This can only happen with a LIR_calld to an impure function
// whose return value was ignored (ie. if ins->isInReg() was false
// prior to the findRegFor() call).
FSTP(FST0); // pop the fpu result since it isn't used
@ -713,8 +701,6 @@ namespace nanojit
void Assembler::evict(LIns* vic)
{
// Not free, need to steal.
counter_increment(steals);
Register r = vic->getReg();
NanoAssert(!_allocator.isFree(r));
@ -787,7 +773,6 @@ namespace nanojit
NIns* Assembler::asm_leave_trace(LInsp guard)
{
verbose_only( int32_t nativeSave = _stats.native );
verbose_only( verbose_outputf("----------------------------------- ## END exit block %p", guard);)
// This point is unreachable. So free all the registers. If an
@ -831,8 +816,6 @@ namespace nanojit
debug_only( _fpuStkDepth = _sv_fpuStkDepth; _sv_fpuStkDepth = 9999; )
#endif
verbose_only(_stats.exitnative += (_stats.native-nativeSave));
return jmpTarget;
}
@ -1001,12 +984,6 @@ namespace nanojit
_thisfrag = frag;
_inExit = false;
counter_reset(native);
counter_reset(exitnative);
counter_reset(steals);
counter_reset(spills);
counter_reset(remats);
setError(None);
// native code gen buffer setup
@ -1015,12 +992,6 @@ namespace nanojit
// make sure we got memory at least one page
if (error()) return;
#ifdef PERFM
_stats.pages = 0;
_stats.codeStart = _nIns-1;
_stats.codeExitStart = _nExitIns-1;
#endif /* PERFM */
_epilogue = NULL;
nBeginAssembly();
@ -1204,6 +1175,107 @@ namespace nanojit
#define countlir_jtbl()
#endif
void Assembler::asm_jmp(LInsp ins, InsList& pending_lives)
{
NanoAssert((ins->isop(LIR_j) && !ins->oprnd1()) ||
(ins->isop(LIR_jf) && ins->oprnd1()->isconstval(0)) ||
(ins->isop(LIR_jt) && ins->oprnd1()->isconstval(1)));
countlir_jmp();
LInsp to = ins->getTarget();
LabelState *label = _labels.get(to);
// The jump is always taken so whatever register state we
// have from downstream code, is irrelevant to code before
// this jump. So clear it out. We will pick up register
// state from the jump target, if we have seen that label.
releaseRegisters();
if (label && label->addr) {
// Forward jump - pick up register state from target.
unionRegisterState(label->regs);
JMP(label->addr);
}
else {
// Backwards jump.
handleLoopCarriedExprs(pending_lives);
if (!label) {
// save empty register state at loop header
_labels.add(to, 0, _allocator);
}
else {
intersectRegisterState(label->regs);
}
JMP(0);
_patches.put(_nIns, to);
}
}
void Assembler::asm_jcc(LInsp ins, InsList& pending_lives)
{
bool branchOnFalse = (ins->opcode() == LIR_jf);
LIns* cond = ins->oprnd1();
if (cond->isconst()) {
if ((!branchOnFalse && !cond->imm32()) || (branchOnFalse && cond->imm32())) {
// jmp never taken, not needed
} else {
asm_jmp(ins, pending_lives); // jmp always taken
}
return;
}
countlir_jcc();
LInsp to = ins->getTarget();
LabelState *label = _labels.get(to);
if (label && label->addr) {
// Forward jump to known label. Need to merge with label's register state.
unionRegisterState(label->regs);
asm_branch(branchOnFalse, cond, label->addr);
}
else {
// Back edge.
handleLoopCarriedExprs(pending_lives);
if (!label) {
// Evict all registers, most conservative approach.
evictAllActiveRegs();
_labels.add(to, 0, _allocator);
}
else {
// Evict all registers, most conservative approach.
intersectRegisterState(label->regs);
}
NIns *branch = asm_branch(branchOnFalse, cond, 0);
_patches.put(branch,to);
}
}
void Assembler::asm_x(LInsp ins)
{
verbose_only( _thisfrag->nStaticExits++; )
countlir_x();
// Generate the side exit branch on the main trace.
NIns *exit = asm_exit(ins);
JMP(exit);
}
void Assembler::asm_xcc(LInsp ins)
{
LIns* cond = ins->oprnd1();
if (cond->isconst()) {
if ((ins->isop(LIR_xt) && !cond->imm32()) || (ins->isop(LIR_xf) && cond->imm32())) {
// guard never taken, not needed
} else {
asm_x(ins); // guard always taken
}
return;
}
verbose_only( _thisfrag->nStaticExits++; )
countlir_xcc();
// We only support cmp with guard right now, also assume it is 'close'
// and only emit the branch.
NIns* exit = asm_exit(ins); // does intersectRegisterState()
asm_branch(ins->opcode() == LIR_xf, cond, exit);
}
void Assembler::gen(LirFilter* reader)
{
NanoAssert(_thisfrag->nStaticExits == 0);
@ -1302,19 +1374,19 @@ namespace nanojit
evictAllActiveRegs();
break;
case LIR_live:
case LIR_flive:
CASE64(LIR_qlive:) {
case LIR_livel:
case LIR_lived:
CASE64(LIR_liveq:) {
countlir_live();
LInsp op1 = ins->oprnd1();
// alloca's are meant to live until the point of the LIR_live instruction, marking
// allocp's are meant to live until the point of the LIR_livep instruction, marking
// other expressions as live ensures that they remain so at loop bottoms.
// alloca areas require special treatment because they are accessed indirectly and
// the indirect accesses are invisible to the assembler, other than via LIR_live.
// allocp areas require special treatment because they are accessed indirectly and
// the indirect accesses are invisible to the assembler, other than via LIR_livep.
// other expression results are only accessed directly in ways that are visible to
// the assembler, so extending those expression's lifetimes past the last loop edge
// isn't necessary.
if (op1->isop(LIR_alloc)) {
if (op1->isop(LIR_allocp)) {
findMemFor(op1);
} else {
pending_lives.add(ins);
@ -1322,9 +1394,9 @@ namespace nanojit
break;
}
case LIR_ret:
case LIR_fret:
CASE64(LIR_qret:) {
case LIR_retl:
case LIR_retd:
CASE64(LIR_retq:) {
countlir_ret();
asm_ret(ins);
break;
@ -1332,46 +1404,42 @@ namespace nanojit
// Allocate some stack space. The value of this instruction
// is the address of the stack space.
case LIR_alloc: {
case LIR_allocp: {
countlir_alloc();
NanoAssert(ins->isInAr());
if (ins->isInReg()) {
Register r = ins->getReg();
asm_restore(ins, r);
_allocator.retire(r);
ins->clearReg();
}
if (ins->isInReg())
evict(ins);
freeResourcesOf(ins);
break;
}
case LIR_int:
case LIR_imml:
{
countlir_imm();
asm_immi(ins);
break;
}
#ifdef NANOJIT_64BIT
case LIR_quad:
case LIR_immq:
{
countlir_imm();
asm_immq(ins);
break;
}
#endif
case LIR_float:
case LIR_immd:
{
countlir_imm();
asm_immf(ins);
break;
}
case LIR_param:
case LIR_paramp:
{
countlir_param();
asm_param(ins);
break;
}
#if NJ_SOFTFLOAT_SUPPORTED
case LIR_callh:
case LIR_hcalll:
{
// return result of quad-call in register
deprecated_prepResultReg(ins, rmask(retRegs[1]));
@ -1379,53 +1447,53 @@ namespace nanojit
findSpecificRegFor(ins->oprnd1(), retRegs[0]);
break;
}
case LIR_qlo:
case LIR_dlo2l:
{
countlir_qlo();
asm_qlo(ins);
break;
}
case LIR_qhi:
case LIR_dhi2l:
{
countlir_qhi();
asm_qhi(ins);
break;
}
case LIR_qjoin:
case LIR_ll2d:
{
countlir_qjoin();
asm_qjoin(ins);
break;
}
#endif
CASE64(LIR_qcmov:)
case LIR_cmov:
CASE64(LIR_cmovq:)
case LIR_cmovl:
{
countlir_cmov();
asm_cmov(ins);
break;
}
case LIR_ldzb:
case LIR_ldzs:
case LIR_ldsb:
case LIR_ldss:
case LIR_ld:
case LIR_ldub2ul:
case LIR_lduw2ul:
case LIR_ldb2l:
case LIR_ldw2l:
case LIR_ldl:
{
countlir_ld();
asm_load32(ins);
break;
}
case LIR_ld32f:
case LIR_ldf:
case LIR_lds2d:
case LIR_ldd:
CASE64(LIR_ldq:)
{
countlir_ldq();
asm_load64(ins);
break;
}
case LIR_neg:
case LIR_not:
case LIR_negl:
case LIR_notl:
{
countlir_alu();
asm_neg_not(ins);
@ -1433,12 +1501,12 @@ namespace nanojit
}
#if defined NANOJIT_64BIT
case LIR_qiadd:
case LIR_qiand:
case LIR_qilsh:
case LIR_qursh:
case LIR_qirsh:
case LIR_qior:
case LIR_addq:
case LIR_andq:
case LIR_lshq:
case LIR_rshuq:
case LIR_rshq:
case LIR_orq:
case LIR_qxor:
{
asm_qbinop(ins);
@ -1446,92 +1514,92 @@ namespace nanojit
}
#endif
case LIR_add:
case LIR_sub:
case LIR_mul:
case LIR_and:
case LIR_or:
case LIR_xor:
case LIR_lsh:
case LIR_rsh:
case LIR_ush:
CASE86(LIR_div:)
CASE86(LIR_mod:)
case LIR_addl:
case LIR_subl:
case LIR_mull:
case LIR_andl:
case LIR_orl:
case LIR_xorl:
case LIR_lshl:
case LIR_rshl:
case LIR_rshul:
CASE86(LIR_divl:)
CASE86(LIR_modl:)
{
countlir_alu();
asm_arith(ins);
break;
}
case LIR_fneg:
case LIR_negd:
{
countlir_fpu();
asm_fneg(ins);
break;
}
case LIR_fadd:
case LIR_fsub:
case LIR_fmul:
case LIR_fdiv:
case LIR_addd:
case LIR_subd:
case LIR_muld:
case LIR_divd:
{
countlir_fpu();
asm_fop(ins);
break;
}
case LIR_i2f:
case LIR_l2d:
{
countlir_fpu();
asm_i2f(ins);
break;
}
case LIR_u2f:
case LIR_ul2d:
{
countlir_fpu();
asm_u2f(ins);
break;
}
case LIR_f2i:
case LIR_d2l:
{
countlir_fpu();
asm_f2i(ins);
break;
}
#ifdef NANOJIT_64BIT
case LIR_i2q:
case LIR_u2q:
case LIR_l2q:
case LIR_ul2uq:
{
countlir_alu();
asm_promote(ins);
break;
}
case LIR_q2i:
case LIR_q2l:
{
countlir_alu();
asm_q2i(ins);
break;
}
#endif
case LIR_stb:
case LIR_sts:
case LIR_sti:
case LIR_stl2b:
case LIR_stl2w:
case LIR_stl:
{
countlir_st();
asm_store32(op, ins->oprnd1(), ins->disp(), ins->oprnd2());
break;
}
case LIR_st32f:
case LIR_stfi:
CASE64(LIR_stqi:)
case LIR_std2s:
case LIR_std:
CASE64(LIR_stq:)
{
countlir_stq();
LIns* value = ins->oprnd1();
LIns* base = ins->oprnd2();
int dr = ins->disp();
#if NJ_SOFTFLOAT_SUPPORTED
if (value->isop(LIR_qjoin) && op == LIR_stfi)
if (value->isop(LIR_ll2d) && op == LIR_std)
{
// This is correct for little-endian only.
asm_store32(LIR_sti, value->oprnd1(), dr, base);
asm_store32(LIR_sti, value->oprnd2(), dr+4, base);
asm_store32(LIR_stl, value->oprnd1(), dr, base);
asm_store32(LIR_stl, value->oprnd2(), dr+4, base);
}
else
#endif
@ -1542,65 +1610,13 @@ namespace nanojit
}
case LIR_j:
{
countlir_jmp();
LInsp to = ins->getTarget();
LabelState *label = _labels.get(to);
// the jump is always taken so whatever register state we
// have from downstream code, is irrelevant to code before
// this jump. so clear it out. we will pick up register
// state from the jump target, if we have seen that label.
releaseRegisters();
if (label && label->addr) {
// forward jump - pick up register state from target.
unionRegisterState(label->regs);
JMP(label->addr);
}
else {
// backwards jump
handleLoopCarriedExprs(pending_lives);
if (!label) {
// save empty register state at loop header
_labels.add(to, 0, _allocator);
}
else {
intersectRegisterState(label->regs);
}
JMP(0);
_patches.put(_nIns, to);
}
asm_jmp(ins, pending_lives);
break;
}
case LIR_jt:
case LIR_jf:
{
countlir_jcc();
LInsp to = ins->getTarget();
LIns* cond = ins->oprnd1();
LabelState *label = _labels.get(to);
if (label && label->addr) {
// forward jump to known label. need to merge with label's register state.
unionRegisterState(label->regs);
asm_branch(op == LIR_jf, cond, label->addr);
}
else {
// back edge.
handleLoopCarriedExprs(pending_lives);
if (!label) {
// evict all registers, most conservative approach.
evictAllActiveRegs();
_labels.add(to, 0, _allocator);
}
else {
// evict all registers, most conservative approach.
intersectRegisterState(label->regs);
}
NIns *branch = asm_branch(op == LIR_jf, cond, 0);
_patches.put(branch,to);
}
asm_jcc(ins, pending_lives);
break;
}
#if NJ_JTBL_SUPPORTED
case LIR_jtbl:
@ -1704,27 +1720,16 @@ namespace nanojit
#endif
case LIR_xt:
case LIR_xf:
{
verbose_only( _thisfrag->nStaticExits++; )
countlir_xcc();
// we only support cmp with guard right now, also assume it is 'close' and only emit the branch
NIns* exit = asm_exit(ins); // does intersectRegisterState()
LIns* cond = ins->oprnd1();
asm_branch(op == LIR_xf, cond, exit);
asm_xcc(ins);
break;
}
case LIR_x:
{
verbose_only( _thisfrag->nStaticExits++; )
countlir_x();
// generate the side exit branch on the main trace.
NIns *exit = asm_exit(ins);
JMP( exit );
asm_x(ins);
break;
}
case LIR_addxov:
case LIR_subxov:
case LIR_mulxov:
case LIR_addxovl:
case LIR_subxovl:
case LIR_mulxovl:
{
verbose_only( _thisfrag->nStaticExits++; )
countlir_xcc();
@ -1735,35 +1740,35 @@ namespace nanojit
break;
}
case LIR_feq:
case LIR_fle:
case LIR_flt:
case LIR_fgt:
case LIR_fge:
case LIR_eqd:
case LIR_led:
case LIR_ltd:
case LIR_gtd:
case LIR_ged:
{
countlir_fpu();
asm_fcond(ins);
break;
}
case LIR_eq:
case LIR_le:
case LIR_lt:
case LIR_gt:
case LIR_ge:
case LIR_ult:
case LIR_ule:
case LIR_ugt:
case LIR_uge:
case LIR_eql:
case LIR_lel:
case LIR_ltl:
case LIR_gtl:
case LIR_gel:
case LIR_ltul:
case LIR_leul:
case LIR_gtul:
case LIR_geul:
#ifdef NANOJIT_64BIT
case LIR_qeq:
case LIR_qle:
case LIR_qlt:
case LIR_qgt:
case LIR_qge:
case LIR_qult:
case LIR_qule:
case LIR_qugt:
case LIR_quge:
case LIR_eqq:
case LIR_leq:
case LIR_ltq:
case LIR_gtq:
case LIR_geq:
case LIR_ltuq:
case LIR_leuq:
case LIR_gtuq:
case LIR_geuq:
#endif
{
countlir_alu();
@ -1771,11 +1776,11 @@ namespace nanojit
break;
}
case LIR_fcall:
case LIR_calld:
#ifdef NANOJIT_64BIT
case LIR_qcall:
case LIR_callq:
#endif
case LIR_icall:
case LIR_calll:
{
countlir_call();
asm_call(ins);
@ -1840,7 +1845,7 @@ namespace nanojit
}
#if defined NANOJIT_IA32 || defined NANOJIT_X64
else if (ins->isop(LIR_mod)) {
else if (ins->isop(LIR_modl)) {
// There's a similar case when a div feeds into a mod.
outputf(" %s # codegen'd with the mod",
printer->formatIns(&b, ins->oprnd1()));
@ -1930,7 +1935,7 @@ namespace nanojit
findMemFor(op1);
}
if (!op1->isImmAny())
findRegFor(op1, ins->isop(LIR_flive) ? FpRegs : GpRegs);
findRegFor(op1, ins->isop(LIR_lived) ? FpRegs : GpRegs);
}
// clear this list since we have now dealt with those lifetimes. extending
@ -1970,7 +1975,7 @@ namespace nanojit
RefBuf b;
const char* n = _thisfrag->lirbuf->printer->formatRef(&b, ins);
if (ins->isop(LIR_param) && ins->paramKind()==1 &&
if (ins->isop(LIR_paramp) && ins->paramKind()==1 &&
r == Assembler::savedRegs[ins->paramArg()])
{
// dont print callee-saved regs that arent used
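
The bulk of this Assembler.cpp hunk is a mechanical LIR opcode rename (LIR_alloc to LIR_allocp, LIR_int to LIR_imml, LIR_fcall to LIR_calld, and so on) plus removal of the PERFM stats counters, but it also factors the bodies of the jump and guard cases out of the big gen() switch into asm_jmp(), asm_jcc(), asm_x(), and asm_xcc(), with asm_jcc/asm_xcc folding constant conditions ("jmp never taken, not needed"). A toy stand-alone dispatcher showing that extraction pattern; it is not nanojit and the opcode set is invented:

#include <cstdio>

enum Opcode { OP_JMP, OP_JCC, OP_EXIT };

struct Ins { Opcode op; bool cond; };

// Per-opcode helpers keep the dispatcher's switch down to one call per case.
static void asm_jmp(const Ins&)  { std::puts("emit unconditional jump"); }
static void asm_jcc(const Ins& i) {
    if (!i.cond) {
        std::puts("branch never taken, not emitted");   // constant-condition case
        return;
    }
    std::puts("emit conditional branch");
}
static void asm_exit(const Ins&) { std::puts("emit side exit"); }

static void gen(const Ins* ins, int n) {
    for (int k = 0; k < n; k++) {
        switch (ins[k].op) {                 // each case was an inline block before
          case OP_JMP:  asm_jmp(ins[k]);  break;
          case OP_JCC:  asm_jcc(ins[k]);  break;
          case OP_EXIT: asm_exit(ins[k]); break;
        }
    }
}

int main() {
    Ins prog[3] = { { OP_JCC, true }, { OP_JCC, false }, { OP_EXIT, false } };
    gen(prog, 3);
    return 0;
}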

View File

@ -68,8 +68,8 @@ namespace nanojit
// - 'entry' records the state of the native machine stack at particular
// points during assembly. Each entry represents four bytes.
//
// - Parts of the stack can be allocated by LIR_alloc, in which case each
// slot covered by the allocation contains a pointer to the LIR_alloc
// - Parts of the stack can be allocated by LIR_allocp, in which case each
// slot covered by the allocation contains a pointer to the LIR_allocp
// LIns.
//
// - The stack also holds spilled values, in which case each slot holding
@ -88,7 +88,7 @@ namespace nanojit
// * An LIns can appear in at most one contiguous sequence of slots in
// AR, and the length of that sequence depends on the opcode (1 slot
// for instructions producing 32-bit values, 2 slots for instructions
// producing 64-bit values, N slots for LIR_alloc).
// producing 64-bit values, N slots for LIR_allocp).
//
// * An LIns named by 'entry[i]' must have an in-use reservation with
// arIndex==i (or an 'i' indexing the start of the same contiguous
@ -153,7 +153,7 @@ namespace nanojit
inline /*static*/ uint32_t AR::nStackSlotsFor(LIns* ins)
{
uint32_t n = 0;
if (ins->isop(LIR_alloc)) {
if (ins->isop(LIR_allocp)) {
n = ins->size() >> 2;
} else {
switch (ins->retType()) {
@ -181,25 +181,6 @@ namespace nanojit
#endif
#endif
struct Stats
{
counter_define(steals;)
counter_define(remats;)
counter_define(spills;)
counter_define(native;)
counter_define(exitnative;)
int32_t pages;
NIns* codeStart;
NIns* codeExitStart;
DECLARE_PLATFORM_STATS()
#ifdef __GNUC__
// inexplicably, gnuc gives padding/alignment warnings without this. pacify it.
bool pad[4];
#endif
};
// error codes
enum AssmError
{
@ -250,7 +231,7 @@ namespace nanojit
* as we generate machine code. As part of the prologue, we issue
* a stack adjustment instruction and then later patch the adjustment
* value. Temporary values can be placed into the AR as method calls
* are issued. Also LIR_alloc instructions will consume space.
* are issued. Also LIR_allocp instructions will consume space.
*/
class Assembler
{
@ -323,8 +304,6 @@ namespace nanojit
CodeList* codeList; // finished blocks of code.
private:
Stats _stats;
void gen(LirFilter* toCompile);
NIns* genPrologue();
NIns* genEpilogue();
@ -367,7 +346,10 @@ namespace nanojit
void codeAlloc(NIns *&start, NIns *&end, NIns *&eip
verbose_only(, size_t &nBytes));
bool canRemat(LIns*);
// These instructions don't have to be saved & reloaded to spill,
// they can just be recalculated cheaply.
static bool canRemat(LIns*);
bool deprecated_isKnownReg(Register r) {
return r != deprecated_UnknownReg;
@ -425,6 +407,10 @@ namespace nanojit
verbose_only( void asm_inc_m32(uint32_t*); )
void asm_mmq(Register rd, int dd, Register rs, int ds);
void asm_jmp(LInsp ins, InsList& pending_lives);
void asm_jcc(LInsp ins, InsList& pending_lives);
void asm_x(LInsp ins);
void asm_xcc(LInsp ins);
NIns* asm_exit(LInsp guard);
NIns* asm_leave_trace(LInsp guard);
void asm_store32(LOpcode op, LIns *val, int d, LIns *base);

View File

@ -210,41 +210,6 @@ namespace nanojit
debug_only(sanity_check();)
}
void CodeAlloc::sweep() {
debug_only(sanity_check();)
// Pass #1: remove fully-coalesced blocks from availblocks.
CodeList** prev = &availblocks;
for (CodeList* ab = availblocks; ab != 0; ab = *prev) {
NanoAssert(ab->higher != 0);
NanoAssert(ab->isFree);
if (!ab->higher->higher && !ab->lower) {
*prev = ab->next;
debug_only(ab->next = 0;)
} else {
prev = &ab->next;
}
}
// Pass #2: remove same blocks from heapblocks, and free them.
prev = &heapblocks;
for (CodeList* hb = heapblocks; hb != 0; hb = *prev) {
NanoAssert(hb->lower != 0);
if (!hb->lower->lower && hb->lower->isFree) {
NanoAssert(!hb->lower->next);
// whole page is unused
void* mem = hb->lower;
*prev = hb->next;
_nvprof("free page",1);
markBlockWrite(firstBlock(hb));
freeCodeChunk(mem, bytesPerAlloc);
totalAllocated -= bytesPerAlloc;
} else {
prev = &hb->next;
}
}
}
void CodeAlloc::freeAll(CodeList* &code) {
while (code) {
CodeList *b = removeBlock(code);
@ -463,52 +428,10 @@ extern "C" void sync_instruction_memory(caddr_t v, u_int len);
}
}
size_t CodeAlloc::size(const CodeList* blocks) {
size_t size = 0;
for (const CodeList* b = blocks; b != 0; b = b->next)
size += int((uintptr_t)b->end - (uintptr_t)b);
return size;
}
size_t CodeAlloc::size() {
return totalAllocated;
}
bool CodeAlloc::contains(const CodeList* blocks, NIns* p) {
for (const CodeList *b = blocks; b != 0; b = b->next) {
_nvprof("block contains",1);
if (b->contains(p))
return true;
}
return false;
}
void CodeAlloc::moveAll(CodeList* &blocks, CodeList* &other) {
if (other) {
CodeList* last = other;
while (last->next)
last = last->next;
last->next = blocks;
blocks = other;
other = 0;
}
}
// figure out whether this is a pointer into allocated/free code,
// or something we don't manage.
CodeAlloc::CodePointerKind CodeAlloc::classifyPtr(NIns *p) {
for (CodeList* hb = heapblocks; hb != 0; hb = hb->next) {
CodeList* b = firstBlock(hb);
if (!containsPtr((NIns*)b, (NIns*)((uintptr_t)b + bytesPerAlloc), p))
continue;
do {
if (b->contains(p))
return b->isFree ? kFree : kUsed;
} while ((b = b->higher) != 0);
}
return kUnknown;
}
// check that all block neighbors are correct
#ifdef _DEBUG
void CodeAlloc::sanity_check() {

View File

@ -42,11 +42,6 @@
namespace nanojit
{
/** return true if ptr is in the range [start, end] */
inline bool containsPtr(const NIns* start, const NIns* end, const NIns* ptr) {
return ptr >= start && ptr <= end;
}
/**
* CodeList is a linked list of non-contiguous blocks of code. Clients use CodeList*
* to point to a list, and each CodeList instance tracks a single contiguous
@ -93,9 +88,6 @@ namespace nanojit
/** return the whole size of this block including overhead */
size_t blockSize() const { return uintptr_t(end) - uintptr_t(this); }
/** return true if just this block contains p */
bool contains(NIns* p) const { return containsPtr(&code[0], end, p); }
};
/**
@ -196,31 +188,12 @@ namespace nanojit
/** add a block previously returned by alloc(), to code */
static void add(CodeList* &code, NIns* start, NIns* end);
/** move all the code in list "from" to list "to", and leave from empty. */
static void moveAll(CodeList* &to, CodeList* &from);
/** return true if any block in list "code" contains the code pointer p */
static bool contains(const CodeList* code, NIns* p);
/** return the number of bytes in all the code blocks in "code", including block overhead */
static size_t size(const CodeList* code);
/** return the total number of bytes held by this CodeAlloc. */
size_t size();
/** print out stats about heap usage */
void logStats();
enum CodePointerKind {
kUnknown, kFree, kUsed
};
/** determine whether the given address is not code, or is allocated or free */
CodePointerKind classifyPtr(NIns *p);
/** return any completely empty pages */
void sweep();
/** protect all code in this code alloc */
void markAllExec();

File diff suppressed because it is too large

View File

@ -145,8 +145,8 @@ namespace nanojit
LIR_ldf = LIR_ldd,
LIR_ld32f = LIR_lds2d,
// LIR_stb
LIR_sts = LIR_stw,
LIR_stb = LIR_stl2b,
LIR_sts = LIR_stl2w,
LIR_sti = LIR_stl,
#ifdef NANOJIT_64BIT
LIR_stqi = LIR_stq,
@ -270,64 +270,64 @@ namespace nanojit
LIR_callh = LIR_hcalll,
#endif
LIR_param = PTR_SIZE(LIR_iparam, LIR_qparam),
LIR_param = LIR_paramp,
LIR_alloc = PTR_SIZE(LIR_ialloc, LIR_qalloc),
LIR_alloc = LIR_allocp,
LIR_pret = PTR_SIZE(LIR_ret, LIR_qret),
LIR_pret = LIR_retp,
LIR_plive = PTR_SIZE(LIR_live, LIR_qlive),
LIR_plive = LIR_livep,
LIR_stpi = PTR_SIZE(LIR_sti, LIR_stqi),
LIR_stpi = LIR_stp,
LIR_pcall = PTR_SIZE(LIR_icall, LIR_qcall),
LIR_pcall = LIR_callp,
LIR_peq = PTR_SIZE(LIR_eq, LIR_qeq),
LIR_plt = PTR_SIZE(LIR_lt, LIR_qlt),
LIR_pgt = PTR_SIZE(LIR_gt, LIR_qgt),
LIR_ple = PTR_SIZE(LIR_le, LIR_qle),
LIR_pge = PTR_SIZE(LIR_ge, LIR_qge),
LIR_pult = PTR_SIZE(LIR_ult, LIR_qult),
LIR_pugt = PTR_SIZE(LIR_ugt, LIR_qugt),
LIR_pule = PTR_SIZE(LIR_ule, LIR_qule),
LIR_puge = PTR_SIZE(LIR_uge, LIR_quge),
LIR_piadd = PTR_SIZE(LIR_add, LIR_qiadd),
LIR_peq = LIR_eqp,
LIR_plt = LIR_ltp,
LIR_pgt = LIR_gtp,
LIR_ple = LIR_lep,
LIR_pge = LIR_gep,
LIR_pult = LIR_ltup,
LIR_pugt = LIR_gtup,
LIR_pule = LIR_leup,
LIR_puge = LIR_geup,
LIR_piadd = LIR_addp,
LIR_piand = PTR_SIZE(LIR_and, LIR_qiand),
LIR_pior = PTR_SIZE(LIR_or, LIR_qior),
LIR_pxor = PTR_SIZE(LIR_xor, LIR_qxor),
LIR_piand = LIR_andp,
LIR_pior = LIR_orp,
LIR_pxor = LIR_xorp,
LIR_pilsh = PTR_SIZE(LIR_lsh, LIR_qilsh),
LIR_pirsh = PTR_SIZE(LIR_rsh, LIR_qirsh),
LIR_pursh = PTR_SIZE(LIR_ush, LIR_qursh),
LIR_pilsh = LIR_lshp,
LIR_pirsh = LIR_rshp,
LIR_pursh = LIR_rshup,
LIR_pcmov = PTR_SIZE(LIR_cmov, LIR_qcmov)
LIR_pcmov = LIR_cmovp
};
// 32-bit integer comparisons must be contiguous, as must 64-bit integer
// comparisons and 64-bit float comparisons.
NanoStaticAssert(LIR_eq + 1 == LIR_lt &&
LIR_eq + 2 == LIR_gt &&
LIR_eq + 3 == LIR_le &&
LIR_eq + 4 == LIR_ge &&
LIR_eq + 5 == LIR_ult &&
LIR_eq + 6 == LIR_ugt &&
LIR_eq + 7 == LIR_ule &&
LIR_eq + 8 == LIR_uge);
NanoStaticAssert(LIR_eql + 1 == LIR_ltl &&
LIR_eql + 2 == LIR_gtl &&
LIR_eql + 3 == LIR_lel &&
LIR_eql + 4 == LIR_gel &&
LIR_eql + 5 == LIR_ltul &&
LIR_eql + 6 == LIR_gtul &&
LIR_eql + 7 == LIR_leul &&
LIR_eql + 8 == LIR_geul);
#ifdef NANOJIT_64BIT
NanoStaticAssert(LIR_qeq + 1 == LIR_qlt &&
LIR_qeq + 2 == LIR_qgt &&
LIR_qeq + 3 == LIR_qle &&
LIR_qeq + 4 == LIR_qge &&
LIR_qeq + 5 == LIR_qult &&
LIR_qeq + 6 == LIR_qugt &&
LIR_qeq + 7 == LIR_qule &&
LIR_qeq + 8 == LIR_quge);
NanoStaticAssert(LIR_eqq + 1 == LIR_ltq &&
LIR_eqq + 2 == LIR_gtq &&
LIR_eqq + 3 == LIR_leq &&
LIR_eqq + 4 == LIR_geq &&
LIR_eqq + 5 == LIR_ltuq &&
LIR_eqq + 6 == LIR_gtuq &&
LIR_eqq + 7 == LIR_leuq &&
LIR_eqq + 8 == LIR_geuq);
#endif
NanoStaticAssert(LIR_feq + 1 == LIR_flt &&
LIR_feq + 2 == LIR_fgt &&
LIR_feq + 3 == LIR_fle &&
LIR_feq + 4 == LIR_fge);
NanoStaticAssert(LIR_eqd + 1 == LIR_ltd &&
LIR_eqd + 2 == LIR_gtd &&
LIR_eqd + 3 == LIR_led &&
LIR_eqd + 4 == LIR_ged);
// Various opcodes must be changeable to their opposite with op^1
// (although we use invertXyz() when possible, ie. outside static
@ -336,20 +336,20 @@ namespace nanojit
NanoStaticAssert((LIR_xt^1) == LIR_xf && (LIR_xf^1) == LIR_xt);
NanoStaticAssert((LIR_lt^1) == LIR_gt && (LIR_gt^1) == LIR_lt);
NanoStaticAssert((LIR_le^1) == LIR_ge && (LIR_ge^1) == LIR_le);
NanoStaticAssert((LIR_ult^1) == LIR_ugt && (LIR_ugt^1) == LIR_ult);
NanoStaticAssert((LIR_ule^1) == LIR_uge && (LIR_uge^1) == LIR_ule);
NanoStaticAssert((LIR_ltl^1) == LIR_gtl && (LIR_gtl^1) == LIR_ltl);
NanoStaticAssert((LIR_lel^1) == LIR_gel && (LIR_gel^1) == LIR_lel);
NanoStaticAssert((LIR_ltul^1) == LIR_gtul && (LIR_gtul^1) == LIR_ltul);
NanoStaticAssert((LIR_leul^1) == LIR_geul && (LIR_geul^1) == LIR_leul);
#ifdef NANOJIT_64BIT
NanoStaticAssert((LIR_qlt^1) == LIR_qgt && (LIR_qgt^1) == LIR_qlt);
NanoStaticAssert((LIR_qle^1) == LIR_qge && (LIR_qge^1) == LIR_qle);
NanoStaticAssert((LIR_qult^1) == LIR_qugt && (LIR_qugt^1) == LIR_qult);
NanoStaticAssert((LIR_qule^1) == LIR_quge && (LIR_quge^1) == LIR_qule);
NanoStaticAssert((LIR_ltq^1) == LIR_gtq && (LIR_gtq^1) == LIR_ltq);
NanoStaticAssert((LIR_leq^1) == LIR_geq && (LIR_geq^1) == LIR_leq);
NanoStaticAssert((LIR_ltuq^1) == LIR_gtuq && (LIR_gtuq^1) == LIR_ltuq);
NanoStaticAssert((LIR_leuq^1) == LIR_geuq && (LIR_geuq^1) == LIR_leuq);
#endif
NanoStaticAssert((LIR_flt^1) == LIR_fgt && (LIR_fgt^1) == LIR_flt);
NanoStaticAssert((LIR_fle^1) == LIR_fge && (LIR_fge^1) == LIR_fle);
NanoStaticAssert((LIR_ltd^1) == LIR_gtd && (LIR_gtd^1) == LIR_ltd);
NanoStaticAssert((LIR_led^1) == LIR_ged && (LIR_ged^1) == LIR_led);
struct GuardRecord;
@ -566,39 +566,39 @@ namespace nanojit
inline bool isRetOpcode(LOpcode op) {
return
#if defined NANOJIT_64BIT
op == LIR_qret ||
op == LIR_retq ||
#endif
op == LIR_ret || op == LIR_fret;
op == LIR_retl || op == LIR_retd;
}
inline bool isCmovOpcode(LOpcode op) {
return
#if defined NANOJIT_64BIT
op == LIR_qcmov ||
op == LIR_cmovq ||
#endif
op == LIR_cmov;
op == LIR_cmovl;
}
inline bool isICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_uge;
return LIR_eql <= op && op <= LIR_geul;
}
inline bool isSICmpOpcode(LOpcode op) {
return LIR_eq <= op && op <= LIR_ge;
return LIR_eql <= op && op <= LIR_gel;
}
inline bool isUICmpOpcode(LOpcode op) {
return LIR_eq == op || (LIR_ult <= op && op <= LIR_uge);
return LIR_eql == op || (LIR_ltul <= op && op <= LIR_geul);
}
#ifdef NANOJIT_64BIT
inline bool isQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_quge;
return LIR_eqq <= op && op <= LIR_geuq;
}
inline bool isSQCmpOpcode(LOpcode op) {
return LIR_qeq <= op && op <= LIR_qge;
return LIR_eqq <= op && op <= LIR_geq;
}
inline bool isUQCmpOpcode(LOpcode op) {
return LIR_qeq == op || (LIR_qult <= op && op <= LIR_quge);
return LIR_eqq == op || (LIR_ltuq <= op && op <= LIR_geuq);
}
#endif
inline bool isFCmpOpcode(LOpcode op) {
return LIR_feq <= op && op <= LIR_fge;
return LIR_eqd <= op && op <= LIR_ged;
}
inline LOpcode invertCondJmpOpcode(LOpcode op) {
@ -625,14 +625,14 @@ namespace nanojit
}
inline LOpcode getCallOpcode(const CallInfo* ci) {
LOpcode op = LIR_pcall;
LOpcode op = LIR_callp;
switch (ci->returnType()) {
case ARGTYPE_V: op = LIR_pcall; break;
case ARGTYPE_V: op = LIR_callp; break;
case ARGTYPE_I:
case ARGTYPE_U: op = LIR_icall; break;
case ARGTYPE_F: op = LIR_fcall; break;
case ARGTYPE_U: op = LIR_calll; break;
case ARGTYPE_F: op = LIR_calld; break;
#ifdef NANOJIT_64BIT
case ARGTYPE_Q: op = LIR_qcall; break;
case ARGTYPE_Q: op = LIR_callq; break;
#endif
default: NanoAssert(0); break;
}
@ -922,7 +922,7 @@ namespace nanojit
inline uint64_t imm64() const;
inline double imm64f() const;
// For LIR_alloc.
// For LIR_allocp.
inline int32_t size() const;
inline void setSize(int32_t nbytes);
@ -1000,11 +1000,11 @@ namespace nanojit
return isRetOpcode(opcode());
}
bool isLive() const {
return isop(LIR_live) ||
return isop(LIR_livel) ||
#if defined NANOJIT_64BIT
isop(LIR_qlive) ||
isop(LIR_liveq) ||
#endif
isop(LIR_flive);
isop(LIR_lived);
}
bool isCmp() const {
LOpcode op = opcode();
@ -1015,11 +1015,11 @@ namespace nanojit
isFCmpOpcode(op);
}
bool isCall() const {
return isop(LIR_icall) ||
return isop(LIR_calll) ||
#if defined NANOJIT_64BIT
isop(LIR_qcall) ||
isop(LIR_callq) ||
#endif
isop(LIR_fcall);
isop(LIR_calld);
}
bool isCmov() const {
return isCmovOpcode(opcode());
@ -1033,11 +1033,11 @@ namespace nanojit
bool isGuard() const {
return isop(LIR_x) || isop(LIR_xf) || isop(LIR_xt) ||
isop(LIR_xbarrier) || isop(LIR_xtbl) ||
isop(LIR_addxov) || isop(LIR_subxov) || isop(LIR_mulxov);
isop(LIR_addxovl) || isop(LIR_subxovl) || isop(LIR_mulxovl);
}
// True if the instruction is a 32-bit integer immediate.
bool isconst() const {
return isop(LIR_int);
return isop(LIR_imml);
}
// True if the instruction is a 32-bit integer immediate and
// has the value 'val' when treated as a 32-bit signed integer.
@ -1047,7 +1047,7 @@ namespace nanojit
#ifdef NANOJIT_64BIT
// True if the instruction is a 64-bit integer immediate.
bool isconstq() const {
return isop(LIR_quad);
return isop(LIR_immq);
}
#endif
// True if the instruction is a pointer-sized integer immediate.
@ -1061,7 +1061,7 @@ namespace nanojit
}
// True if the instruction is a 64-bit float immediate.
bool isconstf() const {
return isop(LIR_float);
return isop(LIR_immd);
}
// True if the instruction is a 64-bit integer or float immediate.
bool isconstqf() const {
@ -1155,7 +1155,7 @@ namespace nanojit
LIns* getLIns() { return &ins; };
};
// 1-operand form. Used for LIR_ret, unary arithmetic/logic ops, etc.
// 1-operand form. Used for LIR_retl, unary arithmetic/logic ops, etc.
class LInsOp1
{
private:
@ -1225,7 +1225,7 @@ namespace nanojit
LIns* getLIns() { return &ins; };
};
// Used for LIR_sti and LIR_stqi.
// Used for LIR_stl and LIR_stq.
class LInsSti
{
private:
@ -1277,7 +1277,7 @@ namespace nanojit
LIns* getLIns() { return &ins; };
};
// Used for LIR_iparam, LIR_qparam.
// Used for LIR_paramp.
class LInsP
{
private:
@ -1292,7 +1292,7 @@ namespace nanojit
LIns* getLIns() { return &ins; };
};
// Used for LIR_int and LIR_alloc.
// Used for LIR_imml and LIR_allocp.
class LInsI
{
private:
@ -1306,7 +1306,7 @@ namespace nanojit
LIns* getLIns() { return &ins; };
};
// Used for LIR_quad and LIR_float.
// Used for LIR_immq and LIR_immd.
class LInsN64
{
private:
@ -1428,7 +1428,7 @@ namespace nanojit
void LIns::initLInsP(int32_t arg, int32_t kind) {
clearReg();
clearArIndex();
lastWord.opcode = LIR_param;
lastWord.opcode = LIR_paramp;
NanoAssert(isU8(arg) && isU8(kind));
toLInsP()->arg = arg;
toLInsP()->kind = kind;
@ -1506,9 +1506,9 @@ namespace nanojit
case LIR_xbarrier:
return (GuardRecord*)oprnd2();
case LIR_addxov:
case LIR_subxov:
case LIR_mulxov:
case LIR_addxovl:
case LIR_subxovl:
case LIR_mulxovl:
return (GuardRecord*)oprnd3();
default:
@ -1540,8 +1540,8 @@ namespace nanojit
return toLInsSk()->prevLIns;
}
inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_param)); return toLInsP()->arg; }
inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_param)); return toLInsP()->kind; }
inline uint8_t LIns::paramArg() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->arg; }
inline uint8_t LIns::paramKind() const { NanoAssert(isop(LIR_paramp)); return toLInsP()->kind; }
inline int32_t LIns::imm32() const { NanoAssert(isconst()); return toLInsI()->imm32; }
@ -1562,12 +1562,12 @@ namespace nanojit
}
int32_t LIns::size() const {
NanoAssert(isop(LIR_alloc));
NanoAssert(isop(LIR_allocp));
return toLInsI()->imm32 << 2;
}
void LIns::setSize(int32_t nbytes) {
NanoAssert(isop(LIR_alloc));
NanoAssert(isop(LIR_allocp));
NanoAssert(nbytes > 0);
toLInsI()->imm32 = (nbytes+3)>>2; // # of required 32bit words
}
@ -1674,12 +1674,12 @@ namespace nanojit
// Inserts an integer comparison to 0
LIns* ins_eq0(LIns* oprnd1) {
return ins2i(LIR_eq, oprnd1, 0);
return ins2i(LIR_eql, oprnd1, 0);
}
// Inserts a pointer comparison to 0
LIns* ins_peq0(LIns* oprnd1) {
return ins2(LIR_peq, oprnd1, insImmWord(0));
return ins2(LIR_eqp, oprnd1, insImmWord(0));
}
// Inserts a binary operation where the second operand is an
@ -1690,7 +1690,7 @@ namespace nanojit
#if NJ_SOFTFLOAT_SUPPORTED
LIns* qjoin(LInsp lo, LInsp hi) {
return ins2(LIR_qjoin, lo, hi);
return ins2(LIR_ll2d, lo, hi);
}
#endif
LIns* insImmPtr(const void *ptr) {
@ -1712,7 +1712,7 @@ namespace nanojit
// Sign-extend integers to native integers. On 32-bit this is a no-op.
LIns* ins_i2p(LIns* intIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_i2q, intIns);
return ins1(LIR_l2q, intIns);
#else
return intIns;
#endif
@ -1721,13 +1721,13 @@ namespace nanojit
// Zero-extend integers to native integers. On 32-bit this is a no-op.
LIns* ins_u2p(LIns* uintIns) {
#ifdef NANOJIT_64BIT
return ins1(LIR_u2q, uintIns);
return ins1(LIR_ul2uq, uintIns);
#else
return uintIns;
#endif
}
// Chooses LIR_sti or LIR_stqi based on size of value.
// Chooses LIR_stl or LIR_stq based on size of value.
LIns* insStorei(LIns* value, LIns* base, int32_t d, AccSet accSet);
};

View File

@ -152,8 +152,8 @@ OP_64(ldq, 22, Ld, I64, -1) // load quad
OP___(ldd, 23, Ld, F64, -1) // load double
OP___(lds2d, 24, Ld, F64, -1) // load single and extend to a double
OP___(stb, 25, Sti, Void, 0) // store byte
OP___(stw, 26, Sti, Void, 0) // store word
OP___(stl2b, 25, Sti, Void, 0) // store long truncated to byte
OP___(stl2w, 26, Sti, Void, 0) // store long truncated to word
OP___(stl, 27, Sti, Void, 0) // store long
OP_64(stq, 28, Sti, Void, 0) // store quad
OP___(std, 29, Sti, Void, 0) // store double

View File

@ -150,7 +150,6 @@ namespace nanojit {
// but only outputs if LC_Assembly is set. Also prepends the output
// with the address of the current native instruction.
#define asm_output(...) do { \
counter_increment(native); \
if (_logc->lcbits & LC_Assembly) { \
outline[0]='\0'; \
VMPI_sprintf(outline, "%010lx ", (unsigned long)_nIns); \

View File

@ -1230,15 +1230,18 @@ Assembler::asm_store32(LOpcode op, LIns *value, int dr, LIns *base)
}
}
bool
Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
}
void
Assembler::asm_restore(LInsp i, Register r)
{
if (i->isop(LIR_alloc)) {
asm_add_imm(r, FP, deprecated_disp(i));
} else if (i->isconst()) {
if (!i->deprecated_getArIndex()) {
i->deprecated_markAsClear();
}
asm_ld_imm(r, i->imm32());
}
else {
@ -1277,6 +1280,8 @@ Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
(void) pop;
(void) quad;
NanoAssert(d);
// fixme: bug 556175 this code doesn't appear to handle
// values of d outside the 12-bit range.
if (_config.arm_vfp && IsFpReg(rr)) {
if (isS8(d >> 2)) {
FSTD(rr, FP, d);

View File

@ -65,7 +65,12 @@ namespace nanojit
// only d0-d6 are actually used; we'll use d7 as s14-s15 for i2f/u2f/etc.
#define NJ_VFP_MAX_REGISTERS 8
#define NJ_MAX_REGISTERS (11 + NJ_VFP_MAX_REGISTERS)
#define NJ_MAX_STACK_ENTRY 256
// fixme: bug 556175: this can't be over 1024, because
// the ARM backend cannot support more than 12-bit displacements
// in a single load/store instruction, for spilling. see asm_spill().
#define NJ_MAX_STACK_ENTRY 1024
#define NJ_MAX_PARAMETERS 16
#define NJ_ALIGN_STACK 8

View File

@ -1133,6 +1133,11 @@ namespace nanojit
value, lirNames[value->opcode()], dr, base, lirNames[base->opcode()]);
}
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LIns *i, Register r)
{
int d;
@ -1146,8 +1151,6 @@ namespace nanojit
}
}
else if (i->isconst()) {
if (!i->deprecated_getArIndex())
i->deprecated_markAsClear();
asm_li(r, i->imm32());
}
else {

View File

@ -58,9 +58,9 @@ namespace nanojit
// Req: NJ_MAX_STACK_ENTRY is number of instructions to hold in LIR stack
#if 0
// FIXME: Inconsistent use in signed/unsigned expressions makes this generate errors
static const uint32_t NJ_MAX_STACK_ENTRY = 256;
static const uint32_t NJ_MAX_STACK_ENTRY = 4096;
#else
#define NJ_MAX_STACK_ENTRY 256
#define NJ_MAX_STACK_ENTRY 4096
#endif
static const int NJ_ALIGN_STACK = 8;

View File

@ -178,9 +178,9 @@ namespace nanojit
switch (op) {
case LIR_sti:
case LIR_stb:
// handled by mainline code below for now
break;
case LIR_stb:
case LIR_sts:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
@ -194,13 +194,27 @@ namespace nanojit
#if !PEDANTIC
if (isS16(dr)) {
STW(rs, dr, ra);
switch (op) {
case LIR_sti:
STW(rs, dr, ra);
break;
case LIR_stb:
STB(rs, dr, ra);
break;
}
return;
}
#endif
// general case store, any offset size
STWX(rs, ra, R0);
switch (op) {
case LIR_sti:
STWX(rs, ra, R0);
break;
case LIR_stb:
STBX(rs, ra, R0);
break;
}
asm_li(R0, dr);
}
@ -613,6 +627,11 @@ namespace nanojit
FMR(r, s);
}
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LIns *i, Register r) {
int d;
if (i->isop(LIR_alloc)) {
@ -620,9 +639,6 @@ namespace nanojit
ADDI(r, FP, d);
}
else if (i->isconst()) {
if (!i->deprecated_getArIndex()) {
i->deprecated_markAsClear();
}
asm_li(r, i->imm32());
}
// XXX: should really rematerialize the isconstf() and isconstq() cases

View File

@ -226,6 +226,8 @@ namespace nanojit
PPC_srawi = 0x7C000670, // shift right algebraic word immediate
PPC_srd = 0x7C000436, // shift right doubleword (zero ext)
PPC_srw = 0x7C000430, // shift right word (zero ext)
PPC_stb = 0x98000000, // store byte
PPC_stbx = 0x7C0001AE, // store byte indexed
PPC_std = 0xF8000000, // store doubleword
PPC_stdu = 0xF8000001, // store doubleword with update
PPC_stdux = 0x7C00016A, // store doubleword with update indexed
@ -437,7 +439,7 @@ namespace nanojit
#define MFCTR(r) MFSPR(r, ctr)
#define MEMd(op, r, d, a) do {\
NanoAssert(isS16(d) && (d&3)==0);\
NanoAssert(isS16(d));\
EMIT1(PPC_##op | GPR(r)<<21 | GPR(a)<<16 | uint16_t(d), "%s %s,%d(%s)", #op, gpn(r), int16_t(d), gpn(a));\
} while(0) /* no addr */
@ -463,11 +465,17 @@ namespace nanojit
#define LWZX(r, a, b) MEMx(lwzx, r, a, b)
#define LDX(r, a, b) MEMx(ldx, r, a, b)
// store word (32-bit integer)
#define STW(r, d, b) MEMd(stw, r, d, b)
#define STWU(r, d, b) MEMd(stwu, r, d, b)
#define STWX(s, a, b) MEMx(stwx, s, a, b)
#define STWUX(s, a, b) MEMux(stwux, s, a, b)
// store byte
#define STB(r, d, b) MEMd(stb, r, d, b)
#define STBX(s, a, b) MEMx(stbx, s, a, b)
// store double (64-bit float)
#define STD(r, d, b) MEMd(std, r, d, b)
#define STDU(r, d, b) MEMd(stdu, r, d, b)
#define STDX(s, a, b) MEMx(stdx, s, a, b)

View File

@ -250,6 +250,11 @@ namespace nanojit
return 0;
}
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
}
void Assembler::asm_restore(LInsp i, Register r)
{
underrunProtect(24);
@ -259,9 +264,6 @@ namespace nanojit
SET32(d, L2);
}
else if (i->isconst()) {
if (!i->deprecated_getArIndex()) {
i->deprecated_markAsClear();
}
int v = i->imm32();
SET32(v, r);
} else {
@ -278,9 +280,9 @@ namespace nanojit
{
switch (op) {
case LIR_sti:
case LIR_stb:
// handled by mainline code below for now
break;
case LIR_stb:
case LIR_sts:
NanoAssertMsg(0, "NJ_EXPANDED_LOADSTORE_SUPPORTED not yet supported for this architecture");
return;
@ -294,7 +296,14 @@ namespace nanojit
{
Register rb = getBaseReg(base, dr, GpRegs);
int c = value->imm32();
STW32(L2, dr, rb);
switch (op) {
case LIR_sti:
STW32(L2, dr, rb);
break;
case LIR_stb:
STB32(L2, dr, rb);
break;
}
SET32(c, L2);
}
else
@ -309,7 +318,14 @@ namespace nanojit
} else {
getBaseReg2(GpRegs, value, ra, GpRegs, base, rb, dr);
}
STW32(ra, dr, rb);
switch (op) {
case LIR_sti:
STW32(ra, dr, rb);
break;
case LIR_stb:
STB32(ra, dr, rb);
break;
}
}
}

View File

@ -841,6 +841,19 @@ namespace nanojit
asm_output("st %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \
} while (0)
#define STB(rd, rs2, rs1) \
do { \
Format_3_1(3, rd, 0x5, rs1, 0, rs2); \
asm_output("stb %s, [%s + %s]", gpn(rd), gpn(rs1), gpn(rs2)); \
} while (0)
#define STBI(rd, simm13, rs1) \
do { \
Format_3_1I(3, rd, 0x5, rs1, simm13); \
asm_output("stb %s, [%s + %d]", gpn(rd), gpn(rs1), simm13); \
} while (0)
#define SUBCC(rs1, rs2, rd) \
do { \
Format_3_1(2, rd, 0x14, rs1, 0, rs2); \
@ -921,6 +934,14 @@ namespace nanojit
SET32(imm32, L0); \
}
#define STB32(rd, imm32, rs1) \
if(isIMM13(imm32)) { \
STBI(rd, imm32, rs1); \
} else { \
STB(rd, L0, rs1); \
SET32(imm32, L0); \
}
#define LDUB32(rs1, imm32, rd) \
if(isIMM13(imm32)) { \
LDUBI(rs1, imm32, rd); \

View File

@ -171,9 +171,9 @@ namespace nanojit
static inline uint64_t mod_disp32(uint64_t op, Register r, Register b, int32_t d) {
NanoAssert(IsGpReg(r) && IsGpReg(b));
NanoAssert((b & 7) != 4); // using RSP or R12 as base requires SIB
if (isS8(d)) {
uint64_t mod = (((op>>24)&255)>>6); // mod bits in addressing mode: 0,1,2, or 3
if (mod == 2 && isS8(d)) {
// op is: 0x[disp32=0][mod=2:r:b][op][rex][len]
NanoAssert((((op>>24)&255)>>6) == 2); // disp32 mode
int len = oplen(op);
op = (op & ~0xff000000LL) | (0x40 | (r&7)<<3 | (b&7))<<24; // replace mod
return op<<24 | int64_t(d)<<56 | (len-3); // shrink disp, add disp8
@ -1370,6 +1370,10 @@ namespace nanojit
UCOMISD(ra, rb);
}
bool Assembler::canRemat(LIns* ins) {
return ins->isImmAny() || ins->isop(LIR_alloc);
}
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp() for details.
void Assembler::asm_restore(LIns *ins, Register r) {
@ -1378,15 +1382,12 @@ namespace nanojit
LEAQRM(r, d, FP);
}
else if (ins->isconst()) {
ins->clearReg();
asm_immi(r, ins->imm32(), /*canClobberCCs*/false);
}
else if (ins->isconstq()) {
ins->clearReg();
asm_immq(r, ins->imm64(), /*canClobberCCs*/false);
}
else if (ins->isconstf()) {
ins->clearReg();
asm_immf(r, ins->imm64(), /*canClobberCCs*/false);
}
else {

View File

@ -239,9 +239,27 @@ namespace nanojit
inline void Assembler::DIV(R r) { count_alu(); ALU(0xf7, (Register)7,(r)); asm_output("idiv edx:eax, %s",gpn(r)); }
inline void Assembler::NOT(R r) { count_alu(); ALU(0xf7, (Register)2,(r)); asm_output("not %s",gpn(r)); }
inline void Assembler::NEG(R r) { count_alu(); ALU(0xf7, (Register)3,(r)); asm_output("neg %s",gpn(r)); }
inline void Assembler::SHR(R r, R s) { count_alu(); ALU(0xd3, (Register)5,(r)); asm_output("shr %s,%s",gpn(r),gpn(s)); }
inline void Assembler::SAR(R r, R s) { count_alu(); ALU(0xd3, (Register)7,(r)); asm_output("sar %s,%s",gpn(r),gpn(s)); }
inline void Assembler::SHL(R r, R s) { count_alu(); ALU(0xd3, (Register)4,(r)); asm_output("shl %s,%s",gpn(r),gpn(s)); }
inline void Assembler::SHR(R r, R s) {
count_alu();
NanoAssert(s == ECX); (void)s;
ALU(0xd3, (Register)5,(r));
asm_output("shr %s,%s",gpn(r),gpn(s));
}
inline void Assembler::SAR(R r, R s) {
count_alu();
NanoAssert(s == ECX); (void)s;
ALU(0xd3, (Register)7,(r));
asm_output("sar %s,%s",gpn(r),gpn(s));
}
inline void Assembler::SHL(R r, R s) {
count_alu();
NanoAssert(s == ECX); (void)s;
ALU(0xd3, (Register)4,(r));
asm_output("shl %s,%s",gpn(r),gpn(s));
}
inline void Assembler::SHIFT(I32 c, R r, I32 i) {
underrunProtect(3);
@ -524,9 +542,8 @@ namespace nanojit
inline void Assembler::JCC(I32 o, NIns* t, const char* n) {
count_jcc();
underrunProtect(6);
NanoAssert(t);
intptr_t tt = (intptr_t)t - (intptr_t)_nIns;
if (isS8(tt)) {
if (t && isS8(tt)) {
_nIns -= 2;
_nIns[0] = uint8_t( 0x70 | o );
_nIns[1] = uint8_t(tt);
@ -536,7 +553,7 @@ namespace nanojit
_nIns[0] = JCC32;
_nIns[1] = (uint8_t) ( 0x80 | o );
}
asm_output("%-5s %p", n, t);
asm_output("%-5s %p", n, t); (void) n;
}
inline void Assembler::JMP_long(NIns* t) {
@ -846,7 +863,7 @@ namespace nanojit
underrunProtect(2);
ALU(0xff, 2, (r));
verbose_only(asm_output("call %s",gpn(r));)
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();)
debug_only(if (ci->returnType()==ARGTYPE_F) fpu_push();) (void)ci;
}
void Assembler::nInit(AvmCore*)
@ -1131,6 +1148,11 @@ namespace nanojit
return prefer;
}
bool Assembler::canRemat(LIns* ins)
{
return ins->isImmAny() || ins->isop(LIR_alloc);
}
// WARNING: the code generated by this function must not affect the
// condition codes. See asm_cmp().
@ -1149,11 +1171,9 @@ namespace nanojit
} else if (ins->isconst()) {
asm_immi(r, ins->imm32(), /*canClobberCCs*/false);
ins->clearReg();
} else if (ins->isconstf()) {
asm_immf(r, ins->imm64(), ins->imm64f(), /*canClobberCCs*/false);
ins->clearReg();
} else if (ins->isop(LIR_param) && ins->paramKind() == 0 &&
(arg = ins->paramArg()) >= (abi_regcount = max_abi_regs[_thisfrag->lirbuf->abi])) {
@ -1169,7 +1189,6 @@ namespace nanojit
//
int d = (arg - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
ins->clearReg();
} else {
int d = findMemFor(ins);
@ -1417,7 +1436,7 @@ namespace nanojit
}
if (branchOnFalse) {
// op == LIR_xf
// op == LIR_xf/LIR_jf
switch (condop) {
case LIR_eq: JNE(targ); break;
case LIR_lt: JNL(targ); break;
@ -1431,7 +1450,7 @@ namespace nanojit
default: NanoAssert(0); break;
}
} else {
// op == LIR_xt
// op == LIR_xt/LIR_jt
switch (condop) {
case LIR_eq: JE(targ); break;
case LIR_lt: JL(targ); break;

View File

@ -60,9 +60,10 @@ avmplus::AvmLog(char const *msg, ...) {
}
#ifdef _DEBUG
void NanoAssertFail()
{
abort();
namespace avmplus {
void AvmAssertFail(const char* /* msg */) {
abort();
}
}
#endif

View File

@ -82,12 +82,13 @@
#define _DEBUG
#endif
#define NJ_VERBOSE 1
#define NJ_PROFILE 1
#include <stdarg.h>
#endif
#ifdef _DEBUG
void NanoAssertFail();
namespace avmplus {
void AvmAssertFail(const char* msg);
}
#endif
#if defined(AVMPLUS_IA32)

View File

@ -112,7 +112,7 @@ namespace nanojit
#define __NanoAssertMsgf(a, file_, line_, f, ...) \
if (!(a)) { \
avmplus::AvmLog("Assertion failed: " f "%s (%s:%d)\n", __VA_ARGS__, #a, file_, line_); \
NanoAssertFail(); \
avmplus::AvmAssertFail(""); \
}
#define _NanoAssertMsgf(a, file_, line_, f, ...) __NanoAssertMsgf(a, file_, line_, f, __VA_ARGS__)
@ -148,12 +148,9 @@ namespace nanojit
}
#ifdef AVMPLUS_VERBOSE
#ifndef NJ_VERBOSE_DISABLED
#define NJ_VERBOSE 1
#endif
#ifndef NJ_PROFILE_DISABLED
#define NJ_PROFILE 1
#endif
#ifndef NJ_VERBOSE_DISABLED
#define NJ_VERBOSE 1
#endif
#endif
#ifdef NJ_NO_VARIADIC_MACROS
@ -177,30 +174,6 @@ namespace nanojit
#define debug_only(x)
#endif /* DEBUG */
#ifdef NJ_PROFILE
#define counter_struct_begin() struct {
#define counter_struct_end() } _stats;
#define counter_define(x) int32_t x
#define counter_value(x) _stats.x
#define counter_set(x,v) (counter_value(x)=(v))
#define counter_adjust(x,i) (counter_value(x)+=(int32_t)(i))
#define counter_reset(x) counter_set(x,0)
#define counter_increment(x) counter_adjust(x,1)
#define counter_decrement(x) counter_adjust(x,-1)
#define profile_only(x) x
#else
#define counter_struct_begin()
#define counter_struct_end()
#define counter_define(x)
#define counter_value(x)
#define counter_set(x,v)
#define counter_adjust(x,i)
#define counter_reset(x)
#define counter_increment(x)
#define counter_decrement(x)
#define profile_only(x)
#endif /* NJ_PROFILE */
#define isS8(i) ( int32_t(i) == int8_t(i) )
#define isU8(i) ( int32_t(i) == uint8_t(i) )
#define isS16(i) ( int32_t(i) == int16_t(i) )
@ -260,7 +233,10 @@ namespace nanojit {
public:
// All Nanojit and jstracer printing should be routed through
// this function.
void printf( const char* format, ... ) PRINTF_CHECK(2,3);
virtual ~LogControl() {}
#ifdef NJ_VERBOSE
virtual void printf( const char* format, ... ) PRINTF_CHECK(2,3);
#endif
// An OR of LC_Bits values, indicating what should be output
uint32_t lcbits;

View File

@ -3730,6 +3730,30 @@ Compile(JSContext *cx, uintN argc, jsval *vp)
return JS_TRUE;
}
static JSBool
Parse(JSContext *cx, uintN argc, jsval *vp)
{
if (argc < 1) {
JS_ReportErrorNumber(cx, js_GetErrorMessage, NULL, JSMSG_MORE_ARGS_NEEDED,
"compile", "0", "s");
return JS_FALSE;
}
jsval arg0 = JS_ARGV(cx, vp)[0];
if (!JSVAL_IS_STRING(arg0)) {
const char *typeName = JS_GetTypeName(cx, JS_TypeOfValue(cx, arg0));
JS_ReportError(cx, "expected string to parse, got %s", typeName);
return JS_FALSE;
}
JSString *scriptContents = JSVAL_TO_STRING(arg0);
js::Parser parser(cx);
parser.init(JS_GetStringCharsZ(cx, scriptContents), JS_GetStringLength(scriptContents),
NULL, "<string>", 0);
parser.parse(NULL);
JS_SET_RVAL(cx, vp, JSVAL_VOID);
return JS_TRUE;
}
static JSBool
Snarf(JSContext *cx, JSObject *obj, uintN argc, jsval *argv, jsval *rval)
{
@ -3883,6 +3907,7 @@ static JSFunctionSpec shell_functions[] = {
#endif
JS_FS("snarf", Snarf, 0,0,0),
JS_FN("compile", Compile, 1,0),
JS_FN("parse", Parse, 1,0),
JS_FN("timeout", Timeout, 1,0),
JS_FN("elapsed", Elapsed, 0,0),
JS_FS_END
@ -3987,7 +4012,8 @@ static const char *const shell_help_messages[] = {
"scatter(fns) Call functions concurrently (ignoring errors)",
#endif
"snarf(filename) Read filename into returned string",
"compile(code) Parses a string, potentially throwing",
"compile(code) Compiles a string to bytecode, potentially throwing",
"parse(code) Parses a string, potentially throwing",
"timeout([seconds])\n"
" Get/Set the limit in seconds for the execution time for the current context.\n"
" A negative value (default) means that the execution time is unlimited.",

View File

@ -0,0 +1,79 @@
#!/usr/bin/env python2.4
"""usage: %progname candidate_path baseline_path
:warning: May raise ImportError on import if JSON support is missing.
"""
import optparse
from contextlib import nested
from operator import itemgetter
try:
import json
except ImportError:
import simplejson as json
def avg(seq):
return sum(seq) / len(seq)
def compare(current, baseline):
percent_speedups = []
for key, current_result in current.iteritems():
try:
baseline_result = baseline[key]
except KeyError:
print key, 'missing from baseline'
continue
val_getter = itemgetter('average_ms', 'stddev_ms')
base_avg, base_stddev = val_getter(baseline_result)
current_avg, current_stddev = val_getter(current_result)
t_best, t_worst = current_avg - current_stddev, current_avg + current_stddev
base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
fmt = '%30s: %s'
if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
speedup = -((t_worst - base_t_best) / base_t_best) * 100
result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
(t_worst, base_t_best, speedup)
percent_speedups.append(speedup)
elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
(t_best, base_t_worst, slowdown)
percent_speedups.append(slowdown)
else:
result = 'Meh.'
print '%30s: %s' % (key, result)
if percent_speedups:
print 'Average speedup: %.2f%%' % avg(percent_speedups)
def compare_immediate(current_map, baseline_path):
baseline_file = open(baseline_path)
baseline_map = json.load(baseline_file)
baseline_file.close()
compare(current_map, baseline_map)
def main(candidate_path, baseline_path):
candidate_file, baseline_file = open(candidate_path), open(baseline_path)
candidate = json.load(candidate_file)
baseline = json.load(baseline_file)
compare(candidate, baseline)
candidate_file.close()
baseline_file.close()
if __name__ == '__main__':
parser = optparse.OptionParser(usage=__doc__.strip())
options, args = parser.parse_args()
try:
candidate_path = args.pop(0)
except IndexError:
parser.error('A JSON filepath to compare against baseline is required')
try:
baseline_path = args.pop(0)
except IndexError:
parser.error('A JSON filepath for baseline is required')
main(candidate_path, baseline_path)

View File

@ -6,5 +6,5 @@
function f(x) { return 1 + "" + (x + 1); }
reportCompare("12", f(1), "");
var g = eval(String(f));
reportCompare("12", f(1), "");
var g = eval("(" + f + ")");
reportCompare("12", g(1), "");

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
"""%prog [options] dirpath
"""%prog [options] shellpath dirpath
Pulls performance data on parsing via the js shell.
Displays the average number of milliseconds it took to parse each file.
@ -17,6 +17,7 @@ baseline data, we're probably faster. A similar computation is used for
determining the "slower" designation.
Arguments:
shellpath executable JavaScript shell
dirpath directory filled with parsilicious js files
"""
@ -26,18 +27,23 @@ import os
import subprocess as subp
import sys
from string import Template
from operator import itemgetter
try:
import compare_bench
except ImportError:
compare_bench = None
_DIR = os.path.dirname(__file__)
JS_CODE_TEMPLATE = Template("""
var contents = snarf("$filepath");
for (let i = 0; i < $warmup_run_count; i++)
compile(contents);
if (typeof snarf !== 'undefined') read = snarf
var contents = read("$filepath");
for (var i = 0; i < $warmup_run_count; i++)
parse(contents);
var results = [];
for (let i = 0; i < $real_run_count; i++) {
for (var i = 0; i < $real_run_count; i++) {
var start = new Date();
compile(contents);
parse(contents);
var end = new Date();
results.push(end - start);
}
@ -45,24 +51,6 @@ print(results);
""")
def find_shell(filename='js'):
"""Look around for the js shell. Prefer more obvious places to look.
:return: Path if found, else None.
"""
relpaths = ['', 'obj', os.pardir, [os.pardir, 'obj']]
for relpath in relpaths:
path_parts = [_DIR]
if isinstance(relpath, list):
path_parts += relpath
else:
path_parts.append(relpath)
path_parts.append(filename)
path = os.path.join(*path_parts)
if os.path.isfile(path):
return path
def gen_filepaths(dirpath, target_ext='.js'):
for filename in os.listdir(dirpath):
if filename.endswith(target_ext):
@ -97,7 +85,7 @@ def bench(shellpath, filepath, warmup_runs, counted_runs, stfu=False):
def parsemark(filepaths, fbench, stfu=False):
""":param fbench: fbench(filename) -> float"""
bench_map = {}
bench_map = {} # {filename: (avg, stddev)}
for filepath in filepaths:
filename = os.path.split(filepath)[-1]
if not stfu:
@ -112,58 +100,31 @@ def parsemark(filepaths, fbench, stfu=False):
filename_str = '"%s"' % filename
print fmt % (filename_str, avg, stddev)
print '}'
return bench_map
def compare(current, baseline):
for key, (avg, stddev) in current.iteritems():
try:
base_avg, base_stddev = itemgetter('average_ms', 'stddev_ms')(baseline.get(key, None))
except TypeError:
print key, 'missing from baseline'
continue
t_best, t_worst = avg - stddev, avg + stddev
base_t_best, base_t_worst = base_avg - base_stddev, base_avg + base_stddev
fmt = '%30s: %s'
if t_worst < base_t_best: # Worst takes less time (better) than baseline's best.
speedup = -((t_worst - base_t_best) / base_t_best) * 100
result = 'faster: %6.2fms < baseline %6.2fms (%+6.2f%%)' % \
(t_worst, base_t_best, speedup)
elif t_best > base_t_worst: # Best takes more time (worse) than baseline's worst.
slowdown = -((t_best - base_t_worst) / base_t_worst) * 100
result = 'SLOWER: %6.2fms > baseline %6.2fms (%+6.2f%%) ' % \
(t_best, base_t_worst, slowdown)
else:
result = 'Meh.'
print '%30s: %s' % (key, result)
def try_import_json():
try:
import json
return json
except ImportError:
try:
import simplejson as json
return json
except ImportError:
pass
return dict((filename, dict(average_ms=avg, stddev_ms=stddev))
for filename, (avg, stddev) in bench_map.iteritems())
def main():
parser = optparse.OptionParser(usage=__doc__.strip())
parser.add_option('-w', '--warmup-runs', metavar='COUNT', type=int,
default=5, help='used to minimize test instability')
default=5, help='used to minimize test instability [%default]')
parser.add_option('-c', '--counted-runs', metavar='COUNT', type=int,
default=20, help='timed data runs that count towards the average')
default=50, help='timed data runs that count towards the average [%default]')
parser.add_option('-s', '--shell', metavar='PATH', help='explicit shell '
'location; when omitted, will look in likely places')
parser.add_option('-b', '--baseline', metavar='JSON_PATH',
dest='baseline_path', help='json file with baseline values to '
'compare against')
parser.add_option('-q', '--quiet', dest='stfu', action='store_true',
default=False, help='only print JSON to stdout')
default=False, help='only print JSON to stdout [%default]')
options, args = parser.parse_args()
try:
shellpath = args.pop(0)
except IndexError:
parser.print_help()
print
print >> sys.stderr, 'error: shellpath required'
return -1
try:
dirpath = args.pop(0)
except IndexError:
@ -171,26 +132,21 @@ def main():
print
print >> sys.stderr, 'error: dirpath required'
return -1
shellpath = options.shell or find_shell()
if not shellpath:
print >> sys.stderr, 'Could not find shell'
if not shellpath or not os.path.exists(shellpath):
print >> sys.stderr, 'error: could not find shell:', shellpath
return -1
if options.baseline_path:
if not os.path.isfile(options.baseline_path):
print >> sys.stderr, 'Baseline file does not exist'
print >> sys.stderr, 'error: baseline file does not exist'
return -1
json = try_import_json()
if not json:
print >> sys.stderr, 'You need a json lib for baseline comparison'
if not compare_bench:
print >> sys.stderr, 'error: JSON support is missing, cannot compare benchmarks'
return -1
benchfile = lambda filepath: bench(shellpath, filepath,
options.warmup_runs, options.counted_runs, stfu=options.stfu)
bench_map = parsemark(gen_filepaths(dirpath), benchfile, options.stfu)
if options.baseline_path:
fh = open(options.baseline_path, 'r') # 2.4 compat, no 'with'.
baseline_map = json.load(fh)
fh.close()
compare(current=bench_map, baseline=baseline_map)
compare_bench.compare_immediate(bench_map, options.baseline_path)
return 0

View File

@ -0,0 +1 @@
eval("for(a = 0; a < 4; a++) x = 1;", []);

View File

@ -0,0 +1,10 @@
function f()
{
var a = [];
a.length = 10;
for (var i = 0; i < 100; i++) {
var y = a[a.length];
}
}
f();
// No assertEq() call, the test is just that it shouldn't assert or crash.

View File

@ -0,0 +1,16 @@
Function("\
for each(let w in [(5), false, Number, false]) {\
(function f(zzzzzz) {\
return zzzzzz.length == 0 ? 0 : zzzzzz[0] + f(zzzzzz.slice(1))\
})([, [], [], w, , ])\
}\
")()
Function("\
for each(let w in [(void 0), (void 0), false, false, false, false, false, \
undefined, undefined, false, (void 0), undefined]) {\
(function f(zzzzzz) {\
return zzzzzz.length == 0 ? 0 : zzzzzz[0] + f(zzzzzz.slice(1))\
})([w, , w, w, [], []])\
}\
")()

View File

@ -0,0 +1,17 @@
var s;
function f(i) {
if (i > 4) /* side exit when arr[i] changes from bool to undefined (via a hole) */
assertEq(s, undefined);
else
assertEq(s, false);
return 1;
}
var arr = [ false, false, false, false, false, , , , ];
for (var i = 0; i < 10; ++i) {
(s = arr[i]) + f(i);
}
checkStats({ traceTriggered: 2, sideExitIntoInterpreter: 2 })

View File

@ -41,6 +41,7 @@ reasons = [
'extendMaxBranches',
'extendStart',
'extendCold',
'scopeChainCheck',
'extendOuter',
'mismatchExit',
'oomExit',

View File

@ -38,80 +38,16 @@
let EXPORTED_SYMBOLS = [ "ctypes" ];
/**
/*
* This is the js module for ctypes. Import it like so:
* Components.utils.import("resource://gre/modules/ctypes.jsm");
*
* This will create a 'ctypes' object, which provides an interface to describe
* C types and call C functions from a dynamic library. It has the following
* properties and functions:
* and instantiate C types and call C functions from a dynamic library.
*
* ABI constants that specify the calling convention to use.
* ctypes.default_abi corresponds to the cdecl convention, and in almost all
* cases is the correct choice. ctypes.stdcall is provided for calling
* functions in the Microsoft Win32 API.
* For documentation on the API, see:
* https://developer.mozilla.org/en/js-ctypes/js-ctypes_reference
*
* ctypes.default_abi // corresponds to cdecl
* ctypes.stdcall_abi // for calling Win32 API functions
*
* Types available for arguments and return values, representing
* their C counterparts.
*
* ctypes.void_t // Only allowed for return types.
* ctypes.bool // _Bool type (assumed 8 bits wide).
* ctypes.int8_t // int8_t (signed char) type.
* ctypes.int16_t // int16_t (short) type.
* ctypes.int32_t // int32_t (int) type.
* ctypes.int64_t // int64_t (long long) type.
* ctypes.uint8_t // uint8_t (unsigned char) type.
* ctypes.uint16_t // uint16_t (unsigned short) type.
* ctypes.uint32_t // uint32_t (unsigned int) type.
* ctypes.uint64_t // uint64_t (unsigned long long) type.
* ctypes.float // float type.
* ctypes.double // double type.
* ctypes.string // C string (char *).
* ctypes.ustring // 16-bit string (char16_t *).
*
* Library ctypes.open(name)
*
* Attempts to dynamically load the specified library. Returns a Library
* object on success.
* @name A string or nsILocalFile representing the name and path of
* the library to open.
* @returns A Library object.
*
* Library.close()
*
* Unloads the currently loaded library. Any subsequent attempts to call
* functions on this interface will fail.
*
* function Library.declare(name, abi, returnType, argType1, argType2, ...)
*
* Declares a C function in a library.
* @name Function name. This must be a valid symbol in the library.
* @abi The calling convention to use. Must be an ABI constant
* from ctypes.
* @returnType The return type of the function. Must be a type constant
* from ctypes.
* @argTypes Argument types. Must be a type constant (other than void_t)
* from ctypes, or the literal string "..." to denote a
* variadic function.
* @returns A function object.
*
* A function object can then be used to call the C function it represents
* like so:
*
* const myFunction = myLibrary.declare("myFunction", ctypes.default_abi,
* ctypes.double, ctypes.int32_t, ctypes.int32_t, ...);
*
* var result = myFunction(5, 10, ...);
*
* Arguments will be checked against the types supplied at declaration, and
* some attempt to convert values (e.g. boolean true/false to integer 0/1)
* will be made. Otherwise, if types do not match, or conversion fails,
* an exception will be thrown. Arguments passed as variadic parameters
* must have an explicit ctypes type, since their types are not declared
* in the signature.
*/
// Initialize the ctypes object. You do not need to do this yourself.
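With the long inline reference above trimmed down to a pointer at MDN, a minimal usage sketch of the API the comment still summarizes ("libm.so.6" and fabs are illustrative stand-ins, not part of this patch):

// Illustrative only: the library name and symbol are placeholders for whatever you need.
Components.utils.import("resource://gre/modules/ctypes.jsm");

let lib  = ctypes.open("libm.so.6");                    // or pass an nsILocalFile
let fabs = lib.declare("fabs", ctypes.default_abi,      // symbol name, ABI,
                       ctypes.double,                   // return type,
                       ctypes.double);                  // argument type(s)

fabs(-3.5);                                             // 3.5, converted back to a JS number
lib.close();                                            // further calls through lib now fail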

View File

@ -1334,12 +1334,14 @@ function run_type_ctor_class_tests(c, t, t2, props, fns, instanceProps, instance
do_check_throws(function() { t.prototype[p]; }, Error);
// Test that an instance 'd' of 't' is a CData.
let d = t();
do_check_class(d, "CData");
do_check_true(d.__parent__ === ctypes);
do_check_true(d.__proto__ === t.prototype);
do_check_true(d instanceof t);
do_check_true(d.constructor === t);
if (t.__proto__ != ctypes.FunctionType.prototype) {
let d = t();
do_check_class(d, "CData");
do_check_true(d.__parent__ === ctypes);
do_check_true(d.__proto__ === t.prototype);
do_check_true(d instanceof t);
do_check_true(d.constructor === t);
}
}
function run_StructType_tests() {
@ -1692,16 +1694,19 @@ function run_FunctionType_tests() {
ctypes.FunctionType(ctypes.default_abi, ctypes.void_t, null);
}, Error);
do_check_throws(function() {
ctypes.FunctionType(ctypes.default_abi, ctypes.void_t());
ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t());
}, Error);
do_check_throws(function() {
ctypes.FunctionType(ctypes.void_t, ctypes.void_t);
}, Error);
let g_t = ctypes.StructType("g_t", [{ a: ctypes.int32_t }, { b: ctypes.double }]);
let g = g_t(1, 2);
let f_t = ctypes.FunctionType(ctypes.default_abi, g_t);
let name = "g_t(*)()";
let name = "g_t()()";
do_check_eq(f_t.name, name);
do_check_eq(f_t.size, ctypes.uintptr_t.size);
do_check_eq(f_t.size, undefined);
do_check_true(f_t.abi === ctypes.default_abi);
do_check_true(f_t.returnType === g_t);
do_check_true(f_t.argTypes.length == 0);
@ -1710,13 +1715,25 @@ function run_FunctionType_tests() {
do_check_eq(f_t.toSource(),
"ctypes.FunctionType(ctypes.default_abi, g_t)");
let fp_t = f_t.ptr;
name = "g_t(*)()";
do_check_eq(fp_t.name, name);
do_check_eq(fp_t.size, ctypes.uintptr_t.size);
do_check_eq(fp_t.toString(), "type " + name);
do_check_eq(fp_t.toSource(),
"ctypes.FunctionType(ctypes.default_abi, g_t).ptr");
// Check that constructing a FunctionType CData directly throws.
do_check_throws(function() { f_t(); }, Error);
// Test ExplicitConvert.
let f = f_t();
let f = fp_t();
do_check_throws(function() { f.value; }, Error);
do_check_eq(ptrValue(f), 0);
f = f_t(5);
f = fp_t(5);
do_check_eq(ptrValue(f), 5);
f = f_t(ctypes.UInt64(10));
f = fp_t(ctypes.UInt64(10));
do_check_eq(ptrValue(f), 10);
// Test ImplicitConvert.
@ -1724,43 +1741,45 @@ function run_FunctionType_tests() {
do_check_eq(ptrValue(f), 0);
do_check_throws(function() { f.value = 5; }, Error);
do_check_eq(f.toSource(),
'ctypes.FunctionType(ctypes.default_abi, g_t)(ctypes.UInt64("0x0"))');
'ctypes.FunctionType(ctypes.default_abi, g_t).ptr(ctypes.UInt64("0x0"))');
// Test ExplicitConvert from a function pointer of different type.
let f2_t = ctypes.FunctionType(ctypes.default_abi, g_t, [ ctypes.int32_t ]);
let f2 = f2_t();
do_check_throws(function() { f_t(f2); }, Error);
let f2 = f2_t.ptr();
do_check_throws(function() { fp_t(f2); }, Error);
do_check_throws(function() { f.value = f2; }, Error);
do_check_throws(function() { f2.value = f; }, Error);
// Test that converting to a voidptr_t throws.
do_check_throws(function() { ctypes.voidptr_t(f2); }, Error);
// Test that converting to a voidptr_t works, but not the other way.
let v = ctypes.voidptr_t(f2);
do_check_eq(v.toSource(), 'ctypes.voidptr_t(ctypes.UInt64("0x0"))');
do_check_throws(function() { f2_t.ptr(v); }, TypeError);
// Test some more complex names.
do_check_eq(f_t.array().name, "g_t(*[])()");
do_check_eq(f_t.array().ptr.name, "g_t(*(*)[])()");
do_check_eq(fp_t.array().name, "g_t(*[])()");
do_check_eq(fp_t.array().ptr.name, "g_t(*(*)[])()");
let f3_t = ctypes.FunctionType(ctypes.default_abi,
ctypes.char.ptr.array().ptr).ptr.array(8).array();
ctypes.char.ptr.array().ptr).ptr.ptr.array(8).array();
do_check_eq(f3_t.name, "char*(*(**[][8])())[]");
#ifdef _WIN32
#ifndef _WIN64
f3_t = ctypes.FunctionType(ctypes.stdcall_abi,
ctypes.char.ptr.array().ptr).ptr.array(8).array();
do_check_eq(f3_t.name, "char*(__stdcall *(**[][8])())[]");
do_check_eq(f3_t.ptr.name, "char*(__stdcall *(**[][8])())[]");
#endif
#endif
let f4_t = ctypes.FunctionType(ctypes.default_abi,
ctypes.char.ptr.array().ptr, [ ctypes.int32_t, f_t ]);
ctypes.char.ptr.array().ptr, [ ctypes.int32_t, fp_t ]);
do_check_true(f4_t.argTypes.length == 2);
do_check_true(f4_t.argTypes[0] === ctypes.int32_t);
do_check_true(f4_t.argTypes[1] === f_t);
do_check_true(f4_t.argTypes[1] === fp_t);
do_check_throws(function() { f4_t.argTypes.z = 0; }, Error);
do_check_throws(function() { f4_t.argTypes[0] = 0; }, Error);
let t4_t = f4_t.ptr.array(8).array();
let t4_t = f4_t.ptr.ptr.array(8).array();
do_check_eq(t4_t.name, "char*(*(**[][8])(int32_t, g_t(*)()))[]");
}
@ -1908,7 +1927,7 @@ function run_cast_tests() {
let g_t = ctypes.StructType("g_t", [{ a: ctypes.int32_t }, { b: ctypes.double }]);
let a_t = ctypes.ArrayType(g_t, 4);
let p_t = ctypes.PointerType(g_t);
let f_t = ctypes.FunctionType(ctypes.default_abi, ctypes.void_t);
let f_t = ctypes.FunctionType(ctypes.default_abi, ctypes.void_t).ptr;
let a = a_t();
a[0] = { a: 5, b: 7.5 };
@ -2064,7 +2083,7 @@ function run_function_tests(library)
let test_ansi_len = library.declare("test_ansi_len", ctypes.default_abi,
ctypes.int32_t, ctypes.char.ptr);
let fn_t = ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t,
[ ctypes.char.ptr ]);
[ ctypes.char.ptr ]).ptr;
let test_fnptr = library.declare("test_fnptr", ctypes.default_abi, fn_t);
@ -2076,11 +2095,12 @@ function run_function_tests(library)
// Test that we can call ptr().
do_check_eq(ptr("function pointers rule!"), 23);
// Test that library.declare() returns data of type FunctionType, and that
// Test that library.declare() returns data of type FunctionType.ptr, and that
// it is immutable.
do_check_true(test_ansi_len.constructor.__proto__ === ctypes.FunctionType.prototype);
do_check_true(test_ansi_len.constructor.targetType.__proto__ ===
ctypes.FunctionType.prototype);
do_check_eq(test_ansi_len.constructor.toSource(),
"ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t, [ctypes.char.ptr])");
"ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t, [ctypes.char.ptr]).ptr");
do_check_throws(function() { test_ansi_len.value = null; }, Error);
do_check_eq(ptrValue(test_ansi_len), ptrValue(ptr));
@ -2116,7 +2136,7 @@ function run_single_closure_tests(library, abi, suffix)
do_check_eq(closure_fn.call(thisobj, 7), 7 + thisobj.a);
// Construct a closure, and call it ourselves.
let fn_t = ctypes.FunctionType(abi, ctypes.int32_t, [ ctypes.int8_t ]);
let fn_t = ctypes.FunctionType(abi, ctypes.int32_t, [ ctypes.int8_t ]).ptr;
let closure = fn_t(closure_fn);
do_check_eq(closure(-17), -17 + b);
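The pattern the updated closure test relies on, and which the rest of this test file is being converted to, is that callable CData now comes from a FunctionType's pointer type rather than from the FunctionType itself. A sketch under that assumption (fn_t and the callback below are illustrative names):

// Sketch of the new FunctionType.ptr pattern; the names here are illustrative.
let fn_t = ctypes.FunctionType(ctypes.default_abi,
                               ctypes.int32_t,
                               [ ctypes.int32_t ]).ptr;  // note the trailing .ptr

let inc = fn_t(function (x) { return x + 1; });          // closure wrapping a JS function
inc(41);                                                 // 42

// The bare FunctionType is no longer constructible:
// ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t)();   // throws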
@ -2134,14 +2154,14 @@ function run_single_closure_tests(library, abi, suffix)
function run_variadic_tests(library) {
let sum_va_type = ctypes.FunctionType(ctypes.default_abi,
ctypes.int32_t,
[ctypes.uint8_t, "..."]),
[ctypes.uint8_t, "..."]).ptr,
sum_va = library.declare("test_sum_va_cdecl", ctypes.default_abi, ctypes.int32_t,
ctypes.uint8_t, "...");
do_check_eq(sum_va_type.toSource(),
'ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t, [ctypes.uint8_t, "..."])');
'ctypes.FunctionType(ctypes.default_abi, ctypes.int32_t, [ctypes.uint8_t, "..."]).ptr');
do_check_eq(sum_va.constructor.name, "int32_t(*)(uint8_t, ...)");
do_check_true(sum_va.constructor.isVariadic);
do_check_true(sum_va.constructor.targetType.isVariadic);
do_check_eq(sum_va(3,
ctypes.int32_t(1),
@ -2211,9 +2231,9 @@ function run_variadic_tests(library) {
do_check_eq(result[2], 16);
do_check_true(!!(sum_va_type().value = sum_va_type()));
let sum_notva_type = ctypes.FunctionType(sum_va_type.abi,
sum_va_type.returnType,
[ctypes.uint8_t]);
let sum_notva_type = ctypes.FunctionType(sum_va_type.targetType.abi,
sum_va_type.targetType.returnType,
[ctypes.uint8_t]).ptr;
do_check_throws(function() {
sum_va_type().value = sum_notva_type();
}, Error);