commit 02ceab2b60
Merge.
@@ -79,8 +79,8 @@ BUILTIN2(extern, DOUBLE, js_StringToNumber, CONTEXT, STRING,
BUILTIN2(extern, INT32, js_StringToInt32, CONTEXT, STRING, 1, 1)
BUILTIN3(extern, JSVAL, js_Any_getprop, CONTEXT, OBJECT, STRING, 0, 0)
BUILTIN4(extern, BOOL, js_Any_setprop, CONTEXT, OBJECT, STRING, JSVAL, 0, 0)
BUILTIN3(extern, JSVAL, js_Any_getelem, CONTEXT, OBJECT, UINT32, 0, 0)
BUILTIN4(extern, BOOL, js_Any_setelem, CONTEXT, OBJECT, UINT32, JSVAL, 0, 0)
BUILTIN3(extern, JSVAL, js_Any_getelem, CONTEXT, OBJECT, INT32, 0, 0)
BUILTIN4(extern, BOOL, js_Any_setelem, CONTEXT, OBJECT, INT32, JSVAL, 0, 0)
BUILTIN3(extern, OBJECT, js_FastValueToIterator, CONTEXT, UINT32, JSVAL, 0, 0)
BUILTIN2(extern, JSVAL, js_FastCallIteratorNext, CONTEXT, OBJECT, 0, 0)
BUILTIN2(extern, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, 0)

@@ -210,7 +210,7 @@ js_Any_setprop(JSContext* cx, JSObject* obj, JSString* idstr, jsval v)
}

jsval FASTCALL
js_Any_getelem(JSContext* cx, JSObject* obj, uint32 index)
js_Any_getelem(JSContext* cx, JSObject* obj, int32 index)
{
jsval v;
jsid id;

@@ -224,7 +224,7 @@ js_Any_getelem(JSContext* cx, JSObject* obj, uint32 index)
}

JSBool FASTCALL
js_Any_setelem(JSContext* cx, JSObject* obj, uint32 index, jsval v)
js_Any_setelem(JSContext* cx, JSObject* obj, int32 index, jsval v)
{
jsid id;
if (index < 0)
@@ -5720,8 +5720,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
default:
/*
* If useless, just emit JSOP_TRUE; otherwise convert delete foo()
* to foo(), true (a comma expression, requiring SRC_PCDELTA, and
* also JSOP_GROUP for correctly parenthesized decompilation).
* to foo(), true (a comma expression, requiring SRC_PCDELTA).
*/
useful = JS_FALSE;
if (!CheckSideEffects(cx, cg, pn2, &useful))

@@ -5743,8 +5742,6 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
return JS_FALSE;
}
if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
return JS_FALSE;
}
break;

@@ -6165,8 +6162,6 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!js_EmitTree(cx, cg, pn->pn_kid))
return JS_FALSE;
cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
return JS_FALSE;
break;
}
@@ -2857,7 +2857,6 @@ js_Interpret(JSContext *cx)

/* No-ops for ease of decompilation. */
ADD_EMPTY_CASE(JSOP_NOP)
ADD_EMPTY_CASE(JSOP_GROUP)
ADD_EMPTY_CASE(JSOP_CONDSWITCH)
ADD_EMPTY_CASE(JSOP_TRY)
ADD_EMPTY_CASE(JSOP_FINALLY)

@@ -5137,15 +5136,11 @@ js_Interpret(JSContext *cx)
if (!prop) {
/* Kludge to allow (typeof foo == "undefined") tests. */
endpc = script->code + script->length;
for (pc2 = regs.pc + JSOP_NAME_LENGTH; pc2 < endpc; pc2++) {
op2 = (JSOp)*pc2;
if (op2 == JSOP_TYPEOF) {
PUSH_OPND(JSVAL_VOID);
len = JSOP_NAME_LENGTH;
DO_NEXT_OP(len);
}
if (op2 != JSOP_GROUP)
break;
op2 = (JSOp) regs.pc[JSOP_NAME_LENGTH];
if (op2 == JSOP_TYPEOF) {
PUSH_OPND(JSVAL_VOID);
len = JSOP_NAME_LENGTH;
DO_NEXT_OP(len);
}
goto atom_not_defined;
}
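For context: this kludge exists because `typeof` applied to an undeclared name must evaluate to "undefined" rather than throw, and with JSOP_GROUP gone the interpreter only has to peek at the single opcode after JSOP_NAME. A minimal shell sketch of the behavior being preserved (the name `foo` is assumed to be undeclared):

    // foo == undefined;            // would throw ReferenceError: foo is not defined
    if (typeof foo == "undefined")  // legal: typeof suppresses the undefined-name error
        print("foo is not declared");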
@@ -6841,6 +6836,7 @@ js_Interpret(JSContext *cx)
L_JSOP_UNUSED77:
L_JSOP_UNUSED78:
L_JSOP_UNUSED79:
L_JSOP_UNUSED131:
L_JSOP_UNUSED201:
L_JSOP_UNUSED202:
L_JSOP_UNUSED203:
@@ -1560,8 +1560,11 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id,
static int32 FASTCALL
Object_p_hasOwnProperty(JSContext* cx, JSObject* obj, JSString *str)
{
jsid id = ATOM_TO_JSID(STRING_TO_JSVAL(str));
jsid id;
jsval v;

if (!js_ValueToStringId(cx, STRING_TO_JSVAL(str), &id))
return JSVAL_TO_BOOLEAN(JSVAL_VOID);
if (!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &v))
return JSVAL_TO_BOOLEAN(JSVAL_VOID);
JS_ASSERT(JSVAL_IS_BOOLEAN(v));
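The switch to js_ValueToStringId matters because the argument need not be an interned atom: a string built at runtime has no atom, so forging a jsid straight from it would be wrong. A hedged JavaScript illustration of the case the traced fast path must handle (object and key invented for the example):

    var obj = { hello: 1 };
    var key = ["he", "llo"].join("");  // runtime-built string, not an interned atom
    print(obj.hasOwnProperty(key));    // true -- the key must be atomized before lookup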
@@ -3324,9 +3327,6 @@ Detecting(JSContext *cx, jsbytecode *pc)
}
return JS_FALSE;

case JSOP_GROUP:
break;

default:
/*
* At this point, anything but an extended atom index prefix means
@@ -965,10 +965,10 @@ PushOff(SprintStack *ss, ptrdiff_t off, JSOp op)
}

static ptrdiff_t
PopOff(SprintStack *ss, JSOp op)
PopOffPrec(SprintStack *ss, uint8 prec)
{
uintN top;
const JSCodeSpec *cs, *topcs;
const JSCodeSpec *topcs;
ptrdiff_t off;

/* ss->top points to the next free slot; be paranoid about underflow. */

@@ -980,8 +980,7 @@ PopOff(SprintStack *ss, JSOp op)
ss->top = --top;
off = GetOff(ss, top);
topcs = &js_CodeSpec[ss->opcodes[top]];
cs = &js_CodeSpec[op];
if (topcs->prec != 0 && topcs->prec < cs->prec) {
if (topcs->prec != 0 && topcs->prec < prec) {
ss->sprinter.offset = ss->offsets[top] = off - 2;
off = Sprint(&ss->sprinter, "(%s)", OFF2STR(&ss->sprinter, off));
} else {

@@ -991,14 +990,26 @@ PopOff(SprintStack *ss, JSOp op)
}

static const char *
PopStr(SprintStack *ss, JSOp op)
PopStrPrec(SprintStack *ss, uint8 prec)
{
ptrdiff_t off;

off = PopOff(ss, op);
off = PopOffPrec(ss, prec);
return OFF2STR(&ss->sprinter, off);
}

static ptrdiff_t
PopOff(SprintStack *ss, JSOp op)
{
return PopOffPrec(ss, js_CodeSpec[op].prec);
}

static const char *
PopStr(SprintStack *ss, JSOp op)
{
return PopStrPrec(ss, js_CodeSpec[op].prec);
}

typedef struct TableEntry {
jsval key;
ptrdiff_t offset;

@@ -1744,10 +1755,17 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
/*
* Local macros
*/
#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL)
#define DECOMPILE_CODE(pc,nb) if (!Decompile(ss, pc, nb, JSOP_NOP)) return NULL
#define NEXT_OP(pc) (((pc) + (len) == endpc) ? nextop : pc[len])
#define POP_STR() PopStr(ss, op)
#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL)
#define POP_STR_PREC(prec) PopStrPrec(ss, prec)

/*
* Pop a condition expression for if/for/while. JSOP_IFEQ's precedence forces
* extra parens around assignment, which avoids a strict-mode warning.
*/
#define POP_COND_STR() PopStr(ss, JSOP_IFEQ)

/*
* Callers know that ATOM_IS_STRING(atom), and we leave it to the optimizer to

@@ -1808,6 +1826,23 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
GET_QUOTE_AND_FMT(qfmt, ufmt, rval); \
JS_END_MACRO

/*
* Per spec, new x(y).z means (new x(y)).z. For example new (x(y).z) must
* decompile with the constructor parenthesized, but new x.z should not. The
* normal rules give x(y).z and x.z identical precedence: both are produced by
* JSOP_GETPROP.
*
* Therefore, we need to know in case JSOP_NEW whether the constructor
* expression contains any unparenthesized function calls. So when building a
* MemberExpression or CallExpression, we set ss->opcodes[n] to JSOP_CALL if
* this is true. x(y).z gets JSOP_CALL, not JSOP_GETPROP.
*/
#define PROPAGATE_CALLNESS() \
JS_BEGIN_MACRO \
if (ss->opcodes[ss->top - 1] == JSOP_CALL) \
saveop = JSOP_CALL; \
JS_END_MACRO

cx = ss->sprinter.context;
JS_CHECK_RECURSION(cx, return NULL);
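The grammar subtlety the comment above describes is easy to reproduce in plain JavaScript; `F` here is a hypothetical helper used only for illustration:

    function F(n) {
        function Inner() { this.z = n * 2; }
        return { z: n, Inner: Inner };
    }
    print(new F(1).z);            // 1: parses as (new F(1)).z
    print(new (F(2).Inner)().z);  // 4: the parens around F(2).Inner are load-bearing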
@@ -1986,8 +2021,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
op = (JSOp) pc[oplen];
LOCAL_ASSERT(op != saveop);
}
rval = POP_STR();
lval = POP_STR();
rval = POP_STR_PREC(cs->prec + (!inXML && !!(cs->format & JOF_LEFTASSOC)));
lval = POP_STR_PREC(cs->prec + (!inXML && !(cs->format & JOF_LEFTASSOC)));
if (op != saveop) {
/* Print only the right operand of the assignment-op. */
todo = SprintCString(&ss->sprinter, rval);
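The JOF_LEFTASSOC bump encodes a standard pretty-printing rule: under a left-associative operator, only a right-hand operand of equal precedence needs parentheses. In plain JavaScript:

    print(10 - 3 - 2);    // 5: left-associative, same as (10 - 3) - 2
    print(10 - (3 - 2));  // 9: these parens must be regenerated when decompiling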
@@ -2035,7 +2070,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
jp->indent += 4;
DECOMPILE_CODE(pc, tail);
jp->indent -= 4;
js_printf(jp, "\t} while (%s);\n", POP_STR());
js_printf(jp, "\t} while (%s);\n", POP_COND_STR());
pc += tail;
len = js_CodeSpec[*pc].length;
todo = -2;

@@ -2071,7 +2106,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
if (cond != tail) {
/* Decompile the loop condition. */
DECOMPILE_CODE(pc + cond, tail - cond);
js_printf(jp, " %s", POP_STR());
js_printf(jp, " %s", POP_COND_STR());
}

/* Need a semicolon whether or not there was a cond. */

@@ -2153,44 +2188,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
}
break;

case JSOP_GROUP:
cs = &js_CodeSpec[lastop];
if ((cs->prec != 0 &&
cs->prec <= js_CodeSpec[NEXT_OP(pc)].prec) ||
pc[JSOP_GROUP_LENGTH] == JSOP_NULL ||
pc[JSOP_GROUP_LENGTH] == JSOP_NULLTHIS ||
pc[JSOP_GROUP_LENGTH] == JSOP_DUP ||
pc[JSOP_GROUP_LENGTH] == JSOP_IFEQ ||
pc[JSOP_GROUP_LENGTH] == JSOP_IFNE) {
/*
* Force parens if this JSOP_GROUP forced re-association
* against precedence, or if this is a call or constructor
* expression, or if it is destructured (JSOP_DUP), or if
* it is an if or loop condition test.
*
* This is necessary to handle the operator new grammar,
* by which new x(y).z means (new x(y)).z. For example
* new (x(y).z) must decompile with the constructor
* parenthesized, but normal precedence has JSOP_GETPROP
* (for the final .z) higher than JSOP_NEW. In general,
* if the call or constructor expression is parenthesized,
* we preserve parens.
*/
op = JSOP_NAME;
rval = POP_STR();
todo = SprintCString(&ss->sprinter, rval);
} else {
/*
* Don't explicitly parenthesize -- just fix the top
* opcode so that the auto-parens magic in PopOff can do
* its thing.
*/
LOCAL_ASSERT(ss->top != 0);
ss->opcodes[ss->top-1] = saveop = lastop;
todo = -2;
}
break;

case JSOP_PUSH:
#if JS_HAS_DESTRUCTURING
sn = js_GetSrcNote(jp->script, pc);

@@ -2816,6 +2813,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
LOCAL_ASSERT(jp->fun);
fun = jp->fun;
if (fun->flags & JSFUN_EXPR_CLOSURE) {
/* Turn on parens around comma-expression here. */
op = JSOP_SETNAME;
rval = POP_STR();
js_printf(jp, (*rval == '{') ? "(%s)%s" : ss_format,
rval,

@@ -2967,8 +2966,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
cond = GetJumpOffset(pc, pc);
tail = js_GetSrcNoteOffset(sn, 0);
DECOMPILE_CODE(pc + cond, tail - cond);
rval = POP_STR();
js_printf(jp, "\twhile (%s) {\n", rval);
js_printf(jp, "\twhile (%s) {\n", POP_COND_STR());
jp->indent += 4;
DECOMPILE_CODE(pc + oplen, cond - oplen);
jp->indent -= 4;

@@ -3023,8 +3021,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
switch (sn ? SN_TYPE(sn) : SRC_NULL) {
case SRC_IF:
case SRC_IF_ELSE:
op = JSOP_NOP; /* turn off parens */
rval = POP_STR();
rval = POP_COND_STR();
if (ss->inArrayInit || ss->inGenExp) {
LOCAL_ASSERT(SN_TYPE(sn) == SRC_IF);
ss->sprinter.offset -= PAREN_SLOP;

@@ -3467,6 +3464,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
/*
* Special case: new (x(y)(z)) must be parenthesized like so.
* Same for new (x(y).z) -- contrast with new x(y).z.
* See PROPAGATE_CALLNESS.
*/
op = (JSOp) ss->opcodes[ss->top-1];
lval = PopStr(ss,

@@ -3535,6 +3533,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)

case JSOP_DELPROP:
GET_ATOM_QUOTE_AND_FMT("%s %s[%s]", "%s %s.%s", rval);
op = JSOP_GETPROP;
lval = POP_STR();
todo = Sprint(&ss->sprinter, fmt, js_delete_str, lval, rval);
break;

@@ -3542,7 +3541,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
case JSOP_DELELEM:
op = JSOP_NOP; /* turn off parens */
xval = POP_STR();
op = saveop;
op = JSOP_GETPROP;
lval = POP_STR();
if (*xval == '\0')
goto do_delete_lval;

@@ -3556,6 +3555,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
#if JS_HAS_XML_SUPPORT
case JSOP_DELDESC:
xval = POP_STR();
op = JSOP_GETPROP;
lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s %s..%s",
js_delete_str, lval, xval);

@@ -3700,6 +3700,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
do_getprop:
GET_QUOTE_AND_FMT(index_format, dot_format, rval);
do_getprop_lval:
PROPAGATE_CALLNESS();
lval = POP_STR();
todo = Sprint(&ss->sprinter, fmt, lval, rval);
break;

@@ -3773,6 +3774,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
op = JSOP_NOP; /* turn off parens */
xval = POP_STR();
op = saveop;
PROPAGATE_CALLNESS();
lval = POP_STR();
if (*xval == '\0') {
todo = Sprint(&ss->sprinter, "%s", lval);

@@ -4243,14 +4245,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
break;
}

case JSOP_STRICTEQ:
case JSOP_STRICTNE:
rval = POP_STR();
lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s %c== %s",
lval, (op == JSOP_STRICTEQ) ? '=' : '!', rval);
break;

case JSOP_DEFFUN:
LOAD_FUNCTION(0);
todo = -2;

@@ -4607,12 +4601,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)

case JSOP_ENDFILTER:
rval = POP_STR();
PROPAGATE_CALLNESS();
lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s.(%s)", lval, rval);
break;

case JSOP_DESCENDANTS:
rval = POP_STR();
PROPAGATE_CALLNESS();
lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s..%s", lval, rval);
break;
@@ -78,9 +78,9 @@
* 12  <<, >>, >>>            JSOP_LSH, JSOP_RSH, JSOP_URSH
* 13  +, -, etc.             JSOP_ADD, JSOP_SUB, etc.
* 14  *, /, %                JSOP_MUL, JSOP_DIV, JSOP_MOD
* 15  !, ~, etc.             JSOP_NOT, JSOP_BITNOT, etc.
* 15  !, ~, delete, etc.     JSOP_NOT, JSOP_BITNOT, JSOP_DEL*, etc.
* 16  3.14, 0, etc.          JSOP_DOUBLE, JSOP_ZERO, etc.
* 17  delete, new            JSOP_DEL*, JSOP_NEW
* 17  new                    JSOP_NEW
* 18  x.y, f(), etc.         JSOP_GETPROP, JSOP_CALL, etc.
* 19  x, null, etc.          JSOP_NAME, JSOP_NULL, etc.
*
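Demoting delete from row 17 to row 15 groups it with the other prefix unary operators, which is how it actually parses; for example:

    var o = { x: 1 };
    print(delete o.x + 1);  // 2: parses as (delete o.x) + 1, and delete yields true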
@@ -139,9 +139,9 @@ OPDEF(JSOP_NOT, 32, "not", "!", 1, 1, 1, 15, JOF_BYTE|J
OPDEF(JSOP_BITNOT, 33, "bitnot", "~", 1, 1, 1, 15, JOF_BYTE)
OPDEF(JSOP_NEG, 34, "neg", "- ", 1, 1, 1, 15, JOF_BYTE)
OPDEF(JSOP_NEW, 35, js_new_str, NULL, 3, -1, 1, 17, JOF_UINT16|JOF_INVOKE)
OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 17, JOF_ATOM|JOF_NAME|JOF_DEL)
OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 17, JOF_ATOM|JOF_PROP|JOF_DEL)
OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 15, JOF_ATOM|JOF_NAME|JOF_DEL)
OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 15, JOF_ATOM|JOF_PROP|JOF_DEL)
OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEL)
OPDEF(JSOP_TYPEOF, 39, js_typeof_str,NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
OPDEF(JSOP_VOID, 40, js_void_str, NULL, 1, 1, 1, 15, JOF_BYTE)

@@ -181,8 +181,8 @@ OPDEF(JSOP_TABLESWITCH, 70, "tableswitch", NULL, -1, 1, 0, 0, JOF_TABLES
OPDEF(JSOP_LOOKUPSWITCH, 71, "lookupswitch", NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCH|JOF_DETECTING|JOF_PARENHEAD)

/* New, infallible/transitive identity ops. */
OPDEF(JSOP_STRICTEQ, 72, "stricteq", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
OPDEF(JSOP_STRICTNE, 73, "strictne", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING)
OPDEF(JSOP_STRICTEQ, 72, "stricteq", "===", 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING|JOF_LEFTASSOC)
OPDEF(JSOP_STRICTNE, 73, "strictne", "!==", 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING|JOF_LEFTASSOC)

/* Resume instruction (emitted for the JIT for instructions that can't be restarted). */
OPDEF(JSOP_RESUME, 74, "resume", NULL, 1, 0, 0, 0, JOF_BYTE)

@@ -318,7 +318,7 @@ OPDEF(JSOP_NAMEDFUNOBJ, 129, "namedfunobj", NULL, 3, 0, 1, 19, JOF_OBJECT
OPDEF(JSOP_SETLOCALPOP, 130, "setlocalpop", NULL, 3, 1, 0, 3, JOF_LOCAL|JOF_NAME|JOF_SET)

/* Parenthesization opcode to help the decompiler. */
OPDEF(JSOP_GROUP, 131, "group", NULL, 1, 0, 0, 19, JOF_BYTE)
OPDEF(JSOP_UNUSED131, 131, "unused131", NULL, 1, 0, 0, 0, JOF_BYTE)

/*
* Host object extension: given 'o.item(i) = j', the left-hand side compiles

@@ -419,7 +419,7 @@ OPDEF(JSOP_GETFUNNS, 185,"getfunns", NULL, 1, 0, 1, 19, JOF_BYTE)
*/
OPDEF(JSOP_GETUPVAR, 186,"getupvar", NULL, 3, 0, 1, 19, JOF_UINT16|JOF_NAME)

OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL)
OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 15, JOF_BYTE|JOF_ELEM|JOF_DEL)

/*
* Opcode to hold 24-bit immediate integer operands.
@@ -4516,7 +4516,7 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
} else if (tt == TOK_RP) {
JSParseNode *group = pn3;

/* Recycle the useless TOK_RP/JSOP_GROUP node. */
/* Recycle the useless TOK_RP node. */
pn3 = group->pn_kid;
group->pn_kid = NULL;
RecycleTree(group, tc);

@@ -6279,7 +6279,7 @@ Boolish(JSParseNode *pn)
{
switch (pn->pn_op) {
case JSOP_DOUBLE:
return pn->pn_dval != 0;
return pn->pn_dval != 0 && !JSDOUBLE_IS_NaN(pn->pn_dval);

case JSOP_STRING:
return JSSTRING_LENGTH(ATOM_TO_STRING(pn->pn_atom)) != 0;
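The added JSDOUBLE_IS_NaN test fixes a genuine corner case: NaN compares unequal to zero yet is falsy, so judging a constant's boolishness by pn_dval != 0 alone gets NaN backwards:

    print(Boolean(NaN));  // false: NaN is falsy
    print(NaN != 0);      // true: hence the explicit NaN check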
@@ -6728,6 +6728,14 @@ js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, bool inCond)
pn->pn_arity = PN_NULLARY;
pn->pn_dval = d;
RecycleTree(pn1, tc);
} else if (pn1->pn_type == TOK_PRIMARY) {
if (pn->pn_op == JSOP_NOT &&
(pn1->pn_op == JSOP_TRUE ||
pn1->pn_op == JSOP_FALSE)) {
PN_MOVE_NODE(pn, pn1);
pn->pn_op = (pn->pn_op == JSOP_TRUE) ? JSOP_FALSE : JSOP_TRUE;
RecycleTree(pn1, tc);
}
}
break;
@@ -374,8 +374,8 @@ Oracle::clear()
}

#if defined(NJ_SOFTFLOAT)
JS_DECLARE_CALLINFO(i2f)
JS_DECLARE_CALLINFO(u2f)
JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1)
JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1)
#endif

static bool isi2f(LInsp i)

@@ -475,91 +475,89 @@ static bool overflowSafe(LIns* i)
#if defined(NJ_SOFTFLOAT)
/* soft float */

JS_DEFINE_CALLINFO_1(static, DOUBLE, fneg, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)

jsdouble FASTCALL
js_fneg(jsdouble x)
fneg(jsdouble x)
{
return -x;
}

jsdouble FASTCALL
js_i2f(int32 i)
i2f(int32 i)
{
return i;
}

jsdouble FASTCALL
js_u2f(jsuint u)
u2f(jsuint u)
{
return u;
}

int32 FASTCALL
js_fcmpeq(jsdouble x, jsdouble y)
fcmpeq(jsdouble x, jsdouble y)
{
return x==y;
}

int32 FASTCALL
js_fcmplt(jsdouble x, jsdouble y)
fcmplt(jsdouble x, jsdouble y)
{
return x < y;
}

int32 FASTCALL
js_fcmple(jsdouble x, jsdouble y)
fcmple(jsdouble x, jsdouble y)
{
return x <= y;
}

int32 FASTCALL
js_fcmpgt(jsdouble x, jsdouble y)
fcmpgt(jsdouble x, jsdouble y)
{
return x > y;
}

int32 FASTCALL
js_fcmpge(jsdouble x, jsdouble y)
fcmpge(jsdouble x, jsdouble y)
{
return x >= y;
}

jsdouble FASTCALL
js_fmul(jsdouble x, jsdouble y)
fmul(jsdouble x, jsdouble y)
{
return x * y;
}

jsdouble FASTCALL
js_fadd(jsdouble x, jsdouble y)
fadd(jsdouble x, jsdouble y)
{
return x + y;
}

jsdouble FASTCALL
js_fdiv(jsdouble x, jsdouble y)
fdiv(jsdouble x, jsdouble y)
{
return x / y;
}

jsdouble FASTCALL
js_fsub(jsdouble x, jsdouble y)
fsub(jsdouble x, jsdouble y)
{
return x - y;
}

JS_DEFINE_CALLINFO_1(DOUBLE, fneg, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_1(DOUBLE, i2f, INT32, 1, 1)
JS_DEFINE_CALLINFO_1(DOUBLE, u2f, UINT32, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)

class SoftFloatFilter: public LirWriter
{
public:

@@ -6163,12 +6161,6 @@ TraceRecorder::record_JSOP_SETLOCALPOP()
return true;
}

bool
TraceRecorder::record_JSOP_GROUP()
{
return true; // no-op
}

bool
TraceRecorder::record_JSOP_SETCALL()
{

@@ -6941,6 +6933,7 @@ UNUSED(JSOP_UNUSED76)
UNUSED(JSOP_UNUSED77)
UNUSED(JSOP_UNUSED78)
UNUSED(JSOP_UNUSED79)
UNUSED(JSOP_UNUSED131)
UNUSED(JSOP_UNUSED201)
UNUSED(JSOP_UNUSED202)
UNUSED(JSOP_UNUSED203)
@@ -204,7 +204,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
* before deserialization of bytecode. If the saved version does not match
* the current version, abort deserialization and invalidate the file.
*/
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 32)
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 33)

/*
* Library-private functions.
@@ -214,7 +214,7 @@ namespace nanojit

// nothing free, steal one
// LSRA says pick the one with the furthest use
LIns* vic = findVictim(regs,allow);
LIns* vic = findVictim(regs, allow);
NanoAssert(vic != NULL);

Reservation* resv = getresv(vic);

@@ -528,6 +528,16 @@ namespace nanojit
return findRegFor(i, rmask(w));
}

Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
{
if (i->isop(LIR_alloc)) {
d += findMemFor(i);
return FP;
} else {
return findRegFor(i, allow);
}
}

Register Assembler::findRegFor(LIns* i, RegisterMask allow)
{
if (i->isop(LIR_alloc)) {

@@ -554,6 +564,8 @@ namespace nanojit
resv = reserveAlloc(i);

r = resv->reg;

#ifdef AVMPLUS_IA32
if (r != UnknownReg &&
((rmask(r)&XmmRegs) && !(allow&XmmRegs) ||
(rmask(r)&x87Regs) && !(allow&x87Regs)))

@@ -563,6 +575,7 @@ namespace nanojit
evict(r);
r = UnknownReg;
}
#endif

if (r == UnknownReg)
{

@@ -610,6 +623,20 @@ namespace nanojit
return rr;
}

void Assembler::asm_spilli(LInsp i, Reservation *resv, bool pop)
{
int d = disp(resv);
Register rr = resv->reg;
bool quad = i->opcode() == LIR_param || i->isQuad();
asm_spill(rr, d, pop, quad);
if (d)
{
verbose_only(if (_verbose) {
outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
})
}
}

void Assembler::freeRsrcOf(LIns *i, bool pop)
{
Reservation* resv = getresv(i);

@@ -632,66 +659,6 @@ namespace nanojit
_allocator.addFree(r);
}

void Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();

// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
return;

LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();
Reservation *rA, *rB;

NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));

// Not supported yet.
#if !defined NANOJIT_64BIT
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
#endif

// ready to issue the compare
if (rhs->isconst())
{
int c = rhs->constval();
if (c == 0 && cond->isop(LIR_eq)) {
Register r = findRegFor(lhs, GpRegs);
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
TESTQ(r, r);
#endif
} else {
TEST(r,r);
}
// No 64-bit immediates so fall-back to below
}
else if (!rhs->isQuad()) {
Register r;
if (lhs->isop(LIR_alloc)) {
r = FP;
c += findMemFor(lhs);
} else {
r = findRegFor(lhs, GpRegs);
}
CMPi(r, c);
}
}
else
{
findRegFor2(GpRegs, lhs, rA, rhs, rB);
Register ra = rA->reg;
Register rb = rB->reg;
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
CMPQ(ra, rb);
#endif
} else {
CMP(ra, rb);
}
}
}

void Assembler::patch(GuardRecord *lr)
{
Fragment *frag = lr->exit->target;
@@ -1068,7 +1035,11 @@ namespace nanojit
JMP(_epilogue);
}
assignSavedParams();
#ifdef NANOJIT_IA32
findSpecificRegFor(ins->oprnd1(), FST0);
#else
NanoAssert(false);
#endif
fpu_pop();
break;
}

@@ -1089,19 +1060,15 @@ namespace nanojit
break;
}
case LIR_short:
{
countlir_imm();
asm_short(ins);
break;
}
case LIR_int:
{
countlir_imm();
Register rr = prepResultReg(ins, GpRegs);
int32_t val;
if (op == LIR_int)
val = ins->imm32();
else
val = ins->imm16();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
asm_int(ins);
break;
}
case LIR_quad:

@@ -1123,139 +1090,36 @@ namespace nanojit
case LIR_param:
{
countlir_param();
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
if (a < abi_regcount) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
} else {
// incoming arg is on stack, and EAX points nearby (see genPrologue)
//_nvprof("param-evict-eax",1);
Register r = prepResultReg(ins, GpRegs & ~rmask(EAX));
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
}
else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
}
asm_param(ins);
break;
}
case LIR_qlo:
{
countlir_qlo();
LIns *q = ins->oprnd1();

if (!asm_qlo(ins, q))
{
Register rr = prepResultReg(ins, GpRegs);
int d = findMemFor(q);
LD(rr, d, FP);
}
asm_qlo(ins);
break;
}
}
case LIR_qhi:
{
countlir_qhi();
Register rr = prepResultReg(ins, GpRegs);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d+4, FP);
asm_qhi(ins);
break;
}

case LIR_qcmov:
case LIR_cmov:
{
countlir_cmov();
LIns* condval = ins->oprnd1();
NanoAssert(condval->isCmp());

LIns* values = ins->oprnd2();

NanoAssert(values->opcode() == LIR_2);
LIns* iftrue = values->oprnd1();
LIns* iffalse = values->oprnd2();

NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));

const Register rr = prepResultReg(ins, GpRegs);

// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
if (op == LIR_cmov) {
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL(rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA(rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
} else if (op == LIR_qcmov) {
#if !defined NANOJIT_64BIT
NanoAssert(0);
#else
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRQNE(rr, iffalsereg); break;
case LIR_ov: MRQNO(rr, iffalsereg); break;
case LIR_cs: MRQNC(rr, iffalsereg); break;
case LIR_lt: MRQGE(rr, iffalsereg); break;
case LIR_le: MRQG(rr, iffalsereg); break;
case LIR_gt: MRQLE(rr, iffalsereg); break;
case LIR_ge: MRQL(rr, iffalsereg); break;
case LIR_ult: MRQAE(rr, iffalsereg); break;
case LIR_ule: MRQA(rr, iffalsereg); break;
case LIR_ugt: MRQBE(rr, iffalsereg); break;
case LIR_uge: MRQB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
#endif
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(condval);
asm_cmov(ins);
break;
}

case LIR_ld:
case LIR_ldc:
case LIR_ldcb:
{
countlir_ld();
LIns* base = ins->oprnd1();
LIns* disp = ins->oprnd2();
Register rr = prepResultReg(ins, GpRegs);
Register ra;
int d = disp->constval();
if (base->isop(LIR_alloc)) {
ra = FP;
d += findMemFor(base);
} else {
ra = findRegFor(base, GpRegs);
}
if (op == LIR_ldcb)
LD8Z(rr, d, ra);
else
LD(rr, d, ra);
asm_ld(ins);
break;
}

case LIR_ldq:
case LIR_ldqc:
{

@@ -1263,31 +1127,13 @@ namespace nanojit
asm_load64(ins);
break;
}

case LIR_neg:
case LIR_not:
{
countlir_alu();
Register rr = prepResultReg(ins, GpRegs);

LIns* lhs = ins->oprnd1();
Reservation *rA = getresv(lhs);
// if this is last use of lhs in reg, we can re-use result reg
Register ra;
if (rA == 0 || (ra=rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.

if (op == LIR_not)
NOT(rr);
else
NEG(rr);

if ( rr != ra )
MR(rr,ra);
asm_neg_not(ins);
break;
}

case LIR_qjoin:
{
countlir_qjoin();
@@ -1318,115 +1164,7 @@ namespace nanojit
case LIR_ush:
{
countlir_alu();
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();

Register rb = UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());

#ifdef NANOJIT_ARM
// Arm can't do an immediate op with immediates
// outside of +/-255 (for AND) or outside of
// 0..255 for others.
if (!forceReg)
{
if (rhs->isconst() && !isU8(rhs->constval()))
forceReg = true;
}
#endif

if (lhs != rhs && forceReg)
{
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
}
else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->constval();
LEA(rr, d, FP);
break;
}

Register rr = prepResultReg(ins, allow);
Reservation* rA = getresv(lhs);
Register ra;
// if this is last use of lhs in reg, we can re-use result reg
if (rA == 0 || (ra = rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.

if (forceReg)
{
if (lhs == rhs)
rb = ra;

if (op == LIR_add || op == LIR_addp)
ADD(rr, rb);
else if (op == LIR_sub)
SUB(rr, rb);
else if (op == LIR_mul)
MUL(rr, rb);
else if (op == LIR_and)
AND(rr, rb);
else if (op == LIR_or)
OR(rr, rb);
else if (op == LIR_xor)
XOR(rr, rb);
else if (op == LIR_lsh)
SHL(rr, rb);
else if (op == LIR_rsh)
SAR(rr, rb);
else if (op == LIR_ush)
SHR(rr, rb);
else
NanoAssertMsg(0, "Unsupported");
}
else
{
int c = rhs->constval();
if (op == LIR_add || op == LIR_addp) {
#ifdef NANOJIT_IA32_TODO
if (ra != rr) {
// this doesn't set cc's, only use it when cc's not required.
LEA(rr, c, ra);
ra = rr; // suppress mov
} else
#endif
{
ADDi(rr, c);
}
} else if (op == LIR_sub) {
#ifdef NANOJIT_IA32
if (ra != rr) {
LEA(rr, -c, ra);
ra = rr;
} else
#endif
{
SUBi(rr, c);
}
} else if (op == LIR_and)
ANDi(rr, c);
else if (op == LIR_or)
ORi(rr, c);
else if (op == LIR_xor)
XORi(rr, c);
else if (op == LIR_lsh)
SHLi(rr, c);
else if (op == LIR_rsh)
SARi(rr, c);
else if (op == LIR_ush)
SHRi(rr, c);
else
NanoAssertMsg(0, "Unsupported");
}

if ( rr != ra )
MR(rr,ra);
asm_arith(ins);
break;
}
#ifndef NJ_SOFTFLOAT
@@ -1602,28 +1340,10 @@ namespace nanojit
case LIR_loop:
{
countlir_loop();
JMP_long_placeholder(); // jump to SOT
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );

loopJumps.add(_nIns);

#ifdef NJ_VERBOSE
// branching from this frag to ourself.
if (_frago->core()->config.show_stats)
#if defined NANOJIT_AMD64
LDQi(argRegs[1], intptr_t((Fragment*)_thisfrag));
#else
LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
#endif

assignSavedParams();

// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
asm_loop(ins, loopJumps);
break;
}

#ifndef NJ_SOFTFLOAT
case LIR_feq:
case LIR_fle:

@@ -1632,17 +1352,7 @@ namespace nanojit
case LIR_fge:
{
countlir_fpu();
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
SETE(r);
#else
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
SETNP(r);
#endif
asm_fcmp(ins);
asm_fcond(ins);
break;
}
#endif

@@ -1659,33 +1369,7 @@ namespace nanojit
case LIR_uge:
{
countlir_alu();
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (op == LIR_eq)
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
SETLE(r);
else if (op == LIR_gt)
SETG(r);
else if (op == LIR_ge)
SETGE(r);
else if (op == LIR_ult)
SETB(r);
else if (op == LIR_ule)
SETBE(r);
else if (op == LIR_ugt)
SETA(r);
else // if (op == LIR_uge)
SETAE(r);
asm_cmp(ins);
asm_cond(ins);
break;
}
@@ -1730,73 +1414,6 @@ namespace nanojit
}
}

NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
NIns* at = 0;
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
if (condop >= LIR_feq && condop <= LIR_fge)
{
return asm_jmpcc(branchOnFalse, cond, targ);
}
#endif
// produce the branch
if (branchOnFalse)
{
if (condop == LIR_eq)
JNE(targ);
else if (condop == LIR_ov)
JNO(targ);
else if (condop == LIR_cs)
JNC(targ);
else if (condop == LIR_lt)
JNL(targ);
else if (condop == LIR_le)
JNLE(targ);
else if (condop == LIR_gt)
JNG(targ);
else if (condop == LIR_ge)
JNGE(targ);
else if (condop == LIR_ult)
JNB(targ);
else if (condop == LIR_ule)
JNBE(targ);
else if (condop == LIR_ugt)
JNA(targ);
else //if (condop == LIR_uge)
JNAE(targ);
}
else // op == LIR_xt
{
if (condop == LIR_eq)
JE(targ);
else if (condop == LIR_ov)
JO(targ);
else if (condop == LIR_cs)
JC(targ);
else if (condop == LIR_lt)
JL(targ);
else if (condop == LIR_le)
JLE(targ);
else if (condop == LIR_gt)
JG(targ);
else if (condop == LIR_ge)
JGE(targ);
else if (condop == LIR_ult)
JB(targ);
else if (condop == LIR_ule)
JBE(targ);
else if (condop == LIR_ugt)
JA(targ);
else //if (condop == LIR_uge)
JAE(targ);
}
at = _nIns;
asm_cmp(cond);
return at;
}

void Assembler::assignSavedParams()
{
// restore saved regs

@@ -1847,6 +1464,7 @@ namespace nanojit
return;

#ifdef NANOJIT_ARM
// @todo Why is this here?!? This routine should be indep. of platform
verbose_only(
if (_verbose) {
char* s = &outline[0];
@@ -224,7 +224,8 @@ namespace nanojit
void assignSaved(RegAlloc &saved, RegisterMask skip);
LInsp findVictim(RegAlloc& regs, RegisterMask allow);

int findMemFor(LIns* i);
Register getBaseReg(LIns *i, int &d, RegisterMask allow);
int findMemFor(LIns* i);
Register findRegFor(LIns* i, RegisterMask allow);
void findRegFor2(RegisterMask allow, LIns* ia, Reservation* &ra, LIns *ib, Reservation* &rb);
Register findSpecificRegFor(LIns* i, Register w);

@@ -286,12 +287,23 @@ namespace nanojit
void asm_restore(LInsp, Reservation*, Register);
void asm_load(int d, Register r);
void asm_spilli(LInsp i, Reservation *resv, bool pop);
void asm_spill(Register rr, int d, bool pop=false, bool quad=false);
void asm_spill(Register rr, int d, bool pop, bool quad);
void asm_load64(LInsp i);
void asm_pusharg(LInsp p);
NIns* asm_adjustBranch(NIns* at, NIns* target);
void asm_quad(LInsp i);
bool asm_qlo(LInsp ins, LInsp q);
void asm_loop(LInsp i, NInsList& loopJumps);
void asm_fcond(LInsp i);
void asm_cond(LInsp i);
void asm_arith(LInsp i);
void asm_neg_not(LInsp i);
void asm_ld(LInsp i);
void asm_cmov(LInsp i);
void asm_param(LInsp i);
void asm_int(LInsp i);
void asm_short(LInsp i);
void asm_qlo(LInsp i);
void asm_qhi(LInsp i);
void asm_fneg(LInsp ins);
void asm_fop(LInsp ins);
void asm_i2f(LInsp ins);
@@ -250,7 +250,7 @@ namespace nanojit

LInsp LirBufWriter::ensureReferenceable(LInsp i, int32_t addedDistance)
{
NanoAssert(i != 0 && !i->isTramp());
NanoAssert(i != 0 /* && !i->isTramp()*/);
LInsp next = _buf->next();
LInsp from = next + 2*addedDistance;
if (canReference(from,i))

@@ -1047,38 +1047,39 @@ namespace nanojit
NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition

ArgSize sizes[2*MAXARGS];
uint32_t argc = ci->get_sizes(sizes);
int32_t argc = ci->get_sizes(sizes);

#ifdef NJ_SOFTFLOAT
if (op == LIR_fcall)
op = LIR_callh;
LInsp args2[MAXARGS*2]; // arm could require 2 args per double
int32_t j = 0;
for (int32_t i = 0; i < MAXARGS; i++) {
int32_t i = 0;
while (j < argc) {
argt >>= 2;
ArgSize a = ArgSize(argt&3);
if (a == ARGSIZE_F) {
LInsp q = args[i];
LInsp q = args[i++];
args2[j++] = ins1(LIR_qhi, q);
args2[j++] = ins1(LIR_qlo, q);
} else if (a != ARGSIZE_NONE) {
args2[j++] = args[i];
} else {
args2[j++] = args[i++];
}
}
args = args2;
NanoAssert(j == argc);
#endif

NanoAssert(argc <= MAXARGS);
NanoAssert(argc <= (int)MAXARGS);
uint32_t words = argwords(argc);
ensureRoom(words+LIns::callInfoWords+1+argc); // ins size + possible tramps
for (uint32_t i=0; i < argc; i++)
for (int32_t i=0; i < argc; i++)
args[i] = ensureReferenceable(args[i], argc-i);
uint8_t* offs = (uint8_t*)_buf->next();
LIns *l = _buf->next() + words;
*(const CallInfo **)l = ci;
l += LIns::callInfoWords;
for (uint32_t i=0; i < argc; i++)
for (int32_t i=0; i < argc; i++)
offs[i] = (uint8_t) l->reference(args[i]);
#if defined NANOJIT_64BIT
l->initOpcode(op);
@@ -39,8 +39,6 @@
#ifndef __nanojit_LIR__
#define __nanojit_LIR__

namespace avmplus { class RegionTracker; }

/**
* Fundamentally, the arguments to the various operands can be grouped along
* two dimensions. One dimension is size: can the arguments fit into a 32-bit

@@ -946,7 +944,6 @@ namespace nanojit
class Assembler;

void compile(Assembler *assm, Fragment *frag);
verbose_only( void printTracker(const char* s, avmplus::RegionTracker& trk, Assembler* assm); )
verbose_only(void live(GC *gc, LirBuffer *lirbuf);)

class StackFilter: public LirFilter
@@ -64,16 +64,15 @@ const char* regNames[] = {"r0","r1","r2","r3","r4","r5","r6","r7","r8","r9","r10

const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 };
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };

void
Assembler::nInit(AvmCore*)
{
// all ARMs have conditional move
avmplus::AvmCore::cmov_available = true;
}

NIns*
Assembler::genPrologue(RegisterMask needSaving)
Assembler::genPrologue()
{
/**
* Prologue

@@ -81,16 +80,13 @@ Assembler::genPrologue(RegisterMask needSaving)

// NJ_RESV_OFFSET is space at the top of the stack for us
// to use for parameter passing (8 bytes at the moment)
uint32_t stackNeeded = 4 * _activation.highwatermark + NJ_STACK_OFFSET;
uint32_t savingCount = 0;
uint32_t stackNeeded = STACK_GRANULARITY * _activation.highwatermark + NJ_STACK_OFFSET;

uint32_t savingMask = 0;
savingCount = 9; //R4-R10,R11,LR
savingMask = SavedRegs | rmask(FRAME_PTR);
(void)needSaving;
uint32_t savingMask = rmask(FP) | rmask(LR);
uint32_t savingCount = 2;

// so for alignment purposes we've pushed return addr, fp, and savingCount registers
uint32_t stackPushed = 4 * (2+savingCount);
// so for alignment purposes we've pushed return addr and fp
uint32_t stackPushed = STACK_GRANULARITY * savingCount;
uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK);
int32_t amt = aligned - stackPushed;

@@ -102,8 +98,8 @@ Assembler::genPrologue(RegisterMask needSaving)
verbose_only( verbose_output(" patch entry"); )
NIns *patchEntry = _nIns;

MR(FRAME_PTR, SP);
PUSH_mask(savingMask|rmask(LR));
MR(FP, SP);
PUSH_mask(savingMask);
return patchEntry;
}

@@ -130,7 +126,7 @@ Assembler::nFragExit(LInsp guard)
}

// pop the stack frame first
MR(SP, FRAME_PTR);
MR(SP, FP);

#ifdef NJ_VERBOSE
if (_frago->core()->config.show_stats) {

@@ -142,15 +138,18 @@ Assembler::nFragExit(LInsp guard)
#endif

// return value is GuardRecord*
LDi(R2, int(lr));
LDi(R0, int(lr));
}

NIns*
Assembler::genEpilogue(RegisterMask restore)
Assembler::genEpilogue()
{
BX(LR); // return
MR(R0,R2); // return LinkRecord*
RegisterMask savingMask = restore | rmask(FRAME_PTR) | rmask(LR);

// this is needed if we jump here from nFragExit
//MR(R0,R2); // return LinkRecord*

RegisterMask savingMask = rmask(FP) | rmask(LR);
POP_mask(savingMask); // regs
return _nIns;
}

@@ -300,7 +299,14 @@ Assembler::nRegisterResetAll(RegAlloc& a)
// add scratch registers to our free list for the allocator
a.clear();
a.used = 0;
a.free = rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) | rmask(R5) | FpRegs;
a.free =
rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
rmask(R10);
#ifdef NJ_ARM_VFP
a.free |= FpRegs;
#endif

debug_only(a.managed = a.free);
}

@@ -399,15 +405,11 @@ Assembler::asm_restore(LInsp i, Reservation *resv, Register r)
}

void
Assembler::asm_spill(LInsp i, Reservation *resv, bool pop)
Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
{
(void)i;
(void)pop;
//fprintf (stderr, "resv->arIndex: %d\n", resv->arIndex);
if (resv->arIndex) {
int d = disp(resv);
// save to spill location
Register rr = resv->reg;
(void) pop;
(void) quad;
if (d) {
if (IsFpReg(rr)) {
if (isS8(d >> 2)) {
FSTD(rr, FP, d);

@@ -418,11 +420,6 @@ Assembler::asm_spill(LInsp i, Reservation *resv, bool pop)
} else {
STR(rr, FP, d);
}

verbose_only(if (_verbose){
outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
}
)
}
}

@@ -599,13 +596,6 @@ Assembler::asm_quad(LInsp ins)
//asm_output("<<< asm_quad");
}

bool
Assembler::asm_qlo(LInsp ins, LInsp q)
{
(void)ins; (void)q;
return false;
}

void
Assembler::asm_nongp_copy(Register r, Register s)
{

@@ -623,7 +613,7 @@ Assembler::asm_nongp_copy(Register r, Register s)
}

Register
Assembler::asm_binop_rhs_reg(LInsp ins)
Assembler::asm_binop_rhs_reg(LInsp)
{
return UnknownReg;
}

@@ -871,7 +861,7 @@ Assembler::LD32_nochk(Register r, int32_t imm)
void
Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk)
{
int32 offs = PC_OFFSET_FROM(_t,_nIns-1);
int32_t offs = PC_OFFSET_FROM(_t,_nIns-1);
//fprintf(stderr, "B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1);
if (isS24(offs)) {
if (_chk) underrunProtect(4);

@@ -947,8 +937,6 @@ Assembler::asm_add_imm(Register rd, Register rn, int32_t imm)
* VFP
*/

#ifdef NJ_ARM_VFP

void
Assembler::asm_i2f(LInsp ins)
{
@ -1094,13 +1082,538 @@ Assembler::asm_fcmp(LInsp ins)
|
||||
}
|
||||
|
||||
Register
|
||||
Assembler::asm_prep_fcall(Reservation* rR, LInsp ins)
|
||||
Assembler::asm_prep_fcall(Reservation*, LInsp)
|
||||
{
|
||||
// We have nothing to do here; we do it all in asm_call.
|
||||
return UnknownReg;
|
||||
}
|
||||
|
||||
#endif /* NJ_ARM_VFP */
|
||||
NIns*
|
||||
Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
|
||||
{
|
||||
NIns* at = 0;
|
||||
LOpcode condop = cond->opcode();
|
||||
NanoAssert(cond->isCond());
|
||||
|
||||
if (condop >= LIR_feq && condop <= LIR_fge)
|
||||
{
|
||||
if (branchOnFalse)
|
||||
JNE(targ);
|
||||
else
|
||||
JE(targ);
|
||||
|
||||
NIns *at = _nIns;
|
||||
asm_fcmp(cond);
|
||||
return at;
|
||||
}
|
||||
|
||||
// produce the branch
|
||||
if (branchOnFalse) {
|
||||
if (condop == LIR_eq)
|
||||
JNE(targ);
|
||||
else if (condop == LIR_ov)
|
||||
JNO(targ);
|
||||
else if (condop == LIR_cs)
|
||||
JNC(targ);
|
||||
else if (condop == LIR_lt)
|
||||
JNL(targ);
|
||||
else if (condop == LIR_le)
|
||||
JNLE(targ);
|
||||
else if (condop == LIR_gt)
|
||||
JNG(targ);
|
||||
else if (condop == LIR_ge)
|
||||
JNGE(targ);
|
||||
else if (condop == LIR_ult)
|
||||
JNB(targ);
|
||||
else if (condop == LIR_ule)
|
||||
JNBE(targ);
|
||||
else if (condop == LIR_ugt)
|
||||
JNA(targ);
|
||||
else //if (condop == LIR_uge)
|
||||
JNAE(targ);
|
||||
} else // op == LIR_xt
|
||||
{
|
||||
if (condop == LIR_eq)
|
||||
JE(targ);
|
||||
else if (condop == LIR_ov)
|
||||
JO(targ);
|
||||
else if (condop == LIR_cs)
|
||||
JC(targ);
|
||||
else if (condop == LIR_lt)
|
||||
JL(targ);
|
||||
else if (condop == LIR_le)
|
||||
JLE(targ);
|
||||
else if (condop == LIR_gt)
|
||||
JG(targ);
|
||||
else if (condop == LIR_ge)
|
||||
JGE(targ);
|
||||
else if (condop == LIR_ult)
|
||||
JB(targ);
|
||||
else if (condop == LIR_ule)
|
||||
JBE(targ);
|
||||
else if (condop == LIR_ugt)
|
||||
JA(targ);
|
||||
else //if (condop == LIR_uge)
|
||||
JAE(targ);
|
||||
}
|
||||
at = _nIns;
|
||||
asm_cmp(cond);
|
||||
return at;
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::asm_cmp(LIns *cond)
|
||||
{
|
||||
LOpcode condop = cond->opcode();
|
||||
|
||||
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
|
||||
if ((condop == LIR_ov) || (condop == LIR_cs))
|
||||
return;
|
||||
|
||||
LInsp lhs = cond->oprnd1();
|
||||
LInsp rhs = cond->oprnd2();
|
||||
Reservation *rA, *rB;
|
||||
|
||||
// Not supported yet.
|
||||
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
|
||||
|
||||
// ready to issue the compare
|
||||
if (rhs->isconst()) {
|
||||
int c = rhs->constval();
|
||||
if (c == 0 && cond->isop(LIR_eq)) {
|
||||
Register r = findRegFor(lhs, GpRegs);
|
||||
TEST(r,r);
|
||||
// No 64-bit immediates so fall-back to below
|
||||
}
|
||||
else if (!rhs->isQuad()) {
|
||||
Register r = getBaseReg(lhs, c, GpRegs);
|
||||
CMPi(r, c);
|
||||
}
|
||||
} else {
|
||||
findRegFor2(GpRegs, lhs, rA, rhs, rB);
|
||||
Register ra = rA->reg;
|
||||
Register rb = rB->reg;
|
||||
CMP(ra, rb);
|
||||
}
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
|
||||
{
|
||||
(void)ins;
|
||||
JMP_long_placeholder(); // jump to SOT
|
||||
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );
|
||||
|
||||
loopJumps.add(_nIns);
|
||||
|
||||
#ifdef NJ_VERBOSE
|
||||
// branching from this frag to ourself.
|
||||
if (_frago->core()->config.show_stats)
|
||||
LDi(argRegs[1], int((Fragment*)_thisfrag));
|
||||
#endif
|
||||
|
||||
assignSavedParams();
|
||||
|
||||
// restore first parameter, the only one we use
|
||||
LInsp state = _thisfrag->lirbuf->state;
|
||||
findSpecificRegFor(state, argRegs[state->imm8()]);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::asm_fcond(LInsp ins)
|
||||
{
|
||||
// only want certain regs
|
||||
Register r = prepResultReg(ins, AllowableFlagRegs);
|
||||
|
||||
SETE(r);
|
||||
asm_fcmp(ins);
|
||||
}
|
||||
|
||||
void
|
||||
Assembler::asm_cond(LInsp ins)
|
||||
{
|
||||
// only want certain regs
|
||||
LOpcode op = ins->opcode();
|
||||
Register r = prepResultReg(ins, AllowableFlagRegs);
|
||||
// SETcc only sets low 8 bits, so extend
|
||||
MOVZX8(r,r);
|
||||
if (op == LIR_eq)
|
||||
SETE(r);
|
||||
else if (op == LIR_ov)
|
||||
SETO(r);
|
||||
else if (op == LIR_cs)
|
||||
SETC(r);
|
||||
else if (op == LIR_lt)
|
||||
SETL(r);
|
||||
else if (op == LIR_le)
|
||||
SETLE(r);
|
||||
else if (op == LIR_gt)
|
||||
SETG(r);
|
||||
else if (op == LIR_ge)
|
||||
SETGE(r);
|
||||
else if (op == LIR_ult)
|
||||
SETB(r);
|
||||
else if (op == LIR_ule)
|
||||
SETBE(r);
|
||||
else if (op == LIR_ugt)
|
||||
SETA(r);
|
||||
else // if (op == LIR_uge)
|
||||
SETAE(r);
|
||||
asm_cmp(ins);
|
||||
}

void
Assembler::asm_arith(LInsp ins)
{
    LOpcode op = ins->opcode();
    LInsp lhs = ins->oprnd1();
    LInsp rhs = ins->oprnd2();

    Register rb = UnknownReg;
    RegisterMask allow = GpRegs;
    bool forceReg = (op == LIR_mul || !rhs->isconst());

    // ARM can't do an immediate op with immediates
    // outside of +/-255 (for AND) or outside of
    // 0..255 for others.
    if (!forceReg) {
        if (rhs->isconst() && !isU8(rhs->constval()))
            forceReg = true;
    }

    if (lhs != rhs && forceReg) {
        if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
            rb = findRegFor(rhs, allow);
        }
        allow &= ~rmask(rb);
    } else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
        // add alloc+const, use lea
        Register rr = prepResultReg(ins, allow);
        int d = findMemFor(lhs) + rhs->constval();
        LEA(rr, d, FP);
    }

    Register rr = prepResultReg(ins, allow);
    Reservation* rA = getresv(lhs);
    Register ra;
    // if this is last use of lhs in reg, we can re-use result reg
    if (rA == 0 || (ra = rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (forceReg) {
        if (lhs == rhs)
            rb = ra;

        if (op == LIR_add || op == LIR_addp)
            ADD(rr, rb);
        else if (op == LIR_sub)
            SUB(rr, rb);
        else if (op == LIR_mul)
            MUL(rr, rb);
        else if (op == LIR_and)
            AND(rr, rb);
        else if (op == LIR_or)
            OR(rr, rb);
        else if (op == LIR_xor)
            XOR(rr, rb);
        else if (op == LIR_lsh)
            SHL(rr, rb);
        else if (op == LIR_rsh)
            SAR(rr, rb);
        else if (op == LIR_ush)
            SHR(rr, rb);
        else
            NanoAssertMsg(0, "Unsupported");
    } else {
        int c = rhs->constval();
        if (op == LIR_add || op == LIR_addp)
            ADDi(rr, c);
        else if (op == LIR_sub)
            SUBi(rr, c);
        else if (op == LIR_and)
            ANDi(rr, c);
        else if (op == LIR_or)
            ORi(rr, c);
        else if (op == LIR_xor)
            XORi(rr, c);
        else if (op == LIR_lsh)
            SHLi(rr, c);
        else if (op == LIR_rsh)
            SARi(rr, c);
        else if (op == LIR_ush)
            SHRi(rr, c);
        else
            NanoAssertMsg(0, "Unsupported");
    }

    if (rr != ra)
        MR(rr,ra);
}
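// The trailing MR(rr,ra) is emitted last but, with reverse emission, runs
// first: it moves the lhs value into the result register before the
// two-address ALU op above combines it with rb or the constant c.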

void
Assembler::asm_neg_not(LInsp ins)
{
    LOpcode op = ins->opcode();
    Register rr = prepResultReg(ins, GpRegs);

    LIns* lhs = ins->oprnd1();
    Reservation *rA = getresv(lhs);
    // if this is last use of lhs in reg, we can re-use result reg
    Register ra;
    if (rA == 0 || (ra=rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (op == LIR_not)
        NOT(rr);
    else
        NEG(rr);

    if ( rr != ra )
        MR(rr,ra);
}

void
Assembler::asm_ld(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* base = ins->oprnd1();
    LIns* disp = ins->oprnd2();
    Register rr = prepResultReg(ins, GpRegs);
    int d = disp->constval();
    Register ra = getBaseReg(base, d, GpRegs);
    if (op == LIR_ldcb)
        LD8Z(rr, d, ra);
    else
        LD(rr, d, ra);
}

void
Assembler::asm_cmov(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* condval = ins->oprnd1();
    NanoAssert(condval->isCmp());

    LIns* values = ins->oprnd2();

    NanoAssert(values->opcode() == LIR_2);
    LIns* iftrue = values->oprnd1();
    LIns* iffalse = values->oprnd2();

    NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));

    const Register rr = prepResultReg(ins, GpRegs);

    // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
    // (This is true on Intel, is it true on all architectures?)
    const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
    if (op == LIR_cmov) {
        switch (condval->opcode()) {
            // note that these are all opposites...
            case LIR_eq:  MRNE(rr, iffalsereg); break;
            case LIR_ov:  MRNO(rr, iffalsereg); break;
            case LIR_cs:  MRNC(rr, iffalsereg); break;
            case LIR_lt:  MRGE(rr, iffalsereg); break;
            case LIR_le:  MRG(rr, iffalsereg);  break;
            case LIR_gt:  MRLE(rr, iffalsereg); break;
            case LIR_ge:  MRL(rr, iffalsereg);  break;
            case LIR_ult: MRAE(rr, iffalsereg); break;
            case LIR_ule: MRA(rr, iffalsereg);  break;
            case LIR_ugt: MRBE(rr, iffalsereg); break;
            case LIR_uge: MRB(rr, iffalsereg);  break;
            debug_only( default: NanoAssert(0); break; )
        }
    } else if (op == LIR_qcmov) {
        NanoAssert(0);
    }
    /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
    asm_cmp(condval);
}
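// The opposite-condition moves above work because findSpecificRegFor(iftrue, rr)
// is emitted last and therefore runs first: rr starts out holding the iftrue
// value, and the negated MRcc overwrites it with iffalse only when the
// original condition is false.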

void
Assembler::asm_qhi(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    LIns *q = ins->oprnd1();
    int d = findMemFor(q);
    LD(rr, d+4, FP);
}

void
Assembler::asm_qlo(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    LIns *q = ins->oprnd1();
    int d = findMemFor(q);
    LD(rr, d, FP);

#if 0
    LIns *q = ins->oprnd1();

    Reservation *resv = getresv(ins);
    Register rr = resv->reg;
    if (rr == UnknownReg) {
        // store quad in spill loc
        int d = disp(resv);
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVDm(d, FP, qr);
    } else {
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVD(rr,qr);
    }
#endif
}


void
Assembler::asm_param(LInsp ins)
{
    uint32_t a = ins->imm8();
    uint32_t kind = ins->imm8b();
    if (kind == 0) {
        // ordinary param
        AbiKind abi = _thisfrag->lirbuf->abi;
        uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
        if (a < abi_regcount) {
            // incoming arg in register
            prepResultReg(ins, rmask(argRegs[a]));
        } else {
            // incoming arg is on stack, and EBP points nearby (see genPrologue)
            Register r = prepResultReg(ins, GpRegs);
            int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
            LD(r, d, FP);
        }
    } else {
        // saved param
        prepResultReg(ins, rmask(savedRegs[a]));
    }
}
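// The stack-arg displacement assumes the usual x86 frame laid out by
// genPrologue: [EBP+0] saved EBP, [EBP+4] return address, [EBP+8] first
// stack argument -- hence the "+ 8" above.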

void
Assembler::asm_short(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm16();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

void
Assembler::asm_int(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm32();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

#if 0
void
Assembler::asm_quad(LInsp ins)
{
    Reservation *rR = getresv(ins);
    Register rr = rR->reg;
    if (rr != UnknownReg)
    {
        // @todo -- add special-cases for 0 and 1
        _allocator.retire(rr);
        rR->reg = UnknownReg;
        NanoAssert((rmask(rr) & FpRegs) != 0);

        const double d = ins->constvalf();
        const uint64_t q = ins->constvalq();
        if (rmask(rr) & XmmRegs) {
            if (q == 0.0) {
                // test (int64)0 since -0.0 == 0.0
                SSE_XORPDr(rr, rr);
            } else if (d == 1.0) {
                // 1.0 is extremely frequent and worth special-casing!
                static const double k_ONE = 1.0;
                LDSDm(rr, &k_ONE);
            } else {
                findMemFor(ins);
                const int d = disp(rR);
                SSE_LDQ(rr, d, FP);
            }
        } else {
            if (q == 0.0) {
                // test (int64)0 since -0.0 == 0.0
                FLDZ();
            } else if (d == 1.0) {
                FLD1();
            } else {
                findMemFor(ins);
                int d = disp(rR);
                FLDQ(d,FP);
            }
        }
    }

    // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
    int d = disp(rR);
    freeRsrcOf(ins, false);
    if (d) {
        const int32_t* p = (const int32_t*) (ins-2);
        STi(FP,d+4,p[1]);
        STi(FP,d,p[0]);
    }
}
#endif

void
Assembler::asm_arg(ArgSize sz, LInsp p, Register r)
{
    if (sz == ARGSIZE_Q) {
        // ref arg - use lea
        if (r != UnknownReg) {
            // arg in specific reg
            int da = findMemFor(p);
            LEA(r, da, FP);
        } else {
            NanoAssert(0); // not supported
        }
    } else if (sz == ARGSIZE_LO) {
        if (r != UnknownReg) {
            // arg goes in specific register
            if (p->isconst()) {
                LDi(r, p->constval());
            } else {
                Reservation* rA = getresv(p);
                if (rA) {
                    if (rA->reg == UnknownReg) {
                        // load it into the arg reg
                        int d = findMemFor(p);
                        if (p->isop(LIR_alloc)) {
                            LEA(r, d, FP);
                        } else {
                            LD(r, d, FP);
                        }
                    } else {
                        // it must be in a saved reg
                        MR(r, rA->reg);
                    }
                } else {
                    // this is the last use, so fine to assign it
                    // to the scratch reg, it's dead after this point.
                    findSpecificRegFor(p, r);
                }
            }
        } else {
            asm_pusharg(p);
        }
    } else {
        NanoAssert(sz == ARGSIZE_F);
        asm_farg(p);
    }
}

}
#endif /* FEATURE_NANOJIT */

@ -42,6 +42,17 @@
#define __nanojit_NativeArm__


#ifdef PERFM
#include "../vprof/vprof.h"
#define count_instr() _nvprof("arm",1)
#define count_prolog() _nvprof("arm-prolog",1); count_instr();
#define count_imt() _nvprof("arm-imt",1); count_instr()
#else
#define count_instr()
#define count_prolog()
#define count_imt()
#endif

namespace nanojit
{

@ -50,7 +61,7 @@ const int NJ_LOG2_PAGE_SIZE = 12; // 4K
// If NJ_ARM_VFP is defined, then VFP is assumed to
// be present.  If it's not defined, then softfloat
// is used, and NJ_SOFTFLOAT is defined.
#define NJ_ARM_VFP
//#define NJ_ARM_VFP

#ifdef NJ_ARM_VFP

@ -108,10 +119,6 @@ typedef enum {
    FirstFloatReg = 16,
    LastFloatReg = 22,

    // helpers
    FRAME_PTR = 11,
    ESP = SP,

    FirstReg = 0,
#ifdef NJ_ARM_VFP
    LastReg = 23,
@ -152,13 +159,12 @@ typedef struct _FragInfo {
    NIns* epilogue;
} FragInfo;

#ifdef ARM_VFP
static const RegisterMask SavedFpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6 | 1<<D7;
#else
// D0-D6 are not saved; D7-D15 are, but we don't use those,
// so we don't have to worry about saving/restoring them
static const RegisterMask SavedFpRegs = 0;
#endif
static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
static const int NumSavedRegs = 7;
static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10 | SavedFpRegs;

static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f.
static const RegisterMask GpRegs = 0x07FF;
static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
@ -200,6 +206,7 @@ verbose_only( extern const char* regNames[]; )
    void nativePageSetup(); \
    void asm_quad_nochk(Register, const int32_t*); \
    void asm_add_imm(Register, Register, int32_t); \
    void asm_fcmp(LInsp); \
    int* _nSlot; \
    int* _nExitSlot;

@ -218,8 +225,6 @@ verbose_only( extern const char* regNames[]; )

#define IMM32(imm) *(--_nIns) = (NIns)((imm));

#define FUNCADDR(addr) ( ((int)addr) )

#define OP_IMM (1<<25)
#define OP_STAT (1<<20)

@ -60,12 +60,7 @@ namespace nanojit
#endif
const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 };

#ifdef NJ_THUMB_JIT
const Register Assembler::savedRegs[] = { R4, R5, R6, R7 };
#else
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };
#endif

void Assembler::nInit(AvmCore*)
{
@ -355,6 +350,7 @@ namespace nanojit
    asm_mmq(rb, dr, FP, da);
}


void Assembler::asm_quad(LInsp ins)
{
    Reservation *rR = getresv(ins);
@ -368,10 +364,477 @@ namespace nanojit
    }
}

bool Assembler::asm_qlo(LInsp ins, LInsp q)
NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
    (void)ins; (void)q;
    return false;
    NIns* at = 0;
    LOpcode condop = cond->opcode();
    NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
    if (condop >= LIR_feq && condop <= LIR_fge)
    {
        return asm_jmpcc(branchOnFalse, cond, targ);
    }
#endif
    // produce the branch
    if (branchOnFalse)
    {
        if (condop == LIR_eq)
            JNE(targ);
        else if (condop == LIR_ov)
            JNO(targ);
        else if (condop == LIR_cs)
            JNC(targ);
        else if (condop == LIR_lt)
            JNL(targ);
        else if (condop == LIR_le)
            JNLE(targ);
        else if (condop == LIR_gt)
            JNG(targ);
        else if (condop == LIR_ge)
            JNGE(targ);
        else if (condop == LIR_ult)
            JNB(targ);
        else if (condop == LIR_ule)
            JNBE(targ);
        else if (condop == LIR_ugt)
            JNA(targ);
        else //if (condop == LIR_uge)
            JNAE(targ);
    }
    else // op == LIR_xt
    {
        if (condop == LIR_eq)
            JE(targ);
        else if (condop == LIR_ov)
            JO(targ);
        else if (condop == LIR_cs)
            JC(targ);
        else if (condop == LIR_lt)
            JL(targ);
        else if (condop == LIR_le)
            JLE(targ);
        else if (condop == LIR_gt)
            JG(targ);
        else if (condop == LIR_ge)
            JGE(targ);
        else if (condop == LIR_ult)
            JB(targ);
        else if (condop == LIR_ule)
            JBE(targ);
        else if (condop == LIR_ugt)
            JA(targ);
        else //if (condop == LIR_uge)
            JAE(targ);
    }
    at = _nIns;
    asm_cmp(cond);
    return at;
}
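// When branching on false, each LIR condition is emitted as its logical
// negation (LIR_eq -> JNE, LIR_lt -> JNL, ...), and the unsigned
// comparisons use the below/above family (JNB/JNBE/JNA/JNAE) instead of
// the signed less/greater family.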

void Assembler::asm_cmp(LIns *cond)
{
    LOpcode condop = cond->opcode();

    // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
    if ((condop == LIR_ov) || (condop == LIR_cs))
        return;

    LInsp lhs = cond->oprnd1();
    LInsp rhs = cond->oprnd2();
    Reservation *rA, *rB;

    // Not supported yet.
    NanoAssert(!lhs->isQuad() && !rhs->isQuad());

    // ready to issue the compare
    if (rhs->isconst())
    {
        int c = rhs->constval();
        if (c == 0 && cond->isop(LIR_eq)) {
            Register r = findRegFor(lhs, GpRegs);
            TEST(r,r);
            // No 64-bit immediates so fall-back to below
        }
        else if (!rhs->isQuad()) {
            Register r = getBaseReg(lhs, c, GpRegs);
            CMPi(r, c);
        }
    }
    else
    {
        findRegFor2(GpRegs, lhs, rA, rhs, rB);
        Register ra = rA->reg;
        Register rb = rB->reg;
        CMP(ra, rb);
    }
}

void Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
{
    (void)ins;
    JMP_long_placeholder(); // jump to SOT
    verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );

    loopJumps.add(_nIns);

#ifdef NJ_VERBOSE
    // branching from this frag to ourself.
    if (_frago->core()->config.show_stats)
        LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif

    assignSavedParams();

    // restore first parameter, the only one we use
    LInsp state = _thisfrag->lirbuf->state;
    findSpecificRegFor(state, argRegs[state->imm8()]);
}

void Assembler::asm_fcond(LInsp ins)
{
    // only want certain regs
    Register r = prepResultReg(ins, AllowableFlagRegs);
    asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
    SETE(r);
#else
    // SETcc only sets low 8 bits, so extend
    MOVZX8(r,r);
    SETNP(r);
#endif
    asm_fcmp(ins);
}

void Assembler::asm_cond(LInsp ins)
{
    // only want certain regs
    LOpcode op = ins->opcode();
    Register r = prepResultReg(ins, AllowableFlagRegs);
    // SETcc only sets low 8 bits, so extend
    MOVZX8(r,r);
    if (op == LIR_eq)
        SETE(r);
    else if (op == LIR_ov)
        SETO(r);
    else if (op == LIR_cs)
        SETC(r);
    else if (op == LIR_lt)
        SETL(r);
    else if (op == LIR_le)
        SETLE(r);
    else if (op == LIR_gt)
        SETG(r);
    else if (op == LIR_ge)
        SETGE(r);
    else if (op == LIR_ult)
        SETB(r);
    else if (op == LIR_ule)
        SETBE(r);
    else if (op == LIR_ugt)
        SETA(r);
    else // if (op == LIR_uge)
        SETAE(r);
    asm_cmp(ins);
}

void Assembler::asm_arith(LInsp ins)
{
    LOpcode op = ins->opcode();
    LInsp lhs = ins->oprnd1();
    LInsp rhs = ins->oprnd2();

    Register rb = UnknownReg;
    RegisterMask allow = GpRegs;
    bool forceReg = (op == LIR_mul || !rhs->isconst());

#ifdef NANOJIT_ARM
    // ARM can't do an immediate op with immediates
    // outside of +/-255 (for AND) or outside of
    // 0..255 for others.
    if (!forceReg)
    {
        if (rhs->isconst() && !isU8(rhs->constval()))
            forceReg = true;
    }
#endif
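    // (The isU8 test is conservative: an ARM data-processing immediate is
    // really an 8-bit value rotated right by an even amount, so some larger
    // constants would also encode, but 0..255 is the simple safe subset.)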

    if (lhs != rhs && forceReg)
    {
        if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
            rb = findRegFor(rhs, allow);
        }
        allow &= ~rmask(rb);
    }
    else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
        // add alloc+const, use lea
        Register rr = prepResultReg(ins, allow);
        int d = findMemFor(lhs) + rhs->constval();
        LEA(rr, d, FP);
    }

    Register rr = prepResultReg(ins, allow);
    Reservation* rA = getresv(lhs);
    Register ra;
    // if this is last use of lhs in reg, we can re-use result reg
    if (rA == 0 || (ra = rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (forceReg)
    {
        if (lhs == rhs)
            rb = ra;

        if (op == LIR_add || op == LIR_addp)
            ADD(rr, rb);
        else if (op == LIR_sub)
            SUB(rr, rb);
        else if (op == LIR_mul)
            MUL(rr, rb);
        else if (op == LIR_and)
            AND(rr, rb);
        else if (op == LIR_or)
            OR(rr, rb);
        else if (op == LIR_xor)
            XOR(rr, rb);
        else if (op == LIR_lsh)
            SHL(rr, rb);
        else if (op == LIR_rsh)
            SAR(rr, rb);
        else if (op == LIR_ush)
            SHR(rr, rb);
        else
            NanoAssertMsg(0, "Unsupported");
    }
    else
    {
        int c = rhs->constval();
        if (op == LIR_add || op == LIR_addp) {
            ADDi(rr, c);
        } else if (op == LIR_sub) {
            SUBi(rr, c);
        } else if (op == LIR_and)
            ANDi(rr, c);
        else if (op == LIR_or)
            ORi(rr, c);
        else if (op == LIR_xor)
            XORi(rr, c);
        else if (op == LIR_lsh)
            SHLi(rr, c);
        else if (op == LIR_rsh)
            SARi(rr, c);
        else if (op == LIR_ush)
            SHRi(rr, c);
        else
            NanoAssertMsg(0, "Unsupported");
    }

    if ( rr != ra )
        MR(rr,ra);
}

void Assembler::asm_neg_not(LInsp ins)
{
    LOpcode op = ins->opcode();
    Register rr = prepResultReg(ins, GpRegs);

    LIns* lhs = ins->oprnd1();
    Reservation *rA = getresv(lhs);
    // if this is last use of lhs in reg, we can re-use result reg
    Register ra;
    if (rA == 0 || (ra=rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (op == LIR_not)
        NOT(rr);
    else
        NEG(rr);

    if ( rr != ra )
        MR(rr,ra);
}

void Assembler::asm_ld(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* base = ins->oprnd1();
    LIns* disp = ins->oprnd2();
    Register rr = prepResultReg(ins, GpRegs);
    int d = disp->constval();
    Register ra = getBaseReg(base, d, GpRegs);
    if (op == LIR_ldcb)
        LD8Z(rr, d, ra);
    else
        LD(rr, d, ra);
}

void Assembler::asm_cmov(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* condval = ins->oprnd1();
    NanoAssert(condval->isCmp());

    LIns* values = ins->oprnd2();

    NanoAssert(values->opcode() == LIR_2);
    LIns* iftrue = values->oprnd1();
    LIns* iffalse = values->oprnd2();

    NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));

    const Register rr = prepResultReg(ins, GpRegs);

    // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
    // (This is true on Intel, is it true on all architectures?)
    const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
    if (op == LIR_cmov) {
        switch (condval->opcode())
        {
            // note that these are all opposites...
            case LIR_eq:  MRNE(rr, iffalsereg); break;
            case LIR_ov:  MRNO(rr, iffalsereg); break;
            case LIR_cs:  MRNC(rr, iffalsereg); break;
            case LIR_lt:  MRGE(rr, iffalsereg); break;
            case LIR_le:  MRG(rr, iffalsereg);  break;
            case LIR_gt:  MRLE(rr, iffalsereg); break;
            case LIR_ge:  MRL(rr, iffalsereg);  break;
            case LIR_ult: MRAE(rr, iffalsereg); break;
            case LIR_ule: MRA(rr, iffalsereg);  break;
            case LIR_ugt: MRBE(rr, iffalsereg); break;
            case LIR_uge: MRB(rr, iffalsereg);  break;
            debug_only( default: NanoAssert(0); break; )
        }
    } else if (op == LIR_qcmov) {
        NanoAssert(0);
    }
    /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
    asm_cmp(condval);
}

void Assembler::asm_qhi(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    LIns *q = ins->oprnd1();
    int d = findMemFor(q);
    LD(rr, d+4, FP);
}

void Assembler::asm_param(LInsp ins)
{
    uint32_t a = ins->imm8();
    uint32_t kind = ins->imm8b();
    if (kind == 0) {
        // ordinary param
        AbiKind abi = _thisfrag->lirbuf->abi;
        uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
        if (a < abi_regcount) {
            // incoming arg in register
            prepResultReg(ins, rmask(argRegs[a]));
        } else {
            // incoming arg is on stack, and EBP points nearby (see genPrologue)
            Register r = prepResultReg(ins, GpRegs);
            int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
            LD(r, d, FP);
        }
    }
    else {
        // saved param
        prepResultReg(ins, rmask(savedRegs[a]));
    }
}

void Assembler::asm_short(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm16();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

void Assembler::asm_int(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm32();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

void Assembler::asm_quad(LInsp ins)
{
    Reservation *rR = getresv(ins);
    Register rr = rR->reg;
    if (rr != UnknownReg)
    {
        // @todo -- add special-cases for 0 and 1
        _allocator.retire(rr);
        rR->reg = UnknownReg;
        NanoAssert((rmask(rr) & FpRegs) != 0);

        const double d = ins->constvalf();
        const uint64_t q = ins->constvalq();
        if (rmask(rr) & XmmRegs) {
            if (q == 0.0) {
                // test (int64)0 since -0.0 == 0.0
                SSE_XORPDr(rr, rr);
            } else if (d == 1.0) {
                // 1.0 is extremely frequent and worth special-casing!
                static const double k_ONE = 1.0;
                LDSDm(rr, &k_ONE);
            } else {
                findMemFor(ins);
                const int d = disp(rR);
                SSE_LDQ(rr, d, FP);
            }
        } else {
            if (q == 0.0) {
                // test (int64)0 since -0.0 == 0.0
                FLDZ();
            } else if (d == 1.0) {
                FLD1();
            } else {
                findMemFor(ins);
                int d = disp(rR);
                FLDQ(d,FP);
            }
        }
    }

    // @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
    int d = disp(rR);
    freeRsrcOf(ins, false);
    if (d)
    {
        const int32_t* p = (const int32_t*) (ins-2);
        STi(FP,d+4,p[1]);
        STi(FP,d,p[0]);
    }
}
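// q is compared as a raw 64-bit pattern rather than as a double so that
// -0.0 (sign bit set) is not confused with +0.0: FLDZ/SSE_XORPDr produce
// +0.0, so they may only be used when every bit of the constant is zero.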

void Assembler::asm_qlo(LInsp ins)
{
    LIns *q = ins->oprnd1();
    Reservation *resv = getresv(ins);
    Register rr = resv->reg;
    if (rr == UnknownReg) {
        // store quad in spill loc
        int d = disp(resv);
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVDm(d, FP, qr);
    } else {
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVD(rr,qr);
    }
}

void Assembler::asm_nongp_copy(Register r, Register s)

@ -64,27 +64,22 @@
/* ARM registers */
typedef enum
{
    R0 = 0,
    R1 = 1,
    R2 = 2,
    R3 = 3,
    R4 = 4,
    R5 = 5,
    R6 = 6,
    R7 = 7,
    R8 = 8,
    //R9 = 9,
    //R10 = 10,
    //R11 = 11,
    IP = 12,
    SP = 13,
    LR = 14,
    PC = 15,

    FP = SP,

    // helpers
    FRAME_PTR = R7,
    R0  = 0,  // 32bit return value, aka A1
    R1  = 1,  // msw of 64bit return value, A2
    R2  = 2,  // A3
    R3  = 3,  // A4
    R4  = 4,  // V1
    R5  = 5,  // V2
    R6  = 6,  // V3
    R7  = 7,  // V4
    R8  = 8,  // V5
    R9  = 9,  // V6, SB (stack base)
    R10 = 10, // V7, SL
    FP  = 11, // V8, frame pointer
    IP  = 12, // intra-procedure call scratch register
    SP  = 13, // stack pointer
    LR  = 14, // link register (BL sets LR = return address)
    PC  = 15, // program counter

    FirstReg = 0,
    LastReg = 5,

@ -354,7 +354,8 @@ namespace nanojit
#else
    if (mprotect((void *)addr, count*NJ_PAGE_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
#endif
        AvmDebugLog(("FATAL ERROR: mprotect(PROT_EXEC) failed\n"));
        // todo: we can't abort or assert here, we have to fail gracefully.
        NanoAssertMsg(false, "FATAL ERROR: mprotect(PROT_EXEC) failed\n");
        abort();
    }
#endif
@ -539,13 +540,7 @@
{
    if (value->isconst())
    {
        Register rb;
        if (base->isop(LIR_alloc)) {
            rb = FP;
            dr += findMemFor(base);
        } else {
            rb = findRegFor(base, GpRegs);
        }
        Register rb = getBaseReg(base, dr, GpRegs);
        int c = value->constval();
        STi(rb, dr, c);
    }
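// getBaseReg() subsumes the explicit LIR_alloc if/else above: for stack
// allocations it returns FP and folds the alloc's frame offset into dr
// in place.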

@ -610,20 +605,6 @@ namespace nanojit
#endif
}

void Assembler::asm_spilli(LInsp i, Reservation *resv, bool pop)
{
    int d = disp(resv);
    Register rr = resv->reg;
    bool quad = i->opcode() == LIR_param || i->isQuad();
    asm_spill(rr, d, pop, quad);
    if (d)
    {
        verbose_only(if (_verbose) {
            outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
        })
    }
}

void Assembler::asm_load64(LInsp ins)
{
    LIns* base = ins->oprnd1();
@ -634,13 +615,7 @@ namespace nanojit
    if (rr != UnknownReg && rmask(rr) & XmmRegs)
    {
        freeRsrcOf(ins, false);
        Register rb;
        if (base->isop(LIR_alloc)) {
            rb = FP;
            db += findMemFor(base);
        } else {
            rb = findRegFor(base, GpRegs);
        }
        Register rb = getBaseReg(base, db, GpRegs);
        SSE_LDQ(rr, db, rb);
    }
#if defined NANOJIT_AMD64
@ -844,6 +819,462 @@ namespace nanojit
#endif
}

NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
    NIns* at = 0;
    LOpcode condop = cond->opcode();
    NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
    if (condop >= LIR_feq && condop <= LIR_fge)
    {
        return asm_jmpcc(branchOnFalse, cond, targ);
    }
#endif
    // produce the branch
    if (branchOnFalse)
    {
        if (condop == LIR_eq)
            JNE(targ);
        else if (condop == LIR_ov)
            JNO(targ);
        else if (condop == LIR_cs)
            JNC(targ);
        else if (condop == LIR_lt)
            JNL(targ);
        else if (condop == LIR_le)
            JNLE(targ);
        else if (condop == LIR_gt)
            JNG(targ);
        else if (condop == LIR_ge)
            JNGE(targ);
        else if (condop == LIR_ult)
            JNB(targ);
        else if (condop == LIR_ule)
            JNBE(targ);
        else if (condop == LIR_ugt)
            JNA(targ);
        else //if (condop == LIR_uge)
            JNAE(targ);
    }
    else // op == LIR_xt
    {
        if (condop == LIR_eq)
            JE(targ);
        else if (condop == LIR_ov)
            JO(targ);
        else if (condop == LIR_cs)
            JC(targ);
        else if (condop == LIR_lt)
            JL(targ);
        else if (condop == LIR_le)
            JLE(targ);
        else if (condop == LIR_gt)
            JG(targ);
        else if (condop == LIR_ge)
            JGE(targ);
        else if (condop == LIR_ult)
            JB(targ);
        else if (condop == LIR_ule)
            JBE(targ);
        else if (condop == LIR_ugt)
            JA(targ);
        else //if (condop == LIR_uge)
            JAE(targ);
    }
    at = _nIns;
    asm_cmp(cond);
    return at;
}

void Assembler::asm_cmp(LIns *cond)
{
    LOpcode condop = cond->opcode();

    // LIR_ov and LIR_cs recycle the flags set by arithmetic ops
    if ((condop == LIR_ov) || (condop == LIR_cs))
        return;

    LInsp lhs = cond->oprnd1();
    LInsp rhs = cond->oprnd2();
    Reservation *rA, *rB;

    NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));

    // Not supported yet.
#if !defined NANOJIT_64BIT
    NanoAssert(!lhs->isQuad() && !rhs->isQuad());
#endif

    // ready to issue the compare
    if (rhs->isconst())
    {
        int c = rhs->constval();
        if (c == 0 && cond->isop(LIR_eq)) {
            Register r = findRegFor(lhs, GpRegs);
            if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
                TESTQ(r, r);
#endif
            } else {
                TEST(r,r);
            }
            // No 64-bit immediates so fall-back to below
        }
        else if (!rhs->isQuad()) {
            Register r = getBaseReg(lhs, c, GpRegs);
            CMPi(r, c);
        }
    }
    else
    {
        findRegFor2(GpRegs, lhs, rA, rhs, rB);
        Register ra = rA->reg;
        Register rb = rB->reg;
        if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
            CMPQ(ra, rb);
#endif
        } else {
            CMP(ra, rb);
        }
    }
}

void Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
{
    (void)ins;
    JMP_long_placeholder(); // jump to SOT
    verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );

    loopJumps.add(_nIns);

#ifdef NJ_VERBOSE
    // branching from this frag to ourself.
    if (_frago->core()->config.show_stats)
#if defined NANOJIT_AMD64
        LDQi(argRegs[1], intptr_t((Fragment*)_thisfrag));
#else
        LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
#endif

    assignSavedParams();

    // restore first parameter, the only one we use
    LInsp state = _thisfrag->lirbuf->state;
    findSpecificRegFor(state, argRegs[state->imm8()]);
}

void Assembler::asm_fcond(LInsp ins)
{
    // only want certain regs
    Register r = prepResultReg(ins, AllowableFlagRegs);
    asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
    SETE(r);
#else
    // SETcc only sets low 8 bits, so extend
    MOVZX8(r,r);
    SETNP(r);
#endif
    asm_fcmp(ins);
}

void Assembler::asm_cond(LInsp ins)
{
    // only want certain regs
    LOpcode op = ins->opcode();
    Register r = prepResultReg(ins, AllowableFlagRegs);
    // SETcc only sets low 8 bits, so extend
    MOVZX8(r,r);
    if (op == LIR_eq)
        SETE(r);
    else if (op == LIR_ov)
        SETO(r);
    else if (op == LIR_cs)
        SETC(r);
    else if (op == LIR_lt)
        SETL(r);
    else if (op == LIR_le)
        SETLE(r);
    else if (op == LIR_gt)
        SETG(r);
    else if (op == LIR_ge)
        SETGE(r);
    else if (op == LIR_ult)
        SETB(r);
    else if (op == LIR_ule)
        SETBE(r);
    else if (op == LIR_ugt)
        SETA(r);
    else // if (op == LIR_uge)
        SETAE(r);
    asm_cmp(ins);
}

void Assembler::asm_arith(LInsp ins)
{
    LOpcode op = ins->opcode();
    LInsp lhs = ins->oprnd1();
    LInsp rhs = ins->oprnd2();

    Register rb = UnknownReg;
    RegisterMask allow = GpRegs;
    bool forceReg = (op == LIR_mul || !rhs->isconst());

#ifdef NANOJIT_ARM
    // ARM can't do an immediate op with immediates
    // outside of +/-255 (for AND) or outside of
    // 0..255 for others.
    if (!forceReg)
    {
        if (rhs->isconst() && !isU8(rhs->constval()))
            forceReg = true;
    }
#endif

    if (lhs != rhs && forceReg)
    {
        if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
            rb = findRegFor(rhs, allow);
        }
        allow &= ~rmask(rb);
    }
    else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
        // add alloc+const, use lea
        Register rr = prepResultReg(ins, allow);
        int d = findMemFor(lhs) + rhs->constval();
        LEA(rr, d, FP);
    }

    Register rr = prepResultReg(ins, allow);
    Reservation* rA = getresv(lhs);
    Register ra;
    // if this is last use of lhs in reg, we can re-use result reg
    if (rA == 0 || (ra = rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (forceReg)
    {
        if (lhs == rhs)
            rb = ra;

        if (op == LIR_add || op == LIR_addp)
            ADD(rr, rb);
        else if (op == LIR_sub)
            SUB(rr, rb);
        else if (op == LIR_mul)
            MUL(rr, rb);
        else if (op == LIR_and)
            AND(rr, rb);
        else if (op == LIR_or)
            OR(rr, rb);
        else if (op == LIR_xor)
            XOR(rr, rb);
        else if (op == LIR_lsh)
            SHL(rr, rb);
        else if (op == LIR_rsh)
            SAR(rr, rb);
        else if (op == LIR_ush)
            SHR(rr, rb);
        else
            NanoAssertMsg(0, "Unsupported");
    }
    else
    {
        int c = rhs->constval();
        if (op == LIR_add || op == LIR_addp) {
#ifdef NANOJIT_IA32_TODO
            if (ra != rr) {
                // this doesn't set cc's, only use it when cc's not required.
                LEA(rr, c, ra);
                ra = rr; // suppress mov
            } else
#endif
            {
                ADDi(rr, c);
            }
        } else if (op == LIR_sub) {
#ifdef NANOJIT_IA32
            if (ra != rr) {
                LEA(rr, -c, ra);
                ra = rr;
            } else
#endif
            {
                SUBi(rr, c);
            }
        } else if (op == LIR_and)
            ANDi(rr, c);
        else if (op == LIR_or)
            ORi(rr, c);
        else if (op == LIR_xor)
            XORi(rr, c);
        else if (op == LIR_lsh)
            SHLi(rr, c);
        else if (op == LIR_rsh)
            SARi(rr, c);
        else if (op == LIR_ush)
            SHRi(rr, c);
        else
            NanoAssertMsg(0, "Unsupported");
    }

    if ( rr != ra )
        MR(rr,ra);
}

void Assembler::asm_neg_not(LInsp ins)
{
    LOpcode op = ins->opcode();
    Register rr = prepResultReg(ins, GpRegs);

    LIns* lhs = ins->oprnd1();
    Reservation *rA = getresv(lhs);
    // if this is last use of lhs in reg, we can re-use result reg
    Register ra;
    if (rA == 0 || (ra=rA->reg) == UnknownReg)
        ra = findSpecificRegFor(lhs, rr);
    // else, rA already has a register assigned.

    if (op == LIR_not)
        NOT(rr);
    else
        NEG(rr);

    if ( rr != ra )
        MR(rr,ra);
}

void Assembler::asm_ld(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* base = ins->oprnd1();
    LIns* disp = ins->oprnd2();
    Register rr = prepResultReg(ins, GpRegs);
    int d = disp->constval();
    Register ra = getBaseReg(base, d, GpRegs);
    if (op == LIR_ldcb)
        LD8Z(rr, d, ra);
    else
        LD(rr, d, ra);
}

void Assembler::asm_cmov(LInsp ins)
{
    LOpcode op = ins->opcode();
    LIns* condval = ins->oprnd1();
    NanoAssert(condval->isCmp());

    LIns* values = ins->oprnd2();

    NanoAssert(values->opcode() == LIR_2);
    LIns* iftrue = values->oprnd1();
    LIns* iffalse = values->oprnd2();

    NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));

    const Register rr = prepResultReg(ins, GpRegs);

    // this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
    // (This is true on Intel, is it true on all architectures?)
    const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
    if (op == LIR_cmov) {
        switch (condval->opcode())
        {
            // note that these are all opposites...
            case LIR_eq:  MRNE(rr, iffalsereg); break;
            case LIR_ov:  MRNO(rr, iffalsereg); break;
            case LIR_cs:  MRNC(rr, iffalsereg); break;
            case LIR_lt:  MRGE(rr, iffalsereg); break;
            case LIR_le:  MRG(rr, iffalsereg);  break;
            case LIR_gt:  MRLE(rr, iffalsereg); break;
            case LIR_ge:  MRL(rr, iffalsereg);  break;
            case LIR_ult: MRAE(rr, iffalsereg); break;
            case LIR_ule: MRA(rr, iffalsereg);  break;
            case LIR_ugt: MRBE(rr, iffalsereg); break;
            case LIR_uge: MRB(rr, iffalsereg);  break;
            debug_only( default: NanoAssert(0); break; )
        }
    } else if (op == LIR_qcmov) {
#if !defined NANOJIT_64BIT
        NanoAssert(0);
#else
        switch (condval->opcode())
        {
            // note that these are all opposites...
            case LIR_eq:  MRQNE(rr, iffalsereg); break;
            case LIR_ov:  MRQNO(rr, iffalsereg); break;
            case LIR_cs:  MRQNC(rr, iffalsereg); break;
            case LIR_lt:  MRQGE(rr, iffalsereg); break;
            case LIR_le:  MRQG(rr, iffalsereg);  break;
            case LIR_gt:  MRQLE(rr, iffalsereg); break;
            case LIR_ge:  MRQL(rr, iffalsereg);  break;
            case LIR_ult: MRQAE(rr, iffalsereg); break;
            case LIR_ule: MRQA(rr, iffalsereg);  break;
            case LIR_ugt: MRQBE(rr, iffalsereg); break;
            case LIR_uge: MRQB(rr, iffalsereg);  break;
            debug_only( default: NanoAssert(0); break; )
        }
#endif
    }
    /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
    asm_cmp(condval);
}

void Assembler::asm_qhi(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    LIns *q = ins->oprnd1();
    int d = findMemFor(q);
    LD(rr, d+4, FP);
}

void Assembler::asm_param(LInsp ins)
{
    uint32_t a = ins->imm8();
    uint32_t kind = ins->imm8b();
    if (kind == 0) {
        // ordinary param
        AbiKind abi = _thisfrag->lirbuf->abi;
        uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
        if (a < abi_regcount) {
            // incoming arg in register
            prepResultReg(ins, rmask(argRegs[a]));
        } else {
            // incoming arg is on stack, and EBP points nearby (see genPrologue)
            Register r = prepResultReg(ins, GpRegs);
            int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
            LD(r, d, FP);
        }
    }
    else {
        // saved param
        prepResultReg(ins, rmask(savedRegs[a]));
    }
}

void Assembler::asm_short(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm16();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

void Assembler::asm_int(LInsp ins)
{
    Register rr = prepResultReg(ins, GpRegs);
    int32_t val = ins->imm32();
    if (val == 0)
        XOR(rr,rr);
    else
        LDi(rr, val);
}

void Assembler::asm_quad(LInsp ins)
{
#if defined NANOJIT_IA32
@ -935,30 +1366,34 @@ namespace nanojit
#endif
}

bool Assembler::asm_qlo(LInsp ins, LInsp q)
void Assembler::asm_qlo(LInsp ins)
{
    LIns *q = ins->oprnd1();

#if defined NANOJIT_IA32
    if (!avmplus::AvmCore::use_sse2())
    {
        return false;
        Register rr = prepResultReg(ins, GpRegs);
        int d = findMemFor(q);
        LD(rr, d, FP);
    }
    else
#endif

    Reservation *resv = getresv(ins);
    Register rr = resv->reg;
    if (rr == UnknownReg) {
        // store quad in spill loc
        int d = disp(resv);
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVDm(d, FP, qr);
    } else {
        freeRsrcOf(ins, false);
        Register qr = findRegFor(q, XmmRegs);
        SSE_MOVD(rr,qr);
    {
        Reservation *resv = getresv(ins);
        Register rr = resv->reg;
        if (rr == UnknownReg) {
            // store quad in spill loc
            int d = disp(resv);
            freeRsrcOf(ins, false);
            Register qr = findRegFor(q, XmmRegs);
            SSE_MOVDm(d, FP, qr);
        } else {
            freeRsrcOf(ins, false);
            Register qr = findRegFor(q, XmmRegs);
            SSE_MOVD(rr,qr);
        }
    }

    return true;
}
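// With SSE2, SSE_MOVD copies the low 32 bits of the quad's XMM register
// into the result register; without SSE2 (the IA32 path above), the low
// word is simply reloaded from the quad's spill slot at FP+d.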

void Assembler::asm_fneg(LInsp ins)
@ -1624,6 +2059,5 @@ namespace nanojit
    }
}


#endif /* FEATURE_NANOJIT */
}

@ -49,7 +49,7 @@ namespace nanojit
{
    free = 0;
    used = 0;
    memset(active, 0, NJ_MAX_REGISTERS * sizeof(LIns*));
    memset(active, 0, (LastReg+1) * sizeof(LIns*));
}
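// Clearing (LastReg+1) entries covers exactly the register file this
// backend defines, rather than the global NJ_MAX_REGISTERS upper bound.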

bool RegAlloc::isFree(Register r)
@ -120,6 +120,7 @@ namespace nanojit
            }
        }
    }

    NanoAssert(a != 0);
    return a;
}
@ -130,7 +131,7 @@ namespace nanojit
    if (!frag || !frag->lirbuf)
        return;
    LirNameMap *names = frag->lirbuf->names;
    for(int i=0; i<NJ_MAX_REGISTERS; i++)
    for(int i=0; i<(LastReg+1); i++)
    {
        LIns* ins = regs.active[i];
        Register r = (Register)i;