commit 02ceab2b60
Andreas Gal, 2008-10-21 17:50:52 -07:00
20 changed files with 1753 additions and 726 deletions

View File

@ -79,8 +79,8 @@ BUILTIN2(extern, DOUBLE, js_StringToNumber, CONTEXT, STRING,
BUILTIN2(extern, INT32, js_StringToInt32, CONTEXT, STRING, 1, 1) BUILTIN2(extern, INT32, js_StringToInt32, CONTEXT, STRING, 1, 1)
BUILTIN3(extern, JSVAL, js_Any_getprop, CONTEXT, OBJECT, STRING, 0, 0) BUILTIN3(extern, JSVAL, js_Any_getprop, CONTEXT, OBJECT, STRING, 0, 0)
BUILTIN4(extern, BOOL, js_Any_setprop, CONTEXT, OBJECT, STRING, JSVAL, 0, 0) BUILTIN4(extern, BOOL, js_Any_setprop, CONTEXT, OBJECT, STRING, JSVAL, 0, 0)
BUILTIN3(extern, JSVAL, js_Any_getelem, CONTEXT, OBJECT, UINT32, 0, 0) BUILTIN3(extern, JSVAL, js_Any_getelem, CONTEXT, OBJECT, INT32, 0, 0)
BUILTIN4(extern, BOOL, js_Any_setelem, CONTEXT, OBJECT, UINT32, JSVAL, 0, 0) BUILTIN4(extern, BOOL, js_Any_setelem, CONTEXT, OBJECT, INT32, JSVAL, 0, 0)
BUILTIN3(extern, OBJECT, js_FastValueToIterator, CONTEXT, UINT32, JSVAL, 0, 0) BUILTIN3(extern, OBJECT, js_FastValueToIterator, CONTEXT, UINT32, JSVAL, 0, 0)
BUILTIN2(extern, JSVAL, js_FastCallIteratorNext, CONTEXT, OBJECT, 0, 0) BUILTIN2(extern, JSVAL, js_FastCallIteratorNext, CONTEXT, OBJECT, 0, 0)
BUILTIN2(extern, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, 0) BUILTIN2(extern, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, 0)

View File

@ -210,7 +210,7 @@ js_Any_setprop(JSContext* cx, JSObject* obj, JSString* idstr, jsval v)
} }
jsval FASTCALL jsval FASTCALL
js_Any_getelem(JSContext* cx, JSObject* obj, uint32 index) js_Any_getelem(JSContext* cx, JSObject* obj, int32 index)
{ {
jsval v; jsval v;
jsid id; jsid id;
@ -224,7 +224,7 @@ js_Any_getelem(JSContext* cx, JSObject* obj, uint32 index)
} }
JSBool FASTCALL JSBool FASTCALL
js_Any_setelem(JSContext* cx, JSObject* obj, uint32 index, jsval v) js_Any_setelem(JSContext* cx, JSObject* obj, int32 index, jsval v)
{ {
jsid id; jsid id;
if (index < 0) if (index < 0)
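The switch from UINT32 to INT32 for the element index is what makes the bounds check in these builtins meaningful; a short note on the intent (an inference from the hunk, not text from the patch):

// With a uint32 parameter the guard above,
//
//     if (index < 0)      // always false for an unsigned value
//
// could never fire, so a negative index coming out of traced code was
// reinterpreted as a huge positive element number. With an int32 index the
// guard works: negative indexes take the slow, id-building path while
// non-negative ones stay on the fast path.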

View File

@ -5720,8 +5720,7 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
default: default:
/* /*
* If useless, just emit JSOP_TRUE; otherwise convert delete foo() * If useless, just emit JSOP_TRUE; otherwise convert delete foo()
* to foo(), true (a comma expression, requiring SRC_PCDELTA, and * to foo(), true (a comma expression, requiring SRC_PCDELTA).
* also JSOP_GROUP for correctly parenthesized decompilation).
*/ */
useful = JS_FALSE; useful = JS_FALSE;
if (!CheckSideEffects(cx, cg, pn2, &useful)) if (!CheckSideEffects(cx, cg, pn2, &useful))
@ -5743,8 +5742,6 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off)) if (!js_SetSrcNoteOffset(cx, cg, (uintN)noteIndex, 0, tmp-off))
return JS_FALSE; return JS_FALSE;
} }
if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
return JS_FALSE;
} }
break; break;
@ -6165,8 +6162,6 @@ js_EmitTree(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
if (!js_EmitTree(cx, cg, pn->pn_kid)) if (!js_EmitTree(cx, cg, pn->pn_kid))
return JS_FALSE; return JS_FALSE;
cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT; cg->treeContext.flags |= oldflags & TCF_IN_FOR_INIT;
if (js_Emit1(cx, cg, JSOP_GROUP) < 0)
return JS_FALSE;
break; break;
} }
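A summary of what these emitter hunks (and the matching interpreter and decompiler changes below) accomplish; the bytecode listing is an illustrative sketch, not taken from the patch:

// Before: parenthesized subexpressions emitted a JSOP_GROUP no-op purely so
// the decompiler could put the parens back. After this patch nothing at all is
// emitted for parens; the decompiler re-derives them from operator precedence.
//
//   source          bytecode (sketch)       decompiled
//   (a + b) * c     a; b; add; c; mul       (a + b) * c   // add prec < mul prec
//   a + (b * c)     a; b; c; mul; add       a + b * c     // parens not required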

View File

@ -2857,7 +2857,6 @@ js_Interpret(JSContext *cx)
/* No-ops for ease of decompilation. */ /* No-ops for ease of decompilation. */
ADD_EMPTY_CASE(JSOP_NOP) ADD_EMPTY_CASE(JSOP_NOP)
ADD_EMPTY_CASE(JSOP_GROUP)
ADD_EMPTY_CASE(JSOP_CONDSWITCH) ADD_EMPTY_CASE(JSOP_CONDSWITCH)
ADD_EMPTY_CASE(JSOP_TRY) ADD_EMPTY_CASE(JSOP_TRY)
ADD_EMPTY_CASE(JSOP_FINALLY) ADD_EMPTY_CASE(JSOP_FINALLY)
@ -5137,15 +5136,11 @@ js_Interpret(JSContext *cx)
if (!prop) { if (!prop) {
/* Kludge to allow (typeof foo == "undefined") tests. */ /* Kludge to allow (typeof foo == "undefined") tests. */
endpc = script->code + script->length; endpc = script->code + script->length;
for (pc2 = regs.pc + JSOP_NAME_LENGTH; pc2 < endpc; pc2++) { op2 = (JSOp) regs.pc[JSOP_NAME_LENGTH];
op2 = (JSOp)*pc2; if (op2 == JSOP_TYPEOF) {
if (op2 == JSOP_TYPEOF) { PUSH_OPND(JSVAL_VOID);
PUSH_OPND(JSVAL_VOID); len = JSOP_NAME_LENGTH;
len = JSOP_NAME_LENGTH; DO_NEXT_OP(len);
DO_NEXT_OP(len);
}
if (op2 != JSOP_GROUP)
break;
} }
goto atom_not_defined; goto atom_not_defined;
} }
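Why a single look-ahead is now enough here; the bytecode listings are illustrative:

// Before the patch, typeof (foo) compiled to roughly
//     name "foo"; group; typeof
// so this kludge had to scan past any JSOP_GROUP ops between the name and the
// typeof. With JSOP_GROUP gone, both typeof foo and typeof (foo) compile to
//     name "foo"; typeof
// and checking the one opcode at regs.pc[JSOP_NAME_LENGTH] suffices.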
@ -6841,6 +6836,7 @@ js_Interpret(JSContext *cx)
L_JSOP_UNUSED77: L_JSOP_UNUSED77:
L_JSOP_UNUSED78: L_JSOP_UNUSED78:
L_JSOP_UNUSED79: L_JSOP_UNUSED79:
L_JSOP_UNUSED131:
L_JSOP_UNUSED201: L_JSOP_UNUSED201:
L_JSOP_UNUSED202: L_JSOP_UNUSED202:
L_JSOP_UNUSED203: L_JSOP_UNUSED203:

View File

@ -1560,8 +1560,11 @@ js_HasOwnProperty(JSContext *cx, JSLookupPropOp lookup, JSObject *obj, jsid id,
static int32 FASTCALL static int32 FASTCALL
Object_p_hasOwnProperty(JSContext* cx, JSObject* obj, JSString *str) Object_p_hasOwnProperty(JSContext* cx, JSObject* obj, JSString *str)
{ {
jsid id = ATOM_TO_JSID(STRING_TO_JSVAL(str)); jsid id;
jsval v; jsval v;
if (!js_ValueToStringId(cx, STRING_TO_JSVAL(str), &id))
return JSVAL_TO_BOOLEAN(JSVAL_VOID);
if (!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &v)) if (!js_HasOwnProperty(cx, obj->map->ops->lookupProperty, obj, id, &v))
return JSVAL_TO_BOOLEAN(JSVAL_VOID); return JSVAL_TO_BOOLEAN(JSVAL_VOID);
JS_ASSERT(JSVAL_IS_BOOLEAN(v)); JS_ASSERT(JSVAL_IS_BOOLEAN(v));
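A note on why the builtin now goes through js_ValueToStringId (an inference from the hunk, not text from the patch):

// ATOM_TO_JSID(STRING_TO_JSVAL(str)) is only valid when str is already an
// atom, and a JSString handed in from trace need not be. js_ValueToStringId
// atomizes first and can fail, so the builtin now returns the same
// JSVAL_TO_BOOLEAN(JSVAL_VOID) sentinel its other failure path uses instead of
// probing the object with a bogus jsid.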
@ -3324,9 +3327,6 @@ Detecting(JSContext *cx, jsbytecode *pc)
} }
return JS_FALSE; return JS_FALSE;
case JSOP_GROUP:
break;
default: default:
/* /*
* At this point, anything but an extended atom index prefix means * At this point, anything but an extended atom index prefix means

View File

@ -965,10 +965,10 @@ PushOff(SprintStack *ss, ptrdiff_t off, JSOp op)
} }
static ptrdiff_t static ptrdiff_t
PopOff(SprintStack *ss, JSOp op) PopOffPrec(SprintStack *ss, uint8 prec)
{ {
uintN top; uintN top;
const JSCodeSpec *cs, *topcs; const JSCodeSpec *topcs;
ptrdiff_t off; ptrdiff_t off;
/* ss->top points to the next free slot; be paranoid about underflow. */ /* ss->top points to the next free slot; be paranoid about underflow. */
@ -980,8 +980,7 @@ PopOff(SprintStack *ss, JSOp op)
ss->top = --top; ss->top = --top;
off = GetOff(ss, top); off = GetOff(ss, top);
topcs = &js_CodeSpec[ss->opcodes[top]]; topcs = &js_CodeSpec[ss->opcodes[top]];
cs = &js_CodeSpec[op]; if (topcs->prec != 0 && topcs->prec < prec) {
if (topcs->prec != 0 && topcs->prec < cs->prec) {
ss->sprinter.offset = ss->offsets[top] = off - 2; ss->sprinter.offset = ss->offsets[top] = off - 2;
off = Sprint(&ss->sprinter, "(%s)", OFF2STR(&ss->sprinter, off)); off = Sprint(&ss->sprinter, "(%s)", OFF2STR(&ss->sprinter, off));
} else { } else {
@ -991,14 +990,26 @@ PopOff(SprintStack *ss, JSOp op)
} }
static const char * static const char *
PopStr(SprintStack *ss, JSOp op) PopStrPrec(SprintStack *ss, uint8 prec)
{ {
ptrdiff_t off; ptrdiff_t off;
off = PopOff(ss, op); off = PopOffPrec(ss, prec);
return OFF2STR(&ss->sprinter, off); return OFF2STR(&ss->sprinter, off);
} }
static ptrdiff_t
PopOff(SprintStack *ss, JSOp op)
{
return PopOffPrec(ss, js_CodeSpec[op].prec);
}
static const char *
PopStr(SprintStack *ss, JSOp op)
{
return PopStrPrec(ss, js_CodeSpec[op].prec);
}
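With the split, callers can pop against an explicit precedence instead of a representative opcode. A standalone sketch (not the real SprintStack code) of the rule PopOffPrec applies:

#include <string>

// prec == 0 means "never parenthesize" in jsopcode.tbl's scheme.
static std::string popWithPrec(const std::string &expr, int exprPrec, int wantedPrec)
{
    // Wrap the popped expression in parens only if it binds more loosely than
    // the context that is about to consume it.
    if (exprPrec != 0 && exprPrec < wantedPrec)
        return "(" + expr + ")";
    return expr;
}

// popWithPrec("a + b", 13, 18) -> "(a + b)"   e.g. used as a member/call base
// popWithPrec("a + b", 13, 13) -> "a + b"     equal precedence: no parens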
typedef struct TableEntry { typedef struct TableEntry {
jsval key; jsval key;
ptrdiff_t offset; ptrdiff_t offset;
@ -1744,10 +1755,17 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
/* /*
* Local macros * Local macros
*/ */
#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL)
#define DECOMPILE_CODE(pc,nb) if (!Decompile(ss, pc, nb, JSOP_NOP)) return NULL #define DECOMPILE_CODE(pc,nb) if (!Decompile(ss, pc, nb, JSOP_NOP)) return NULL
#define NEXT_OP(pc) (((pc) + (len) == endpc) ? nextop : pc[len]) #define NEXT_OP(pc) (((pc) + (len) == endpc) ? nextop : pc[len])
#define POP_STR() PopStr(ss, op) #define POP_STR() PopStr(ss, op)
#define LOCAL_ASSERT(expr) LOCAL_ASSERT_RV(expr, NULL) #define POP_STR_PREC(prec) PopStrPrec(ss, prec)
/*
* Pop a condition expression for if/for/while. JSOP_IFEQ's precedence forces
* extra parens around assignment, which avoids a strict-mode warning.
*/
#define POP_COND_STR() PopStr(ss, JSOP_IFEQ)
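An illustrative consequence of popping conditions at JSOP_IFEQ's precedence; the source/output pairs are assumptions about the resulting decompilation, not taken from the patch:

//   source                    decompiled
//   if (a = f()) g();         if ((a = f())) g();     // assignment-as-condition
//   while (x < n) step();     while (x < n) step();   // ordinary tests unchanged
//
// The extra parens around the assignment are what keep a re-parse of the
// decompiled source from triggering the strict-mode "assignment as condition"
// warning.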
/* /*
* Callers know that ATOM_IS_STRING(atom), and we leave it to the optimizer to * Callers know that ATOM_IS_STRING(atom), and we leave it to the optimizer to
@ -1808,6 +1826,23 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
GET_QUOTE_AND_FMT(qfmt, ufmt, rval); \ GET_QUOTE_AND_FMT(qfmt, ufmt, rval); \
JS_END_MACRO JS_END_MACRO
/*
* Per spec, new x(y).z means (new x(y)).z. For example new (x(y).z) must
* decompile with the constructor parenthesized, but new x.z should not. The
* normal rules give x(y).z and x.z identical precedence: both are produced by
* JSOP_GETPROP.
*
* Therefore, in the JSOP_NEW case we need to know whether the constructor
* expression contains any unparenthesized function calls. So when building a
* MemberExpression or CallExpression, we set ss->opcodes[n] to JSOP_CALL if
* this is true. x(y).z gets JSOP_CALL, not JSOP_GETPROP.
*/
#define PROPAGATE_CALLNESS() \
JS_BEGIN_MACRO \
if (ss->opcodes[ss->top - 1] == JSOP_CALL) \
saveop = JSOP_CALL; \
JS_END_MACRO
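How the macro plays out for constructor expressions (the decompilations shown are illustrative):

//   source                 base left on the stack            decompiled
//   new x.z()              x.z  (no call inside)             new x.z()
//   new (x(y).z)()         x(y).z  tagged JSOP_CALL by       new (x(y).z)()
//                          PROPAGATE_CALLNESS at do_getprop
//
// Tagging a getprop/getelem result as JSOP_CALL whenever its base contained an
// unparenthesized call is what lets the JSOP_NEW case below keep the parens.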
cx = ss->sprinter.context; cx = ss->sprinter.context;
JS_CHECK_RECURSION(cx, return NULL); JS_CHECK_RECURSION(cx, return NULL);
@ -1986,8 +2021,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
op = (JSOp) pc[oplen]; op = (JSOp) pc[oplen];
LOCAL_ASSERT(op != saveop); LOCAL_ASSERT(op != saveop);
} }
rval = POP_STR(); rval = POP_STR_PREC(cs->prec + (!inXML && !!(cs->format & JOF_LEFTASSOC)));
lval = POP_STR(); lval = POP_STR_PREC(cs->prec + (!inXML && !(cs->format & JOF_LEFTASSOC)));
if (op != saveop) { if (op != saveop) {
/* Print only the right operand of the assignment-op. */ /* Print only the right operand of the assignment-op. */
todo = SprintCString(&ss->sprinter, rval); todo = SprintCString(&ss->sprinter, rval);
@ -2035,7 +2070,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
jp->indent += 4; jp->indent += 4;
DECOMPILE_CODE(pc, tail); DECOMPILE_CODE(pc, tail);
jp->indent -= 4; jp->indent -= 4;
js_printf(jp, "\t} while (%s);\n", POP_STR()); js_printf(jp, "\t} while (%s);\n", POP_COND_STR());
pc += tail; pc += tail;
len = js_CodeSpec[*pc].length; len = js_CodeSpec[*pc].length;
todo = -2; todo = -2;
@ -2071,7 +2106,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
if (cond != tail) { if (cond != tail) {
/* Decompile the loop condition. */ /* Decompile the loop condition. */
DECOMPILE_CODE(pc + cond, tail - cond); DECOMPILE_CODE(pc + cond, tail - cond);
js_printf(jp, " %s", POP_STR()); js_printf(jp, " %s", POP_COND_STR());
} }
/* Need a semicolon whether or not there was a cond. */ /* Need a semicolon whether or not there was a cond. */
@ -2153,44 +2188,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
} }
break; break;
case JSOP_GROUP:
cs = &js_CodeSpec[lastop];
if ((cs->prec != 0 &&
cs->prec <= js_CodeSpec[NEXT_OP(pc)].prec) ||
pc[JSOP_GROUP_LENGTH] == JSOP_NULL ||
pc[JSOP_GROUP_LENGTH] == JSOP_NULLTHIS ||
pc[JSOP_GROUP_LENGTH] == JSOP_DUP ||
pc[JSOP_GROUP_LENGTH] == JSOP_IFEQ ||
pc[JSOP_GROUP_LENGTH] == JSOP_IFNE) {
/*
* Force parens if this JSOP_GROUP forced re-association
* against precedence, or if this is a call or constructor
* expression, or if it is destructured (JSOP_DUP), or if
* it is an if or loop condition test.
*
* This is necessary to handle the operator new grammar,
* by which new x(y).z means (new x(y))).z. For example
* new (x(y).z) must decompile with the constructor
* parenthesized, but normal precedence has JSOP_GETPROP
* (for the final .z) higher than JSOP_NEW. In general,
* if the call or constructor expression is parenthesized,
* we preserve parens.
*/
op = JSOP_NAME;
rval = POP_STR();
todo = SprintCString(&ss->sprinter, rval);
} else {
/*
* Don't explicitly parenthesize -- just fix the top
* opcode so that the auto-parens magic in PopOff can do
* its thing.
*/
LOCAL_ASSERT(ss->top != 0);
ss->opcodes[ss->top-1] = saveop = lastop;
todo = -2;
}
break;
case JSOP_PUSH: case JSOP_PUSH:
#if JS_HAS_DESTRUCTURING #if JS_HAS_DESTRUCTURING
sn = js_GetSrcNote(jp->script, pc); sn = js_GetSrcNote(jp->script, pc);
@ -2816,6 +2813,8 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
LOCAL_ASSERT(jp->fun); LOCAL_ASSERT(jp->fun);
fun = jp->fun; fun = jp->fun;
if (fun->flags & JSFUN_EXPR_CLOSURE) { if (fun->flags & JSFUN_EXPR_CLOSURE) {
/* Turn on parens around comma-expression here. */
op = JSOP_SETNAME;
rval = POP_STR(); rval = POP_STR();
js_printf(jp, (*rval == '{') ? "(%s)%s" : ss_format, js_printf(jp, (*rval == '{') ? "(%s)%s" : ss_format,
rval, rval,
@ -2967,8 +2966,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
cond = GetJumpOffset(pc, pc); cond = GetJumpOffset(pc, pc);
tail = js_GetSrcNoteOffset(sn, 0); tail = js_GetSrcNoteOffset(sn, 0);
DECOMPILE_CODE(pc + cond, tail - cond); DECOMPILE_CODE(pc + cond, tail - cond);
rval = POP_STR(); js_printf(jp, "\twhile (%s) {\n", POP_COND_STR());
js_printf(jp, "\twhile (%s) {\n", rval);
jp->indent += 4; jp->indent += 4;
DECOMPILE_CODE(pc + oplen, cond - oplen); DECOMPILE_CODE(pc + oplen, cond - oplen);
jp->indent -= 4; jp->indent -= 4;
@ -3023,8 +3021,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
switch (sn ? SN_TYPE(sn) : SRC_NULL) { switch (sn ? SN_TYPE(sn) : SRC_NULL) {
case SRC_IF: case SRC_IF:
case SRC_IF_ELSE: case SRC_IF_ELSE:
op = JSOP_NOP; /* turn off parens */ rval = POP_COND_STR();
rval = POP_STR();
if (ss->inArrayInit || ss->inGenExp) { if (ss->inArrayInit || ss->inGenExp) {
LOCAL_ASSERT(SN_TYPE(sn) == SRC_IF); LOCAL_ASSERT(SN_TYPE(sn) == SRC_IF);
ss->sprinter.offset -= PAREN_SLOP; ss->sprinter.offset -= PAREN_SLOP;
@ -3467,6 +3464,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
/* /*
* Special case: new (x(y)(z)) must be parenthesized like so. * Special case: new (x(y)(z)) must be parenthesized like so.
* Same for new (x(y).z) -- contrast with new x(y).z. * Same for new (x(y).z) -- contrast with new x(y).z.
* See PROPAGATE_CALLNESS.
*/ */
op = (JSOp) ss->opcodes[ss->top-1]; op = (JSOp) ss->opcodes[ss->top-1];
lval = PopStr(ss, lval = PopStr(ss,
@ -3535,6 +3533,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
case JSOP_DELPROP: case JSOP_DELPROP:
GET_ATOM_QUOTE_AND_FMT("%s %s[%s]", "%s %s.%s", rval); GET_ATOM_QUOTE_AND_FMT("%s %s[%s]", "%s %s.%s", rval);
op = JSOP_GETPROP;
lval = POP_STR(); lval = POP_STR();
todo = Sprint(&ss->sprinter, fmt, js_delete_str, lval, rval); todo = Sprint(&ss->sprinter, fmt, js_delete_str, lval, rval);
break; break;
@ -3542,7 +3541,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
case JSOP_DELELEM: case JSOP_DELELEM:
op = JSOP_NOP; /* turn off parens */ op = JSOP_NOP; /* turn off parens */
xval = POP_STR(); xval = POP_STR();
op = saveop; op = JSOP_GETPROP;
lval = POP_STR(); lval = POP_STR();
if (*xval == '\0') if (*xval == '\0')
goto do_delete_lval; goto do_delete_lval;
@ -3556,6 +3555,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
#if JS_HAS_XML_SUPPORT #if JS_HAS_XML_SUPPORT
case JSOP_DELDESC: case JSOP_DELDESC:
xval = POP_STR(); xval = POP_STR();
op = JSOP_GETPROP;
lval = POP_STR(); lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s %s..%s", todo = Sprint(&ss->sprinter, "%s %s..%s",
js_delete_str, lval, xval); js_delete_str, lval, xval);
@ -3700,6 +3700,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
do_getprop: do_getprop:
GET_QUOTE_AND_FMT(index_format, dot_format, rval); GET_QUOTE_AND_FMT(index_format, dot_format, rval);
do_getprop_lval: do_getprop_lval:
PROPAGATE_CALLNESS();
lval = POP_STR(); lval = POP_STR();
todo = Sprint(&ss->sprinter, fmt, lval, rval); todo = Sprint(&ss->sprinter, fmt, lval, rval);
break; break;
@ -3773,6 +3774,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
op = JSOP_NOP; /* turn off parens */ op = JSOP_NOP; /* turn off parens */
xval = POP_STR(); xval = POP_STR();
op = saveop; op = saveop;
PROPAGATE_CALLNESS();
lval = POP_STR(); lval = POP_STR();
if (*xval == '\0') { if (*xval == '\0') {
todo = Sprint(&ss->sprinter, "%s", lval); todo = Sprint(&ss->sprinter, "%s", lval);
@ -4243,14 +4245,6 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
break; break;
} }
case JSOP_STRICTEQ:
case JSOP_STRICTNE:
rval = POP_STR();
lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s %c== %s",
lval, (op == JSOP_STRICTEQ) ? '=' : '!', rval);
break;
case JSOP_DEFFUN: case JSOP_DEFFUN:
LOAD_FUNCTION(0); LOAD_FUNCTION(0);
todo = -2; todo = -2;
@ -4607,12 +4601,14 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
case JSOP_ENDFILTER: case JSOP_ENDFILTER:
rval = POP_STR(); rval = POP_STR();
PROPAGATE_CALLNESS();
lval = POP_STR(); lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s.(%s)", lval, rval); todo = Sprint(&ss->sprinter, "%s.(%s)", lval, rval);
break; break;
case JSOP_DESCENDANTS: case JSOP_DESCENDANTS:
rval = POP_STR(); rval = POP_STR();
PROPAGATE_CALLNESS();
lval = POP_STR(); lval = POP_STR();
todo = Sprint(&ss->sprinter, "%s..%s", lval, rval); todo = Sprint(&ss->sprinter, "%s..%s", lval, rval);
break; break;

View File

@ -78,9 +78,9 @@
* 12 <<, >>, >>> JSOP_LSH, JSOP_RSH, JSOP_URSH * 12 <<, >>, >>> JSOP_LSH, JSOP_RSH, JSOP_URSH
* 13 +, -, etc. JSOP_ADD, JSOP_SUB, etc. * 13 +, -, etc. JSOP_ADD, JSOP_SUB, etc.
* 14 *, /, % JSOP_MUL, JSOP_DIV, JSOP_MOD * 14 *, /, % JSOP_MUL, JSOP_DIV, JSOP_MOD
* 15 !, ~, etc. JSOP_NOT, JSOP_BITNOT, etc. * 15 !, ~, delete, etc. JSOP_NOT, JSOP_BITNOT, JSOP_DEL*, etc.
* 16 3.14, 0, etc. JSOP_DOUBLE, JSOP_ZERO, etc. * 16 3.14, 0, etc. JSOP_DOUBLE, JSOP_ZERO, etc.
* 17 delete, new JSOP_DEL*, JSOP_NEW * 17 new JSOP_NEW
* 18 x.y, f(), etc. JSOP_GETPROP, JSOP_CALL, etc. * 18 x.y, f(), etc. JSOP_GETPROP, JSOP_CALL, etc.
* 19 x, null, etc. JSOP_NAME, JSOP_NULL, etc. * 19 x, null, etc. JSOP_NAME, JSOP_NULL, etc.
* *
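What the re-ranked levels mean in source terms (illustrative):

//   level 15 (unary):   !x   ~x   typeof x   void x   delete x.y
//   level 17:           new x(y)
//
// Moving the JSOP_DEL* opcodes from 17 down to 15 groups delete with the other
// unary operators for the decompiler's parenthesization decisions, while new
// stays at 17 so that new x(y).z still reads as (new x(y)).z.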
@ -139,9 +139,9 @@ OPDEF(JSOP_NOT, 32, "not", "!", 1, 1, 1, 15, JOF_BYTE|J
OPDEF(JSOP_BITNOT, 33, "bitnot", "~", 1, 1, 1, 15, JOF_BYTE) OPDEF(JSOP_BITNOT, 33, "bitnot", "~", 1, 1, 1, 15, JOF_BYTE)
OPDEF(JSOP_NEG, 34, "neg", "- ", 1, 1, 1, 15, JOF_BYTE) OPDEF(JSOP_NEG, 34, "neg", "- ", 1, 1, 1, 15, JOF_BYTE)
OPDEF(JSOP_NEW, 35, js_new_str, NULL, 3, -1, 1, 17, JOF_UINT16|JOF_INVOKE) OPDEF(JSOP_NEW, 35, js_new_str, NULL, 3, -1, 1, 17, JOF_UINT16|JOF_INVOKE)
OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 17, JOF_ATOM|JOF_NAME|JOF_DEL) OPDEF(JSOP_DELNAME, 36, "delname", NULL, 3, 0, 1, 15, JOF_ATOM|JOF_NAME|JOF_DEL)
OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 17, JOF_ATOM|JOF_PROP|JOF_DEL) OPDEF(JSOP_DELPROP, 37, "delprop", NULL, 3, 1, 1, 15, JOF_ATOM|JOF_PROP|JOF_DEL)
OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL) OPDEF(JSOP_DELELEM, 38, "delelem", NULL, 1, 2, 1, 15, JOF_BYTE |JOF_ELEM|JOF_DEL)
OPDEF(JSOP_TYPEOF, 39, js_typeof_str,NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING) OPDEF(JSOP_TYPEOF, 39, js_typeof_str,NULL, 1, 1, 1, 15, JOF_BYTE|JOF_DETECTING)
OPDEF(JSOP_VOID, 40, js_void_str, NULL, 1, 1, 1, 15, JOF_BYTE) OPDEF(JSOP_VOID, 40, js_void_str, NULL, 1, 1, 1, 15, JOF_BYTE)
@ -181,8 +181,8 @@ OPDEF(JSOP_TABLESWITCH, 70, "tableswitch", NULL, -1, 1, 0, 0, JOF_TABLES
OPDEF(JSOP_LOOKUPSWITCH, 71, "lookupswitch", NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCH|JOF_DETECTING|JOF_PARENHEAD) OPDEF(JSOP_LOOKUPSWITCH, 71, "lookupswitch", NULL, -1, 1, 0, 0, JOF_LOOKUPSWITCH|JOF_DETECTING|JOF_PARENHEAD)
/* New, infallible/transitive identity ops. */ /* New, infallible/transitive identity ops. */
OPDEF(JSOP_STRICTEQ, 72, "stricteq", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING) OPDEF(JSOP_STRICTEQ, 72, "stricteq", "===", 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING|JOF_LEFTASSOC)
OPDEF(JSOP_STRICTNE, 73, "strictne", NULL, 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING) OPDEF(JSOP_STRICTNE, 73, "strictne", "!==", 1, 2, 1, 10, JOF_BYTE|JOF_DETECTING|JOF_LEFTASSOC)
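Giving the strict-equality opcodes a source image and JOF_LEFTASSOC presumably lets them take the generic binary-operator path in the decompiler, which is why the hand-written "%s %c== %s" case is removed from jsopcode.cpp above. Illustrative decompilations under the new rule:

//   a === b             // printed straight from the table's "===" image
//   a === (b !== c)     // a right operand of equal precedence keeps its
//                       // parens: JOF_LEFTASSOC makes the right side pop at
//                       // prec+1 (see the POP_STR_PREC change above)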
/* Resume instruction (emitted for the JIT for instructions that can't be restarted). */ /* Resume instruction (emitted for the JIT for instructions that can't be restarted). */
OPDEF(JSOP_RESUME, 74, "resume", NULL, 1, 0, 0, 0, JOF_BYTE) OPDEF(JSOP_RESUME, 74, "resume", NULL, 1, 0, 0, 0, JOF_BYTE)
@ -318,7 +318,7 @@ OPDEF(JSOP_NAMEDFUNOBJ, 129, "namedfunobj", NULL, 3, 0, 1, 19, JOF_OBJECT
OPDEF(JSOP_SETLOCALPOP, 130, "setlocalpop", NULL, 3, 1, 0, 3, JOF_LOCAL|JOF_NAME|JOF_SET) OPDEF(JSOP_SETLOCALPOP, 130, "setlocalpop", NULL, 3, 1, 0, 3, JOF_LOCAL|JOF_NAME|JOF_SET)
/* Parenthesization opcode to help the decompiler. */ /* Parenthesization opcode to help the decompiler. */
OPDEF(JSOP_GROUP, 131, "group", NULL, 1, 0, 0, 19, JOF_BYTE) OPDEF(JSOP_UNUSED131, 131, "unused131", NULL, 1, 0, 0, 0, JOF_BYTE)
/* /*
* Host object extension: given 'o.item(i) = j', the left-hand side compiles * Host object extension: given 'o.item(i) = j', the left-hand side compiles
@ -419,7 +419,7 @@ OPDEF(JSOP_GETFUNNS, 185,"getfunns", NULL, 1, 0, 1, 19, JOF_BYTE)
*/ */
OPDEF(JSOP_GETUPVAR, 186,"getupvar", NULL, 3, 0, 1, 19, JOF_UINT16|JOF_NAME) OPDEF(JSOP_GETUPVAR, 186,"getupvar", NULL, 3, 0, 1, 19, JOF_UINT16|JOF_NAME)
OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 17, JOF_BYTE |JOF_ELEM|JOF_DEL) OPDEF(JSOP_DELDESC, 187,"deldesc", NULL, 1, 2, 1, 15, JOF_BYTE|JOF_ELEM|JOF_DEL)
/* /*
* Opcode to hold 24-bit immediate integer operands. * Opcode to hold 24-bit immediate integer operands.

View File

@ -4516,7 +4516,7 @@ MemberExpr(JSContext *cx, JSTokenStream *ts, JSTreeContext *tc,
} else if (tt == TOK_RP) { } else if (tt == TOK_RP) {
JSParseNode *group = pn3; JSParseNode *group = pn3;
/* Recycle the useless TOK_RP/JSOP_GROUP node. */ /* Recycle the useless TOK_RP node. */
pn3 = group->pn_kid; pn3 = group->pn_kid;
group->pn_kid = NULL; group->pn_kid = NULL;
RecycleTree(group, tc); RecycleTree(group, tc);
@ -6279,7 +6279,7 @@ Boolish(JSParseNode *pn)
{ {
switch (pn->pn_op) { switch (pn->pn_op) {
case JSOP_DOUBLE: case JSOP_DOUBLE:
return pn->pn_dval != 0; return pn->pn_dval != 0 && !JSDOUBLE_IS_NaN(pn->pn_dval);
case JSOP_STRING: case JSOP_STRING:
return JSSTRING_LENGTH(ATOM_TO_STRING(pn->pn_atom)) != 0; return JSSTRING_LENGTH(ATOM_TO_STRING(pn->pn_atom)) != 0;
@ -6728,6 +6728,14 @@ js_FoldConstants(JSContext *cx, JSParseNode *pn, JSTreeContext *tc, bool inCond)
pn->pn_arity = PN_NULLARY; pn->pn_arity = PN_NULLARY;
pn->pn_dval = d; pn->pn_dval = d;
RecycleTree(pn1, tc); RecycleTree(pn1, tc);
} else if (pn1->pn_type == TOK_PRIMARY) {
if (pn->pn_op == JSOP_NOT &&
(pn1->pn_op == JSOP_TRUE ||
pn1->pn_op == JSOP_FALSE)) {
PN_MOVE_NODE(pn, pn1);
pn->pn_op = (pn->pn_op == JSOP_TRUE) ? JSOP_FALSE : JSOP_TRUE;
RecycleTree(pn1, tc);
}
} }
break; break;
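Two folding behaviors change in this hunk; the inputs and results below are an illustration:

//   Boolish(3.14)  -> truthy        (unchanged)
//   Boolish(0.0)   -> falsy         (unchanged)
//   Boolish(NaN)   -> falsy         (new: NaN != 0 is true, but NaN is falsy,
//                                    hence the added JSDOUBLE_IS_NaN test)
//
//   !true          -> folds to the primary node false
//   !!false        -> folds bottom-up (inner !false first) to false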

View File

@ -374,8 +374,8 @@ Oracle::clear()
} }
#if defined(NJ_SOFTFLOAT) #if defined(NJ_SOFTFLOAT)
JS_DECLARE_CALLINFO(i2f) JS_DEFINE_CALLINFO_1(static, DOUBLE, i2f, INT32, 1, 1)
JS_DECLARE_CALLINFO(u2f) JS_DEFINE_CALLINFO_1(static, DOUBLE, u2f, UINT32, 1, 1)
#endif #endif
static bool isi2f(LInsp i) static bool isi2f(LInsp i)
@ -475,91 +475,89 @@ static bool overflowSafe(LIns* i)
#if defined(NJ_SOFTFLOAT) #if defined(NJ_SOFTFLOAT)
/* soft float */ /* soft float */
JS_DEFINE_CALLINFO_1(static, DOUBLE, fneg, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(static, DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)
jsdouble FASTCALL jsdouble FASTCALL
js_fneg(jsdouble x) fneg(jsdouble x)
{ {
return -x; return -x;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_i2f(int32 i) i2f(int32 i)
{ {
return i; return i;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_u2f(jsuint u) u2f(jsuint u)
{ {
return u; return u;
} }
int32 FASTCALL int32 FASTCALL
js_fcmpeq(jsdouble x, jsdouble y) fcmpeq(jsdouble x, jsdouble y)
{ {
return x==y; return x==y;
} }
int32 FASTCALL int32 FASTCALL
js_fcmplt(jsdouble x, jsdouble y) fcmplt(jsdouble x, jsdouble y)
{ {
return x < y; return x < y;
} }
int32 FASTCALL int32 FASTCALL
js_fcmple(jsdouble x, jsdouble y) fcmple(jsdouble x, jsdouble y)
{ {
return x <= y; return x <= y;
} }
int32 FASTCALL int32 FASTCALL
js_fcmpgt(jsdouble x, jsdouble y) fcmpgt(jsdouble x, jsdouble y)
{ {
return x > y; return x > y;
} }
int32 FASTCALL int32 FASTCALL
js_fcmpge(jsdouble x, jsdouble y) fcmpge(jsdouble x, jsdouble y)
{ {
return x >= y; return x >= y;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_fmul(jsdouble x, jsdouble y) fmul(jsdouble x, jsdouble y)
{ {
return x * y; return x * y;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_fadd(jsdouble x, jsdouble y) fadd(jsdouble x, jsdouble y)
{ {
return x + y; return x + y;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_fdiv(jsdouble x, jsdouble y) fdiv(jsdouble x, jsdouble y)
{ {
return x / y; return x / y;
} }
jsdouble FASTCALL jsdouble FASTCALL
js_fsub(jsdouble x, jsdouble y) fsub(jsdouble x, jsdouble y)
{ {
return x - y; return x - y;
} }
JS_DEFINE_CALLINFO_1(DOUBLE, fneg, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_1(DOUBLE, i2f, INT32, 1, 1)
JS_DEFINE_CALLINFO_1(DOUBLE, u2f, UINT32, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpeq, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmplt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmple, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpgt, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(INT32, fcmpge, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fmul, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fadd, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fdiv, DOUBLE, DOUBLE, 1, 1)
JS_DEFINE_CALLINFO_2(DOUBLE, fsub, DOUBLE, DOUBLE, 1, 1)
class SoftFloatFilter: public LirWriter class SoftFloatFilter: public LirWriter
{ {
public: public:
@ -6163,12 +6161,6 @@ TraceRecorder::record_JSOP_SETLOCALPOP()
return true; return true;
} }
bool
TraceRecorder::record_JSOP_GROUP()
{
return true; // no-op
}
bool bool
TraceRecorder::record_JSOP_SETCALL() TraceRecorder::record_JSOP_SETCALL()
{ {
@ -6941,6 +6933,7 @@ UNUSED(JSOP_UNUSED76)
UNUSED(JSOP_UNUSED77) UNUSED(JSOP_UNUSED77)
UNUSED(JSOP_UNUSED78) UNUSED(JSOP_UNUSED78)
UNUSED(JSOP_UNUSED79) UNUSED(JSOP_UNUSED79)
UNUSED(JSOP_UNUSED131)
UNUSED(JSOP_UNUSED201) UNUSED(JSOP_UNUSED201)
UNUSED(JSOP_UNUSED202) UNUSED(JSOP_UNUSED202)
UNUSED(JSOP_UNUSED203) UNUSED(JSOP_UNUSED203)

View File

@ -204,7 +204,7 @@ JS_XDRFindClassById(JSXDRState *xdr, uint32 id);
* before deserialization of bytecode. If the saved version does not match * before deserialization of bytecode. If the saved version does not match
* the current version, abort deserialization and invalidate the file. * the current version, abort deserialization and invalidate the file.
*/ */
#define JSXDR_BYTECODE_VERSION (0xb973c0de - 32) #define JSXDR_BYTECODE_VERSION (0xb973c0de - 33)
/* /*
* Library-private functions. * Library-private functions.

View File

@ -214,7 +214,7 @@ namespace nanojit
// nothing free, steal one // nothing free, steal one
// LSRA says pick the one with the furthest use // LSRA says pick the one with the furthest use
LIns* vic = findVictim(regs,allow); LIns* vic = findVictim(regs, allow);
NanoAssert(vic != NULL); NanoAssert(vic != NULL);
Reservation* resv = getresv(vic); Reservation* resv = getresv(vic);
@ -527,6 +527,16 @@ namespace nanojit
{ {
return findRegFor(i, rmask(w)); return findRegFor(i, rmask(w));
} }
Register Assembler::getBaseReg(LIns *i, int &d, RegisterMask allow)
{
if (i->isop(LIR_alloc)) {
d += findMemFor(i);
return FP;
} else {
return findRegFor(i, allow);
}
}
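A usage sketch for the new helper, mirroring how the ARM asm_cmp further down uses it; the load is illustrative:

//   int d = 0;                                 // displacement (or immediate) to adjust
//   Register b = getBaseReg(base, d, GpRegs);  // if base is a LIR_alloc this
//                                              // returns FP and adds the stack
//                                              // slot offset to d
//   LD(rr, d, b);                              // one addressing form covers both cases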
Register Assembler::findRegFor(LIns* i, RegisterMask allow) Register Assembler::findRegFor(LIns* i, RegisterMask allow)
{ {
@ -554,6 +564,8 @@ namespace nanojit
resv = reserveAlloc(i); resv = reserveAlloc(i);
r = resv->reg; r = resv->reg;
#ifdef AVMPLUS_IA32
if (r != UnknownReg && if (r != UnknownReg &&
((rmask(r)&XmmRegs) && !(allow&XmmRegs) || ((rmask(r)&XmmRegs) && !(allow&XmmRegs) ||
(rmask(r)&x87Regs) && !(allow&x87Regs))) (rmask(r)&x87Regs) && !(allow&x87Regs)))
@ -563,6 +575,7 @@ namespace nanojit
evict(r); evict(r);
r = UnknownReg; r = UnknownReg;
} }
#endif
if (r == UnknownReg) if (r == UnknownReg)
{ {
@ -610,6 +623,20 @@ namespace nanojit
return rr; return rr;
} }
void Assembler::asm_spilli(LInsp i, Reservation *resv, bool pop)
{
int d = disp(resv);
Register rr = resv->reg;
bool quad = i->opcode() == LIR_param || i->isQuad();
asm_spill(rr, d, pop, quad);
if (d)
{
verbose_only(if (_verbose) {
outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
})
}
}
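The spill path is now split into a generic half and a per-backend half; the relationship, using the names declared in the Assembler.h hunk below:

//   asm_spilli(ins, resv, pop)            // generic: computes disp()/quad-ness, logs
//       -> asm_spill(rr, d, pop, quad)    // per-backend: emits the actual store(s)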
void Assembler::freeRsrcOf(LIns *i, bool pop) void Assembler::freeRsrcOf(LIns *i, bool pop)
{ {
Reservation* resv = getresv(i); Reservation* resv = getresv(i);
@ -632,66 +659,6 @@ namespace nanojit
_allocator.addFree(r); _allocator.addFree(r);
} }
void Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
return;
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();
Reservation *rA, *rB;
NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
// Not supported yet.
#if !defined NANOJIT_64BIT
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
#endif
// ready to issue the compare
if (rhs->isconst())
{
int c = rhs->constval();
if (c == 0 && cond->isop(LIR_eq)) {
Register r = findRegFor(lhs, GpRegs);
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
TESTQ(r, r);
#endif
} else {
TEST(r,r);
}
// No 64-bit immediates so fall-back to below
}
else if (!rhs->isQuad()) {
Register r;
if (lhs->isop(LIR_alloc)) {
r = FP;
c += findMemFor(lhs);
} else {
r = findRegFor(lhs, GpRegs);
}
CMPi(r, c);
}
}
else
{
findRegFor2(GpRegs, lhs, rA, rhs, rB);
Register ra = rA->reg;
Register rb = rB->reg;
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
CMPQ(ra, rb);
#endif
} else {
CMP(ra, rb);
}
}
}
void Assembler::patch(GuardRecord *lr) void Assembler::patch(GuardRecord *lr)
{ {
Fragment *frag = lr->exit->target; Fragment *frag = lr->exit->target;
@ -1068,7 +1035,11 @@ namespace nanojit
JMP(_epilogue); JMP(_epilogue);
} }
assignSavedParams(); assignSavedParams();
#ifdef NANOJIT_IA32
findSpecificRegFor(ins->oprnd1(), FST0); findSpecificRegFor(ins->oprnd1(), FST0);
#else
NanoAssert(false);
#endif
fpu_pop(); fpu_pop();
break; break;
} }
@ -1089,19 +1060,15 @@ namespace nanojit
break; break;
} }
case LIR_short: case LIR_short:
{
countlir_imm();
asm_short(ins);
break;
}
case LIR_int: case LIR_int:
{ {
countlir_imm(); countlir_imm();
Register rr = prepResultReg(ins, GpRegs); asm_int(ins);
int32_t val;
if (op == LIR_int)
val = ins->imm32();
else
val = ins->imm16();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
break; break;
} }
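This case is one instance of the pattern applied through the rest of this switch: the generic loop keeps only the counting and the dispatch, and the machine-specific bodies move behind per-backend asm_* hooks (declared in the Assembler.h hunk below, with i386 bodies presumably moved elsewhere in this commit and fresh ARM ones added further down). A condensed sketch:

//   case LIR_int:    countlir_imm();   asm_int(ins);    break;
//   case LIR_param:  countlir_param(); asm_param(ins);  break;
//   case LIR_cmov:   countlir_cmov();  asm_cmov(ins);   break;
//   // ... and likewise for qlo/qhi, ld, neg/not, arith, loop, fcond, cond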
case LIR_quad: case LIR_quad:
@ -1123,139 +1090,36 @@ namespace nanojit
case LIR_param: case LIR_param:
{ {
countlir_param(); countlir_param();
uint32_t a = ins->imm8(); asm_param(ins);
uint32_t kind = ins->imm8b();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
if (a < abi_regcount) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
} else {
// incoming arg is on stack, and EAX points nearby (see genPrologue)
//_nvprof("param-evict-eax",1);
Register r = prepResultReg(ins, GpRegs & ~rmask(EAX));
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
}
else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
}
break; break;
} }
case LIR_qlo: case LIR_qlo:
{ {
countlir_qlo(); countlir_qlo();
LIns *q = ins->oprnd1(); asm_qlo(ins);
if (!asm_qlo(ins, q))
{
Register rr = prepResultReg(ins, GpRegs);
int d = findMemFor(q);
LD(rr, d, FP);
}
break; break;
} }
case LIR_qhi: case LIR_qhi:
{ {
countlir_qhi(); countlir_qhi();
Register rr = prepResultReg(ins, GpRegs); asm_qhi(ins);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d+4, FP);
break; break;
} }
case LIR_qcmov: case LIR_qcmov:
case LIR_cmov: case LIR_cmov:
{ {
countlir_cmov(); countlir_cmov();
LIns* condval = ins->oprnd1(); asm_cmov(ins);
NanoAssert(condval->isCmp());
LIns* values = ins->oprnd2();
NanoAssert(values->opcode() == LIR_2);
LIns* iftrue = values->oprnd1();
LIns* iffalse = values->oprnd2();
NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
const Register rr = prepResultReg(ins, GpRegs);
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
if (op == LIR_cmov) {
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL(rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA(rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
} else if (op == LIR_qcmov) {
#if !defined NANOJIT_64BIT
NanoAssert(0);
#else
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRQNE(rr, iffalsereg); break;
case LIR_ov: MRQNO(rr, iffalsereg); break;
case LIR_cs: MRQNC(rr, iffalsereg); break;
case LIR_lt: MRQGE(rr, iffalsereg); break;
case LIR_le: MRQG(rr, iffalsereg); break;
case LIR_gt: MRQLE(rr, iffalsereg); break;
case LIR_ge: MRQL(rr, iffalsereg); break;
case LIR_ult: MRQAE(rr, iffalsereg); break;
case LIR_ule: MRQA(rr, iffalsereg); break;
case LIR_ugt: MRQBE(rr, iffalsereg); break;
case LIR_uge: MRQB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
#endif
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(condval);
break; break;
} }
case LIR_ld: case LIR_ld:
case LIR_ldc: case LIR_ldc:
case LIR_ldcb: case LIR_ldcb:
{ {
countlir_ld(); countlir_ld();
LIns* base = ins->oprnd1(); asm_ld(ins);
LIns* disp = ins->oprnd2();
Register rr = prepResultReg(ins, GpRegs);
Register ra;
int d = disp->constval();
if (base->isop(LIR_alloc)) {
ra = FP;
d += findMemFor(base);
} else {
ra = findRegFor(base, GpRegs);
}
if (op == LIR_ldcb)
LD8Z(rr, d, ra);
else
LD(rr, d, ra);
break; break;
} }
case LIR_ldq: case LIR_ldq:
case LIR_ldqc: case LIR_ldqc:
{ {
@ -1263,31 +1127,13 @@ namespace nanojit
asm_load64(ins); asm_load64(ins);
break; break;
} }
case LIR_neg: case LIR_neg:
case LIR_not: case LIR_not:
{ {
countlir_alu(); countlir_alu();
Register rr = prepResultReg(ins, GpRegs); asm_neg_not(ins);
LIns* lhs = ins->oprnd1();
Reservation *rA = getresv(lhs);
// if this is last use of lhs in reg, we can re-use result reg
Register ra;
if (rA == 0 || (ra=rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (op == LIR_not)
NOT(rr);
else
NEG(rr);
if ( rr != ra )
MR(rr,ra);
break; break;
} }
case LIR_qjoin: case LIR_qjoin:
{ {
countlir_qjoin(); countlir_qjoin();
@ -1318,115 +1164,7 @@ namespace nanojit
case LIR_ush: case LIR_ush:
{ {
countlir_alu(); countlir_alu();
LInsp lhs = ins->oprnd1(); asm_arith(ins);
LInsp rhs = ins->oprnd2();
Register rb = UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());
#ifdef NANOJIT_ARM
// Arm can't do an immediate op with immediates
// outside of +/-255 (for AND) r outside of
// 0..255 for others.
if (!forceReg)
{
if (rhs->isconst() && !isU8(rhs->constval()))
forceReg = true;
}
#endif
if (lhs != rhs && forceReg)
{
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
}
else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->constval();
LEA(rr, d, FP);
break;
}
Register rr = prepResultReg(ins, allow);
Reservation* rA = getresv(lhs);
Register ra;
// if this is last use of lhs in reg, we can re-use result reg
if (rA == 0 || (ra = rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (forceReg)
{
if (lhs == rhs)
rb = ra;
if (op == LIR_add || op == LIR_addp)
ADD(rr, rb);
else if (op == LIR_sub)
SUB(rr, rb);
else if (op == LIR_mul)
MUL(rr, rb);
else if (op == LIR_and)
AND(rr, rb);
else if (op == LIR_or)
OR(rr, rb);
else if (op == LIR_xor)
XOR(rr, rb);
else if (op == LIR_lsh)
SHL(rr, rb);
else if (op == LIR_rsh)
SAR(rr, rb);
else if (op == LIR_ush)
SHR(rr, rb);
else
NanoAssertMsg(0, "Unsupported");
}
else
{
int c = rhs->constval();
if (op == LIR_add || op == LIR_addp) {
#ifdef NANOJIT_IA32_TODO
if (ra != rr) {
// this doesn't set cc's, only use it when cc's not required.
LEA(rr, c, ra);
ra = rr; // suppress mov
} else
#endif
{
ADDi(rr, c);
}
} else if (op == LIR_sub) {
#ifdef NANOJIT_IA32
if (ra != rr) {
LEA(rr, -c, ra);
ra = rr;
} else
#endif
{
SUBi(rr, c);
}
} else if (op == LIR_and)
ANDi(rr, c);
else if (op == LIR_or)
ORi(rr, c);
else if (op == LIR_xor)
XORi(rr, c);
else if (op == LIR_lsh)
SHLi(rr, c);
else if (op == LIR_rsh)
SARi(rr, c);
else if (op == LIR_ush)
SHRi(rr, c);
else
NanoAssertMsg(0, "Unsupported");
}
if ( rr != ra )
MR(rr,ra);
break; break;
} }
#ifndef NJ_SOFTFLOAT #ifndef NJ_SOFTFLOAT
@ -1602,28 +1340,10 @@ namespace nanojit
case LIR_loop: case LIR_loop:
{ {
countlir_loop(); countlir_loop();
JMP_long_placeholder(); // jump to SOT asm_loop(ins, loopJumps);
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );
loopJumps.add(_nIns);
#ifdef NJ_VERBOSE
// branching from this frag to ourself.
if (_frago->core()->config.show_stats)
#if defined NANOJIT_AMD64
LDQi(argRegs[1], intptr_t((Fragment*)_thisfrag));
#else
LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
#endif
assignSavedParams();
// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
break; break;
} }
#ifndef NJ_SOFTFLOAT #ifndef NJ_SOFTFLOAT
case LIR_feq: case LIR_feq:
case LIR_fle: case LIR_fle:
@ -1632,17 +1352,7 @@ namespace nanojit
case LIR_fge: case LIR_fge:
{ {
countlir_fpu(); countlir_fpu();
// only want certain regs asm_fcond(ins);
Register r = prepResultReg(ins, AllowableFlagRegs);
asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
SETE(r);
#else
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
SETNP(r);
#endif
asm_fcmp(ins);
break; break;
} }
#endif #endif
@ -1659,36 +1369,10 @@ namespace nanojit
case LIR_uge: case LIR_uge:
{ {
countlir_alu(); countlir_alu();
// only want certain regs asm_cond(ins);
Register r = prepResultReg(ins, AllowableFlagRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (op == LIR_eq)
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
SETLE(r);
else if (op == LIR_gt)
SETG(r);
else if (op == LIR_ge)
SETGE(r);
else if (op == LIR_ult)
SETB(r);
else if (op == LIR_ule)
SETBE(r);
else if (op == LIR_ugt)
SETA(r);
else // if (op == LIR_uge)
SETAE(r);
asm_cmp(ins);
break; break;
} }
#ifndef NJ_SOFTFLOAT #ifndef NJ_SOFTFLOAT
case LIR_fcall: case LIR_fcall:
case LIR_fcalli: case LIR_fcalli:
@ -1730,73 +1414,6 @@ namespace nanojit
} }
} }
NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
NIns* at = 0;
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
if (condop >= LIR_feq && condop <= LIR_fge)
{
return asm_jmpcc(branchOnFalse, cond, targ);
}
#endif
// produce the branch
if (branchOnFalse)
{
if (condop == LIR_eq)
JNE(targ);
else if (condop == LIR_ov)
JNO(targ);
else if (condop == LIR_cs)
JNC(targ);
else if (condop == LIR_lt)
JNL(targ);
else if (condop == LIR_le)
JNLE(targ);
else if (condop == LIR_gt)
JNG(targ);
else if (condop == LIR_ge)
JNGE(targ);
else if (condop == LIR_ult)
JNB(targ);
else if (condop == LIR_ule)
JNBE(targ);
else if (condop == LIR_ugt)
JNA(targ);
else //if (condop == LIR_uge)
JNAE(targ);
}
else // op == LIR_xt
{
if (condop == LIR_eq)
JE(targ);
else if (condop == LIR_ov)
JO(targ);
else if (condop == LIR_cs)
JC(targ);
else if (condop == LIR_lt)
JL(targ);
else if (condop == LIR_le)
JLE(targ);
else if (condop == LIR_gt)
JG(targ);
else if (condop == LIR_ge)
JGE(targ);
else if (condop == LIR_ult)
JB(targ);
else if (condop == LIR_ule)
JBE(targ);
else if (condop == LIR_ugt)
JA(targ);
else //if (condop == LIR_uge)
JAE(targ);
}
at = _nIns;
asm_cmp(cond);
return at;
}
void Assembler::assignSavedParams() void Assembler::assignSavedParams()
{ {
// restore saved regs // restore saved regs
@ -1847,6 +1464,7 @@ namespace nanojit
return; return;
#ifdef NANOJIT_ARM #ifdef NANOJIT_ARM
// @todo Why is this here?!? This routine should be indep. of platform
verbose_only( verbose_only(
if (_verbose) { if (_verbose) {
char* s = &outline[0]; char* s = &outline[0];

View File

@ -223,8 +223,9 @@ namespace nanojit
void unionRegisterState(RegAlloc& saved); void unionRegisterState(RegAlloc& saved);
void assignSaved(RegAlloc &saved, RegisterMask skip); void assignSaved(RegAlloc &saved, RegisterMask skip);
LInsp findVictim(RegAlloc& regs, RegisterMask allow); LInsp findVictim(RegAlloc& regs, RegisterMask allow);
int findMemFor(LIns* i); Register getBaseReg(LIns *i, int &d, RegisterMask allow);
int findMemFor(LIns* i);
Register findRegFor(LIns* i, RegisterMask allow); Register findRegFor(LIns* i, RegisterMask allow);
void findRegFor2(RegisterMask allow, LIns* ia, Reservation* &ra, LIns *ib, Reservation* &rb); void findRegFor2(RegisterMask allow, LIns* ia, Reservation* &ra, LIns *ib, Reservation* &rb);
Register findSpecificRegFor(LIns* i, Register w); Register findSpecificRegFor(LIns* i, Register w);
@ -286,12 +287,23 @@ namespace nanojit
void asm_restore(LInsp, Reservation*, Register); void asm_restore(LInsp, Reservation*, Register);
void asm_load(int d, Register r); void asm_load(int d, Register r);
void asm_spilli(LInsp i, Reservation *resv, bool pop); void asm_spilli(LInsp i, Reservation *resv, bool pop);
void asm_spill(Register rr, int d, bool pop=false, bool quad=false); void asm_spill(Register rr, int d, bool pop, bool quad);
void asm_load64(LInsp i); void asm_load64(LInsp i);
void asm_pusharg(LInsp p); void asm_pusharg(LInsp p);
NIns* asm_adjustBranch(NIns* at, NIns* target); NIns* asm_adjustBranch(NIns* at, NIns* target);
void asm_quad(LInsp i); void asm_quad(LInsp i);
bool asm_qlo(LInsp ins, LInsp q); void asm_loop(LInsp i, NInsList& loopJumps);
void asm_fcond(LInsp i);
void asm_cond(LInsp i);
void asm_arith(LInsp i);
void asm_neg_not(LInsp i);
void asm_ld(LInsp i);
void asm_cmov(LInsp i);
void asm_param(LInsp i);
void asm_int(LInsp i);
void asm_short(LInsp i);
void asm_qlo(LInsp i);
void asm_qhi(LInsp i);
void asm_fneg(LInsp ins); void asm_fneg(LInsp ins);
void asm_fop(LInsp ins); void asm_fop(LInsp ins);
void asm_i2f(LInsp ins); void asm_i2f(LInsp ins);

View File

@ -250,7 +250,7 @@ namespace nanojit
LInsp LirBufWriter::ensureReferenceable(LInsp i, int32_t addedDistance) LInsp LirBufWriter::ensureReferenceable(LInsp i, int32_t addedDistance)
{ {
NanoAssert(i != 0 && !i->isTramp()); NanoAssert(i != 0 /* && !i->isTramp()*/);
LInsp next = _buf->next(); LInsp next = _buf->next();
LInsp from = next + 2*addedDistance; LInsp from = next + 2*addedDistance;
if (canReference(from,i)) if (canReference(from,i))
@ -1047,38 +1047,39 @@ namespace nanojit
NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition NanoAssert(op != LIR_skip); // LIR_skip here is just an error condition
ArgSize sizes[2*MAXARGS]; ArgSize sizes[2*MAXARGS];
uint32_t argc = ci->get_sizes(sizes); int32_t argc = ci->get_sizes(sizes);
#ifdef NJ_SOFTFLOAT #ifdef NJ_SOFTFLOAT
if (op == LIR_fcall) if (op == LIR_fcall)
op = LIR_callh; op = LIR_callh;
LInsp args2[MAXARGS*2]; // arm could require 2 args per double LInsp args2[MAXARGS*2]; // arm could require 2 args per double
int32_t j = 0; int32_t j = 0;
for (int32_t i = 0; i < MAXARGS; i++) { int32_t i = 0;
while (j < argc) {
argt >>= 2; argt >>= 2;
ArgSize a = ArgSize(argt&3); ArgSize a = ArgSize(argt&3);
if (a == ARGSIZE_F) { if (a == ARGSIZE_F) {
LInsp q = args[i]; LInsp q = args[i++];
args2[j++] = ins1(LIR_qhi, q); args2[j++] = ins1(LIR_qhi, q);
args2[j++] = ins1(LIR_qlo, q); args2[j++] = ins1(LIR_qlo, q);
} else if (a != ARGSIZE_NONE) { } else {
args2[j++] = args[i]; args2[j++] = args[i++];
} }
} }
args = args2; args = args2;
NanoAssert(j == argc); NanoAssert(j == argc);
#endif #endif
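What the rewritten NJ_SOFTFLOAT loop does to the argument list, illustrated for a hypothetical call f(int a, double b):

//   incoming  args[]  = { a, b }
//   rewritten args2[] = { a, qhi(b), qlo(b) }   // each double becomes two
//                                               // 32-bit halves via LIR_qhi/qlo
//
// The loop is now bounded by argc, the expanded count reported by
// ci->get_sizes(), which is exactly what the NanoAssert(j == argc) checks.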
NanoAssert(argc <= MAXARGS); NanoAssert(argc <= (int)MAXARGS);
uint32_t words = argwords(argc); uint32_t words = argwords(argc);
ensureRoom(words+LIns::callInfoWords+1+argc); // ins size + possible tramps ensureRoom(words+LIns::callInfoWords+1+argc); // ins size + possible tramps
for (uint32_t i=0; i < argc; i++) for (int32_t i=0; i < argc; i++)
args[i] = ensureReferenceable(args[i], argc-i); args[i] = ensureReferenceable(args[i], argc-i);
uint8_t* offs = (uint8_t*)_buf->next(); uint8_t* offs = (uint8_t*)_buf->next();
LIns *l = _buf->next() + words; LIns *l = _buf->next() + words;
*(const CallInfo **)l = ci; *(const CallInfo **)l = ci;
l += LIns::callInfoWords; l += LIns::callInfoWords;
for (uint32_t i=0; i < argc; i++) for (int32_t i=0; i < argc; i++)
offs[i] = (uint8_t) l->reference(args[i]); offs[i] = (uint8_t) l->reference(args[i]);
#if defined NANOJIT_64BIT #if defined NANOJIT_64BIT
l->initOpcode(op); l->initOpcode(op);

View File

@ -39,8 +39,6 @@
#ifndef __nanojit_LIR__ #ifndef __nanojit_LIR__
#define __nanojit_LIR__ #define __nanojit_LIR__
namespace avmplus { class RegionTracker; }
/** /**
* Fundamentally, the arguments to the various operands can be grouped along * Fundamentally, the arguments to the various operands can be grouped along
* two dimensions. One dimension is size: can the arguments fit into a 32-bit * two dimensions. One dimension is size: can the arguments fit into a 32-bit
@ -946,7 +944,6 @@ namespace nanojit
class Assembler; class Assembler;
void compile(Assembler *assm, Fragment *frag); void compile(Assembler *assm, Fragment *frag);
verbose_only( void printTracker(const char* s, avmplus::RegionTracker& trk, Assembler* assm); )
verbose_only(void live(GC *gc, LirBuffer *lirbuf);) verbose_only(void live(GC *gc, LirBuffer *lirbuf);)
class StackFilter: public LirFilter class StackFilter: public LirFilter

View File

@ -64,16 +64,15 @@ const char* regNames[] = {"r0","r1","r2","r3","r4","r5","r6","r7","r8","r9","r10
const Register Assembler::argRegs[] = { R0, R1, R2, R3 }; const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 }; const Register Assembler::retRegs[] = { R0, R1 };
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };
void void
Assembler::nInit(AvmCore*) Assembler::nInit(AvmCore*)
{ {
// all ARMs have conditional move
avmplus::AvmCore::cmov_available = true;
} }
NIns* NIns*
Assembler::genPrologue(RegisterMask needSaving) Assembler::genPrologue()
{ {
/** /**
* Prologue * Prologue
@ -81,29 +80,26 @@ Assembler::genPrologue(RegisterMask needSaving)
// NJ_RESV_OFFSET is space at the top of the stack for us // NJ_RESV_OFFSET is space at the top of the stack for us
// to use for parameter passing (8 bytes at the moment) // to use for parameter passing (8 bytes at the moment)
uint32_t stackNeeded = 4 * _activation.highwatermark + NJ_STACK_OFFSET; uint32_t stackNeeded = STACK_GRANULARITY * _activation.highwatermark + NJ_STACK_OFFSET;
uint32_t savingCount = 0;
uint32_t savingMask = 0; uint32_t savingMask = rmask(FP) | rmask(LR);
savingCount = 9; //R4-R10,R11,LR uint32_t savingCount = 2;
savingMask = SavedRegs | rmask(FRAME_PTR);
(void)needSaving;
// so for alignment purposes we've pushed return addr, fp, and savingCount registers // so for alignment purposes we've pushed return addr and fp
uint32_t stackPushed = 4 * (2+savingCount); uint32_t stackPushed = STACK_GRANULARITY * savingCount;
uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK); uint32_t aligned = alignUp(stackNeeded + stackPushed, NJ_ALIGN_STACK);
int32_t amt = aligned - stackPushed; int32_t amt = aligned - stackPushed;
// Make room on stack for what we are doing // Make room on stack for what we are doing
if (amt) if (amt)
SUBi(SP, amt); SUBi(SP, amt);
verbose_only( verbose_outputf(" %p:",_nIns); ) verbose_only( verbose_outputf(" %p:",_nIns); )
verbose_only( verbose_output(" patch entry"); ) verbose_only( verbose_output(" patch entry"); )
NIns *patchEntry = _nIns; NIns *patchEntry = _nIns;
MR(FRAME_PTR, SP); MR(FP, SP);
PUSH_mask(savingMask|rmask(LR)); PUSH_mask(savingMask);
return patchEntry; return patchEntry;
} }
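A rough picture of the frame the new prologue sets up, in runtime order (nanojit emits backwards, so the source order above is reversed); this is an illustration, not text from the patch:

//   push {fp, lr}          ; savingMask is now just FP|LR
//   mov  fp, sp            ; FP anchors the frame
//   sub  sp, sp, #amt      ; locals, kept NJ_ALIGN_STACK-aligned
//
// r4-r10 are no longer saved in the prologue; nRegisterResetAll below hands
// them to the register allocator as ordinary scratch registers instead.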
@ -130,7 +126,7 @@ Assembler::nFragExit(LInsp guard)
} }
// pop the stack frame first // pop the stack frame first
MR(SP, FRAME_PTR); MR(SP, FP);
#ifdef NJ_VERBOSE #ifdef NJ_VERBOSE
if (_frago->core()->config.show_stats) { if (_frago->core()->config.show_stats) {
@ -142,19 +138,22 @@ Assembler::nFragExit(LInsp guard)
#endif #endif
// return value is GuardRecord* // return value is GuardRecord*
LDi(R2, int(lr)); LDi(R0, int(lr));
} }
NIns* NIns*
Assembler::genEpilogue(RegisterMask restore) Assembler::genEpilogue()
{ {
BX(LR); // return BX(LR); // return
MR(R0,R2); // return LinkRecord*
RegisterMask savingMask = restore | rmask(FRAME_PTR) | rmask(LR); // this is needed if we jump here from nFragExit
//MR(R0,R2); // return LinkRecord*
RegisterMask savingMask = rmask(FP) | rmask(LR);
POP_mask(savingMask); // regs POP_mask(savingMask); // regs
return _nIns; return _nIns;
} }
void void
Assembler::asm_call(LInsp ins) Assembler::asm_call(LInsp ins)
{ {
@ -252,7 +251,7 @@ Assembler::asm_call(LInsp ins)
roffset = 1; roffset = 1;
} }
} }
void void
Assembler::nMarkExecute(Page* page, int32_t count, bool enable) Assembler::nMarkExecute(Page* page, int32_t count, bool enable)
{ {
@ -267,7 +266,7 @@ Assembler::nMarkExecute(Page* page, int32_t count, bool enable)
(void)count; (void)count;
(void)enable; (void)enable;
} }
Register Register
Assembler::nRegisterAllocFromSet(int set) Assembler::nRegisterAllocFromSet(int set)
{ {
@ -300,7 +299,14 @@ Assembler::nRegisterResetAll(RegAlloc& a)
// add scratch registers to our free list for the allocator // add scratch registers to our free list for the allocator
a.clear(); a.clear();
a.used = 0; a.used = 0;
a.free = rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) | rmask(R5) | FpRegs; a.free =
rmask(R0) | rmask(R1) | rmask(R2) | rmask(R3) | rmask(R4) |
rmask(R5) | rmask(R6) | rmask(R7) | rmask(R8) | rmask(R9) |
rmask(R10);
#ifdef NJ_ARM_VFP
a.free |= FpRegs;
#endif
debug_only(a.managed = a.free); debug_only(a.managed = a.free);
} }
@ -354,7 +360,7 @@ Assembler::asm_qjoin(LIns *ins)
AvmAssert(d); AvmAssert(d);
LIns* lo = ins->oprnd1(); LIns* lo = ins->oprnd1();
LIns* hi = ins->oprnd2(); LIns* hi = ins->oprnd2();
Register r = findRegFor(hi, GpRegs); Register r = findRegFor(hi, GpRegs);
STR(r, FP, d+4); STR(r, FP, d+4);
@ -399,15 +405,11 @@ Assembler::asm_restore(LInsp i, Reservation *resv, Register r)
} }
void void
Assembler::asm_spill(LInsp i, Reservation *resv, bool pop) Assembler::asm_spill(Register rr, int d, bool pop, bool quad)
{ {
(void)i; (void) pop;
(void)pop; (void) quad;
//fprintf (stderr, "resv->arIndex: %d\n", resv->arIndex); if (d) {
if (resv->arIndex) {
int d = disp(resv);
// save to spill location
Register rr = resv->reg;
if (IsFpReg(rr)) { if (IsFpReg(rr)) {
if (isS8(d >> 2)) { if (isS8(d >> 2)) {
FSTD(rr, FP, d); FSTD(rr, FP, d);
@ -418,11 +420,6 @@ Assembler::asm_spill(LInsp i, Reservation *resv, bool pop)
} else { } else {
STR(rr, FP, d); STR(rr, FP, d);
} }
verbose_only(if (_verbose){
outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
}
)
} }
} }
@ -599,13 +596,6 @@ Assembler::asm_quad(LInsp ins)
//asm_output("<<< asm_quad"); //asm_output("<<< asm_quad");
} }
bool
Assembler::asm_qlo(LInsp ins, LInsp q)
{
(void)ins; (void)q;
return false;
}
void void
Assembler::asm_nongp_copy(Register r, Register s) Assembler::asm_nongp_copy(Register r, Register s)
{ {
@ -623,7 +613,7 @@ Assembler::asm_nongp_copy(Register r, Register s)
} }
Register Register
Assembler::asm_binop_rhs_reg(LInsp ins) Assembler::asm_binop_rhs_reg(LInsp)
{ {
return UnknownReg; return UnknownReg;
} }
@ -696,7 +686,7 @@ Assembler::nativePageSetup()
if (!_nIns) _nIns = pageAlloc(); if (!_nIns) _nIns = pageAlloc();
if (!_nExitIns) _nExitIns = pageAlloc(true); if (!_nExitIns) _nExitIns = pageAlloc(true);
//fprintf(stderr, "assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns); //fprintf(stderr, "assemble onto %x exits into %x\n", (int)_nIns, (int)_nExitIns);
if (!_nSlot) if (!_nSlot)
{ {
// This needs to be done or the samepage macro gets confused; pageAlloc // This needs to be done or the samepage macro gets confused; pageAlloc
@ -764,7 +754,7 @@ Assembler::underrunProtect(int bytes)
_nSlot = pageDataStart(_nIns); _nSlot = pageDataStart(_nIns);
// If samepage() is used on _nIns and _nSlot, it'll fail, since _nIns // If samepage() is used on _nIns and _nSlot, it'll fail, since _nIns
// points to one past the end of the page right now. Assume that // points to one past the end of the page right now. Assume that
// JMP_nochk won't ever try to write to _nSlot, and so won't ever // JMP_nochk won't ever try to write to _nSlot, and so won't ever
// check samepage(). See B_cond_chk macro. // check samepage(). See B_cond_chk macro.
JMP_nochk(target); JMP_nochk(target);
@ -871,7 +861,7 @@ Assembler::LD32_nochk(Register r, int32_t imm)
void void
Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk) Assembler::B_cond_chk(ConditionCode _c, NIns* _t, bool _chk)
{ {
int32 offs = PC_OFFSET_FROM(_t,_nIns-1); int32_t offs = PC_OFFSET_FROM(_t,_nIns-1);
//fprintf(stderr, "B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1); //fprintf(stderr, "B_cond_chk target: 0x%08x offset: %d @0x%08x\n", _t, offs, _nIns-1);
if (isS24(offs)) { if (isS24(offs)) {
if (_chk) underrunProtect(4); if (_chk) underrunProtect(4);
@ -947,8 +937,6 @@ Assembler::asm_add_imm(Register rd, Register rn, int32_t imm)
* VFP * VFP
*/ */
#ifdef NJ_ARM_VFP
void void
Assembler::asm_i2f(LInsp ins) Assembler::asm_i2f(LInsp ins)
{ {
@ -1052,7 +1040,7 @@ Assembler::asm_fcmp(LInsp ins)
// ends up having overlaps with a few other tests. So, test for // ends up having overlaps with a few other tests. So, test for
// the explicit mask. // the explicit mask.
uint8_t mask = 0x0; uint8_t mask = 0x0;
// NZCV // NZCV
// for a valid ordered result, V is always 0 from VFP // for a valid ordered result, V is always 0 from VFP
if (op == LIR_feq) if (op == LIR_feq)
@ -1080,7 +1068,7 @@ Assembler::asm_fcmp(LInsp ins)
// would reset the status bits if V (NaN flag) is set, but that // would reset the status bits if V (NaN flag) is set, but that
// doesn't work for NE. For NE could teqvs rX, #1. rX needs to // doesn't work for NE. For NE could teqvs rX, #1. rX needs to
// be any register that has lsb == 0, such as sp/fp/pc. // be any register that has lsb == 0, such as sp/fp/pc.
// Test explicitly with the full mask; if V is set, test will fail. // Test explicitly with the full mask; if V is set, test will fail.
// Assumption is that this will be followed up by a BEQ/BNE // Assumption is that this will be followed up by a BEQ/BNE
CMPi(Scratch, mask); CMPi(Scratch, mask);
@ -1094,13 +1082,538 @@ Assembler::asm_fcmp(LInsp ins)
} }
Register Register
Assembler::asm_prep_fcall(Reservation* rR, LInsp ins) Assembler::asm_prep_fcall(Reservation*, LInsp)
{ {
// We have nothing to do here; we do it all in asm_call. // We have nothing to do here; we do it all in asm_call.
return UnknownReg; return UnknownReg;
} }
#endif /* NJ_ARM_VFP */ NIns*
Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
NIns* at = 0;
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
if (condop >= LIR_feq && condop <= LIR_fge)
{
if (branchOnFalse)
JNE(targ);
else
JE(targ);
NIns *at = _nIns;
asm_fcmp(cond);
return at;
}
// produce the branch
if (branchOnFalse) {
if (condop == LIR_eq)
JNE(targ);
else if (condop == LIR_ov)
JNO(targ);
else if (condop == LIR_cs)
JNC(targ);
else if (condop == LIR_lt)
JNL(targ);
else if (condop == LIR_le)
JNLE(targ);
else if (condop == LIR_gt)
JNG(targ);
else if (condop == LIR_ge)
JNGE(targ);
else if (condop == LIR_ult)
JNB(targ);
else if (condop == LIR_ule)
JNBE(targ);
else if (condop == LIR_ugt)
JNA(targ);
else //if (condop == LIR_uge)
JNAE(targ);
} else // op == LIR_xt
{
if (condop == LIR_eq)
JE(targ);
else if (condop == LIR_ov)
JO(targ);
else if (condop == LIR_cs)
JC(targ);
else if (condop == LIR_lt)
JL(targ);
else if (condop == LIR_le)
JLE(targ);
else if (condop == LIR_gt)
JG(targ);
else if (condop == LIR_ge)
JGE(targ);
else if (condop == LIR_ult)
JB(targ);
else if (condop == LIR_ule)
JBE(targ);
else if (condop == LIR_ugt)
JA(targ);
else //if (condop == LIR_uge)
JAE(targ);
}
at = _nIns;
asm_cmp(cond);
return at;
}
void
Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
return;
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();
Reservation *rA, *rB;
// Not supported yet.
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
// ready to issue the compare
if (rhs->isconst()) {
int c = rhs->constval();
if (c == 0 && cond->isop(LIR_eq)) {
Register r = findRegFor(lhs, GpRegs);
TEST(r,r);
// No 64-bit immediates, so fall back to the case below
}
else if (!rhs->isQuad()) {
Register r = getBaseReg(lhs, c, GpRegs);
CMPi(r, c);
}
} else {
findRegFor2(GpRegs, lhs, rA, rhs, rB);
Register ra = rA->reg;
Register rb = rB->reg;
CMP(ra, rb);
}
}
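// Editorial aside (not part of this change): LIR_ov and LIR_cs are handled
// above by reusing the flags the preceding arithmetic instruction already set,
// so asm_cmp emits no compare for them. A portable sketch of the conditions
// they encode for a 32-bit add (illustrative names, assumes 32-bit int and
// 64-bit long long, as on the targets here) would have to recompute them:
static int add_overflows_sketch(int a, int b)
{
    long long wide = (long long)a + (long long)b;  // widen, then range-check
    return wide != (int)wide;                      // what LIR_ov observes
}
static int add_carries_sketch(unsigned a, unsigned b)
{
    return (a + b) < a;                            // unsigned wrap: LIR_cs
}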
void
Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
{
(void)ins;
JMP_long_placeholder(); // jump to SOT
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );
loopJumps.add(_nIns);
#ifdef NJ_VERBOSE
// branching from this frag to ourself.
if (_frago->core()->config.show_stats)
LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
assignSavedParams();
// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
}
void
Assembler::asm_fcond(LInsp ins)
{
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
SETE(r);
asm_fcmp(ins);
}
void
Assembler::asm_cond(LInsp ins)
{
// only want certain regs
LOpcode op = ins->opcode();
Register r = prepResultReg(ins, AllowableFlagRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (op == LIR_eq)
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
SETLE(r);
else if (op == LIR_gt)
SETG(r);
else if (op == LIR_ge)
SETGE(r);
else if (op == LIR_ult)
SETB(r);
else if (op == LIR_ule)
SETBE(r);
else if (op == LIR_ugt)
SETA(r);
else // if (op == LIR_uge)
SETAE(r);
asm_cmp(ins);
}
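// Editorial aside (not part of this change): the assembler emits machine code
// backwards (each word is written with a pre-decrement of _nIns, as the IMM32
// macro later in this patch shows), so the MOVZX8 written above the SETcc
// actually executes after it. In forward order the pair simply materializes
// the condition flag as a 32-bit 0/1 value:
static unsigned materialize_flag_sketch(int condition_true)
{
    unsigned char low = condition_true ? 1 : 0;  // SETcc writes only the low byte
    return (unsigned)low;                        // MOVZX8 zero-extends to 32 bits
}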
void
Assembler::asm_arith(LInsp ins)
{
LOpcode op = ins->opcode();
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();
Register rb = UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());
// ARM can't do an immediate op with immediates
// outside of +/-255 (for AND) or outside of
// 0..255 for others.
if (!forceReg) {
if (rhs->isconst() && !isU8(rhs->constval()))
forceReg = true;
}
if (lhs != rhs && forceReg) {
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
} else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->constval();
LEA(rr, d, FP);
}
Register rr = prepResultReg(ins, allow);
Reservation* rA = getresv(lhs);
Register ra;
// if this is last use of lhs in reg, we can re-use result reg
if (rA == 0 || (ra = rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (forceReg) {
if (lhs == rhs)
rb = ra;
if (op == LIR_add || op == LIR_addp)
ADD(rr, rb);
else if (op == LIR_sub)
SUB(rr, rb);
else if (op == LIR_mul)
MUL(rr, rb);
else if (op == LIR_and)
AND(rr, rb);
else if (op == LIR_or)
OR(rr, rb);
else if (op == LIR_xor)
XOR(rr, rb);
else if (op == LIR_lsh)
SHL(rr, rb);
else if (op == LIR_rsh)
SAR(rr, rb);
else if (op == LIR_ush)
SHR(rr, rb);
else
NanoAssertMsg(0, "Unsupported");
} else {
int c = rhs->constval();
if (op == LIR_add || op == LIR_addp)
ADDi(rr, c);
else if (op == LIR_sub)
SUBi(rr, c);
else if (op == LIR_and)
ANDi(rr, c);
else if (op == LIR_or)
ORi(rr, c);
else if (op == LIR_xor)
XORi(rr, c);
else if (op == LIR_lsh)
SHLi(rr, c);
else if (op == LIR_rsh)
SARi(rr, c);
else if (op == LIR_ush)
SHRi(rr, c);
else
NanoAssertMsg(0, "Unsupported");
}
if (rr != ra)
MR(rr,ra);
}
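// Editorial aside (not part of this change): the shape of the range predicate
// the isU8() check near the top of asm_arith is assumed to perform. The ARM
// immediate forms used here only accept small unsigned constants, so anything
// outside 0..255 is forced into a register. The name below is illustrative.
static inline bool fits_u8_imm_sketch(int v) { return (v & 0xff) == v; }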
void
Assembler::asm_neg_not(LInsp ins)
{
LOpcode op = ins->opcode();
Register rr = prepResultReg(ins, GpRegs);
LIns* lhs = ins->oprnd1();
Reservation *rA = getresv(lhs);
// if this is last use of lhs in reg, we can re-use result reg
Register ra;
if (rA == 0 || (ra=rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (op == LIR_not)
NOT(rr);
else
NEG(rr);
if ( rr != ra )
MR(rr,ra);
}
void
Assembler::asm_ld(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
LIns* disp = ins->oprnd2();
Register rr = prepResultReg(ins, GpRegs);
int d = disp->constval();
Register ra = getBaseReg(base, d, GpRegs);
if (op == LIR_ldcb)
LD8Z(rr, d, ra);
else
LD(rr, d, ra);
}
void
Assembler::asm_cmov(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* condval = ins->oprnd1();
NanoAssert(condval->isCmp());
LIns* values = ins->oprnd2();
NanoAssert(values->opcode() == LIR_2);
LIns* iftrue = values->oprnd1();
LIns* iffalse = values->oprnd2();
NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
const Register rr = prepResultReg(ins, GpRegs);
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
if (op == LIR_cmov) {
switch (condval->opcode()) {
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL(rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA(rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
} else if (op == LIR_qcmov) {
NanoAssert(0);
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(condval);
}
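// Editorial aside (not part of this change): the selection pattern asm_cmov
// lowers. In execution order the result register is first loaded with the
// 'iftrue' value, then a conditional move using the *opposite* condition
// overwrites it with 'iffalse'; that is why the MRcc cases above are all
// inverted.
static int cmov_lowering_sketch(int cond, int iftrue, int iffalse)
{
    int r = iftrue;   // findSpecificRegFor(iftrue, rr)
    if (!cond)        // MRcc with the negated condition
        r = iffalse;
    return r;
}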
void
Assembler::asm_qhi(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d+4, FP);
}
void
Assembler::asm_qlo(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d, FP);
#if 0
LIns *q = ins->oprnd1();
Reservation *resv = getresv(ins);
Register rr = resv->reg;
if (rr == UnknownReg) {
// store quad in spill loc
int d = disp(resv);
freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs);
SSE_MOVDm(d, FP, qr);
} else {
freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs);
SSE_MOVD(rr,qr);
}
#endif
}
void
Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
if (a < abi_regcount) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
} else {
// incoming arg is on stack, and EBP points nearby (see genPrologue)
Register r = prepResultReg(ins, GpRegs);
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
} else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
}
}
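// Editorial aside (not part of this change): how the stack displacement above
// is formed. Two words (the saved frame pointer and the return address) sit
// between FP and the first stack-passed argument, hence the "+ 8" with 4-byte
// words. For example, with ABI_FASTCALL as used above (abi_regcount == 2),
// parameter index 2 is the first stack argument at FP+8 and index 3 is at
// FP+12. The helper name and wordsize parameter are illustrative.
static int stack_param_disp_sketch(unsigned a, unsigned abi_regcount, int wordsize)
{
    return (int)(a - abi_regcount) * wordsize + 8;
}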
void
Assembler::asm_short(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm16();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
void
Assembler::asm_int(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm32();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
#if 0
void
Assembler::asm_quad(LInsp ins)
{
Reservation *rR = getresv(ins);
Register rr = rR->reg;
if (rr != UnknownReg)
{
// @todo -- add special-cases for 0 and 1
_allocator.retire(rr);
rR->reg = UnknownReg;
NanoAssert((rmask(rr) & FpRegs) != 0);
const double d = ins->constvalf();
const uint64_t q = ins->constvalq();
if (rmask(rr) & XmmRegs) {
if (q == 0.0) {
// test (int64)0 since -0.0 == 0.0
SSE_XORPDr(rr, rr);
} else if (d == 1.0) {
// 1.0 is extremely frequent and worth special-casing!
static const double k_ONE = 1.0;
LDSDm(rr, &k_ONE);
} else {
findMemFor(ins);
const int d = disp(rR);
SSE_LDQ(rr, d, FP);
}
} else {
if (q == 0.0) {
// test (int64)0 since -0.0 == 0.0
FLDZ();
} else if (d == 1.0) {
FLD1();
} else {
findMemFor(ins);
int d = disp(rR);
FLDQ(d,FP);
}
}
}
// @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
int d = disp(rR);
freeRsrcOf(ins, false);
if (d) {
const int32_t* p = (const int32_t*) (ins-2);
STi(FP,d+4,p[1]);
STi(FP,d,p[0]);
}
}
#endif
void
Assembler::asm_arg(ArgSize sz, LInsp p, Register r)
{
if (sz == ARGSIZE_Q) {
// ref arg - use lea
if (r != UnknownReg) {
// arg in specific reg
int da = findMemFor(p);
LEA(r, da, FP);
} else {
NanoAssert(0); // not supported
}
} else if (sz == ARGSIZE_LO) {
if (r != UnknownReg) {
// arg goes in specific register
if (p->isconst()) {
LDi(r, p->constval());
} else {
Reservation* rA = getresv(p);
if (rA) {
if (rA->reg == UnknownReg) {
// load it into the arg reg
int d = findMemFor(p);
if (p->isop(LIR_alloc)) {
LEA(r, d, FP);
} else {
LD(r, d, FP);
}
} else {
// it must be in a saved reg
MR(r, rA->reg);
}
} else {
// this is the last use, so fine to assign it
// to the scratch reg, it's dead after this point.
findSpecificRegFor(p, r);
}
}
} else {
asm_pusharg(p);
}
} else {
NanoAssert(sz == ARGSIZE_F);
asm_farg(p);
}
}
} }
#endif /* FEATURE_NANOJIT */ #endif /* FEATURE_NANOJIT */

View File

@ -42,6 +42,17 @@
#define __nanojit_NativeArm__ #define __nanojit_NativeArm__
#ifdef PERFM
#include "../vprof/vprof.h"
#define count_instr() _nvprof("arm",1)
#define count_prolog() _nvprof("arm-prolog",1); count_instr();
#define count_imt() _nvprof("arm-imt",1); count_instr()
#else
#define count_instr()
#define count_prolog()
#define count_imt()
#endif
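// Editorial note (not part of this change): under PERFM builds the macros
// above expand to _nvprof() counters from vprof.h; in normal builds they
// compile away. A hypothetical emission site would use them like this:
//
//     count_prolog();   // bumps "arm-prolog" and "arm" when PERFM is defined
//     count_instr();    // bumps "arm" only; a no-op otherwise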
namespace nanojit namespace nanojit
{ {
@ -50,7 +61,7 @@ const int NJ_LOG2_PAGE_SIZE = 12; // 4K
// If NJ_ARM_VFP is defined, then VFP is assumed to // If NJ_ARM_VFP is defined, then VFP is assumed to
// be present. If it's not defined, then softfloat // be present. If it's not defined, then softfloat
// is used, and NJ_SOFTFLOAT is defined. // is used, and NJ_SOFTFLOAT is defined.
#define NJ_ARM_VFP //#define NJ_ARM_VFP
#ifdef NJ_ARM_VFP #ifdef NJ_ARM_VFP
@ -107,10 +118,6 @@ typedef enum {
FirstFloatReg = 16, FirstFloatReg = 16,
LastFloatReg = 22, LastFloatReg = 22,
// helpers
FRAME_PTR = 11,
ESP = SP,
FirstReg = 0, FirstReg = 0,
#ifdef NJ_ARM_VFP #ifdef NJ_ARM_VFP
@ -152,13 +159,12 @@ typedef struct _FragInfo {
NIns* epilogue; NIns* epilogue;
} FragInfo; } FragInfo;
#ifdef ARM_VFP // D0-D6 are not saved; D7-D15 are, but we don't use those,
static const RegisterMask SavedFpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6 | 1<<D7; // so we don't have to worry about saving/restoring them
#else
static const RegisterMask SavedFpRegs = 0; static const RegisterMask SavedFpRegs = 0;
#endif static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
static const int NumSavedRegs = 7; static const int NumSavedRegs = 7;
static const RegisterMask SavedRegs = 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10 | SavedFpRegs;
static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f. static const RegisterMask FpRegs = 1<<D0 | 1<<D1 | 1<<D2 | 1<<D3 | 1<<D4 | 1<<D5 | 1<<D6; // no D7; S14-S15 are used for i2f/u2f.
static const RegisterMask GpRegs = 0x07FF; static const RegisterMask GpRegs = 0x07FF;
static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10; static const RegisterMask AllowableFlagRegs = 1<<R0 | 1<<R1 | 1<<R2 | 1<<R3 | 1<<R4 | 1<<R5 | 1<<R6 | 1<<R7 | 1<<R8 | 1<<R9 | 1<<R10;
@ -200,6 +206,7 @@ verbose_only( extern const char* regNames[]; )
void nativePageSetup(); \ void nativePageSetup(); \
void asm_quad_nochk(Register, const int32_t*); \ void asm_quad_nochk(Register, const int32_t*); \
void asm_add_imm(Register, Register, int32_t); \ void asm_add_imm(Register, Register, int32_t); \
void asm_fcmp(LInsp); \
int* _nSlot; \ int* _nSlot; \
int* _nExitSlot; int* _nExitSlot;
@ -218,8 +225,6 @@ verbose_only( extern const char* regNames[]; )
#define IMM32(imm) *(--_nIns) = (NIns)((imm)); #define IMM32(imm) *(--_nIns) = (NIns)((imm));
#define FUNCADDR(addr) ( ((int)addr) )
#define OP_IMM (1<<25) #define OP_IMM (1<<25)
#define OP_STAT (1<<20) #define OP_STAT (1<<20)

View File

@ -60,12 +60,7 @@ namespace nanojit
#endif #endif
const Register Assembler::argRegs[] = { R0, R1, R2, R3 }; const Register Assembler::argRegs[] = { R0, R1, R2, R3 };
const Register Assembler::retRegs[] = { R0, R1 }; const Register Assembler::retRegs[] = { R0, R1 };
#ifdef NJ_THUMB_JIT
const Register Assembler::savedRegs[] = { R4, R5, R6, R7 }; const Register Assembler::savedRegs[] = { R4, R5, R6, R7 };
#else
const Register Assembler::savedRegs[] = { R4, R5, R6, R7, R8, R9, R10 };
#endif
void Assembler::nInit(AvmCore*) void Assembler::nInit(AvmCore*)
{ {
@ -355,6 +350,7 @@ namespace nanojit
asm_mmq(rb, dr, FP, da); asm_mmq(rb, dr, FP, da);
} }
void Assembler::asm_quad(LInsp ins) void Assembler::asm_quad(LInsp ins)
{ {
Reservation *rR = getresv(ins); Reservation *rR = getresv(ins);
@ -368,10 +364,477 @@ namespace nanojit
} }
} }
bool Assembler::asm_qlo(LInsp ins, LInsp q) NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{ {
(void)ins; (void)q; NIns* at = 0;
return false; LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
if (condop >= LIR_feq && condop <= LIR_fge)
{
return asm_jmpcc(branchOnFalse, cond, targ);
}
#endif
// produce the branch
if (branchOnFalse)
{
if (condop == LIR_eq)
JNE(targ);
else if (condop == LIR_ov)
JNO(targ);
else if (condop == LIR_cs)
JNC(targ);
else if (condop == LIR_lt)
JNL(targ);
else if (condop == LIR_le)
JNLE(targ);
else if (condop == LIR_gt)
JNG(targ);
else if (condop == LIR_ge)
JNGE(targ);
else if (condop == LIR_ult)
JNB(targ);
else if (condop == LIR_ule)
JNBE(targ);
else if (condop == LIR_ugt)
JNA(targ);
else //if (condop == LIR_uge)
JNAE(targ);
}
else // op == LIR_xt
{
if (condop == LIR_eq)
JE(targ);
else if (condop == LIR_ov)
JO(targ);
else if (condop == LIR_cs)
JC(targ);
else if (condop == LIR_lt)
JL(targ);
else if (condop == LIR_le)
JLE(targ);
else if (condop == LIR_gt)
JG(targ);
else if (condop == LIR_ge)
JGE(targ);
else if (condop == LIR_ult)
JB(targ);
else if (condop == LIR_ule)
JBE(targ);
else if (condop == LIR_ugt)
JA(targ);
else //if (condop == LIR_uge)
JAE(targ);
}
at = _nIns;
asm_cmp(cond);
return at;
}
void Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
return;
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();
Reservation *rA, *rB;
// Not supported yet.
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
// ready to issue the compare
if (rhs->isconst())
{
int c = rhs->constval();
if (c == 0 && cond->isop(LIR_eq)) {
Register r = findRegFor(lhs, GpRegs);
TEST(r,r);
// No 64-bit immediates, so fall back to the case below
}
else if (!rhs->isQuad()) {
Register r = getBaseReg(lhs, c, GpRegs);
CMPi(r, c);
}
}
else
{
findRegFor2(GpRegs, lhs, rA, rhs, rB);
Register ra = rA->reg;
Register rb = rB->reg;
CMP(ra, rb);
}
}
void Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
{
(void)ins;
JMP_long_placeholder(); // jump to SOT
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );
loopJumps.add(_nIns);
#ifdef NJ_VERBOSE
// branching from this frag to ourself.
if (_frago->core()->config.show_stats)
LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
assignSavedParams();
// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
}
void Assembler::asm_fcond(LInsp ins)
{
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
SETE(r);
#else
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
SETNP(r);
#endif
asm_fcmp(ins);
}
void Assembler::asm_cond(LInsp ins)
{
// only want certain regs
LOpcode op = ins->opcode();
Register r = prepResultReg(ins, AllowableFlagRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (op == LIR_eq)
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
SETLE(r);
else if (op == LIR_gt)
SETG(r);
else if (op == LIR_ge)
SETGE(r);
else if (op == LIR_ult)
SETB(r);
else if (op == LIR_ule)
SETBE(r);
else if (op == LIR_ugt)
SETA(r);
else // if (op == LIR_uge)
SETAE(r);
asm_cmp(ins);
}
void Assembler::asm_arith(LInsp ins)
{
LOpcode op = ins->opcode();
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();
Register rb = UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());
#ifdef NANOJIT_ARM
// ARM can't do an immediate op with immediates
// outside of +/-255 (for AND) or outside of
// 0..255 for others.
if (!forceReg)
{
if (rhs->isconst() && !isU8(rhs->constval()))
forceReg = true;
}
#endif
if (lhs != rhs && forceReg)
{
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
}
else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->constval();
LEA(rr, d, FP);
}
Register rr = prepResultReg(ins, allow);
Reservation* rA = getresv(lhs);
Register ra;
// if this is last use of lhs in reg, we can re-use result reg
if (rA == 0 || (ra = rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (forceReg)
{
if (lhs == rhs)
rb = ra;
if (op == LIR_add || op == LIR_addp)
ADD(rr, rb);
else if (op == LIR_sub)
SUB(rr, rb);
else if (op == LIR_mul)
MUL(rr, rb);
else if (op == LIR_and)
AND(rr, rb);
else if (op == LIR_or)
OR(rr, rb);
else if (op == LIR_xor)
XOR(rr, rb);
else if (op == LIR_lsh)
SHL(rr, rb);
else if (op == LIR_rsh)
SAR(rr, rb);
else if (op == LIR_ush)
SHR(rr, rb);
else
NanoAssertMsg(0, "Unsupported");
}
else
{
int c = rhs->constval();
if (op == LIR_add || op == LIR_addp)
ADDi(rr, c);
else if (op == LIR_sub)
SUBi(rr, c);
else if (op == LIR_and)
ANDi(rr, c);
else if (op == LIR_or)
ORi(rr, c);
else if (op == LIR_xor)
XORi(rr, c);
else if (op == LIR_lsh)
SHLi(rr, c);
else if (op == LIR_rsh)
SARi(rr, c);
else if (op == LIR_ush)
SHRi(rr, c);
else
NanoAssertMsg(0, "Unsupported");
}
if ( rr != ra )
MR(rr,ra);
}
void Assembler::asm_neg_not(LInsp ins)
{
LOpcode op = ins->opcode();
Register rr = prepResultReg(ins, GpRegs);
LIns* lhs = ins->oprnd1();
Reservation *rA = getresv(lhs);
// if this is last use of lhs in reg, we can re-use result reg
Register ra;
if (rA == 0 || (ra=rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (op == LIR_not)
NOT(rr);
else
NEG(rr);
if ( rr != ra )
MR(rr,ra);
}
void Assembler::asm_ld(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
LIns* disp = ins->oprnd2();
Register rr = prepResultReg(ins, GpRegs);
int d = disp->constval();
Register ra = getBaseReg(base, d, GpRegs);
if (op == LIR_ldcb)
LD8Z(rr, d, ra);
else
LD(rr, d, ra);
}
void Assembler::asm_cmov(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* condval = ins->oprnd1();
NanoAssert(condval->isCmp());
LIns* values = ins->oprnd2();
NanoAssert(values->opcode() == LIR_2);
LIns* iftrue = values->oprnd1();
LIns* iffalse = values->oprnd2();
NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
const Register rr = prepResultReg(ins, GpRegs);
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
if (op == LIR_cmov) {
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL(rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA(rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
} else if (op == LIR_qcmov) {
NanoAssert(0);
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(condval);
}
void Assembler::asm_qhi(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d+4, FP);
}
void Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
if (a < abi_regcount) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
} else {
// incoming arg is on stack, and EBP points nearby (see genPrologue)
Register r = prepResultReg(ins, GpRegs);
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
}
else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
}
}
void Assembler::asm_short(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm16();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
void Assembler::asm_int(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm32();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
void Assembler::asm_quad(LInsp ins)
{
Reservation *rR = getresv(ins);
Register rr = rR->reg;
if (rr != UnknownReg)
{
// @todo -- add special-cases for 0 and 1
_allocator.retire(rr);
rR->reg = UnknownReg;
NanoAssert((rmask(rr) & FpRegs) != 0);
const double d = ins->constvalf();
const uint64_t q = ins->constvalq();
if (rmask(rr) & XmmRegs) {
if (q == 0.0) {
// test (int64)0 since -0.0 == 0.0
SSE_XORPDr(rr, rr);
} else if (d == 1.0) {
// 1.0 is extremely frequent and worth special-casing!
static const double k_ONE = 1.0;
LDSDm(rr, &k_ONE);
} else {
findMemFor(ins);
const int d = disp(rR);
SSE_LDQ(rr, d, FP);
}
} else {
if (q == 0.0) {
// test (int64)0 since -0.0 == 0.0
FLDZ();
} else if (d == 1.0) {
FLD1();
} else {
findMemFor(ins);
int d = disp(rR);
FLDQ(d,FP);
}
}
}
// @todo, if we used xor, ldsd, fldz, etc above, we don't need mem here
int d = disp(rR);
freeRsrcOf(ins, false);
if (d)
{
const int32_t* p = (const int32_t*) (ins-2);
STi(FP,d+4,p[1]);
STi(FP,d,p[0]);
}
}
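// Editorial aside (not part of this change): why the zero test above goes
// through the raw 64-bit pattern (constvalq) rather than the double value.
// -0.0 == 0.0 compares equal as doubles, but its bit pattern is not all zero,
// so it must not take the xorpd/fldz fast path, which would produce +0.0.
// The sketch below assumes 64-bit doubles and uses union punning for brevity.
static int is_positive_zero_bits_sketch(double d)
{
    union { double f; unsigned long long bits; } u;
    u.f = d;
    return u.bits == 0;   // true for +0.0 only; -0.0 has the sign bit set
}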
void Assembler::asm_qlo(LInsp ins)
{
LIns *q = ins->oprnd1();
Reservation *resv = getresv(ins);
Register rr = resv->reg;
if (rr == UnknownReg) {
// store quad in spill loc
int d = disp(resv);
freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs);
SSE_MOVDm(d, FP, qr);
} else {
freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs);
SSE_MOVD(rr,qr);
}
} }
void Assembler::asm_nongp_copy(Register r, Register s) void Assembler::asm_nongp_copy(Register r, Register s)

View File

@ -64,27 +64,22 @@ namespace nanojit
/* ARM registers */ /* ARM registers */
typedef enum typedef enum
{ {
R0 = 0, R0 = 0, // 32bit return value, aka A1
R1 = 1, R1 = 1, // msw of 64bit return value, A2
R2 = 2, R2 = 2, // A3
R3 = 3, R3 = 3, // A4
R4 = 4, R4 = 4, // V1
R5 = 5, R5 = 5, // V2
R6 = 6, R6 = 6, // V3
R7 = 7, R7 = 7, // V4
R8 = 8, R8 = 8, // V5
//R9 = 9, R9 = 9, // V6, SB (static base)
//R10 = 10, R10 = 10, // V7, SL
//R11 = 11, FP = 11, // V8, frame pointer
IP = 12, IP = 12, // intra-procedure call scratch register
SP = 13, SP = 13, // stack pointer
LR = 14, LR = 14, // link register (BL sets LR = return address)
PC = 15, PC = 15, // program counter
FP = SP,
// helpers
FRAME_PTR = R7,
FirstReg = 0, FirstReg = 0,
LastReg = 5, LastReg = 5,

View File

@ -354,7 +354,8 @@ namespace nanojit
#else #else
if (mprotect((void *)addr, count*NJ_PAGE_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC) == -1) { if (mprotect((void *)addr, count*NJ_PAGE_SIZE, PROT_READ|PROT_WRITE|PROT_EXEC) == -1) {
#endif #endif
AvmDebugLog(("FATAL ERROR: mprotect(PROT_EXEC) failed\n")); // todo: we can't abort or assert here, we have to fail gracefully.
NanoAssertMsg(false, "FATAL ERROR: mprotect(PROT_EXEC) failed\n");
abort(); abort();
} }
#endif #endif
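// Editorial sketch (not part of this change): one shape the "fail gracefully"
// TODO above could take, reporting the failure to the caller instead of
// aborting the host process. The helper name is illustrative, not nanojit API;
// it assumes <sys/mman.h>, which the mprotect call above already implies.
static bool make_pages_executable_sketch(void* addr, size_t len)
{
    return mprotect(addr, len, PROT_READ | PROT_WRITE | PROT_EXEC) == 0;
}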
@ -539,13 +540,7 @@ namespace nanojit
{ {
if (value->isconst()) if (value->isconst())
{ {
Register rb; Register rb = getBaseReg(base, dr, GpRegs);
if (base->isop(LIR_alloc)) {
rb = FP;
dr += findMemFor(base);
} else {
rb = findRegFor(base, GpRegs);
}
int c = value->constval(); int c = value->constval();
STi(rb, dr, c); STi(rb, dr, c);
} }
@ -610,20 +605,6 @@ namespace nanojit
#endif #endif
} }
void Assembler::asm_spilli(LInsp i, Reservation *resv, bool pop)
{
int d = disp(resv);
Register rr = resv->reg;
bool quad = i->opcode() == LIR_param || i->isQuad();
asm_spill(rr, d, pop, quad);
if (d)
{
verbose_only(if (_verbose) {
outputf(" spill %s",_thisfrag->lirbuf->names->formatRef(i));
})
}
}
void Assembler::asm_load64(LInsp ins) void Assembler::asm_load64(LInsp ins)
{ {
LIns* base = ins->oprnd1(); LIns* base = ins->oprnd1();
@ -634,13 +615,7 @@ namespace nanojit
if (rr != UnknownReg && rmask(rr) & XmmRegs) if (rr != UnknownReg && rmask(rr) & XmmRegs)
{ {
freeRsrcOf(ins, false); freeRsrcOf(ins, false);
Register rb; Register rb = getBaseReg(base, db, GpRegs);
if (base->isop(LIR_alloc)) {
rb = FP;
db += findMemFor(base);
} else {
rb = findRegFor(base, GpRegs);
}
SSE_LDQ(rr, db, rb); SSE_LDQ(rr, db, rb);
} }
#if defined NANOJIT_AMD64 #if defined NANOJIT_AMD64
@ -844,6 +819,462 @@ namespace nanojit
#endif #endif
} }
NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ)
{
NIns* at = 0;
LOpcode condop = cond->opcode();
NanoAssert(cond->isCond());
#ifndef NJ_SOFTFLOAT
if (condop >= LIR_feq && condop <= LIR_fge)
{
return asm_jmpcc(branchOnFalse, cond, targ);
}
#endif
// produce the branch
if (branchOnFalse)
{
if (condop == LIR_eq)
JNE(targ);
else if (condop == LIR_ov)
JNO(targ);
else if (condop == LIR_cs)
JNC(targ);
else if (condop == LIR_lt)
JNL(targ);
else if (condop == LIR_le)
JNLE(targ);
else if (condop == LIR_gt)
JNG(targ);
else if (condop == LIR_ge)
JNGE(targ);
else if (condop == LIR_ult)
JNB(targ);
else if (condop == LIR_ule)
JNBE(targ);
else if (condop == LIR_ugt)
JNA(targ);
else //if (condop == LIR_uge)
JNAE(targ);
}
else // op == LIR_xt
{
if (condop == LIR_eq)
JE(targ);
else if (condop == LIR_ov)
JO(targ);
else if (condop == LIR_cs)
JC(targ);
else if (condop == LIR_lt)
JL(targ);
else if (condop == LIR_le)
JLE(targ);
else if (condop == LIR_gt)
JG(targ);
else if (condop == LIR_ge)
JGE(targ);
else if (condop == LIR_ult)
JB(targ);
else if (condop == LIR_ule)
JBE(targ);
else if (condop == LIR_ugt)
JA(targ);
else //if (condop == LIR_uge)
JAE(targ);
}
at = _nIns;
asm_cmp(cond);
return at;
}
void Assembler::asm_cmp(LIns *cond)
{
LOpcode condop = cond->opcode();
// LIR_ov and LIR_cs recycle the flags set by arithmetic ops
if ((condop == LIR_ov) || (condop == LIR_cs))
return;
LInsp lhs = cond->oprnd1();
LInsp rhs = cond->oprnd2();
Reservation *rA, *rB;
NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));
// Not supported yet.
#if !defined NANOJIT_64BIT
NanoAssert(!lhs->isQuad() && !rhs->isQuad());
#endif
// ready to issue the compare
if (rhs->isconst())
{
int c = rhs->constval();
if (c == 0 && cond->isop(LIR_eq)) {
Register r = findRegFor(lhs, GpRegs);
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
TESTQ(r, r);
#endif
} else {
TEST(r,r);
}
// No 64-bit immediates, so fall back to the case below
}
else if (!rhs->isQuad()) {
Register r = getBaseReg(lhs, c, GpRegs);
CMPi(r, c);
}
}
else
{
findRegFor2(GpRegs, lhs, rA, rhs, rB);
Register ra = rA->reg;
Register rb = rB->reg;
if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
CMPQ(ra, rb);
#endif
} else {
CMP(ra, rb);
}
}
}
void Assembler::asm_loop(LInsp ins, NInsList& loopJumps)
{
(void)ins;
JMP_long_placeholder(); // jump to SOT
verbose_only( if (_verbose && _outputCache) { _outputCache->removeLast(); outputf(" jmp SOT"); } );
loopJumps.add(_nIns);
#ifdef NJ_VERBOSE
// branching from this frag to ourself.
if (_frago->core()->config.show_stats)
#if defined NANOJIT_AMD64
LDQi(argRegs[1], intptr_t((Fragment*)_thisfrag));
#else
LDi(argRegs[1], int((Fragment*)_thisfrag));
#endif
#endif
assignSavedParams();
// restore first parameter, the only one we use
LInsp state = _thisfrag->lirbuf->state;
findSpecificRegFor(state, argRegs[state->imm8()]);
}
void Assembler::asm_fcond(LInsp ins)
{
// only want certain regs
Register r = prepResultReg(ins, AllowableFlagRegs);
asm_setcc(r, ins);
#ifdef NJ_ARM_VFP
SETE(r);
#else
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
SETNP(r);
#endif
asm_fcmp(ins);
}
void Assembler::asm_cond(LInsp ins)
{
// only want certain regs
LOpcode op = ins->opcode();
Register r = prepResultReg(ins, AllowableFlagRegs);
// SETcc only sets low 8 bits, so extend
MOVZX8(r,r);
if (op == LIR_eq)
SETE(r);
else if (op == LIR_ov)
SETO(r);
else if (op == LIR_cs)
SETC(r);
else if (op == LIR_lt)
SETL(r);
else if (op == LIR_le)
SETLE(r);
else if (op == LIR_gt)
SETG(r);
else if (op == LIR_ge)
SETGE(r);
else if (op == LIR_ult)
SETB(r);
else if (op == LIR_ule)
SETBE(r);
else if (op == LIR_ugt)
SETA(r);
else // if (op == LIR_uge)
SETAE(r);
asm_cmp(ins);
}
void Assembler::asm_arith(LInsp ins)
{
LOpcode op = ins->opcode();
LInsp lhs = ins->oprnd1();
LInsp rhs = ins->oprnd2();
Register rb = UnknownReg;
RegisterMask allow = GpRegs;
bool forceReg = (op == LIR_mul || !rhs->isconst());
#ifdef NANOJIT_ARM
// ARM can't do an immediate op with immediates
// outside of +/-255 (for AND) or outside of
// 0..255 for others.
if (!forceReg)
{
if (rhs->isconst() && !isU8(rhs->constval()))
forceReg = true;
}
#endif
if (lhs != rhs && forceReg)
{
if ((rb = asm_binop_rhs_reg(ins)) == UnknownReg) {
rb = findRegFor(rhs, allow);
}
allow &= ~rmask(rb);
}
else if ((op == LIR_add||op == LIR_addp) && lhs->isop(LIR_alloc) && rhs->isconst()) {
// add alloc+const, use lea
Register rr = prepResultReg(ins, allow);
int d = findMemFor(lhs) + rhs->constval();
LEA(rr, d, FP);
}
Register rr = prepResultReg(ins, allow);
Reservation* rA = getresv(lhs);
Register ra;
// if this is last use of lhs in reg, we can re-use result reg
if (rA == 0 || (ra = rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (forceReg)
{
if (lhs == rhs)
rb = ra;
if (op == LIR_add || op == LIR_addp)
ADD(rr, rb);
else if (op == LIR_sub)
SUB(rr, rb);
else if (op == LIR_mul)
MUL(rr, rb);
else if (op == LIR_and)
AND(rr, rb);
else if (op == LIR_or)
OR(rr, rb);
else if (op == LIR_xor)
XOR(rr, rb);
else if (op == LIR_lsh)
SHL(rr, rb);
else if (op == LIR_rsh)
SAR(rr, rb);
else if (op == LIR_ush)
SHR(rr, rb);
else
NanoAssertMsg(0, "Unsupported");
}
else
{
int c = rhs->constval();
if (op == LIR_add || op == LIR_addp) {
#ifdef NANOJIT_IA32_TODO
if (ra != rr) {
// this doesn't set cc's, only use it when cc's not required.
LEA(rr, c, ra);
ra = rr; // suppress mov
} else
#endif
{
ADDi(rr, c);
}
} else if (op == LIR_sub) {
#ifdef NANOJIT_IA32
if (ra != rr) {
LEA(rr, -c, ra);
ra = rr;
} else
#endif
{
SUBi(rr, c);
}
} else if (op == LIR_and)
ANDi(rr, c);
else if (op == LIR_or)
ORi(rr, c);
else if (op == LIR_xor)
XORi(rr, c);
else if (op == LIR_lsh)
SHLi(rr, c);
else if (op == LIR_rsh)
SARi(rr, c);
else if (op == LIR_ush)
SHRi(rr, c);
else
NanoAssertMsg(0, "Unsupported");
}
if ( rr != ra )
MR(rr,ra);
}
void Assembler::asm_neg_not(LInsp ins)
{
LOpcode op = ins->opcode();
Register rr = prepResultReg(ins, GpRegs);
LIns* lhs = ins->oprnd1();
Reservation *rA = getresv(lhs);
// if this is last use of lhs in reg, we can re-use result reg
Register ra;
if (rA == 0 || (ra=rA->reg) == UnknownReg)
ra = findSpecificRegFor(lhs, rr);
// else, rA already has a register assigned.
if (op == LIR_not)
NOT(rr);
else
NEG(rr);
if ( rr != ra )
MR(rr,ra);
}
void Assembler::asm_ld(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* base = ins->oprnd1();
LIns* disp = ins->oprnd2();
Register rr = prepResultReg(ins, GpRegs);
int d = disp->constval();
Register ra = getBaseReg(base, d, GpRegs);
if (op == LIR_ldcb)
LD8Z(rr, d, ra);
else
LD(rr, d, ra);
}
void Assembler::asm_cmov(LInsp ins)
{
LOpcode op = ins->opcode();
LIns* condval = ins->oprnd1();
NanoAssert(condval->isCmp());
LIns* values = ins->oprnd2();
NanoAssert(values->opcode() == LIR_2);
LIns* iftrue = values->oprnd1();
LIns* iffalse = values->oprnd2();
NanoAssert(op == LIR_qcmov || (!iftrue->isQuad() && !iffalse->isQuad()));
const Register rr = prepResultReg(ins, GpRegs);
// this code assumes that neither LD nor MR nor MRcc set any of the condition flags.
// (This is true on Intel, is it true on all architectures?)
const Register iffalsereg = findRegFor(iffalse, GpRegs & ~rmask(rr));
if (op == LIR_cmov) {
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRNE(rr, iffalsereg); break;
case LIR_ov: MRNO(rr, iffalsereg); break;
case LIR_cs: MRNC(rr, iffalsereg); break;
case LIR_lt: MRGE(rr, iffalsereg); break;
case LIR_le: MRG(rr, iffalsereg); break;
case LIR_gt: MRLE(rr, iffalsereg); break;
case LIR_ge: MRL(rr, iffalsereg); break;
case LIR_ult: MRAE(rr, iffalsereg); break;
case LIR_ule: MRA(rr, iffalsereg); break;
case LIR_ugt: MRBE(rr, iffalsereg); break;
case LIR_uge: MRB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
} else if (op == LIR_qcmov) {
#if !defined NANOJIT_64BIT
NanoAssert(0);
#else
switch (condval->opcode())
{
// note that these are all opposites...
case LIR_eq: MRQNE(rr, iffalsereg); break;
case LIR_ov: MRQNO(rr, iffalsereg); break;
case LIR_cs: MRQNC(rr, iffalsereg); break;
case LIR_lt: MRQGE(rr, iffalsereg); break;
case LIR_le: MRQG(rr, iffalsereg); break;
case LIR_gt: MRQLE(rr, iffalsereg); break;
case LIR_ge: MRQL(rr, iffalsereg); break;
case LIR_ult: MRQAE(rr, iffalsereg); break;
case LIR_ule: MRQA(rr, iffalsereg); break;
case LIR_ugt: MRQBE(rr, iffalsereg); break;
case LIR_uge: MRQB(rr, iffalsereg); break;
debug_only( default: NanoAssert(0); break; )
}
#endif
}
/*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
asm_cmp(condval);
}
void Assembler::asm_qhi(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
LIns *q = ins->oprnd1();
int d = findMemFor(q);
LD(rr, d+4, FP);
}
void Assembler::asm_param(LInsp ins)
{
uint32_t a = ins->imm8();
uint32_t kind = ins->imm8b();
if (kind == 0) {
// ordinary param
AbiKind abi = _thisfrag->lirbuf->abi;
uint32_t abi_regcount = abi == ABI_FASTCALL ? 2 : abi == ABI_THISCALL ? 1 : 0;
if (a < abi_regcount) {
// incoming arg in register
prepResultReg(ins, rmask(argRegs[a]));
} else {
// incoming arg is on stack, and EBP points nearby (see genPrologue)
Register r = prepResultReg(ins, GpRegs);
int d = (a - abi_regcount) * sizeof(intptr_t) + 8;
LD(r, d, FP);
}
}
else {
// saved param
prepResultReg(ins, rmask(savedRegs[a]));
}
}
void Assembler::asm_short(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm16();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
void Assembler::asm_int(LInsp ins)
{
Register rr = prepResultReg(ins, GpRegs);
int32_t val = ins->imm32();
if (val == 0)
XOR(rr,rr);
else
LDi(rr, val);
}
void Assembler::asm_quad(LInsp ins) void Assembler::asm_quad(LInsp ins)
{ {
#if defined NANOJIT_IA32 #if defined NANOJIT_IA32
@ -935,30 +1366,34 @@ namespace nanojit
#endif #endif
} }
bool Assembler::asm_qlo(LInsp ins, LInsp q) void Assembler::asm_qlo(LInsp ins)
{ {
LIns *q = ins->oprnd1();
#if defined NANOJIT_IA32 #if defined NANOJIT_IA32
if (!avmplus::AvmCore::use_sse2()) if (!avmplus::AvmCore::use_sse2())
{ {
return false; Register rr = prepResultReg(ins, GpRegs);
int d = findMemFor(q);
LD(rr, d, FP);
} }
else
#endif #endif
{
Reservation *resv = getresv(ins); Reservation *resv = getresv(ins);
Register rr = resv->reg; Register rr = resv->reg;
if (rr == UnknownReg) { if (rr == UnknownReg) {
// store quad in spill loc // store quad in spill loc
int d = disp(resv); int d = disp(resv);
freeRsrcOf(ins, false); freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs); Register qr = findRegFor(q, XmmRegs);
SSE_MOVDm(d, FP, qr); SSE_MOVDm(d, FP, qr);
} else { } else {
freeRsrcOf(ins, false); freeRsrcOf(ins, false);
Register qr = findRegFor(q, XmmRegs); Register qr = findRegFor(q, XmmRegs);
SSE_MOVD(rr,qr); SSE_MOVD(rr,qr);
}
} }
return true;
} }
void Assembler::asm_fneg(LInsp ins) void Assembler::asm_fneg(LInsp ins)
@ -1623,7 +2058,6 @@ namespace nanojit
JMP(eip); JMP(eip);
} }
} }
#endif /* FEATURE_NANOJIT */ #endif /* FEATURE_NANOJIT */
} }

View File

@ -49,7 +49,7 @@ namespace nanojit
{ {
free = 0; free = 0;
used = 0; used = 0;
memset(active, 0, NJ_MAX_REGISTERS * sizeof(LIns*)); memset(active, 0, (LastReg+1) * sizeof(LIns*));
} }
bool RegAlloc::isFree(Register r) bool RegAlloc::isFree(Register r)
@ -120,6 +120,7 @@ namespace nanojit
} }
} }
} }
NanoAssert(a != 0); NanoAssert(a != 0);
return a; return a;
} }
@ -130,7 +131,7 @@ namespace nanojit
if (!frag || !frag->lirbuf) if (!frag || !frag->lirbuf)
return; return;
LirNameMap *names = frag->lirbuf->names; LirNameMap *names = frag->lirbuf->names;
for(int i=0; i<NJ_MAX_REGISTERS; i++) for(int i=0; i<(LastReg+1); i++)
{ {
LIns* ins = regs.active[i]; LIns* ins = regs.active[i];
Register r = (Register)i; Register r = (Register)i;