Mirror of https://github.com/mozilla/gecko-dev.git

Commit dff92ac06b: Merge tracemonkey to mozilla-central.
@@ -671,6 +671,9 @@ js_DestroyContext(JSContext *cx, JSDestroyContextMode mode)
#endif

    if (last) {
        /* Clear builtin functions, which are recreated on demand. */
        memset(rt->builtinFunctions, 0, sizeof rt->builtinFunctions);

        js_GC(cx, GC_LAST_CONTEXT);
        DUMP_EVAL_CACHE_METER(cx);
        DUMP_FUNCTION_METER(cx);

@@ -1826,6 +1826,76 @@ EmitEnterBlock(JSContext *cx, JSParseNode *pn, JSCodeGenerator *cg)
    return true;
}

/*
 * When eval is called from a function, the eval code or function code it
 * compiles may reference upvars that live in the eval-calling function. The
 * eval-invoked compiler does not have explicit definitions for these upvars
 * and we do not attempt to create them a-priori (by inspecting the function's
 * args and vars) -- we could, but we'd take an avoidable penalty for each
 * function local not referenced by any upvar. Instead, we map such upvars
 * lazily, growing upvarMap.vector by powers of two.
 *
 * This function knows that it is called with pn pointing to a PN_NAME-arity
 * node, and cg->compiler->callerFrame having a non-null fun member, and the
 * static level of cg at least one greater than the eval-calling function's
 * static level.
 */
static bool
MakeUpvarForEval(JSParseNode *pn, JSCodeGenerator *cg)
{
    JSContext *cx = cg->compiler->context;
    JSFunction *fun = cg->compiler->callerFrame->fun;
    JSAtom *atom = pn->pn_atom;

    uintN index;
    JSLocalKind localKind = js_LookupLocal(cx, fun, atom, &index);
    if (localKind == JSLOCAL_NONE)
        return true;

    uintN upvarLevel = fun->u.i.script->staticLevel;
    JS_ASSERT(cg->staticLevel > upvarLevel);
    if (upvarLevel >= JS_DISPLAY_SIZE)
        return true;

    JSAtomListElement *ale = cg->upvarList.lookup(atom);
    if (!ale) {
        if ((cg->flags & TCF_IN_FUNCTION) &&
            !js_AddLocal(cx, cg->fun, atom, JSLOCAL_UPVAR)) {
            return false;
        }

        ale = cg->upvarList.add(cg->compiler, atom);
        if (!ale)
            return false;
        JS_ASSERT(ALE_INDEX(ale) == cg->upvarList.count - 1);

        uint32 *vector = cg->upvarMap.vector;
        uint32 length = cg->upvarMap.length;

        JS_ASSERT(ALE_INDEX(ale) <= length);
        if (ALE_INDEX(ale) == length) {
            length = 2 * JS_MAX(2, length);
            vector = (uint32 *) JS_realloc(cx, vector, length * sizeof *vector);
            if (!vector)
                return false;
            cg->upvarMap.vector = vector;
            cg->upvarMap.length = length;
        }

        if (localKind != JSLOCAL_ARG)
            index += fun->nargs;
        JS_ASSERT(index < JS_BIT(16));

        uintN skip = cg->staticLevel - upvarLevel;
        vector[ALE_INDEX(ale)] = MAKE_UPVAR_COOKIE(skip, index);
    }

    pn->pn_op = JSOP_GETUPVAR;
    pn->pn_cookie = MAKE_UPVAR_COOKIE(cg->staticLevel, ALE_INDEX(ale));
    pn->pn_dflags |= PND_BOUND;
    return true;
}
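
An illustrative aside, not part of this patch: given the JS_BIT(16) assert above, MAKE_UPVAR_COOKIE plausibly packs the frame-skip count into the high 16 bits of the cookie and the slot index into the low 16 bits. A minimal self-contained sketch of that assumed packing, with a hypothetical helper name:

    #include <assert.h>
    #include <stdint.h>

    /* Hypothetical stand-in for MAKE_UPVAR_COOKIE, assuming a
       high-16/low-16 split; the real macro lives in the engine. */
    static uint32_t MakeUpvarCookie(unsigned skip, unsigned slot)
    {
        assert(slot < (1u << 16));  /* mirrors JS_ASSERT(index < JS_BIT(16)) */
        return ((uint32_t) skip << 16) | slot;
    }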

/*
 * BindNameToSlot attempts to optimize name gets and sets to stack slot loads
 * and stores, given the compile-time information in cg and a TOK_NAME node pn.
@@ -1906,10 +1976,6 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
    }

    if (cookie == FREE_UPVAR_COOKIE) {
        /* If free variable reference, leave pn_op as JSOP_NAME, etc. */
        if (cg->flags & TCF_IN_FUNCTION)
            return JS_TRUE;

        JSStackFrame *caller = cg->compiler->callerFrame;
        if (caller) {
            JS_ASSERT(cg->flags & TCF_COMPILE_N_GO);
@@ -1922,7 +1988,19 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
                return JS_TRUE;

            JS_ASSERT(caller->script);
            if (!caller->fun || caller->varobj != cg->scopeChain)
            if (!caller->fun)
                return JS_TRUE;

            /*
             * Make sure the variable object used by the compiler to initialize
             * parent links matches the caller's varobj. Compile-n-go compiler-
             * created function objects have the top-level cg's scopeChain set
             * as their parent by JSCompiler::newFunction.
             */
            JSObject *scopeobj = (cg->flags & TCF_IN_FUNCTION)
                                 ? STOBJ_GET_PARENT(FUN_OBJECT(cg->fun))
                                 : cg->scopeChain;
            if (scopeobj != caller->varobj)
                return JS_TRUE;

            /*
@@ -1934,54 +2012,18 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
            if (op != JSOP_NAME)
                return JS_TRUE;

            JSLocalKind localKind = js_LookupLocal(cx, caller->fun, atom, &index);
            if (localKind == JSLOCAL_NONE)
                return JS_TRUE;

            uintN upvarLevel = caller->fun->u.i.script->staticLevel;
            JS_ASSERT(cg->staticLevel > upvarLevel);
            if (upvarLevel >= JS_DISPLAY_SIZE)
                return JS_TRUE;

            ale = cg->upvarList.lookup(atom);
            if (!ale) {
                uint32 length, *vector;

                ale = cg->upvarList.add(cg->compiler, atom);
                if (!ale)
                    return JS_FALSE;
                JS_ASSERT(ALE_INDEX(ale) == cg->upvarList.count - 1);

                length = cg->upvarMap.length;
                JS_ASSERT(ALE_INDEX(ale) <= length);
                if (ALE_INDEX(ale) == length) {
                    length = 2 * JS_MAX(2, length);
                    vector = (uint32 *)
                             JS_realloc(cx, cg->upvarMap.vector,
                                        length * sizeof *vector);
                    if (!vector)
                        return JS_FALSE;
                    cg->upvarMap.vector = vector;
                    cg->upvarMap.length = length;
                }

                if (localKind != JSLOCAL_ARG)
                    index += caller->fun->nargs;
                JS_ASSERT(index < JS_BIT(16));

                uintN skip = cg->staticLevel - upvarLevel;
                cg->upvarMap.vector[ALE_INDEX(ale)] = MAKE_UPVAR_COOKIE(skip, index);
            }

            pn->pn_op = JSOP_GETUPVAR;
            pn->pn_cookie = ALE_INDEX(ale);
            pn->pn_dflags |= PND_BOUND;
            return MakeUpvarForEval(pn, cg);
        }
        return JS_TRUE;
    }
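
A note on the growth policy shared by both copies of this logic: length = 2 * JS_MAX(2, length) doubles the upvar map from a floor of four entries, so capacities run 4, 8, 16, ... and each lazily discovered upvar costs amortized O(1). A standalone sketch of the same policy, with hypothetical names and plain realloc standing in for JS_realloc:

    #include <stdint.h>
    #include <stdlib.h>

    struct UpvarMap {
        uint32_t *vector;
        uint32_t length;    /* capacity, in entries */
    };

    static int EnsureUpvarSlot(struct UpvarMap *map, uint32_t index)
    {
        if (index < map->length)
            return 1;
        /* 2 * max(2, length): 0 -> 4, 4 -> 8, 8 -> 16, ... */
        uint32_t n = 2 * (map->length < 2 ? 2 : map->length);
        uint32_t *v = (uint32_t *) realloc(map->vector, n * sizeof *v);
        if (!v)
            return 0;
        map->vector = v;
        map->length = n;
        return 1;
    }

As in the source, callers only ever ask for index == length, so a single doubling always suffices.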

    if (dn->pn_dflags & PND_GVAR) {
        /* If free variable reference, leave pn_op as JSOP_NAME, etc. */
        /*
         * If this is a global reference from within a function, leave pn_op as
         * JSOP_NAME, etc. We could emit JSOP_*GVAR ops within function code if
         * only we could depend on the global frame's slots being valid for all
         * calls to the function.
         */
        if (cg->flags & TCF_IN_FUNCTION)
            return JS_TRUE;

@@ -2013,26 +2055,48 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
        return JS_TRUE;
    }

    /*
     * A JSDefinition witnessed as a declaration by the parser cannot be an
     * upvar, unless it is the degenerate kind of upvar selected immediately
     * below for the special case of compile-and-go code generated from eval
     * called from a function, where the eval code uses function locals. Before
     * misinterpreting the frame-skip half of dn->pn_cookie in such a bound
     * upvar pseudo-definition we detect this case by checking dn->pn_op.
     */
    if (PN_OP(dn) == JSOP_GETUPVAR) {
        if (op == JSOP_NAME) {
            pn->pn_op = JSOP_GETUPVAR;
            pn->pn_cookie = dn->pn_cookie;
            pn->pn_dflags |= PND_BOUND;
        }
        return JS_TRUE;
    }

    uintN level = UPVAR_FRAME_SKIP(cookie);
    JS_ASSERT(cg->staticLevel >= level);

    /*
     * A JSDefinition witnessed as a declaration by the parser cannot be an
     * upvar, unless it is the degenerate kind of upvar selected above (in the
     * code before the PND_GVAR test) for the special case of compile-and-go
     * code generated from eval called from a function, where the eval code
     * uses local vars defined in the function. We detect this upvar-for-eval
     * case by checking dn's op.
     */
    if (PN_OP(dn) == JSOP_GETUPVAR) {
        JS_ASSERT(cg->staticLevel >= level);
        if (op != JSOP_NAME)
            return JS_TRUE;

#ifdef DEBUG
        JSStackFrame *caller = cg->compiler->callerFrame;
        JS_ASSERT(caller);

        JSTreeContext *tc = cg;
        while (tc->staticLevel != level)
            tc = tc->parent;
        JS_ASSERT(tc->flags & TCF_COMPILING);

        JSCodeGenerator *evalcg = (JSCodeGenerator *) tc;
        JS_ASSERT(evalcg->flags & TCF_COMPILE_N_GO);
        JS_ASSERT(!(evalcg->flags & TCF_IN_FOR_INIT));
        JS_ASSERT(caller->script);
        JS_ASSERT(caller->fun && caller->varobj == evalcg->scopeChain);
#endif

        if (cg->staticLevel == level) {
            pn->pn_op = JSOP_GETUPVAR;
            pn->pn_cookie = cookie;
            pn->pn_dflags |= PND_BOUND;
            return JS_TRUE;
        }

        return MakeUpvarForEval(pn, cg);
    }
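
Companion to the packing sketch after MakeUpvarForEval: pulling the two halves back out, as UPVAR_FRAME_SKIP does for the level computation above (again assuming the high-16/low-16 layout; helper names hypothetical):

    #include <stdint.h>

    static unsigned UpvarFrameSkip(uint32_t cookie) { return cookie >> 16; }     /* frame-skip half */
    static unsigned UpvarFrameSlot(uint32_t cookie) { return cookie & 0xFFFF; }  /* slot-index half */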

    uintN skip = cg->staticLevel - level;
    if (skip != 0) {
        JS_ASSERT(cg->flags & TCF_IN_FUNCTION);
@@ -2160,9 +2224,15 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)

      case JSDefinition::VAR:
        if (PN_OP(dn) == JSOP_CALLEE) {
            JS_ASSERT(cg->fun->flags & JSFUN_LAMBDA && atom == cg->fun->atom);
            JS_ASSERT(op != JSOP_CALLEE);
            JS_ASSERT((cg->fun->flags & JSFUN_LAMBDA) && atom == cg->fun->atom);

            switch (op) {
              case JSOP_NAME:
              case JSOP_DELNAME:
                if (!(cg->flags & TCF_FUN_HEAVYWEIGHT))
                    op = JSOP_FALSE;
                break;
              default:
                /*
                 * Leave pn->pn_op == JSOP_NAME if cg->fun is heavyweight, as
                 * we cannot be sure cg->fun is not something of the form:
@@ -2174,18 +2244,14 @@ BindNameToSlot(JSContext *cx, JSCodeGenerator *cg, JSParseNode *pn)
                 * lexically bound in an outer declarative environment from the
                 * function's activation. See jsfun.cpp:call_resolve.
                 */
                if (!(cg->flags & TCF_FUN_HEAVYWEIGHT))
                if (!(cg->flags & TCF_FUN_HEAVYWEIGHT)) {
                    op = JSOP_CALLEE;
                break;
              case JSOP_DELNAME:
                op = JSOP_FALSE;
                break;
              default:
                op = JSOP_CALLEE;
                pn->pn_dflags |= PND_CONST;
                }
                break;
            }
            pn->pn_op = op;
            pn->pn_dflags |= PND_BOUND | PND_CONST;
            pn->pn_dflags |= PND_BOUND;
            return JS_TRUE;
        }
        /* FALL THROUGH */

@@ -3618,6 +3618,13 @@ js_AttemptToExtendTree(JSContext* cx, VMSideExit* anchor, VMSideExit* exitedFrom
        c->root = f;
    }

    /*
     * If we are recycling a fragment, it might have a different ip so reset it here. This
     * can happen when attaching a branch to a NESTED_EXIT, which might extend along separate paths
     * (i.e. after the loop edge, and after a return statement).
     */
    c->ip = cx->fp->regs->pc;

    debug_only_v(printf("trying to attach another branch to the tree (hits = %d)\n", c->hits());)

    int32_t& hits = c->hits();

[File diff suppressed because it is too large]
@@ -56,32 +56,15 @@ namespace nanojit

#ifdef NJ_VERBOSE
    const char *regNames[] = {
#if defined NANOJIT_IA32
        "eax", "ecx", "edx", "ebx", "esp", "ebp", "esi", "edi",
        "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7",
        "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7"
#elif defined NANOJIT_AMD64
        "rax", "rcx", "rdx", "rbx", "rsp", "rbp", "rsi", "rdi",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "xmm0","xmm1","xmm2","xmm3","xmm4","xmm5","xmm6","xmm7",
        "xmm8","xmm9","xmm10","xmm11","xmm12","xmm13","xmm14","xmm15"
#endif
    };
#endif

#if defined NANOJIT_IA32
    const Register Assembler::argRegs[] = { ECX, EDX };
    const Register Assembler::retRegs[] = { EAX, EDX };
    const Register Assembler::savedRegs[] = { EBX, ESI, EDI };
#elif defined NANOJIT_AMD64
#if defined WIN64
    const Register Assembler::argRegs[] = { R8, R9, RCX, RDX };
#else
    const Register Assembler::argRegs[] = { RDI, RSI, RDX, RCX, R8, R9 };
#endif
    const Register Assembler::retRegs[] = { RAX, RDX };
    const Register Assembler::savedRegs[] = { R13, R14, R15 };
#endif

    const static uint8_t max_abi_regs[] = {
        2, /* ABI_FASTCALL */
@@ -118,13 +101,7 @@ namespace nanojit
        // Reserve stackNeeded bytes, padded
        // to preserve NJ_ALIGN_STACK-byte alignment.
        if (amt)
        {
#if defined NANOJIT_IA32
            SUBi(SP, amt);
#elif defined NANOJIT_AMD64
            SUBQi(SP, amt);
#endif
        }

        verbose_only( outputAddr=true; asm_output("[frag entry]"); )
        NIns *fragEntry = _nIns;
@@ -163,28 +140,15 @@ namespace nanojit
            lr = 0;
        } else { // target doesn't exist. Use 0 jump offset and patch later
            lr = guard->record();
#if defined NANOJIT_AMD64
            /* 8 bytes for address, 4 for imm32, 2 for jmp */
            underrunProtect(14);
            _nIns -= 8;
            *(intptr_t *)_nIns = intptr_t(_epilogue);
            lr->jmp = _nIns;
            JMPm_nochk(0);
#else
            JMP_long(_epilogue);
            lr->jmp = _nIns;
#endif
        }
    }
    // first restore ESP from EBP, undoing SUBi(SP,amt) from genPrologue
    MR(SP,FP);

    // return value is GuardRecord*
#if defined NANOJIT_IA32
    LDi(EAX, int(lr));
#elif defined NANOJIT_AMD64
    LDQi(RAX, intptr_t(lr));
#endif
    }

    NIns *Assembler::genEpilogue()
@@ -202,7 +166,6 @@ namespace nanojit
        return _nIns;
    }

#if defined NANOJIT_IA32
    void Assembler::asm_call(LInsp ins)
    {
        const CallInfo* call = ins->callInfo();
@@ -292,35 +255,6 @@ namespace nanojit
        SUBi(SP, extra);
    }

#elif defined NANOJIT_AMD64

    void Assembler::asm_call(LInsp ins)
    {
        Register fpu_reg = XMM0;
        const CallInfo* call = ins->callInfo();
        int n = 0;

        CALL(call);

        ArgSize sizes[10];
        uint32_t argc = call->get_sizes(sizes);

        for(uint32_t i=0; i < argc; i++)
        {
            uint32_t j = argc-i-1;
            ArgSize sz = sizes[j];
            Register r = UnknownReg;
            if (sz != ARGSIZE_F) {
                r = argRegs[n++]; // tell asm_arg what reg to use
            } else {
                r = fpu_reg;
                fpu_reg = nextreg(fpu_reg);
            }
            findSpecificRegFor(ins->arg(j), r);
        }
    }
#endif

    void Assembler::nMarkExecute(Page* page, int flags)
    {
        NanoAssert(sizeof(Page) == NJ_PAGE_SIZE);
@@ -401,16 +335,13 @@ namespace nanojit
        a.clear();
        a.used = 0;
        a.free = SavedRegs | ScratchRegs;
#if defined NANOJIT_IA32
        if (!config.sse2)
            a.free &= ~XmmRegs;
#endif
        debug_only( a.managed = a.free; )
    }

    NIns* Assembler::nPatchBranch(NIns* branch, NIns* targ)
    {
#if defined NANOJIT_IA32
        NIns* was = 0;
        intptr_t offset = intptr_t(targ) - intptr_t(branch);
        if (branch[0] == JMP32) {
@@ -423,17 +354,6 @@ namespace nanojit
            VALGRIND_DISCARD_TRANSLATIONS(&branch[2], sizeof(int32_t));
        } else
            NanoAssertMsg(0, "Unknown branch type in nPatchBranch");
#else
        if (branch[0] == 0xFF && branch[1] == 0x25) {
            NIns *mem;
            mem = &branch[6] + *(int32_t *)&branch[2];
            was = *(intptr_t*)mem;
            *(intptr_t *)mem = intptr_t(targ);
            VALGRIND_DISCARD_TRANSLATIONS(mem, sizeof(intptr_t));
        } else {
            NanoAssertMsg(0, "Unknown branch type in nPatchBranch");
        }
#endif
        return was;
    }
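
The AMD64 arm of nPatchBranch above relies on the patchable jump shape the guard path in this file emits: FF 25 disp32 (jmp qword ptr [rip+disp32]) with the 8-byte target cell placed disp32 bytes past the end of the instruction. A self-contained sketch of rewriting that cell (hypothetical helper, not part of the patch):

    #include <stdint.h>
    #include <string.h>

    /* branch[0..1] = FF 25, branch[2..5] = disp32; the 64-bit target
       lives at &branch[6] + disp32, which is the cell nPatchBranch swaps. */
    static void PatchIndirectJmp(uint8_t *branch, uintptr_t target)
    {
        int32_t disp;
        memcpy(&disp, branch + 2, sizeof disp);
        uint8_t *cell = branch + 6 + disp;
        memcpy(cell, &target, sizeof target);
    }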

@@ -505,22 +425,12 @@ namespace nanojit
    {
        if (rmask(r) & FpRegs)
        {
#if defined NANOJIT_IA32
            if (rmask(r) & XmmRegs) {
#endif
                SSE_LDQ(r, d, FP);
#if defined NANOJIT_IA32
            } else {
                FLDQ(d, FP);
            }
#endif
        }
#if defined NANOJIT_AMD64
        else if (i->opcode() == LIR_param)
        {
            LDQ(r, d, FP);
        }
#endif
        else
        {
            LD(r, d, FP);
@@ -585,34 +495,22 @@ namespace nanojit
            // save to spill location
            if (rmask(rr) & FpRegs)
            {
#if defined NANOJIT_IA32
                if (rmask(rr) & XmmRegs) {
#endif
                    SSE_STQ(d, FP, rr);
#if defined NANOJIT_IA32
                } else {
                    FSTQ((pop?1:0), d, FP);
                }
#endif
            }
#if defined NANOJIT_AMD64
            else if (quad)
            {
                STQ(FP, d, rr);
            }
#endif
            else
            {
                ST(FP, d, rr);
            }
        }
#if defined NANOJIT_IA32
        else if (pop && (rmask(rr) & x87Regs))
        {
            // pop the fpu result since it isn't used
            FSTP(FST0);
        }
#endif
    }

    void Assembler::asm_load64(LInsp ins)
@@ -628,30 +526,6 @@ namespace nanojit
            Register rb = getBaseReg(base, db, GpRegs);
            SSE_LDQ(rr, db, rb);
        }
#if defined NANOJIT_AMD64
        else if (rr != UnknownReg && rmask(rr) & GpRegs)
        {
            freeRsrcOf(ins, false);
            Register rb = findRegFor(base, GpRegs);
            LDQ(rr, db, rb);
        }
        else
        {
            int d = disp(resv);
            Register rb = findRegFor(base, GpRegs);

            /* We need a temporary register we can move the destination into */
            rr = registerAlloc(GpRegs);

            STQ(FP, d, rr);
            LDQ(rr, db, rb);

            /* Mark as free */
            _allocator.addFree(rr);

            freeRsrcOf(ins, false);
        }
#elif defined NANOJIT_IA32
        else
        {
            int dr = disp(resv);
@@ -677,7 +551,6 @@ namespace nanojit
                FLDQ(db, rb);
            }
        }
#endif
    }
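
The registerAlloc(GpRegs) / _allocator.addFree(rr) pairing in the AMD64 fallback above is a recurring idiom in this file (asm_mmq, asm_quad, and asm_u2f use it too): take a register without binding it to any LIns, emit a few instructions through it, then put it straight back on the free list. A toy, self-contained model of that style of allocator (all names hypothetical):

    #include <stdint.h>

    struct ToyRegAlloc {
        uint32_t free;                              /* bitmask of free registers */
        int alloc() {                               /* grab the lowest free register */
            for (int r = 0; r < 32; r++)
                if (free & (1u << r)) { free &= ~(1u << r); return r; }
            return -1;                              /* none available */
        }
        void addFree(int r) { free |= (1u << r); }  /* immediately reusable */
    };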

    void Assembler::asm_store64(LInsp value, int dr, LInsp base)
@@ -699,7 +572,6 @@ namespace nanojit
            return;
        }

#if defined NANOJIT_IA32
        if (value->isop(LIR_ldq) || value->isop(LIR_ldqc) || value->isop(LIR_qjoin))
        {
            // value is 64bit struct or int64_t, or maybe a double.
@@ -760,41 +632,6 @@ namespace nanojit
        } else {
            FSTQ(pop, dr, rb);
        }
#elif defined NANOJIT_AMD64
        /* If this is not a float operation, we can use GpRegs instead.
         * We can do this in a few other cases but for now I'll keep it simple.
         */
        Register rb = findRegFor(base, GpRegs);
        Reservation *rV = getresv(value);

        if (rV != NULL && rV->reg != UnknownReg) {
            if (rmask(rV->reg) & GpRegs) {
                STQ(rb, dr, rV->reg);
            } else {
                SSE_STQ(dr, rb, rV->reg);
            }
        } else {
            Register rv;

            /* Try to catch some common patterns.
             * Note: this is a necessity, since in between things like
             * asm_fop() could see the reservation and try to use a non-SSE
             * register for adding. Same for asm_qbinop in theory.
             * There should probably be asserts to catch more cases.
             */
            if (value->isop(LIR_u2f)
                || value->isop(LIR_i2f)
                || (value->opcode() >= LIR_fneg && value->opcode() <= LIR_fmul)
                || value->opcode() == LIR_fdiv
                || value->opcode() == LIR_fcall) {
                rv = findRegFor(value, XmmRegs);
                SSE_STQ(dr, rb, rv);
            } else {
                rv = findRegFor(value, GpRegs);
                STQ(rb, dr, rv);
            }
        }
#endif
    }

    /**
@@ -805,16 +642,13 @@ namespace nanojit
        // value is either a 64bit struct or maybe a float
        // that isn't live in an FPU reg. Either way, don't
        // put it in an FPU reg just to load & store it.
#if defined NANOJIT_IA32
        if (config.sse2)
        {
#endif
            // use SSE to load+store 64bits
            Register t = registerAlloc(XmmRegs);
            _allocator.addFree(t);
            SSE_STQ(dd, rd, t);
            SSE_LDQ(t, ds, rs);
#if defined NANOJIT_IA32
        }
        else
        {
@@ -826,7 +660,6 @@ namespace nanojit
            ST(rd, dd, t);
            LD(t, ds, rs);
        }
#endif
    }

    NIns* Assembler::asm_branch(bool branchOnFalse, LInsp cond, NIns* targ, bool isfar)
@@ -918,9 +751,7 @@ namespace nanojit
        NanoAssert((!lhs->isQuad() && !rhs->isQuad()) || (lhs->isQuad() && rhs->isQuad()));

        // Not supported yet.
#if !defined NANOJIT_64BIT
        NanoAssert(!lhs->isQuad() && !rhs->isQuad());
#endif

        // ready to issue the compare
        if (rhs->isconst())
@@ -928,14 +759,7 @@ namespace nanojit
            int c = rhs->constval();
            if (c == 0 && cond->isop(LIR_eq)) {
                Register r = findRegFor(lhs, GpRegs);
                if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
                    TESTQ(r, r);
#endif
                } else {
                    TEST(r,r);
                }
                // No 64-bit immediates so fall-back to below
                TEST(r,r);
            }
            else if (!rhs->isQuad()) {
                Register r = getBaseReg(lhs, c, GpRegs);
@@ -947,13 +771,7 @@ namespace nanojit
            findRegFor2(GpRegs, lhs, rA, rhs, rB);
            Register ra = rA->reg;
            Register rb = rB->reg;
            if (rhs->isQuad()) {
#if defined NANOJIT_64BIT
                CMPQ(ra, rb);
#endif
            } else {
                CMP(ra, rb);
            }
            CMP(ra, rb);
        }
    }
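
A note on the c == 0 && cond->isop(LIR_eq) special case above: TEST(r,r) ANDs the register with itself and sets ZF exactly as CMP against an immediate zero would, but with a shorter encoding and no immediate byte. A tiny illustration of the predicate equivalence (not part of the patch):

    #include <stdint.h>

    static int IsZeroViaTest(uint32_t r)
    {
        return (r & r) == 0;  /* what TEST r,r computes: ZF set iff r == 0 */
    }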

@@ -1051,65 +869,76 @@ namespace nanojit
        if (lhs == rhs)
            rb = ra;

        if (op == LIR_add || op == LIR_addp)
        switch (op) {
        case LIR_add:
        case LIR_addp:
            ADD(rr, rb);
        else if (op == LIR_sub)
            break;
        case LIR_sub:
            SUB(rr, rb);
        else if (op == LIR_mul)
            break;
        case LIR_mul:
            MUL(rr, rb);
        else if (op == LIR_and)
            break;
        case LIR_and:
            AND(rr, rb);
        else if (op == LIR_or)
            break;
        case LIR_or:
            OR(rr, rb);
        else if (op == LIR_xor)
            break;
        case LIR_xor:
            XOR(rr, rb);
        else if (op == LIR_lsh)
            break;
        case LIR_lsh:
            SHL(rr, rb);
        else if (op == LIR_rsh)
            break;
        case LIR_rsh:
            SAR(rr, rb);
        else if (op == LIR_ush)
            break;
        case LIR_ush:
            SHR(rr, rb);
        else
            break;
        default:
            NanoAssertMsg(0, "Unsupported");
        }
    }
    else
    {
        int c = rhs->constval();
        if (op == LIR_add || op == LIR_addp) {
#ifdef NANOJIT_IA32_TODO
            if (ra != rr) {
                // this doesn't set cc's, only use it when cc's not required.
                LEA(rr, c, ra);
                ra = rr; // suppress mov
            } else
#endif
            {
                ADDi(rr, c);
            }
        } else if (op == LIR_sub) {
#ifdef NANOJIT_IA32
            if (ra != rr) {
                LEA(rr, -c, ra);
                ra = rr;
            } else
#endif
            {
                SUBi(rr, c);
            }
        } else if (op == LIR_and)
        switch (op) {
        case LIR_addp:
            // this doesn't set cc's, only use it when cc's not required.
            LEA(rr, c, ra);
            ra = rr; // suppress mov
            break;
        case LIR_add:
            ADDi(rr, c);
            break;
        case LIR_sub:
            SUBi(rr, c);
            break;
        case LIR_and:
            ANDi(rr, c);
        else if (op == LIR_or)
            break;
        case LIR_or:
            ORi(rr, c);
        else if (op == LIR_xor)
            break;
        case LIR_xor:
            XORi(rr, c);
        else if (op == LIR_lsh)
            break;
        case LIR_lsh:
            SHLi(rr, c);
        else if (op == LIR_rsh)
            break;
        case LIR_rsh:
            SARi(rr, c);
        else if (op == LIR_ush)
            break;
        case LIR_ush:
            SHRi(rr, c);
        else
            break;
        default:
            NanoAssertMsg(0, "Unsupported");
            break;
        }
    }

    if ( rr != ra )
@@ -1146,8 +975,6 @@ namespace nanojit
        Register rr = prepResultReg(ins, GpRegs);
        int d = disp->constval();

#ifdef NANOJIT_IA32
        /* Can't use this on AMD64, no 64-bit immediate addresses. */
        if (base->isconst()) {
            intptr_t addr = base->constval();
            addr += d;
@@ -1160,7 +987,6 @@ namespace nanojit
            return;
        }

        /* :TODO: Use this on AMD64 as well. */
        /* Search for add(X,Y) */
        if (base->opcode() == LIR_piadd) {
            int scale = 0;
@@ -1182,7 +1008,7 @@ namespace nanojit
            Reservation *rL = getresv(lhs);

            /* Does LHS have a register yet? If not, re-use the result reg.
             * :TODO: If LHS is const, we could eliminate a register use.
             * @todo -- If LHS is const, we could eliminate a register use.
             */
            if (rL == NULL || rL->reg == UnknownReg)
                rleft = findSpecificRegFor(lhs, rr);
@@ -1207,7 +1033,6 @@ namespace nanojit

            return;
        }
#endif

        Register ra = getBaseReg(base, d, GpRegs);
        if (op == LIR_ldcb)
@@ -1255,26 +1080,7 @@ namespace nanojit
            default: debug_only( NanoAssert(0); ) break;
            }
        } else if (op == LIR_qcmov) {
#if !defined NANOJIT_64BIT
            NanoAssert(0);
#else
            switch (condval->opcode())
            {
                // note that these are all opposites...
                case LIR_eq:  MRQNE(rr, iffalsereg); break;
                case LIR_ov:  MRQNO(rr, iffalsereg); break;
                case LIR_cs:  MRQNC(rr, iffalsereg); break;
                case LIR_lt:  MRQGE(rr, iffalsereg); break;
                case LIR_le:  MRQG(rr, iffalsereg);  break;
                case LIR_gt:  MRQLE(rr, iffalsereg); break;
                case LIR_ge:  MRQL(rr, iffalsereg);  break;
                case LIR_ult: MRQAE(rr, iffalsereg); break;
                case LIR_ule: MRQA(rr, iffalsereg);  break;
                case LIR_ugt: MRQBE(rr, iffalsereg); break;
                case LIR_uge: MRQB(rr, iffalsereg);  break;
                debug_only( default: NanoAssert(0); break; )
            }
#endif
        }
        /*const Register iftruereg =*/ findSpecificRegFor(iftrue, rr);
        asm_cmp(condval);
@@ -1334,7 +1140,6 @@ namespace nanojit

    void Assembler::asm_quad(LInsp ins)
    {
#if defined NANOJIT_IA32
        Reservation *rR = getresv(ins);
        Register rr = rR->reg;
        if (rr != UnknownReg)
@@ -1382,52 +1187,12 @@ namespace nanojit
            STi(FP,d+4,p[1]);
            STi(FP,d,p[0]);
        }
#elif defined NANOJIT_AMD64
        Reservation *rR = getresv(ins);
        int64_t val = *(int64_t *)(ins - 2);

        if (rR->reg != UnknownReg)
        {
            if (rmask(rR->reg) & GpRegs)
            {
                LDQi(rR->reg, val);
            }
            else if (rmask(rR->reg) & XmmRegs)
            {
                if (ins->constvalf() == 0.0)
                {
                    SSE_XORPDr(rR->reg, rR->reg);
                }
                else
                {
                    /* Get a short-lived register, not associated with instruction */
                    Register rd = rR->reg;
                    Register rs = registerAlloc(GpRegs);

                    SSE_MOVD(rd, rs);
                    LDQi(rs, val);

                    _allocator.addFree(rs);
                }
            }
        }
        else
        {
            const int32_t* p = (const int32_t*) (ins-2);
            int dr = disp(rR);
            STi(FP, dr+4, p[1]);
            STi(FP, dr, p[0]);
        }

        freeRsrcOf(ins, false);
#endif
    }

    void Assembler::asm_qlo(LInsp ins)
    {
        LIns *q = ins->oprnd1();

#if defined NANOJIT_IA32
        if (!config.sse2)
        {
            Register rr = prepResultReg(ins, GpRegs);
@@ -1435,7 +1200,6 @@ namespace nanojit
            LD(rr, d, FP);
        }
        else
#endif
        {
            Reservation *resv = getresv(ins);
            Register rr = resv->reg;
@@ -1455,10 +1219,8 @@ namespace nanojit

    void Assembler::asm_fneg(LInsp ins)
    {
#if defined NANOJIT_IA32
        if (config.sse2)
        {
#endif
            LIns *lhs = ins->oprnd1();

            Register rr = prepResultReg(ins, XmmRegs);
@@ -1489,7 +1251,6 @@ namespace nanojit

            if (rr != ra)
                SSE_MOVSD(rr, ra);
#if defined NANOJIT_IA32
        }
        else
        {
@@ -1511,7 +1272,6 @@ namespace nanojit
            // if we had more than one fpu reg, this is where
            // we would move ra into rr if rr != ra.
        }
#endif
    }

    void Assembler::asm_arg(ArgSize sz, LInsp p, Register r)
@@ -1596,7 +1356,6 @@ namespace nanojit

    void Assembler::asm_farg(LInsp p)
    {
#if defined NANOJIT_IA32
        NanoAssert(p->isQuad());
        Register r = findRegFor(p, FpRegs);
        if (rmask(r) & XmmRegs) {
@@ -1610,18 +1369,13 @@ namespace nanojit
            evict(FST0);
        }
        SUBi(ESP,8);
        //PUSHr(ECX); // 2*pushr is smaller than sub
        //PUSHr(ECX);
#endif
    }

    void Assembler::asm_fop(LInsp ins)
    {
        LOpcode op = ins->opcode();
#if defined NANOJIT_IA32
        if (config.sse2)
        {
#endif
            LIns *lhs = ins->oprnd1();
            LIns *rhs = ins->oprnd2();

@@ -1665,7 +1419,6 @@ namespace nanojit

            if (rr != ra)
                SSE_MOVSD(rr, ra);
#if defined NANOJIT_IA32
        }
        else
        {
@@ -1696,53 +1449,41 @@ namespace nanojit
            else if (op == LIR_fdiv)
                { FDIVR(db, FP); }
        }
#endif
    }

    void Assembler::asm_i2f(LInsp ins)
    {
        // where our result goes
        Register rr = prepResultReg(ins, FpRegs);
#if defined NANOJIT_IA32
        if (rmask(rr) & XmmRegs)
        {
#endif
            // todo support int value in memory
            Register gr = findRegFor(ins->oprnd1(), GpRegs);
            SSE_CVTSI2SD(rr, gr);
#if defined NANOJIT_IA32
        }
        else
        {
            int d = findMemFor(ins->oprnd1());
            FILD(d, FP);
        }
#endif
    }

    Register Assembler::asm_prep_fcall(Reservation *rR, LInsp ins)
    {
#if defined NANOJIT_IA32
        if (rR) {
            Register rr;
            if ((rr=rR->reg) != UnknownReg && (rmask(rr) & XmmRegs))
                evict(rr);
        }
        return prepResultReg(ins, rmask(FST0));
#elif defined NANOJIT_AMD64
        evict(RAX);
        return prepResultReg(ins, rmask(XMM0));
#endif
    }

    void Assembler::asm_u2f(LInsp ins)
    {
        // where our result goes
        Register rr = prepResultReg(ins, FpRegs);
#if defined NANOJIT_IA32
        if (rmask(rr) & XmmRegs)
        {
#endif
            // don't call findRegFor, we want a reg we can stomp on for a very short time,
            // not a reg that will continue to be associated with the LIns
            Register gr = registerAlloc(GpRegs);
@@ -1766,32 +1507,7 @@ namespace nanojit
            // adding back double(0x80000000) makes the range 0..2^32-1.

            static const double k_NEGONE = 2147483648.0;
#if defined NANOJIT_IA32
            SSE_ADDSDm(rr, &k_NEGONE);
#elif defined NANOJIT_AMD64
            /* Squirrel the constant at the bottom of the page. */
            if (_dblNegPtr != NULL)
            {
                underrunProtect(10);
            }
            if (_dblNegPtr == NULL)
            {
                underrunProtect(30);
                uint8_t *base, *begin;
                base = (uint8_t *)((intptr_t)_nIns & ~((intptr_t)NJ_PAGE_SIZE-1));
                base += sizeof(PageHeader) + _pageData;
                begin = base;
                /* Make sure we align */
                if ((uintptr_t)base & 0xF) {
                    base = (NIns *)((uintptr_t)base & ~(0xF));
                    base += 16;
                }
                _pageData += (int32_t)(base - begin) + sizeof(double);
                _negOnePtr = (NIns *)base;
                *(double *)_negOnePtr = k_NEGONE;
            }
            SSE_ADDSDm(rr, _negOnePtr);
#endif

            SSE_CVTSI2SD(rr, gr);

@@ -1810,7 +1526,6 @@ namespace nanojit

            // ok, we're done with it
            _allocator.addFree(gr);
#if defined NANOJIT_IA32
        }
        else
        {
@@ -1822,7 +1537,6 @@ namespace nanojit
            STi(base, disp+4, 0); // high 32 bits = 0
            ST(base, disp, gr); // low 32 bits = unsigned value
        }
#endif
    }
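
A worked, self-contained sketch of the unsigned-to-double trick asm_u2f implements above (illustrative only): CVTSI2SD converts signed 32-bit values, so per the comments the generated code biases the input by 2^31 before converting, then adds double(0x80000000) (the k_NEGONE constant) back afterwards, restoring the full 0..2^32-1 range:

    #include <stdint.h>

    static double UnsignedToDouble(uint32_t u)
    {
        int32_t biased = (int32_t)(u ^ 0x80000000u);  /* u - 2^31, viewed as signed */
        return (double) biased + 2147483648.0;        /* add 2^31 back */
    }

For u = 0 this yields -2^31 + 2^31 = 0, and for u = 0xFFFFFFFF it yields (2^31 - 1) + 2^31 = 2^32 - 1, the endpoints of the intended range.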

    void Assembler::asm_nongp_copy(Register r, Register s)
@@ -1937,10 +1651,8 @@ namespace nanojit
            mask = 0x05;
        }

#if defined NANOJIT_IA32
        if (config.sse2)
        {
#endif
            // UNORDERED:    ZF,PF,CF <- 111;
            // GREATER_THAN: ZF,PF,CF <- 000;
            // LESS_THAN:    ZF,PF,CF <- 001;
@@ -1952,21 +1664,14 @@ namespace nanojit
                SSE_UCOMISD(r, r);
            }
            else {
#if defined NANOJIT_IA32
                evict(EAX);
                TEST_AH(mask);
                LAHF();
#elif defined NANOJIT_AMD64
                evict(RAX);
                TEST_AL(mask);
                POPr(RAX);
                PUSHFQ();
#endif

                Reservation *rA, *rB;
                findRegFor2(XmmRegs, lhs, rA, rhs, rB);
                SSE_UCOMISD(rA->reg, rB->reg);
            }
#if defined NANOJIT_IA32
        }
        else
        {
@@ -1999,19 +1704,10 @@ namespace nanojit
                FLDr(FST0); // DUP
            }
        }
#endif
    }

    void Assembler::nativePageReset()
    {
#if defined NANOJIT_AMD64
        /* We store some stuff at the bottom of the page.
         * We reserve 8-bytes for long jumps just in case we need them.
         */
        _pageData = 0;
        _dblNegPtr = NULL;
        _negOnePtr = NULL;
#endif
    }

    Register Assembler::asm_binop_rhs_reg(LInsp ins)
@@ -2019,72 +1715,12 @@ namespace nanojit
        LOpcode op = ins->opcode();
        LIns *rhs = ins->oprnd2();

        if (op == LIR_lsh || op == LIR_rsh || op == LIR_ush) {
#if defined NANOJIT_IA32
        if (op == LIR_lsh || op == LIR_rsh || op == LIR_ush)
            return findSpecificRegFor(rhs, ECX);
#elif defined NANOJIT_AMD64
            return findSpecificRegFor(rhs, RCX);
#endif
        }

        return UnknownReg;
    }

#if defined NANOJIT_AMD64
    void Assembler::asm_qbinop(LIns *ins)
    {
        LInsp lhs = ins->oprnd1();
        LInsp rhs = ins->oprnd2();
        LOpcode op = ins->opcode();

        Register rr = prepResultReg(ins, GpRegs);
        Reservation *rA = getresv(lhs);
        Register ra;

        if (rA == NULL || (ra = rA->reg) == UnknownReg) {
            ra = findSpecificRegFor(lhs, rr);
        }

        if (rhs->isconst())
        {
            int c = rhs->constval();

            if (op == LIR_qiadd)
            {
                ADDQi(rr, c);
            } else if (op == LIR_qiand) {
                ANDQi(rr, c);
            } else if (op == LIR_qilsh) {
                SHLQi(rr, c);
            } else if (op == LIR_qior) {
                ORQi(rr, c);
            }
        } else {
            Register rv;

            if (lhs == rhs) {
                rv = ra;
            } else {
                rv = findRegFor(rhs, GpRegs & ~(rmask(rr)));
            }

            if (op == LIR_qiadd) {
                ADDQ(rr, rv);
            } else if (op == LIR_qiand) {
                ANDQ(rr, rv);
            } else if (op == LIR_qior) {
                ORQ(rr, rv);
            } else {
                NanoAssert(rhs->isconst());
            }
        }

        if (rr != ra) {
            MR(rr, ra);
        }
    }
#endif

    void Assembler::nativePageSetup()
    {
        if (!_nIns) _nIns = pageAlloc();