Mirror of https://github.com/mozilla/gecko-dev.git (synced 2024-11-05 08:35:26 +00:00)
Remove code to unlink trees to reduce the size of GuardRecord. Allow GuardRecords to share one common SideExit structure. The VM places both explicitly into the LIR (460538, r=danderson).
This commit is contained in:
parent fd925f2638
commit bcc419e53a
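For orientation before the diff: after this change each guard instruction carries a small GuardRecord whose exit field points at a SideExit, several GuardRecords can share one SideExit by chaining through peer, and the type map now lives directly behind the SideExit instead of behind a typeMap pointer. The sketch below restates that layout; it is a simplified illustration assembled from the struct definitions and the getTypeMap helper near the end of this diff, not the verbatim nanojit declarations (field types are abbreviated and verbose-only members are omitted).

// Simplified sketch of the post-patch layout (not the exact nanojit source).
#include <stdint.h>

struct Fragment;             // stands in for nanojit::Fragment
struct SideExit;

struct GuardRecord {         // per guard instruction, now deliberately small
    void*        jmp;        // patchable jump emitted for this guard
    GuardRecord* next;       // incoming-link list kept by the target fragment
    GuardRecord* peer;       // other guards sharing the same SideExit
    SideExit*    exit;       // the shared exit description
};

struct SideExit {            // per exit point, shared by its guards
    GuardRecord* guards;     // head of the peer-chained guard list
    Fragment*    from;
    Fragment*    target;
    intptr_t     ip_adj, sp_adj, rp_adj;
    int32_t      calldepth;
    uint32_t     numGlobalSlots, numStackSlots, numStackSlotsBelowCurrentFrame;
    int          exitType;   // ExitType enum in the real code
};

// The type map is no longer a SideExit field; it sits immediately after the
// SideExit in the same LIR-allocated block.
static inline uint8_t* getTypeMap(SideExit* exit) { return (uint8_t*)(exit + 1); }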
@@ -84,7 +84,7 @@ BUILTIN4(extern, BOOL, js_Any_setelem, CONTEXT, OBJECT, UINT32, JSVAL,
 BUILTIN3(extern, OBJECT, js_FastValueToIterator, CONTEXT, UINT32, JSVAL, 0, 0)
 BUILTIN2(extern, JSVAL, js_FastCallIteratorNext, CONTEXT, OBJECT, 0, 0)
 BUILTIN2(extern, BOOL, js_CloseIterator, CONTEXT, JSVAL, 0, 0)
-BUILTIN2(extern, GUARDRECORD, js_CallTree, INTERPSTATE, FRAGMENT, 0, 0)
+BUILTIN2(extern, SIDEEXIT, js_CallTree, INTERPSTATE, FRAGMENT, 0, 0)
 BUILTIN2(extern, OBJECT, js_FastNewObject, CONTEXT, OBJECT, 0, 0)
 BUILTIN3(extern, BOOL, js_AddProperty, CONTEXT, OBJECT, SCOPEPROP, 0, 0)
 BUILTIN3(extern, BOOL, js_HasNamedProperty, CONTEXT, OBJECT, STRING, 0, 0)
@@ -251,22 +251,23 @@ js_FastCallIteratorNext(JSContext* cx, JSObject* iterobj)
     return v;
 }
 
-GuardRecord* FASTCALL
+SideExit* FASTCALL
 js_CallTree(InterpState* state, Fragment* f)
 {
-    GuardRecord* lr;
     union { NIns *code; GuardRecord* (FASTCALL *func)(InterpState*, Fragment*); } u;
 
     u.code = f->code();
     JS_ASSERT(u.code);
 
+    GuardRecord* rec;
 #if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32)
-    SIMULATE_FASTCALL(lr, state, NULL, u.func);
+    SIMULATE_FASTCALL(rec, state, NULL, u.func);
 #else
-    lr = u.func(state, NULL);
+    rec = u.func(state, NULL);
 #endif
+    SideExit* lr = rec->exit;
 
-    if (lr->exit->exitType == NESTED_EXIT) {
+    if (lr->exitType == NESTED_EXIT) {
         /* This only occurs once a tree call guard mismatches and we unwind the tree call stack.
            We store the first (innermost) tree call guard in state and we will try to grow
            the outer tree the failing call was in starting at that guard. */
@@ -129,7 +129,7 @@ struct JSTraceableNative {
 #define _JS_TYPEINFO_OBJECT _JS_TYPEINFO(JSObject *, _JS_PTR)
 #define _JS_TYPEINFO_SCOPEPROP _JS_TYPEINFO(JSScopeProperty *, _JS_PTR)
 #define _JS_TYPEINFO_PC _JS_TYPEINFO(jsbytecode *, _JS_PTR)
-#define _JS_TYPEINFO_GUARDRECORD _JS_TYPEINFO(nanojit::GuardRecord *, _JS_PTR)
+#define _JS_TYPEINFO_SIDEEXIT _JS_TYPEINFO(nanojit::SideExit *, _JS_PTR)
#define _JS_TYPEINFO_INTERPSTATE _JS_TYPEINFO(avmplus::InterpState *, _JS_PTR)
 #define _JS_TYPEINFO_FRAGMENT _JS_TYPEINFO(nanojit::Fragment *, _JS_PTR)
 
@@ -961,9 +961,9 @@ mergeTypeMaps(uint8** partial, unsigned* plength, uint8* complete, unsigned clen
 static void
 js_TrashTree(JSContext* cx, Fragment* f);
 
-TraceRecorder::TraceRecorder(JSContext* cx, GuardRecord* _anchor, Fragment* _fragment,
+TraceRecorder::TraceRecorder(JSContext* cx, SideExit* _anchor, Fragment* _fragment,
         TreeInfo* ti, unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-        GuardRecord* innermostNestedGuard)
+        SideExit* innermostNestedGuard)
 {
     JS_ASSERT(!_fragment->vmprivate && ti);
 
@@ -1020,7 +1020,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, GuardRecord* _anchor, Fragment* _fra
 
     /* If we are attached to a tree call guard, make sure the guard the inner tree exited from
       is what we expect it to be. */
-    if (_anchor && _anchor->exit->exitType == NESTED_EXIT) {
+    if (_anchor && _anchor->exitType == NESTED_EXIT) {
         LIns* nested_ins = addName(lir->insLoad(LIR_ldp, lirbuf->state,
                                                 offsetof(InterpState, lastTreeExitGuard)),
                                    "lastTreeExitGuard");
@@ -1761,7 +1761,7 @@ TraceRecorder::determineSlotType(jsval* vp) const
     return m;
 }
 
-SideExit*
+LIns*
 TraceRecorder::snapshot(ExitType exitType)
 {
     JSStackFrame* fp = cx->fp;
@@ -1774,17 +1774,25 @@ TraceRecorder::snapshot(ExitType exitType)
     trackNativeStackUse(stackSlots + 1);
     /* reserve space for the type map */
     unsigned ngslots = traceMonitor->globalSlots->length();
-    LIns* data = lir_buf_writer->skip((stackSlots + ngslots) * sizeof(uint8));
+    LIns* data = lir_buf_writer->skip(sizeof(GuardRecord) +
+                                      sizeof(SideExit) +
+                                      (stackSlots + ngslots) * sizeof(uint8));
+    GuardRecord* rec = (GuardRecord*)data->payload();
+    SideExit* exit = (SideExit*)(rec + 1);
+    /* setup guard record structure */
+    memset(rec, 0, sizeof(GuardRecord));
+    rec->exit = exit;
     /* setup side exit structure */
-    memset(&exit, 0, sizeof(exit));
-    exit.from = fragment;
-    exit.calldepth = callDepth;
-    exit.numGlobalSlots = ngslots;
-    exit.numStackSlots = stackSlots;
-    exit.numStackSlotsBelowCurrentFrame = cx->fp->callee
+    memset(exit, 0, sizeof(SideExit));
+    exit->guards = rec;
+    exit->from = fragment;
+    exit->calldepth = callDepth;
+    exit->numGlobalSlots = ngslots;
+    exit->numStackSlots = stackSlots;
+    exit->numStackSlotsBelowCurrentFrame = cx->fp->callee
         ? nativeStackOffset(&cx->fp->argv[-2])/sizeof(double)
         : 0;
-    exit.exitType = exitType;
+    exit->exitType = exitType;
     /* If we take a snapshot on a goto, advance to the target address. This avoids inner
        trees returning on a break goto, which the outer recorder then would confuse with
        a break in the outer tree. */
@@ -1793,23 +1801,23 @@ TraceRecorder::snapshot(ExitType exitType)
         pc += GET_JUMP_OFFSET(pc);
     else if (*pc == JSOP_GOTOX)
         pc += GET_JUMPX_OFFSET(pc);
-    exit.ip_adj = pc - (jsbytecode*)fragment->root->ip;
-    exit.sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase;
-    exit.rp_adj = exit.calldepth * sizeof(FrameInfo);
-    uint8* m = exit.typeMap = (uint8 *)data->payload();
+    exit->ip_adj = pc - (jsbytecode*)fragment->root->ip;
+    exit->sp_adj = (stackSlots * sizeof(double)) - treeInfo->nativeStackBase;
+    exit->rp_adj = exit->calldepth * sizeof(FrameInfo);
+    uint8* m = getTypeMap(exit);
     /* Determine the type of a store by looking at the current type of the actual value the
        interpreter is using. For numbers we have to check what kind of store we used last
       (integer or double) to figure out what the side exit show reflect in its typemap. */
     FORALL_SLOTS(cx, ngslots, traceMonitor->globalSlots->data(), callDepth,
         *m++ = determineSlotType(vp);
     );
-    JS_ASSERT(unsigned(m - exit.typeMap) == ngslots + stackSlots);
+    JS_ASSERT(unsigned(m - getTypeMap(exit)) == ngslots + stackSlots);
 
     /* If we are capturing the stack state on a JSOP_RESUME instruction, the value on top of
        the stack is a boxed value. */
     if (*cx->fp->regs->pc == JSOP_RESUME)
         m[-1] = JSVAL_BOXED;
-    return &exit;
+    return data;
 }
 
 /* Emit a guard for condition (cond), expecting to evaluate to boolean result (expected). */
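The snapshot() change above reserves a single LIR skip payload holding the GuardRecord, the SideExit, and the type map back to back, which is what makes the getTypeMap(exit) pointer arithmetic valid. Below is a minimal stand-alone sketch of that allocation pattern; allocExitRecord and the use of malloc are illustrative stand-ins for the LIR buffer call, not TraceMonkey APIs, and the structs are trimmed to the fields the sketch touches.

// Sketch only: mirrors the [GuardRecord][SideExit][typeMap] layout snapshot() reserves.
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct SideExit;
struct GuardRecord { SideExit* exit; /* jmp, next, peer omitted */ };
struct SideExit    { GuardRecord* guards; uint32_t numStackSlots; uint32_t numGlobalSlots; /* ... */ };

static inline uint8_t* getTypeMap(SideExit* exit) { return (uint8_t*)(exit + 1); }

void* allocExitRecord(unsigned stackSlots, unsigned ngslots) {
    // One contiguous block, as lir_buf_writer->skip(...) provides in the real code.
    size_t size = sizeof(GuardRecord) + sizeof(SideExit) +
                  (stackSlots + ngslots) * sizeof(uint8_t);
    void* payload = malloc(size);              // stand-in for the LIR skip payload
    GuardRecord* rec = (GuardRecord*)payload;
    SideExit* exit = (SideExit*)(rec + 1);
    memset(rec, 0, sizeof(GuardRecord));
    rec->exit = exit;                          // guard points at its shared exit
    memset(exit, 0, sizeof(SideExit));
    exit->guards = rec;                        // exit points back at its guard list
    exit->numStackSlots = stackSlots;
    exit->numGlobalSlots = ngslots;
    // caller fills getTypeMap(exit)[0 .. stackSlots + ngslots - 1]
    return payload;
}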
@@ -1954,12 +1962,12 @@ TraceRecorder::closeLoop(Fragmento* fragmento)
         fragment->blacklist();
         return;
     }
-    SideExit *exit = snapshot(LOOP_EXIT);
-    exit->target = fragment->root;
+    LIns* skip = snapshot(LOOP_EXIT);
     if (fragment == fragment->root) {
         fragment->lastIns = lir->insBranch(LIR_j, NULL, loop_header_ins);
     }
-    fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), exit);
+    ((GuardRecord*)skip->payload())->exit->target = fragment->root;
+    fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), skip);
     compile(fragmento);
 
     debug_only_v(printf("recording completed at %s:%u@%u via closeLoop\n", cx->fp->script->filename,
@@ -1971,8 +1979,7 @@ TraceRecorder::closeLoop(Fragmento* fragmento)
 void
 TraceRecorder::endLoop(Fragmento* fragmento)
 {
-    SideExit *exit = snapshot(LOOP_EXIT);
-    fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), exit);
+    fragment->lastIns = lir->insGuard(LIR_x, lir->insImm(1), snapshot(LOOP_EXIT));
     compile(fragmento);
 
     debug_only_v(printf("recording completed at %s:%u@%u via endLoop\n", cx->fp->script->filename,
@@ -2022,16 +2029,15 @@ TraceRecorder::prepareTreeCall(Fragment* inner)
 
 /* Record a call to an inner tree. */
 void
-TraceRecorder::emitTreeCall(Fragment* inner, GuardRecord* lr)
+TraceRecorder::emitTreeCall(Fragment* inner, SideExit* exit)
 {
     TreeInfo* ti = (TreeInfo*)inner->vmprivate;
     /* Invoke the inner tree. */
     LIns* args[] = { INS_CONSTPTR(inner), lirbuf->state }; /* reverse order */
     LIns* ret = lir->insCall(&js_CallTree_ci, args);
     /* Read back all registers, in case the called tree changed any of them. */
-    SideExit* exit = lr->exit;
     import(ti, inner_sp_ins, exit->numGlobalSlots, exit->calldepth,
-           exit->typeMap, exit->typeMap + exit->numGlobalSlots);
+           getTypeMap(exit), getTypeMap(exit) + exit->numGlobalSlots);
     /* Restore sp and rp to their original values (we still have them in a register). */
     if (callDepth > 0) {
         lir->insStorei(lirbuf->sp, lirbuf->state, offsetof(InterpState, sp));
@@ -2039,7 +2045,7 @@ TraceRecorder::emitTreeCall(Fragment* inner, GuardRecord* lr)
     }
     /* Guard that we come out of the inner tree along the same side exit we came out when
       we called the inner tree at recording time. */
-    guard(true, lir->ins2(LIR_eq, ret, INS_CONSTPTR(lr)), NESTED_EXIT);
+    guard(true, lir->ins2(LIR_eq, ret, INS_CONSTPTR(exit)), NESTED_EXIT);
     /* Register us as a dependent tree of the inner tree. */
     ((TreeInfo*)inner->vmprivate)->dependentTrees.addUnique(fragment->root);
 }
@@ -2109,9 +2115,9 @@ int
 nanojit::StackFilter::getTop(LInsp guard)
 {
     if (sp == lirbuf->sp)
-        return guard->exit()->sp_adj;
+        return guard->record()->exit->sp_adj;
     JS_ASSERT(sp == lirbuf->rp);
-    return guard->exit()->rp_adj;
+    return guard->record()->exit->rp_adj;
 }
 
 #if defined NJ_VERBOSE
@@ -2121,7 +2127,7 @@ nanojit::LirNameMap::formatGuard(LIns *i, char *out)
     uint32_t ip;
     SideExit *x;
 
-    x = (SideExit *)i->exit();
+    x = (SideExit *)i->record()->exit;
     ip = intptr_t(x->from->ip) + x->ip_adj;
     sprintf(out,
             "%s: %s %s -> %s sp%+ld rp%+ld",
@@ -2135,24 +2141,6 @@ nanojit::LirNameMap::formatGuard(LIns *i, char *out)
 }
 #endif
 
-void
-nanojit::Assembler::initGuardRecord(LIns *guard, GuardRecord *rec)
-{
-    SideExit *exit;
-
-    exit = guard->exit();
-    rec->guard = guard;
-    rec->calldepth = exit->calldepth;
-    rec->exit = exit;
-    verbose_only(rec->sid = exit->sid);
-}
-
-void
-nanojit::Assembler::asm_bailout(LIns *guard, Register state)
-{
-    /* we adjust ip/sp/rp when exiting from the tree in the recovery code */
-}
-
 void
 nanojit::Fragment::onDestroy()
 {
@@ -2177,9 +2165,9 @@ js_DeleteRecorder(JSContext* cx)
 }
 
 static bool
-js_StartRecorder(JSContext* cx, GuardRecord* anchor, Fragment* f, TreeInfo* ti,
+js_StartRecorder(JSContext* cx, SideExit* anchor, Fragment* f, TreeInfo* ti,
                  unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-                 GuardRecord* expectedInnerExit)
+                 SideExit* expectedInnerExit)
 {
     JSTraceMonitor* tm = &JS_TRACE_MONITOR(cx);
 
@@ -2424,7 +2412,7 @@ js_RecordTree(JSContext* cx, JSTraceMonitor* tm, Fragment* f)
 }
 
 static bool
-js_AttemptToExtendTree(JSContext* cx, GuardRecord* anchor, GuardRecord* exitedFrom)
+js_AttemptToExtendTree(JSContext* cx, SideExit* anchor, SideExit* exitedFrom)
 {
     Fragment* f = anchor->from->root;
     JS_ASSERT(f->vmprivate);
@@ -2438,10 +2426,9 @@ js_AttemptToExtendTree(JSContext* cx, GuardRecord* anchor, GuardRecord* exitedFr
 
     Fragment* c;
     if (!(c = anchor->target)) {
-        c = JS_TRACE_MONITOR(cx).fragmento->createBranch(anchor, anchor->exit);
-        c->spawnedFrom = anchor->guard;
+        c = JS_TRACE_MONITOR(cx).fragmento->createBranch(anchor, cx->fp->regs->pc);
+        c->spawnedFrom = anchor;
         c->parent = f;
-        anchor->exit->target = c;
+        anchor->target = c;
         c->root = f;
     }
@@ -2456,21 +2443,20 @@ js_AttemptToExtendTree(JSContext* cx, GuardRecord* anchor, GuardRecord* exitedFr
     if (exitedFrom == NULL) {
         /* If we are coming straight from a simple side exit, just use that exit's type map
           as starting point. */
-        SideExit* e = anchor->exit;
-        ngslots = e->numGlobalSlots;
-        globalTypeMap = e->typeMap;
+        ngslots = anchor->numGlobalSlots;
+        globalTypeMap = getTypeMap(anchor);
         stackTypeMap = globalTypeMap + ngslots;
     } else {
         /* If we side-exited on a loop exit and continue on a nesting guard, the nesting
           guard (anchor) has the type information for everything below the current scope,
          and the actual guard we exited from has the types for everything in the current
          scope (and whatever it inlined). We have to merge those maps here. */
-        SideExit* e1 = anchor->exit;
-        SideExit* e2 = exitedFrom->exit;
-        fullMap.add(e1->typeMap + e1->numGlobalSlots, e1->numStackSlotsBelowCurrentFrame);
-        fullMap.add(e2->typeMap + e2->numGlobalSlots, e2->numStackSlots);
+        SideExit* e1 = anchor;
+        SideExit* e2 = exitedFrom;
+        fullMap.add(getTypeMap(e1) + e1->numGlobalSlots, e1->numStackSlotsBelowCurrentFrame);
+        fullMap.add(getTypeMap(e2) + e2->numGlobalSlots, e2->numStackSlots);
         ngslots = e2->numGlobalSlots;
-        globalTypeMap = e2->typeMap;
+        globalTypeMap = getTypeMap(e2);
         stackTypeMap = fullMap.data();
     }
     return js_StartRecorder(cx, anchor, c, (TreeInfo*)f->vmprivate,
@@ -2479,9 +2465,9 @@ js_AttemptToExtendTree(JSContext* cx, GuardRecord* anchor, GuardRecord* exitedFr
     return false;
 }
 
-static GuardRecord*
+static SideExit*
 js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
-               GuardRecord** innermostNestedGuardp);
+               SideExit** innermostNestedGuardp);
 
 static void
 js_CloseLoop(JSContext* cx)
@@ -2524,15 +2510,15 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
         r->selectCallablePeerFragment(&f) && /* is there a potentially matching peer fragment? */
         r->adjustCallerTypes(f)) { /* make sure we can make our arguments fit */
         r->prepareTreeCall(f);
-        GuardRecord* innermostNestedGuard = NULL;
-        GuardRecord* lr = js_ExecuteTree(cx, &f, inlineCallCount, &innermostNestedGuard);
+        SideExit* innermostNestedGuard = NULL;
+        SideExit* lr = js_ExecuteTree(cx, &f, inlineCallCount, &innermostNestedGuard);
         if (!lr) {
             /* js_ExecuteTree might have flushed the cache and aborted us already. */
             if (JS_TRACE_MONITOR(cx).recorder)
                 js_AbortRecording(cx, "Couldn't call inner tree");
             return false;
         }
-        switch (lr->exit->exitType) {
+        switch (lr->exitType) {
         case LOOP_EXIT:
             /* If the inner tree exited on an unknown loop exit, grow the tree around it. */
             if (innermostNestedGuard) {
@@ -2547,7 +2533,7 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
             js_AbortRecording(cx, "Inner tree is trying to grow, abort outer recording");
             return js_AttemptToExtendTree(cx, lr, NULL);
         default:
-            debug_only_v(printf("exit_type=%d\n", lr->exit->exitType);)
+            debug_only_v(printf("exit_type=%d\n", lr->exitType);)
             js_AbortRecording(cx, "Inner tree not suitable for calling");
             return false;
         }
@@ -2561,9 +2547,9 @@ js_RecordLoopEdge(JSContext* cx, TraceRecorder* r, uintN& inlineCallCount)
     return false;
 }
 
-static inline GuardRecord*
+static inline SideExit*
 js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
-               GuardRecord** innermostNestedGuardp)
+               SideExit** innermostNestedGuardp)
 {
     Fragment* f = *treep;
 
@@ -2662,23 +2648,25 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
     bool onTrace = tm->onTrace;
     if (!onTrace)
         tm->onTrace = true;
-    GuardRecord* lr;
+    SideExit* lr;
 
     debug_only(fflush(NULL);)
+    GuardRecord* rec;
 #if defined(JS_NO_FASTCALL) && defined(NANOJIT_IA32)
-    SIMULATE_FASTCALL(lr, &state, NULL, u.func);
+    SIMULATE_FASTCALL(rec, &state, NULL, u.func);
 #else
-    lr = u.func(&state, NULL);
+    rec = u.func(&state, NULL);
 #endif
+    lr = rec->exit;
 
-    JS_ASSERT(lr->exit->exitType != LOOP_EXIT || !lr->calldepth);
+    JS_ASSERT(lr->exitType != LOOP_EXIT || !lr->calldepth);
 
     if (!onTrace)
         tm->onTrace = false;
 
     /* Except if we find that this is a nested bailout, the guard the call returned is the
       one we have to use to adjust pc and sp. */
-    GuardRecord* innermost = lr;
+    SideExit* innermost = lr;
 
     /* While executing a tree we do not update state.sp and state.rp even if they grow. Instead,
       guards tell us by how much sp and rp should be incremented in case of a side exit. When
@@ -2689,8 +2677,8 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
       stack (rp) is empty, we can process the final frames (which again are not directly
      visible and only the guard we exited on will tells us about). */
     FrameInfo* rp = (FrameInfo*)state.rp;
-    if (lr->exit->exitType == NESTED_EXIT) {
-        GuardRecord* nested = state.lastTreeCallGuard;
+    if (lr->exitType == NESTED_EXIT) {
+        SideExit* nested = state.lastTreeCallGuard;
         if (!nested) {
             /* If lastTreeCallGuard is not set in state, we only have a single level of
               nesting in this exit, so lr itself is the innermost and outermost nested
@@ -2710,9 +2698,9 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
         if (innermostNestedGuardp)
             *innermostNestedGuardp = nested;
         JS_ASSERT(nested);
-        JS_ASSERT(nested->exit->exitType == NESTED_EXIT);
+        JS_ASSERT(nested->exitType == NESTED_EXIT);
         JS_ASSERT(state.lastTreeExitGuard);
-        JS_ASSERT(state.lastTreeExitGuard->exit->exitType != NESTED_EXIT);
+        JS_ASSERT(state.lastTreeExitGuard->exitType != NESTED_EXIT);
     }
     while (callstack < rp) {
         /* Synthesize a stack frame and write out the values in it using the type map pointer
@@ -2757,13 +2745,12 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
     /* Adjust sp and pc relative to the tree we exited from (not the tree we entered
       into). These are our final values for sp and pc since js_SynthesizeFrame has
      already taken care of all frames in between. */
-    SideExit* e = innermost->exit;
     JSStackFrame* fp = cx->fp;
 
     /* If we are not exiting from an inlined frame the state->sp is spbase, otherwise spbase
      is whatever slots frames around us consume. */
-    fp->regs->pc = (jsbytecode*)innermost->from->root->ip + e->ip_adj;
-    fp->regs->sp = StackBase(fp) + (e->sp_adj / sizeof(double)) - calldepth_slots;
+    fp->regs->pc = (jsbytecode*)innermost->from->root->ip + innermost->ip_adj;
+    fp->regs->sp = StackBase(fp) + (innermost->sp_adj / sizeof(double)) - calldepth_slots;
     JS_ASSERT(fp->slots + fp->script->nfixed +
               js_ReconstructStackDepth(cx, fp->script, fp->regs->pc) == fp->regs->sp);
 
@@ -2773,14 +2760,14 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
     uint64 cycles = 0;
 #endif
 
-    debug_only_v(printf("leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%d, sp=%d, ip=%p, "
+    debug_only_v(printf("leaving trace at %s:%u@%u, op=%s, lr=%p, exitType=%d, sp=%d, "
                         "calldepth=%d, cycles=%llu\n",
                         fp->script->filename, js_PCToLineNumber(cx, fp->script, fp->regs->pc),
                         fp->regs->pc - fp->script->code,
                         js_CodeName[*fp->regs->pc],
                         lr,
-                        lr->exit->exitType,
-                        fp->regs->sp - StackBase(fp), lr->jmp,
+                        lr->exitType,
+                        fp->regs->sp - StackBase(fp),
                        calldepth,
                        cycles));
 
@@ -2788,10 +2775,10 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
       with we don't have any type information available in the side exit. We merge in this
      information from the entry type-map. See also comment in the constructor of TraceRecorder
      why this is always safe to do. */
-    unsigned exit_gslots = e->numGlobalSlots;
+    unsigned exit_gslots = innermost->numGlobalSlots;
     JS_ASSERT(ngslots == tm->globalTypeMap->length());
     JS_ASSERT(ngslots >= exit_gslots);
-    uint8* globalTypeMap = e->typeMap;
+    uint8* globalTypeMap = getTypeMap(innermost);
     if (exit_gslots < ngslots)
         mergeTypeMaps(&globalTypeMap, &exit_gslots, tm->globalTypeMap->data(), ngslots,
                       (uint8*)alloca(sizeof(uint8) * ngslots));
@@ -2805,10 +2792,11 @@ js_ExecuteTree(JSContext* cx, Fragment** treep, uintN& inlineCallCount,
     JS_ASSERT(*(uint64*)&global[globalFrameSize] == 0xdeadbeefdeadbeefLL);
 
     /* write back native stack frame */
-    slots = FlushNativeStackFrame(cx, e->calldepth, e->typeMap + e->numGlobalSlots, stack, NULL);
+    slots = FlushNativeStackFrame(cx, innermost->calldepth, getTypeMap(innermost) +
+                                  innermost->numGlobalSlots, stack, NULL);
     if (slots < 0)
         return NULL;
-    JS_ASSERT(unsigned(slots) == e->numStackSlots);
+    JS_ASSERT(unsigned(slots) == innermost->numStackSlots);
 
 #ifdef DEBUG
     // Verify that our state restoration worked
@@ -2863,8 +2851,8 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
 
     /* If there is a chance that js_ExecuteTree will actually succeed, invoke it (either the
       first fragment must contain some code, or at least it must have a peer fragment). */
-    GuardRecord* lr = NULL;
-    GuardRecord* innermostNestedGuard = NULL;
+    SideExit* lr = NULL;
+    SideExit* innermostNestedGuard = NULL;
     if (f->code() || f->peer)
         lr = js_ExecuteTree(cx, &f, inlineCallCount, &innermostNestedGuard);
     if (!lr) {
@@ -2878,8 +2866,7 @@ js_MonitorLoopEdge(JSContext* cx, uintN& inlineCallCount)
     /* If we exit on a branch, or on a tree call guard, try to grow the inner tree (in case
       of a branch exit), or the tree nested around the tree we exited from (in case of the
      tree call guard). */
-    SideExit* exit = lr->exit;
-    switch (exit->exitType) {
+    switch (lr->exitType) {
     case BRANCH_EXIT:
         return js_AttemptToExtendTree(cx, lr, NULL);
     case LOOP_EXIT:
@@ -4629,12 +4616,12 @@ TraceRecorder::functionCall(bool constructing)
         ABORT_TRACE("untraceable native");
 
     static JSTraceableNative knownNatives[] = {
-        { (JSFastNative)js_Array, &js_FastNewArray_ci, "pC", "", FAIL_NULL | JSTN_MORE },
-        { (JSFastNative)js_Array, &js_Array_1int_ci, "pC", "i", FAIL_NULL | JSTN_MORE },
-        { (JSFastNative)js_Array, &js_Array_2obj_ci, "pC", "oo", FAIL_NULL | JSTN_MORE },
-        { (JSFastNative)js_Array, &js_Array_3num_ci, "pC", "ddd", FAIL_NULL | JSTN_MORE },
-        { (JSFastNative)js_Object, &js_FastNewObject_ci, "fC", "", FAIL_NULL | JSTN_MORE },
-        { (JSFastNative)js_Date, &js_FastNewDate_ci, "pC", "", FAIL_NULL },
+        { (JSFastNative)js_Array, &js_FastNewArray_ci, "pC", "", FAIL_NULL | JSTN_MORE },
+        { (JSFastNative)js_Array, &js_Array_1int_ci, "pC", "i", FAIL_NULL | JSTN_MORE },
+        { (JSFastNative)js_Array, &js_Array_2obj_ci, "pC", "oo", FAIL_NULL | JSTN_MORE },
+        { (JSFastNative)js_Array, &js_Array_3num_ci, "pC", "ddd", FAIL_NULL | JSTN_MORE },
+        { (JSFastNative)js_Object, &js_FastNewObject_ci, "fC", "", FAIL_NULL | JSTN_MORE },
+        { (JSFastNative)js_Date, &js_FastNewDate_ci, "pC", "", FAIL_NULL },
     };
 
     LIns* args[5];
@@ -207,7 +207,7 @@ class TraceRecorder : public GCObject {
     char* entryTypeMap;
     unsigned callDepth;
     JSAtom** atoms;
-    nanojit::GuardRecord* anchor;
+    nanojit::SideExit* anchor;
     nanojit::Fragment* fragment;
     TreeInfo* treeInfo;
     nanojit::LirBuffer* lirbuf;
@@ -227,7 +227,6 @@ class TraceRecorder : public GCObject {
     nanojit::LIns* eor_ins;
     nanojit::LIns* rval_ins;
     nanojit::LIns* inner_sp_ins;
-    nanojit::SideExit exit;
     bool deepAborted;
     bool applyingArguments;
     bool trashTree;
@@ -337,13 +336,13 @@ class TraceRecorder : public GCObject {
 public:
     friend bool js_MonitorRecording(TraceRecorder* tr);
 
-    TraceRecorder(JSContext* cx, nanojit::GuardRecord*, nanojit::Fragment*, TreeInfo*,
+    TraceRecorder(JSContext* cx, nanojit::SideExit*, nanojit::Fragment*, TreeInfo*,
                   unsigned ngslots, uint8* globalTypeMap, uint8* stackTypeMap,
-                  nanojit::GuardRecord* expectedInnerExit);
+                  nanojit::SideExit* expectedInnerExit);
     ~TraceRecorder();
 
     uint8 determineSlotType(jsval* vp) const;
-    nanojit::SideExit* snapshot(nanojit::ExitType exitType);
+    nanojit::LIns* snapshot(nanojit::ExitType exitType);
     nanojit::Fragment* getFragment() const { return fragment; }
     bool isLoopHeader(JSContext* cx) const;
     void compile(nanojit::Fragmento* fragmento);
@@ -353,7 +352,7 @@ public:
     bool adjustCallerTypes(nanojit::Fragment* f);
     bool selectCallablePeerFragment(nanojit::Fragment** first);
     void prepareTreeCall(nanojit::Fragment* inner);
-    void emitTreeCall(nanojit::Fragment* inner, nanojit::GuardRecord* lr);
+    void emitTreeCall(nanojit::Fragment* inner, nanojit::SideExit* exit);
     unsigned getCallDepth() const;
 
     bool record_EnterFrame();
@@ -694,25 +694,26 @@ namespace nanojit
 
     void Assembler::patch(GuardRecord *lr)
     {
-        Fragment *frag = lr->target;
+        Fragment *frag = lr->exit->target;
         NanoAssert(frag->fragEntry != 0);
         NIns* was = asm_adjustBranch((NIns*)lr->jmp, frag->fragEntry);
-        if (!lr->origTarget) lr->origTarget = was;
         verbose_only(verbose_outputf("patching jump at %p to target %p (was %p)\n",
             lr->jmp, frag->fragEntry, was);)
     }
 
-    void Assembler::unpatch(GuardRecord *lr)
+    void Assembler::patch(SideExit *exit)
     {
-        NIns* was = asm_adjustBranch((NIns*)lr->jmp, (NIns*)lr->origTarget);
-        (void)was;
-        verbose_only(verbose_outputf("unpatching jump at %p to original target %p (was %p)\n",
-            lr->jmp, lr->origTarget, was);)
+        GuardRecord *rec = exit->guards;
+        AvmAssert(rec);
+        while (rec) {
+            patch(rec);
+            rec = rec->peer;
+        }
     }
 
     NIns* Assembler::asm_exit(LInsp guard)
     {
-        SideExit *exit = guard->exit();
+        SideExit *exit = guard->record()->exit;
         NIns* at = 0;
         if (!_branchStateMap->get(exit))
         {
@@ -739,7 +740,7 @@ namespace nanojit
         verbose_only(bool priorVerbose = _verbose; )
         verbose_only( _verbose = verbose_enabled() && _frago->core()->config.verbose_exits; )
         verbose_only( int32_t nativeSave = _stats.native );
-        verbose_only(verbose_outputf("--------------------------------------- end exit block SID %d", guard->exit()->sid);)
+        verbose_only(verbose_outputf("--------------------------------------- end exit block %p", guard);)
 
         RegAlloc capture = _allocator;
 
@@ -759,11 +760,9 @@ namespace nanojit
         // restore the callee-saved register (aka saved params)
         assignSavedParams();
 
-        // if/when we patch this exit to jump over to another fragment,
-        // that fragment will need its parameters set up just like ours.
-        LInsp stateins = _thisfrag->lirbuf->state;
-        Register state = findSpecificRegFor(stateins, argRegs[stateins->imm8()]);
-        asm_bailout(guard, state);
+        // restore first parameter, the only one we use
+        LInsp state = _thisfrag->lirbuf->state;
+        findSpecificRegFor(state, argRegs[state->imm8()]);
 
         intersectRegisterState(capture);
 
@@ -857,11 +856,10 @@ namespace nanojit
         verbose_only(_thisfrag->compileNbr++; )
         verbose_only(_frago->_stats.compiles++; )
         verbose_only(_frago->_stats.totalCompiles++; )
-        _latestGuard = 0;
         _inExit = false;
         gen(rdr, loopJumps);
         frag->fragEntry = _nIns;
-        frag->outbound = core->config.tree_opt? _latestGuard : 0;
+        //frag->outbound = core->config.tree_opt? _latestGuard : 0;
         //fprintf(stderr, "assemble frag %X entry %X\n", (int)frag, (int)frag->fragEntry);
 
         if (!error()) {
@@ -2127,49 +2125,6 @@ namespace nanojit
         debug_only(saved.used = 0); // marker that we are no longer in exit path
     }
 
-    /**
-     * Guard records are laid out in the exit block buffer (_nInsExit),
-     * intersperced with the code. Preceding the record are the native
-     * instructions associated with the record (i.e. the exit code).
-     *
-     * The layout is as follows:
-     *
-     * [ native code ] [ GuardRecord1 ]
-     * ...
-     * [ native code ] [ GuardRecordN ]
-     *
-     * The guard record 'code' field should be used to locate
-     * the start of the native code associated with the
-     * exit block. N.B the code may lie in a different page
-     * than the guard record
-     *
-     * The last guard record is used for the unconditional jump
-     * at the end of the trace.
-     *
-     * NOTE: It is also not guaranteed that the native code
-     * is contained on a single page.
-     */
-    GuardRecord* Assembler::placeGuardRecord(LInsp guard)
-    {
-        // we align the guards to 4Byte boundary
-        size_t size = GuardRecordSize(guard);
-        SideExit *exit = guard->exit();
-        NIns* ptr = (NIns*)alignTo(_nIns-size, 4);
-        underrunProtect( (intptr_t)_nIns-(intptr_t)ptr ); // either got us a new page or there is enough space for us
-        GuardRecord* rec = (GuardRecord*) alignTo(_nIns-size,4);
-        rec->outgoing = _latestGuard;
-        _latestGuard = rec;
-        _nIns = (NIns*)rec;
-        rec->next = 0;
-        rec->origTarget = 0;
-        rec->target = exit->target;
-        rec->from = _thisfrag;
-        initGuardRecord(guard,rec);
-        if (exit->target)
-            exit->target->addLink(rec);
-        return rec;
-    }
-
     void Assembler::setCallTable(const CallInfo* functions)
     {
         _functions = functions;
@@ -187,7 +187,7 @@ namespace nanojit
         void copyRegisters(RegAlloc* copyTo);
         void releaseRegisters();
         void patch(GuardRecord *lr);
-        void unpatch(GuardRecord *lr);
+        void patch(SideExit *exit);
         AssmError error() { return _err; }
         void setError(AssmError e) { _err = e; }
         void setCallTable(const CallInfo *functions);
@@ -211,9 +211,6 @@ namespace nanojit
         NIns* genPrologue();
         NIns* genEpilogue();
 
-        GuardRecord* placeGuardRecord(LInsp guard);
-        void initGuardRecord(LInsp guard, GuardRecord*);
-
         uint32_t arReserve(LIns* l);
         void arFree(uint32_t idx);
         void arReset();
@@ -254,7 +251,6 @@ namespace nanojit
         GC* _gc;
         DWB(Fragment*) _thisfrag;
         RegAllocMap* _branchStateMap;
-        GuardRecord* _latestGuard;
 
         const CallInfo *_functions;
 
@@ -302,7 +298,6 @@ namespace nanojit
         void asm_u2f(LInsp ins);
         Register asm_prep_fcall(Reservation *rR, LInsp ins);
         void asm_nongp_copy(Register r, Register s);
-        void asm_bailout(LInsp guard, Register state);
         void asm_call(LInsp);
         void asm_arg(ArgSize, LInsp, Register);
         Register asm_binop_rhs_reg(LInsp ins);
@@ -262,7 +262,7 @@ namespace nanojit
 
     Fragment *Fragmento::getMerge(GuardRecord *lr, const void* ip)
     {
-        Fragment *anchor = lr->from->anchor;
+        Fragment *anchor = lr->exit->from->anchor;
         for (Fragment *f = anchor->branches; f != 0; f = f->nextbranch) {
             if (f->kind == MergeTrace && f->ip == ip /*&& f->calldepth == lr->calldepth*/) {
                 // found existing shared branch on anchor
@@ -273,7 +273,7 @@ namespace nanojit
         Fragment *f = newBranch(anchor, ip);
         f->root = f;
         f->kind = MergeTrace;
-        f->calldepth = lr->calldepth;
+        f->calldepth = lr->exit->calldepth;
         verbose_only(
             int mergeid = 1;
             for (Fragment *g = anchor->branches; g != 0; g = g->nextbranch)
@@ -284,12 +284,11 @@ namespace nanojit
         return f;
     }
 
-    Fragment *Fragmento::createBranch(GuardRecord *lr, const void* ip)
+    Fragment *Fragmento::createBranch(SideExit* exit, const void* ip)
     {
-        Fragment *from = lr->from;
-        Fragment *f = newBranch(from, ip);
+        Fragment *f = newBranch(exit->from, ip);
         f->kind = BranchTrace;
-        f->calldepth = lr->calldepth;
+        f->calldepth = exit->calldepth;
         f->treeBranches = f->root->treeBranches;
         f->root->treeBranches = f;
         return f;
@@ -511,139 +510,70 @@ namespace nanojit
         onDestroy();
         NanoAssert(_pages == 0);
     }
 
-    void Fragment::addLink(GuardRecord* lnk)
-    {
-        //fprintf(stderr,"addLink %x from %X target %X\n",(int)lnk,(int)lnk->from,(int)lnk->target);
-        lnk->next = _links;
-        _links = lnk;
-    }
-
-    void Fragment::removeLink(GuardRecord* lnk)
-    {
-        GuardRecord* lr = _links;
-        GuardRecord** lrp = &_links;
-        while(lr)
-        {
-            if (lr == lnk)
-            {
-                *lrp = lr->next;
-                lnk->next = 0;
-                break;
-            }
-            lrp = &(lr->next);
-            lr = lr->next;
-        }
-    }
-
-    void Fragment::link(Assembler* assm)
-    {
-        // patch all jumps into this fragment
-        GuardRecord* lr = _links;
-        while (lr)
-        {
-            GuardRecord* next = lr->next;
-            Fragment* from = lr->target;
-            if (from && from->fragEntry) assm->patch(lr);
-            lr = next;
-        }
+    void Fragment::addLink(GuardRecord* lnk)
+    {
+        //fprintf(stderr,"addLink %x from %X target %X\n",(int)lnk,(int)lnk->from,(int)lnk->target);
+        lnk->next = _links;
+        _links = lnk;
+    }
 
-        // and then patch all jumps leading out
-        lr = outbound;
-        while(lr)
-        {
-            GuardRecord* next = lr->outgoing;
-            Fragment* targ = lr->target;
-            if (targ && targ->fragEntry) assm->patch(lr);
-            lr = next;
-        }
-    }
-
-    void Fragment::unlink(Assembler* assm)
-    {
-        // remove our guards from others' in-bound list, so they don't patch to us
-        GuardRecord* lr = outbound;
-        while (lr)
-        {
-            GuardRecord* next = lr->outgoing;
-            Fragment* targ = lr->target;
-            if (targ) targ->removeLink(lr);
-            lr = next;
-        }
-
-        // then unpatch all jumps into this fragment
-        lr = _links;
-        while (lr)
-        {
-            GuardRecord* next = lr->next;
-            Fragment* from = lr->target;
-            if (from && from->fragEntry) assm->unpatch(lr);
-            lr = next;
-        }
-    }
+    void Fragment::addLink(SideExit* exit)
+    {
+        GuardRecord* rec = exit->guards;
+        AvmAssert(rec);
+        while (rec) {
+            addLink(rec);
+            rec = rec->peer;
+        }
+    }
+
+    void Fragment::link(Assembler* assm)
+    {
+        // patch all jumps into this fragment
+        GuardRecord* lr = _links;
+        while (lr)
+        {
+            GuardRecord* next = lr->next;
+            Fragment* from = lr->exit->target;
+            if (from && from->fragEntry) assm->patch(lr);
+            lr = next;
+        }
+    }
 
 #ifdef _DEBUG
-    bool Fragment::hasOnlyTreeLinks()
-    {
-        // check that all incoming links are on the same tree
-        bool isIt = true;
-        GuardRecord *lr = _links;
-        while (lr)
-        {
-            GuardRecord *next = lr->next;
-            NanoAssert(lr->target == this); // def'n of GuardRecord
-            if (lr->from->root != root)
-            {
-                isIt = false;
-                break;
-            }
-            lr = next;
-        }
-        return isIt;
-    }
+    bool Fragment::hasOnlyTreeLinks()
+    {
+        // check that all incoming links are on the same tree
+        bool isIt = true;
+        GuardRecord *lr = _links;
+        while (lr)
+        {
+            GuardRecord *next = lr->next;
+            NanoAssert(lr->exit->target == this); // def'n of GuardRecord
+            if (lr->exit->from->root != root)
+            {
+                isIt = false;
+                break;
+            }
+            lr = next;
+        }
+        return isIt;
+    }
 #endif
 
-    void Fragment::removeIntraLinks()
-    {
-        // should only be called on root of tree
-        NanoAssert(isRoot());
-        GuardRecord *lr = _links;
-        while (lr)
-        {
-            GuardRecord *next = lr->next;
-            NanoAssert(lr->target == this); // def'n of GuardRecord
-            if (lr->from->root == root)
-                removeLink(lr);
-            lr = next;
-        }
-    }
-
-    void Fragment::unlinkBranches(Assembler* /*assm*/)
-    {
-        // should only be called on root of tree
-        NanoAssert(isRoot());
-        Fragment* frag = treeBranches;
-        while(frag)
-        {
-            NanoAssert(frag->kind == BranchTrace && frag->hasOnlyTreeLinks());
-            frag->_links = 0;
-            frag->fragEntry = 0;
-            frag = frag->treeBranches;
-        }
-    }
+    void Fragment::linkBranches(Assembler* assm)
+    {
+        // should only be called on root of tree
+        NanoAssert(isRoot());
+        Fragment* frag = treeBranches;
+        while(frag)
+        {
+            if (frag->fragEntry) frag->link(assm);
+            frag = frag->treeBranches;
+        }
+    }
 
-    void Fragment::linkBranches(Assembler* assm)
-    {
-        // should only be called on root of tree
-        NanoAssert(isRoot());
-        Fragment* frag = treeBranches;
-        while(frag)
-        {
-            if (frag->fragEntry) frag->link(assm);
-            frag = frag->treeBranches;
-        }
-    }
 
     void Fragment::blacklist()
     {
         blacklistLevel++;
@@ -104,7 +104,7 @@ namespace nanojit
         Fragment* getAnchor(const void* ip);
         void clearFrags(); // clear all fragments from the cache
         Fragment* getMerge(GuardRecord *lr, const void* ip);
-        Fragment* createBranch(GuardRecord *lr, const void* ip);
+        Fragment* createBranch(SideExit *exit, const void* ip);
         Fragment* newFrag(const void* ip);
         Fragment* newBranch(Fragment *from, const void* ip);
 
@@ -180,13 +180,10 @@ namespace nanojit
         bool isBlacklisted() { return _hits < 0; }
         void resetLinks();
         void addLink(GuardRecord* lnk);
-        void removeLink(GuardRecord* lnk);
+        void addLink(SideExit* exit);
         void link(Assembler* assm);
         void linkBranches(Assembler* assm);
-        void unlink(Assembler* assm);
-        void unlinkBranches(Assembler* assm);
         debug_only( bool hasOnlyTreeLinks(); )
-        void removeIntraLinks();
         void releaseLirBuffer();
         void releaseCode(Fragmento* frago);
         void releaseTreeMem(Fragmento* frago);
@@ -217,8 +214,7 @@ namespace nanojit
         DWB(BlockHist*) mergeCounts;
         DWB(LirBuffer*) lirbuf;
         LIns* lastIns;
-        LIns* spawnedFrom;
-        GuardRecord* outbound;
+        SideExit* spawnedFrom;
 
         TraceKind kind;
         const void* ip;
@@ -362,10 +362,8 @@ namespace nanojit
         return ins2(op,base,d);
     }
 
-    LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, SideExit *x)
+    LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, LInsp data)
     {
-        LInsp data = skip(SideExitSize(x));
-        *((SideExit*)data->payload()) = *x;
         return ins2(op, c, data);
     }
 
@@ -935,7 +933,7 @@ namespace nanojit
         return out->ins2(v, oprnd1, oprnd2);
     }
 
-    LIns* ExprFilter::insGuard(LOpcode v, LInsp c, SideExit *x)
+    LIns* ExprFilter::insGuard(LOpcode v, LInsp c, LInsp x)
     {
         if (v == LIR_xt || v == LIR_xf) {
             if (c->isconst()) {
@@ -1468,10 +1466,10 @@ namespace nanojit
         return k;
     }
 
-    SideExit *LIns::exit()
+    GuardRecord *LIns::record()
    {
         NanoAssert(isGuard());
-        return (SideExit*)oprnd2()->payload();
+        return (GuardRecord*)oprnd2()->payload();
     }
 
 #ifdef NJ_VERBOSE
@@ -1498,7 +1496,7 @@ namespace nanojit
         }
         void add(LInsp i, LInsp use) {
             if (!i->isconst() && !i->isconstq() && !live.containsKey(i)) {
-                NanoAssert(i->opcode() < sizeof(lirNames) / sizeof(lirNames[0]));
+                NanoAssert(unsigned(i->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
                 live.put(i,use);
             }
         }
@@ -1550,7 +1548,7 @@ namespace nanojit
             if (live.contains(i))
             {
                 live.retire(i,gc);
-                NanoAssert(i->opcode() < sizeof(operandCount) / sizeof(operandCount[0]));
+                NanoAssert(unsigned(i->opcode()) < sizeof(operandCount) / sizeof(operandCount[0]));
                 if (i->isStore()) {
                     live.add(i->oprnd2(),i); // base
                     live.add(i->oprnd1(),i); // val
@@ -1690,7 +1688,7 @@ namespace nanojit
             }
 #endif
         } else {
-            NanoAssert(ref->opcode() < sizeof(lirNames) / sizeof(lirNames[0]));
+            NanoAssert(unsigned(ref->opcode()) < sizeof(lirNames) / sizeof(lirNames[0]));
             copyName(ref, lirNames[ref->opcode()], lircounts.add(ref->opcode()));
         }
         StringNullTerminatedUTF8 cname(gc, names.get(ref)->name);
@@ -1955,7 +1953,7 @@ namespace nanojit
         return out->insLoad(v,base,disp);
     }
 
-    LInsp CseFilter::insGuard(LOpcode v, LInsp c, SideExit *x)
+    LInsp CseFilter::insGuard(LOpcode v, LInsp c, LInsp x)
     {
         if (isCse(v)) {
             // conditional guard
@@ -2027,9 +2025,6 @@ namespace nanojit
     {
         // recompile the entire tree
         root = triggerFrag->root;
-        root->removeIntraLinks();
-        root->unlink(assm); // unlink all incoming jumps ; since the compile() can fail
-        root->unlinkBranches(assm); // no one jumps into a branch (except from within the tree) so safe to clear the links table
         root->fragEntry = 0;
         root->releaseCode(frago);
 
@@ -2050,7 +2045,7 @@ namespace nanojit
             RegAlloc* regs = new (gc) RegAlloc();
             assm->copyRegisters(regs);
             assm->releaseRegisters();
-            SideExit* exit = frag->spawnedFrom->exit();
+            SideExit* exit = frag->spawnedFrom;
             regMap.put(exit, regs);
         }
         frag = frag->treeBranches;
@@ -2069,14 +2064,11 @@ namespace nanojit
         verbose_only( assm->_outputCache = 0; )
         verbose_only(for(int i=asmOutput.size()-1; i>=0; --i) { assm->outputf("%s",asmOutput.get(i)); } );
 
-        if (assm->error())
-        {
+        if (assm->error()) {
             root->fragEntry = 0;
-        }
-        else
-        {
-            root->link(assm);
-            if (treeCompile) root->linkBranches(assm);
+        } else {
+            root->link(assm);
+            if (treeCompile) root->linkBranches(assm);
         }
     }
 
@@ -202,6 +202,7 @@ namespace nanojit
         return (argc+3)>>2;
     }
 
+    struct GuardRecord;
     struct SideExit;
     struct Page;
 
@@ -506,7 +507,7 @@ namespace nanojit
         LIns **targetAddr();
         LIns* getTarget();
 
-        SideExit *exit();
+        GuardRecord *record();
 
         inline uint32_t argc() const {
             NanoAssert(isCall());
@@ -554,7 +555,7 @@ namespace nanojit
         virtual LInsp ins2(LOpcode v, LIns* a, LIns* b) {
             return out->ins2(v, a, b);
         }
-        virtual LInsp insGuard(LOpcode v, LIns *c, SideExit *x) {
+        virtual LInsp insGuard(LOpcode v, LIns *c, LIns *x) {
             return out->insGuard(v, c, x);
         }
         virtual LInsp insBranch(LOpcode v, LInsp condition, LInsp to) {
@@ -721,7 +722,7 @@ namespace nanojit
             }
         }
 
-        LIns* insGuard(LOpcode op, LInsp cond, SideExit *x) {
+        LIns* insGuard(LOpcode op, LInsp cond, LIns *x) {
             return add_flush(out->insGuard(op,cond,x));
         }
 
@@ -771,7 +772,7 @@ namespace nanojit
         ExprFilter(LirWriter *out) : LirWriter(out) {}
         LIns* ins1(LOpcode v, LIns* a);
         LIns* ins2(LOpcode v, LIns* a, LIns* b);
-        LIns* insGuard(LOpcode, LIns *cond, SideExit *);
+        LIns* insGuard(LOpcode, LIns *cond, LIns *);
         LIns* insBranch(LOpcode, LIns *cond, LIns *target);
     };
 
@@ -822,7 +823,7 @@ namespace nanojit
         LIns* ins2(LOpcode v, LInsp, LInsp);
         LIns* insLoad(LOpcode v, LInsp b, LInsp d);
         LIns* insCall(const CallInfo *call, LInsp args[]);
-        LIns* insGuard(LOpcode op, LInsp cond, SideExit *x);
+        LIns* insGuard(LOpcode op, LInsp cond, LIns *x);
     };
 
     class LirBuffer : public GCFinalizedObject
@@ -888,7 +889,7 @@ namespace nanojit
         LInsp insImm(int32_t imm);
         LInsp insImmq(uint64_t imm);
         LInsp insCall(const CallInfo *call, LInsp args[]);
-        LInsp insGuard(LOpcode op, LInsp cond, SideExit *x);
+        LInsp insGuard(LOpcode op, LInsp cond, LIns *x);
         LInsp insBranch(LOpcode v, LInsp condition, LInsp to);
         LInsp insAlloc(int32_t size);
 
@@ -160,7 +160,7 @@ namespace nanojit
 
     void Assembler::nFragExit(LInsp guard)
     {
-        SideExit *exit = guard->exit();
+        SideExit *exit = guard->record()->exit;
         bool trees = _frago->core()->config.tree_opt;
         Fragment *frag = exit->target;
         GuardRecord *lr = 0;
@@ -174,7 +174,7 @@ namespace nanojit
         else
         {
             // target doesn't exit yet. emit jump to epilog, and set up to patch later.
-            lr = placeGuardRecord(guard);
+            lr = guard->record();
 #if defined NANOJIT_AMD64
             /* 8 bytes for address, 4 for imm32, 2 for jmp */
             underrunProtect(14);
@@ -185,12 +185,6 @@ namespace nanojit
 #else
             JMP_long(_epilogue);
             lr->jmp = _nIns;
 #endif
-#if 0
-            // @todo optimization ; is it worth it? It means we can remove the loop over outbound in Fragment.link()
-            // for trees we need the patch entry on the incoming fragment so we can unhook it later if needed
-            if (tress && destKnown)
-                patch(lr);
-#endif
         }
         // first restore ESP from EBP, undoing SUBi(SP,amt) from genPrologue
@@ -175,45 +175,34 @@ namespace nanojit
         OVERFLOW_EXIT
     };
 
-    struct SideExit
-    {
+    class LIns;
+
+    struct SideExit;
+
+    typedef struct GuardRecord
+    {
+        void *jmp;
+        GuardRecord* next;
+        GuardRecord* peer;
+        SideExit* exit;
+    };
+
+    typedef struct SideExit
+    {
+        GuardRecord* guards;
+        Fragment *from;
+        Fragment *target;
         intptr_t ip_adj;
         intptr_t sp_adj;
         intptr_t rp_adj;
-        Fragment *target;
-        Fragment *from;
         int32_t calldepth;
         uint32 numGlobalSlots;
         uint32 numStackSlots;
         uint32 numStackSlotsBelowCurrentFrame;
-        uint8 *typeMap;
         ExitType exitType;
 #if defined NJ_VERBOSE
         uint32_t sid;
 #endif
     };
 
-    class LIns;
-
-    struct GuardRecord
-    {
-        Fragment *target;
-        Fragment *from;
-        void *jmp;
-        void *origTarget;
-        SideExit *exit;
-        GuardRecord *outgoing;
-        GuardRecord *next;
-        LIns *guard;
-        int32_t calldepth;
-#if defined NJ_VERBOSE
-        uint32_t compileNbr;
-        uint32_t sid;
-#endif
-    };
-
-    #define GuardRecordSize(g) sizeof(GuardRecord)
-    #define SideExitSize(e) sizeof(SideExit)
+    static inline uint8* getTypeMap(SideExit* exit) { return (uint8*)(exit + 1); }
 }
 
 class GC;
@@ -343,9 +332,9 @@ namespace avmplus
         JSContext *cx; /* current VM context handle */
         void* eos; /* first unusable word after the native stack */
         void* eor; /* first unusable word after the call stack */
-        nanojit::GuardRecord* lastTreeExitGuard; /* guard we exited on during a tree call */
-        nanojit::GuardRecord* lastTreeCallGuard; /* guard we want to grow from if the tree
-                                                    call exit guard mismatched */
+        nanojit::SideExit* lastTreeExitGuard; /* guard we exited on during a tree call */
+        nanojit::SideExit* lastTreeCallGuard; /* guard we want to grow from if the tree
+                                                 call exit guard mismatched */
         void* rpAtLastTreeCall; /* value of rp at innermost tree call guard */
     };
 