Bug 495734 - NJ: don't store non-LIR data in LIR buffers, r=gal.

--HG--
extra : rebase_source : 4a73fd251f3077fe3623ef1341a8aa3729a4e8bf
Graydon Hoare 2009-09-22 16:06:52 -07:00
parent 33a52c64ac
commit 2a4fada6f5
7 changed files with 64 additions and 69 deletions
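
Every hunk below applies one pattern: guard records, side exits, typemaps, FrameInfo and CallInfo blocks used to be stored inline in the LIR instruction stream via insSkip(), whose payload also imposed the MAX_SKIP_PAYLOAD_SZB cap still checked at several call sites; they now come from the trace monitor's separate dataAlloc arena, and only a pointer to them appears in LIR. A minimal sketch of the new shape, with a malloc-backed Arena standing in for nanojit::Allocator (the real class hands out space from chained arena chunks):

    #include <cstddef>
    #include <cstdlib>
    #include <cstring>

    // Stand-in for nanojit::Allocator: same alloc() shape, malloc-backed here.
    struct Arena {
        void* alloc(std::size_t nbytes) { return std::malloc(nbytes); }
    };

    struct GuardRecord { void* exit; };   // reduced stand-in for the real struct

    // Non-LIR metadata now lives in the side arena, so the LIR buffer holds
    // instructions only and the two lifetimes are no longer tied together.
    GuardRecord* createGuardRecord(Arena& dataAlloc) {
        GuardRecord* gr = (GuardRecord*) dataAlloc.alloc(sizeof(GuardRecord));
        std::memset(gr, 0, sizeof(*gr));
        return gr;
    }

    int main() {
        Arena dataAlloc;
        return createGuardRecord(dataAlloc) ? 0 : 1;
    }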


@@ -2536,16 +2536,16 @@ class RegExpNativeCompiler {
      */
     RECharSet *charSet = &re->classList[node->u.ucclass.index];
     size_t bitmapLen = (charSet->length >> 3) + 1;
-    /* insSkip() can't hold large data blocks. */
+    /* Arbitrary size limit on bitmap. */
     if (bitmapLen > 1024)
         return NULL;
+    Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;
     /* The following line allocates charSet.u.bits if successful. */
     if (!charSet->converted && !ProcessCharSet(cx, re, charSet))
         return NULL;
-    LIns* skip = lirBufWriter->insSkip(bitmapLen);
+    void* bitmapData = alloc.alloc(bitmapLen);
     if (outOfMemory())
         return NULL;
-    void* bitmapData = skip->payload();
     memcpy(bitmapData, charSet->u.bits, bitmapLen);
     LIns* to_fail = lir->insBranch(LIR_jf, lir->ins2(LIR_plt, pos, cpend), 0);
@@ -3081,15 +3081,17 @@ class RegExpNativeCompiler {
         lir->ins1(LIR_live, lirbuf->param1);
     }

-    LIns* skip = lirBufWriter->insSkip(sizeof(GuardRecord) +
-                                       sizeof(SideExit) +
-                                       (re_length-1) * sizeof(jschar));
-    GuardRecord* guard = (GuardRecord *) skip->payload();
+    Allocator &alloc = *JS_TRACE_MONITOR(cx).dataAlloc;
+    GuardRecord* guard = (GuardRecord *)
+        alloc.alloc(sizeof(GuardRecord) +
+                    sizeof(SideExit) +
+                    (re_length-1) * sizeof(jschar));
     memset(guard, 0, sizeof(*guard));
     SideExit* exit = (SideExit*)(guard+1);
     guard->exit = exit;
     guard->exit->target = fragment;
-    fragment->lastIns = lir->insGuard(LIR_x, NULL, skip);
+    fragment->lastIns = lir->insGuard(LIR_x, NULL, guard);
     // guard->profCount is memset'd to zero
     verbose_only(
         guard->profGuardID = fragment->guardNumberer++;


@@ -3924,8 +3924,9 @@ TraceRecorder::snapshot(ExitType exitType)
     }

     /* We couldn't find a matching side exit, so create a new one. */
-    LIns* data = lir->insSkip(sizeof(VMSideExit) + (stackSlots + ngslots) * sizeof(JSTraceType));
-    VMSideExit* exit = (VMSideExit*) data->payload();
+    VMSideExit* exit = (VMSideExit*)
+        traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
+                                       (stackSlots + ngslots) * sizeof(JSTraceType));

     /* Setup side exit structure. */
     memset(exit, 0, sizeof(VMSideExit));
@@ -3956,11 +3957,10 @@ TraceRecorder::snapshot(ExitType exitType)
     return exit;
 }

-JS_REQUIRES_STACK LIns*
+JS_REQUIRES_STACK GuardRecord*
 TraceRecorder::createGuardRecord(VMSideExit* exit)
 {
-    LIns* guardRec = lir->insSkip(sizeof(GuardRecord));
-    GuardRecord* gr = (GuardRecord*) guardRec->payload();
+    GuardRecord* gr = new (*traceMonitor->dataAlloc) GuardRecord();

     memset(gr, 0, sizeof(GuardRecord));
     gr->exit = exit;
@@ -3973,7 +3973,7 @@ TraceRecorder::createGuardRecord(VMSideExit* exit)
         fragment->guardsForFrag = gr;
     )

-    return guardRec;
+    return gr;
 }

 /*
@@ -3988,7 +3988,7 @@ TraceRecorder::guard(bool expected, LIns* cond, VMSideExit* exit)
                       "SideExit=%p exitType=%s\n",
                       (void*)exit, getExitName(exit->exitType));

-    LIns* guardRec = createGuardRecord(exit);
+    GuardRecord* guardRec = createGuardRecord(exit);

     /*
      * BIG FAT WARNING: If compilation fails we don't reset the lirbuf, so it's
@@ -4016,8 +4016,9 @@ JS_REQUIRES_STACK VMSideExit*
 TraceRecorder::copy(VMSideExit* copy)
 {
     size_t typemap_size = copy->numGlobalSlots + copy->numStackSlots;
-    LIns* data = lir->insSkip(sizeof(VMSideExit) + typemap_size * sizeof(JSTraceType));
-    VMSideExit* exit = (VMSideExit*) data->payload();
+    VMSideExit* exit = (VMSideExit*)
+        traceMonitor->dataAlloc->alloc(sizeof(VMSideExit) +
+                                       typemap_size * sizeof(JSTraceType));

     /* Copy side exit structure. */
     memcpy(exit, copy, sizeof(VMSideExit) + typemap_size * sizeof(JSTraceType));
@@ -4783,7 +4784,7 @@ TraceRecorder::prepareTreeCall(VMFragment* inner)
      *
      * (The ExitType of this snapshot is nugatory. The exit can't be taken.)
      */
-    LIns* guardRec = createGuardRecord(exit);
+    GuardRecord* guardRec = createGuardRecord(exit);
     lir->insGuard(LIR_xbarrier, NULL, guardRec);
 }
@@ -7643,8 +7644,7 @@ TraceRecorder::callProp(JSObject* obj, JSProperty* prop, jsid id, jsval*& vp,
         LIns* parent_ins = stobj_get_parent(get(&cx->fp->argv[-2]));
         CHECK_STATUS(traverseScopeChain(parent, parent_ins, obj, obj_ins));

-        LIns* cv_ins = lir_buf_writer->insSkip(sizeof(ClosureVarInfo));
-        ClosureVarInfo* cv = (ClosureVarInfo*) cv_ins->payload();
+        ClosureVarInfo* cv = new (traceMonitor->dataAlloc) ClosureVarInfo();
         cv->id = id;
         cv->slot = slot;
         cv->callDepth = callDepth;
@@ -8020,8 +8020,7 @@ TraceRecorder::tableswitch()
         return switchop();

     /* Generate switch LIR. */
-    LIns* si_ins = lir_buf_writer->insSkip(sizeof(SwitchInfo));
-    SwitchInfo* si = (SwitchInfo*) si_ins->payload();
+    SwitchInfo* si = new (*traceMonitor->dataAlloc) SwitchInfo();
     si->count = high + 1 - low;
     si->table = 0;
     si->index = (uint32) -1;
@@ -9851,7 +9850,7 @@ TraceRecorder::emitNativePropertyOp(JSScope* scope, JSScopeProperty* sprop, LIns
     if (setflag)
         lir->insStorei(boxed_ins, vp_ins, 0);

-    CallInfo* ci = (CallInfo*) lir->insSkip(sizeof(struct CallInfo))->payload();
+    CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
     ci->_address = uintptr_t(setflag ? sprop->setter : sprop->getter);
     ci->_argtypes = ARGSIZE_I << (0*ARGSIZE_SHIFT) |
                     ARGSIZE_P << (1*ARGSIZE_SHIFT) |
@@ -10234,7 +10233,7 @@ TraceRecorder::callNative(uintN argc, JSOp mode)
     // Do not use JSTN_UNBOX_AFTER for mode JSOP_NEW because
     // record_NativeCallComplete unboxes the result specially.

-    CallInfo* ci = (CallInfo*) lir->insSkip(sizeof(struct CallInfo))->payload();
+    CallInfo* ci = new (*traceMonitor->dataAlloc) CallInfo();
     ci->_address = uintptr_t(fun->u.n.native);
     ci->_cse = ci->_fold = 0;
     ci->_abi = ABI_CDECL;
@@ -10717,7 +10716,7 @@ TraceRecorder::enterDeepBailCall()
     lir->insStorei(INS_CONSTPTR(exit), cx_ins, offsetof(JSContext, bailExit));

     // Tell nanojit not to discard or defer stack writes before this call.
-    LIns* guardRec = createGuardRecord(exit);
+    GuardRecord* guardRec = createGuardRecord(exit);
     lir->insGuard(LIR_xbarrier, NULL, guardRec);
     return exit;
 }
@@ -11018,7 +11017,7 @@ TraceRecorder::record_JSOP_GETELEM()
         unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
         if (stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
             ABORT_TRACE("|arguments| requires saving too much stack");
-        JSTraceType* typemap = (JSTraceType*) lir->insSkip(stackSlots * sizeof(JSTraceType))->payload();
+        JSTraceType* typemap = new (*traceMonitor->dataAlloc) JSTraceType[stackSlots];
        DetermineTypesVisitor detVisitor(*this, typemap);
        VisitStackSlots(detVisitor, cx, 0);
        typemap_ins = INS_CONSTPTR(typemap + 2 /* callee, this */);
@@ -11501,8 +11500,9 @@ TraceRecorder::interpretedFunctionCall(jsval& fval, JSFunction* fun, uintN argc,
     unsigned stackSlots = NativeStackSlots(cx, 0 /* callDepth */);
     if (sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
         ABORT_TRACE("interpreted function call requires saving too much stack");
-    LIns* data = lir->insSkip(sizeof(FrameInfo) + stackSlots * sizeof(JSTraceType));
-    FrameInfo* fi = (FrameInfo*)data->payload();
+    FrameInfo* fi = (FrameInfo*)
+        traceMonitor->dataAlloc->alloc(sizeof(FrameInfo) +
+                                       stackSlots * sizeof(JSTraceType));
     JSTraceType* typemap = reinterpret_cast<JSTraceType *>(fi + 1);
     DetermineTypesVisitor detVisitor(*this, typemap);
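
The snapshot, copy, and interpretedFunctionCall hunks above all use a single oversized allocation: a fixed header (VMSideExit or FrameInfo) and its variable-length typemap share one block, and the typemap is addressed as the memory just past the struct, e.g. reinterpret_cast<JSTraceType *>(fi + 1). A sketch of that layout, with simplified stand-ins for the SpiderMonkey types:

    #include <cstdlib>

    typedef unsigned char JSTraceTypeStub;  // stand-in: one byte per tracked slot

    struct VMSideExitStub {                 // reduced stand-in for VMSideExit
        unsigned numStackSlots;
        unsigned numGlobalSlots;
        // typemap entries follow immediately after the struct
    };

    VMSideExitStub* newSideExit(unsigned stackSlots, unsigned ngslots) {
        // One allocation sized for header plus typemap keeps them contiguous,
        // so the typemap needs no pointer of its own: it is always (exit + 1).
        VMSideExitStub* exit = (VMSideExitStub*)
            std::malloc(sizeof(VMSideExitStub) +
                        (stackSlots + ngslots) * sizeof(JSTraceTypeStub));
        exit->numStackSlots = stackSlots;
        exit->numGlobalSlots = ngslots;
        return exit;
    }

    int main() {
        VMSideExitStub* exit = newSideExit(4, 2);
        JSTraceTypeStub* typemap = (JSTraceTypeStub*)(exit + 1);  // trailing array
        for (unsigned i = 0; i < 6; ++i)
            typemap[i] = 0;
        std::free(exit);
        return 0;
    }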


@@ -1026,7 +1026,7 @@ public:
      * The instruction is suitable for use as the final argument of a single
      * call to LirBuffer::insGuard; do not reuse the returned value.
      */
-    JS_REQUIRES_STACK nanojit::LIns* createGuardRecord(VMSideExit* exit);
+    JS_REQUIRES_STACK nanojit::GuardRecord* createGuardRecord(VMSideExit* exit);

     nanojit::Fragment* getFragment() const { return fragment; }
     TreeInfo* getTreeInfo() const { return treeInfo; }


@@ -291,7 +291,7 @@ private:
     FragmentAssembler(const FragmentAssembler &);
     FragmentAssembler & operator=(const FragmentAssembler &);
     LasmSideExit *createSideExit();
-    LIns *createGuardRecord(LasmSideExit *exit);
+    GuardRecord *createGuardRecord(LasmSideExit *exit);

     Lirasm &mParent;
     const string mFragName;
@@ -315,7 +315,6 @@ private:
     void tokenizeLine(LirTokenStream &in, LirToken &token);
     void need(size_t);
     LIns *ref(const string &);
-    LIns *do_skip(size_t);
     LIns *assemble_call(const string &);
     LIns *assemble_general();
     LIns *assemble_guard();
@@ -521,14 +520,6 @@ FragmentAssembler::ref(const string &lab)
     return mLabels.find(lab)->second;
 }

-LIns *
-FragmentAssembler::do_skip(size_t i)
-{
-    LIns *s = mLir->insSkip(i);
-    memset(s->payload(), 0xba, i);
-    return s;
-}
-
 LIns *
 FragmentAssembler::assemble_jump()
 {
@@ -650,8 +641,7 @@ FragmentAssembler::assemble_call(const string &op)
 LasmSideExit*
 FragmentAssembler::createSideExit()
 {
-    LIns *exitIns = do_skip(sizeof(LasmSideExit));
-    LasmSideExit* exit = (LasmSideExit*) exitIns->payload();
+    LasmSideExit* exit = new (mParent.mAlloc) LasmSideExit();
     memset(exit, 0, sizeof(LasmSideExit));
     exit->from = mFragment;
     exit->target = NULL;
@@ -659,22 +649,21 @@ FragmentAssembler::createSideExit()
     return exit;
 }

-LIns*
+GuardRecord*
 FragmentAssembler::createGuardRecord(LasmSideExit *exit)
 {
-    LIns *guardRec = do_skip(sizeof(GuardRecord));
-    GuardRecord *rec = (GuardRecord*) guardRec->payload();
+    GuardRecord *rec = new (mParent.mAlloc) GuardRecord();
     memset(rec, 0, sizeof(GuardRecord));
     rec->exit = exit;
     exit->addGuard(rec);
-    return guardRec;
+    return rec;
 }

 LIns *
 FragmentAssembler::assemble_guard()
 {
-    LIns* guard = createGuardRecord(createSideExit());
+    GuardRecord* guard = createGuardRecord(createSideExit());

     need(mOpcount);
@@ -911,13 +900,7 @@ FragmentAssembler::assembleFragment(LirTokenStream &in, bool implicitBegin, cons
         break;

       case LIR_skip:
-        need(1);
-        {
-            int32_t count = imm(mTokens[0]);
-            if (uint32_t(count) > LirBuffer::MAX_SKIP_PAYLOAD_SZB)
-                bad("oversize skip");
-            ins = do_skip(count);
-        }
+        bad("skip instruction is deprecated");
         break;

       case LIR_xt:


@@ -99,9 +99,19 @@ inline void* operator new(size_t size, nanojit::Allocator &a) {
     return a.alloc(size);
 }

+/** global new overload enabling this pattern: new (allocator) T(...) */
+inline void* operator new(size_t size, nanojit::Allocator *a) {
+    return a->alloc(size);
+}
+
+/** global new[] overload enabling this pattern: new (allocator) T[] */
+inline void* operator new[](size_t size, nanojit::Allocator& a) {
+    return a.alloc(size);
+}
+
+/** global new[] overload enabling this pattern: new (allocator) T[] */
+inline void* operator new[](size_t size, nanojit::Allocator* a) {
+    return a->alloc(size);
+}
+
 #endif // __nanojit_Allocator__
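
The pointer and array overloads added here are what let the jstracer.cpp hunks write new (*traceMonitor->dataAlloc) SwitchInfo(), new (traceMonitor->dataAlloc) ClosureVarInfo(), and new (*traceMonitor->dataAlloc) JSTraceType[stackSlots]. A usage sketch, with a hypothetical MiniAlloc in place of nanojit::Allocator; note that no matching operator delete is defined, which suits an arena that releases everything at once and only holds trivially destructible data:

    #include <cstddef>
    #include <cstdlib>

    struct MiniAlloc {                    // hypothetical arena stand-in
        void* alloc(std::size_t nbytes) { return std::malloc(nbytes); }
    };

    // Same shape as the overloads added above, retargeted at MiniAlloc.
    inline void* operator new(std::size_t size, MiniAlloc& a)   { return a.alloc(size); }
    inline void* operator new(std::size_t size, MiniAlloc* a)   { return a->alloc(size); }
    inline void* operator new[](std::size_t size, MiniAlloc& a) { return a.alloc(size); }

    struct SwitchInfoStub { unsigned count; };

    int main() {
        MiniAlloc alloc;
        SwitchInfoStub* si  = new (alloc)  SwitchInfoStub();   // reference form
        SwitchInfoStub* si2 = new (&alloc) SwitchInfoStub();   // pointer form
        SwitchInfoStub* arr = new (alloc)  SwitchInfoStub[4];  // array form
        // No operator delete counterpart: the arena owns the memory and the
        // stored types never need their destructors run.
        (void) si; (void) si2; (void) arr;
        return 0;
    }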


@@ -246,10 +246,10 @@ namespace nanojit
         return ins;
     }

-    LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, LInsp data)
+    LInsp LirBufWriter::insGuard(LOpcode op, LInsp c, GuardRecord *gr)
     {
         debug_only( if (LIR_x == op || LIR_xbarrier == op) NanoAssert(!c); )
-        return ins2(op, c, data);
+        return ins2(op, c, (LIns*)gr);
     }

     LInsp LirBufWriter::insBranch(LOpcode op, LInsp condition, LInsp toLabel)
@@ -778,7 +778,7 @@ namespace nanojit
         return out->ins3(v, oprnd1, oprnd2, oprnd3);
     }

-    LIns* ExprFilter::insGuard(LOpcode v, LInsp c, LInsp x)
+    LIns* ExprFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
     {
         if (v == LIR_xt || v == LIR_xf) {
             if (c->isconst()) {
@@ -794,7 +794,7 @@ namespace nanojit
                 // so assert in debug builds.
                 NanoAssertMsg(0, "Constantly false guard detected");
 #endif
-                return out->insGuard(LIR_x, NULL, x);
+                return out->insGuard(LIR_x, NULL, gr);
             }
         }
         else {
@@ -807,7 +807,7 @@ namespace nanojit
                 }
             }
         }

-        return out->insGuard(v, c, x);
+        return out->insGuard(v, c, gr);
     }

     LIns* ExprFilter::insBranch(LOpcode v, LIns *c, LIns *t)
@@ -1878,7 +1878,7 @@ namespace nanojit
         return out->insLoad(v,base,disp);
     }

-    LInsp CseFilter::insGuard(LOpcode v, LInsp c, LInsp x)
+    LInsp CseFilter::insGuard(LOpcode v, LInsp c, GuardRecord *gr)
     {
         // LIR_xt and LIR_xf guards are CSEable.  Note that we compare the
         // opcode and condition when determining if two guards are equivalent
@@ -1904,9 +1904,9 @@ namespace nanojit
             LInsp found = exprs.find1(v, c, k);
             if (found)
                 return 0;
-            return exprs.add(out->insGuard(v,c,x), k);
+            return exprs.add(out->insGuard(v,c,gr), k);
         }
-        return out->insGuard(v, c, x);
+        return out->insGuard(v, c, gr);
     }

     LInsp CseFilter::insCall(const CallInfo *ci, LInsp args[])
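
With insSkip gone from the guard path, the GuardRecord* threads through every insGuard() override but still rides inside the instruction itself: LirBufWriter::insGuard() stows it as the guard's second operand via the (LIns*)gr cast above, and LIns::record() in LIR.h casts it back. A round-trip sketch with pared-down stand-ins for both types:

    #include <cassert>

    struct GuardRecordStub { int exitId; };

    struct InsStub {
        InsStub* op1;
        InsStub* op2;                     // for guards: actually a GuardRecordStub*
        InsStub* oprnd2() const { return op2; }
        GuardRecordStub* record() const {
            // Mirrors the patched LIns::record(): the second operand is the
            // guard record itself, no longer a skip instruction's payload.
            return (GuardRecordStub*) oprnd2();
        }
    };

    int main() {
        GuardRecordStub gr = { 42 };
        InsStub guard = { 0, (InsStub*) &gr };   // insGuard's (LIns*)gr cast
        assert(guard.record()->exitId == 42);
        return 0;
    }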


@@ -928,7 +928,7 @@ namespace nanojit
     GuardRecord *LIns::record() const {
         NanoAssert(isGuard());
-        return (GuardRecord*)oprnd2()->payload();
+        return (GuardRecord*)oprnd2();
     }

     int32_t LIns::disp() const {
@@ -1043,8 +1043,8 @@ namespace nanojit
         virtual LInsp ins3(LOpcode v, LIns* a, LIns* b, LIns* c) {
             return out->ins3(v, a, b, c);
         }
-        virtual LInsp insGuard(LOpcode v, LIns *c, LIns *x) {
-            return out->insGuard(v, c, x);
+        virtual LInsp insGuard(LOpcode v, LIns *c, GuardRecord *gr) {
+            return out->insGuard(v, c, gr);
         }
         virtual LInsp insBranch(LOpcode v, LInsp condition, LInsp to) {
             return out->insBranch(v, condition, to);
@@ -1212,8 +1212,8 @@ namespace nanojit
             }
         }

-        LIns* insGuard(LOpcode op, LInsp cond, LIns *x) {
-            return add_flush(out->insGuard(op,cond,x));
+        LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr) {
+            return add_flush(out->insGuard(op,cond,gr));
         }

         LIns* insBranch(LOpcode v, LInsp condition, LInsp to) {
@@ -1271,7 +1271,7 @@ namespace nanojit
         LIns* ins1(LOpcode v, LIns* a);
         LIns* ins2(LOpcode v, LIns* a, LIns* b);
         LIns* ins3(LOpcode v, LIns* a, LIns* b, LIns* c);
-        LIns* insGuard(LOpcode, LIns *cond, LIns *);
+        LIns* insGuard(LOpcode, LIns *cond, GuardRecord *);
         LIns* insBranch(LOpcode, LIns *cond, LIns *target);
         LIns* insLoad(LOpcode op, LInsp base, int32_t off);
     };
@@ -1329,7 +1329,7 @@ namespace nanojit
         LIns* ins3(LOpcode v, LInsp, LInsp, LInsp);
         LIns* insLoad(LOpcode op, LInsp cond, int32_t d);
         LIns* insCall(const CallInfo *call, LInsp args[]);
-        LIns* insGuard(LOpcode op, LInsp cond, LIns *x);
+        LIns* insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
     };

     class LirBuffer
@@ -1405,7 +1405,7 @@ namespace nanojit
         LInsp insImmq(uint64_t imm);
         LInsp insImmf(double d);
         LInsp insCall(const CallInfo *call, LInsp args[]);
-        LInsp insGuard(LOpcode op, LInsp cond, LIns *x);
+        LInsp insGuard(LOpcode op, LInsp cond, GuardRecord *gr);
         LInsp insBranch(LOpcode v, LInsp condition, LInsp to);
         LInsp insAlloc(int32_t size);
         LInsp insSkip(size_t);