Backout latest JM merge for OSX 10.5 M-oth permaorange (see bug 687257) on a CLOSED TREE

Ed Morley 2011-09-17 16:32:43 +01:00
parent 382cca3524
commit 5acc65b28b
52 changed files with 874 additions and 1351 deletions

View File

@ -947,10 +947,12 @@ namespace JSC {
return m_buffer.sizeOfConstantPool();
}
int flushCount()
#ifdef DEBUG
void allowPoolFlush(bool allowFlush)
{
return m_buffer.flushCount();
m_buffer.allowPoolFlush(allowFlush);
}
#endif
JmpDst label()
{
@ -1454,12 +1456,7 @@ namespace JSC {
// Encoded as bits [5,3:0].
return ((reg << 5) & 0x20) | ((reg >> 1) & 0xf);
}
ARMWord SN(int reg)
{
ASSERT(reg <= ARMRegisters::d31);
// Encoded as bits [19:16,7].
return ((reg << 15) & 0xf0000) | ((reg & 1) << 7);
}
static ARMWord getConditionalField(ARMWord i)
{
return i & 0xf0000000;
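
For reference, here is a standalone worked example (plain C++, not part of the patch) of the bit packing the DM() and SN() helpers above perform: DM() drops the register number's low bit into instruction bit 5 and its remaining bits into bits 3:0, while SN() drops the upper four bits into bits 19:16 and the low bit into bit 7.

#include <cstdint>
#include <cstdio>

typedef uint32_t ARMWord;

// Same arithmetic as the DM()/SN() helpers shown in the hunk above.
static ARMWord DM(int reg) { return ((reg << 5) & 0x20) | ((reg >> 1) & 0xf); }
static ARMWord SN(int reg) { return ((reg << 15) & 0xf0000) | ((reg & 1) << 7); }

int main() {
    // reg = 17 (0b10001): bit 0 -> instruction bit 5, bits 4:1 -> bits 3:0.
    printf("DM(17) = 0x%x\n", DM(17));   // prints 0x28
    // reg = 3 (0b00011): bits 4:1 -> bits 19:16, bit 0 -> bit 7.
    printf("SN(3)  = 0x%x\n", SN(3));    // prints 0x10080
    return 0;
}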
@ -1603,6 +1600,7 @@ namespace JSC {
emitVFPInst(static_cast<ARMWord>(cc) | VFP_DXFER | VFP_MOV |
(fromFP ? DT_LOAD : 0) |
(isDbl ? VFP_DBL : 0), RD(r1), RN(r2), isDbl ? DM(rFP) : SM(rFP));
}
void fcpyd_r(int dd, int dm, Condition cc = AL)
@ -1612,7 +1610,7 @@ namespace JSC {
nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FCPYD, DD(dd), DM(dm), 0);
emitInst(static_cast<ARMWord>(cc) | FCPYD, dd, dd, dm);
}
void faddd_r(int dd, int dn, int dm, Condition cc = AL)
@ -1621,7 +1619,7 @@ namespace JSC {
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vadd.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FADDD, DD(dd), DN(dn), DM(dm));
emitInst(static_cast<ARMWord>(cc) | FADDD, dd, dn, dm);
}
void fnegd_r(int dd, int dm, Condition cc = AL)
@ -1637,7 +1635,7 @@ namespace JSC {
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vdiv.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FDIVD, DD(dd), DN(dn), DM(dm));
emitInst(static_cast<ARMWord>(cc) | FDIVD, dd, dn, dm);
}
void fsubd_r(int dd, int dn, int dm, Condition cc = AL)
@ -1646,7 +1644,7 @@ namespace JSC {
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vsub.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FSUBD, DD(dd), DN(dn), DM(dm));
emitInst(static_cast<ARMWord>(cc) | FSUBD, dd, dn, dm);
}
void fabsd_r(int dd, int dm, Condition cc = AL)
@ -1662,7 +1660,7 @@ namespace JSC {
IPFX "%-15s %s, %s, %s\n", MAYBE_PAD, "vmul.f64", nameFpRegD(dd), nameFpRegD(dn), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FMULD, DD(dd), DN(dn), DM(dm));
emitInst(static_cast<ARMWord>(cc) | FMULD, dd, dn, dm);
}
void fcmpd_r(int dd, int dm, Condition cc = AL)
@ -1671,7 +1669,7 @@ namespace JSC {
IPFX "%-15s %s, %s\n", MAYBE_PAD, "vcmp.f64", nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FCMPD, DD(dd), 0, DM(dm));
emitInst(static_cast<ARMWord>(cc) | FCMPD, dd, 0, dm);
}
void fsqrtd_r(int dd, int dm, Condition cc = AL)
@ -1680,49 +1678,49 @@ namespace JSC {
IPFX "%-15s %s, %s\n", MAYBE_PAD, "vsqrt.f64", nameFpRegD(dd), nameFpRegD(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FSQRTD, DD(dd), 0, DM(dm));
emitInst(static_cast<ARMWord>(cc) | FSQRTD, dd, 0, dm);
}
void fmsr_r(int dd, int rn, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FMSR, RD(rn), SN(dd), 0);
emitInst(static_cast<ARMWord>(cc) | FMSR, rn, dd, 0);
}
void fmrs_r(int rd, int dn, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FMRS, RD(rd), SN(dn), 0);
emitInst(static_cast<ARMWord>(cc) | FMRS, rd, dn, 0);
}
// dear god :(
// integer registers are encoded the same as single registers
void fsitod_r(int dd, int dm, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FSITOD, DD(dd), 0, SM(dm));
emitInst(static_cast<ARMWord>(cc) | FSITOD, dd, 0, dm);
}
void fuitod_r(int dd, int dm, Condition cc = AL)
{
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitVFPInst(static_cast<ARMWord>(cc) | FUITOD, DD(dd), 0, SM(dm));
emitInst(static_cast<ARMWord>(cc) | FUITOD, dd, 0, dm);
}
void ftosid_r(int fd, int dm, Condition cc = AL)
{
// TODO: I don't actually know what the encoding is; I'm guessing SD and DM.
emitVFPInst(static_cast<ARMWord>(cc) | FTOSID, SD(fd), 0, DM(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitInst(static_cast<ARMWord>(cc) | FTOSID, fd, 0, dm);
}
void ftosizd_r(int fd, int dm, Condition cc = AL)
{
// TODO: I don't actually know what the encoding is; I'm guessing SD and DM.
emitVFPInst(static_cast<ARMWord>(cc) | FTOSIZD, SD(fd), 0, DM(dm));
// TODO: emitInst doesn't work for VFP instructions, though it
// seems to work for current usage.
emitInst(static_cast<ARMWord>(cc) | FTOSIZD, fd, 0, dm);
}
void fmstat(Condition cc = AL)

View File

@ -106,7 +106,9 @@ public:
, m_numConsts(0)
, m_maxDistance(maxPoolSize)
, m_lastConstDelta(0)
, m_flushCount(0)
#ifdef DEBUG
, m_allowFlush(true)
#endif
{
m_pool = static_cast<uint32_t*>(malloc(maxPoolSize));
m_mask = static_cast<char*>(malloc(maxPoolSize / sizeof(uint32_t)));
@ -239,16 +241,19 @@ public:
return m_numConsts;
}
int flushCount()
#ifdef DEBUG
// Guard constant pool flushes to ensure that they don't occur during
// regions where offsets into the code have to be maintained (such as PICs).
void allowPoolFlush(bool allowFlush)
{
return m_flushCount;
m_allowFlush = allowFlush;
}
#endif
private:
void correctDeltas(int insnSize)
{
m_maxDistance -= insnSize;
ASSERT(m_maxDistance >= 0);
m_lastConstDelta -= insnSize;
if (m_lastConstDelta < 0)
m_lastConstDelta = 0;
@ -259,7 +264,6 @@ private:
correctDeltas(insnSize);
m_maxDistance -= m_lastConstDelta;
ASSERT(m_maxDistance >= 0);
m_lastConstDelta = constSize;
}
@ -267,9 +271,9 @@ private:
{
js::JaegerSpew(js::JSpew_Insns, " -- FLUSHING CONSTANT POOL WITH %d CONSTANTS --\n",
m_numConsts);
ASSERT(m_allowFlush);
if (m_numConsts == 0)
return;
m_flushCount++;
int alignPool = (AssemblerBuffer::size() + (useBarrier ? barrierSize : 0)) & (sizeof(uint64_t) - 1);
if (alignPool)
@ -300,16 +304,12 @@ private:
m_loadOffsets.clear();
m_numConsts = 0;
m_maxDistance = maxPoolSize;
ASSERT(m_maxDistance >= 0);
}
void flushIfNoSpaceFor(int nextInsnSize)
{
if (m_numConsts == 0) {
m_maxDistance = maxPoolSize;
if (m_numConsts == 0)
return;
}
int lastConstDelta = m_lastConstDelta > nextInsnSize ? m_lastConstDelta - nextInsnSize : 0;
if ((m_maxDistance < nextInsnSize + lastConstDelta + barrierSize + (int)sizeof(uint32_t)))
flushConstantPool();
@ -317,10 +317,8 @@ private:
void flushIfNoSpaceFor(int nextInsnSize, int nextConstSize)
{
if (m_numConsts == 0) {
m_maxDistance = maxPoolSize;
if (m_numConsts == 0)
return;
}
if ((m_maxDistance < nextInsnSize + m_lastConstDelta + nextConstSize + barrierSize + (int)sizeof(uint32_t)) ||
(m_numConsts * sizeof(uint32_t) + nextConstSize >= maxPoolSize))
flushConstantPool();
@ -333,7 +331,10 @@ private:
int m_numConsts;
int m_maxDistance;
int m_lastConstDelta;
int m_flushCount;
#ifdef DEBUG
bool m_allowFlush;
#endif
};
} // namespace JSC
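
The allowPoolFlush() guard above is the primitive that the AutoReserveICSpace helper later in this diff wraps in RAII form. A minimal sketch of the intended discipline, using a hypothetical mock buffer rather than the real assembler: flush eagerly before a patchable region, then assert that no flush happens while recorded offsets must stay valid.

#include <cassert>
#include <cstdio>

// Mock of the constant-pool buffer, just enough to show the guard protocol.
struct PoolBuffer {
    bool m_allowFlush = true;
    int m_numConsts = 3;            // pretend some constants are pending

    void allowPoolFlush(bool allow) { m_allowFlush = allow; }

    void flushConstantPool() {
        assert(m_allowFlush);       // mirrors the ASSERT added above
        m_numConsts = 0;
    }

    void ensureSpace(int /*bytes*/) {
        if (m_numConsts)            // flush now, before flushing becomes illegal
            flushConstantPool();
    }
};

int main() {
    PoolBuffer masm;
    masm.ensureSpace(128);          // any needed flush happens here
    masm.allowPoolFlush(false);     // begin constant-pool-free region (patchable IC)
    /* ... emit code whose offsets are recorded and later patched ... */
    masm.allowPoolFlush(true);      // end of region, flushing is legal again
    printf("constants left before the region: %d\n", masm.m_numConsts);
    return 0;
}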

View File

@ -1292,14 +1292,14 @@ public:
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.fmsr_r(floatShadow(dest), src);
m_assembler.fsitod_r(dest, floatShadow(dest));
m_assembler.fmsr_r(dest, src);
m_assembler.fsitod_r(dest, dest);
}
void convertUInt32ToDouble(RegisterID src, FPRegisterID dest)
{
m_assembler.fmsr_r(floatShadow(dest), src);
m_assembler.fuitod_r(dest, floatShadow(dest));
m_assembler.fmsr_r(dest, src);
m_assembler.fuitod_r(dest, dest);
}
void convertInt32ToDouble(Address src, FPRegisterID dest)
@ -1337,11 +1337,11 @@ public:
// May also branch for some values that are representable in 32 bits
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
m_assembler.ftosizd_r(floatShadow(ARMRegisters::SD0), src);
m_assembler.ftosizd_r(ARMRegisters::SD0, src);
// If FTOSIZD (VCVT.S32.F64) can't fit the result into a 32-bit
// integer, it saturates at INT_MAX or INT_MIN. Testing this is
// probably quicker than testing FPSCR for exception.
m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
m_assembler.fmrs_r(dest, ARMRegisters::SD0);
m_assembler.cmn_r(dest, ARMAssembler::getOp2(-0x7fffffff));
m_assembler.cmp_r(dest, ARMAssembler::getOp2(0x80000000), ARMCondition(NonZero));
return Jump(m_assembler.jmp(ARMCondition(Zero)));
@ -1353,11 +1353,11 @@ public:
// (specifically, in this case, 0).
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
{
m_assembler.ftosid_r(floatShadow(ARMRegisters::SD0), src);
m_assembler.fmrs_r(dest, floatShadow(ARMRegisters::SD0));
m_assembler.ftosid_r(ARMRegisters::SD0, src);
m_assembler.fmrs_r(dest, ARMRegisters::SD0);
// Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
m_assembler.fsitod_r(ARMRegisters::SD0, floatShadow(ARMRegisters::SD0));
m_assembler.fsitod_r(ARMRegisters::SD0, ARMRegisters::SD0);
failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, ARMRegisters::SD0));
// If the result is zero, it might have been -0.0, and 0.0 equals to -0.0
@ -1380,10 +1380,12 @@ public:
m_assembler.forceFlushConstantPool();
}
int flushCount()
#ifdef DEBUG
void allowPoolFlush(bool allowFlush)
{
return m_assembler.flushCount();
m_assembler.allowPoolFlush(allowFlush);
}
#endif
protected:
ARMAssembler::Condition ARMCondition(Condition cond)

View File

@ -1,11 +0,0 @@
function foo() {
function D(){}
arr = [
new (function D ( ) {
D += '' + foo;
}),
new D
];
}
foo();

View File

@ -1,16 +0,0 @@
(function () {
assertEquals = function assertEquals(expected, found, name_opt) { };
})();
function testOne(receiver, key, result) {
for(var i = 0; i != 10; i++ ) {
assertEquals(result, receiver[key]());
}
}
function TypeOfThis() { return typeof this; }
Number.prototype.type = TypeOfThis;
String.prototype.type = TypeOfThis;
Boolean.prototype.type = TypeOfThis;
testOne(2.3, 'type', 'object');
testOne('x', 'type', 'object');
testOne(true, 'type', 'object');

View File

@ -15,10 +15,9 @@
var HOTLOOP = this.tracemonkey ? tracemonkey.HOTLOOP : 8;
var a;
function f(n) {
for (var i = 0; i < HOTLOOP; i++) {
for (var i = 0; i < HOTLOOP; i++)
if (i == HOTLOOP - 2)
a = this;
}
}
/*

View File

@ -1,8 +0,0 @@
function Function() {
try {
var g = this;
g.c("evil", eval);
} catch(b) {}
}
var o0 = Function.prototype;
var f = new Function( (null ) );

View File

@ -1,7 +0,0 @@
function X(n) {
while ('' + (n--)) {
break;
}
}
X();

View File

@ -1,7 +0,0 @@
function foo(x) {
for (var i = 0; i < 100; i++) {
x.f === i;
}
}
foo({f:"three"});

View File

@ -1,13 +0,0 @@
// TI does not account for GETELEM accessing strings, so the GETELEM PIC must
// update type constraints according to generated stubs.
function foo(a, b) {
for (var j = 0; j < 5; j++)
a[b[j]] + " what";
}
var a = {a:"zero", b:"one", c:"two", d:"three", e:"four"};
var b = ["a", "b", "c", "d", "e"];
foo(a, b);
foo(a, b);
a.e = 4;
foo(a, b);

View File

@ -1,18 +0,0 @@
// GETPROP PIC with multiple stubs containing getter hooks.
function foo(arr) {
for (var i = 0; i < 100; i++)
arr[i].caller;
}
arr = Object.create(Object.prototype);
first = Object.create({});
first.caller = null;
second = Object.create({});
second.caller = null;
for (var i = 0; i < 100; ) {
arr[i++] = first;
arr[i++] = foo;
arr[i++] = second;
}
foo.caller;
foo(arr);

View File

@ -1,19 +0,0 @@
// PIC on CALLPROP invoking getter hook.
function foo(arr) {
for (var i = 0; i < 100; i++)
arr[i].caller(false);
}
arr = Object.create(Object.prototype);
first = Object.create({});
first.caller = bar;
second = Object.create({});
second.caller = bar;
for (var i = 0; i < 100; )
arr[i++] = foo;
foo.caller;
function bar(x) {
if (x)
foo(arr);
}
bar(true);

View File

@ -140,14 +140,6 @@ class Bytecode
/* Call whose result should be monitored. */
bool monitoredTypesReturn : 1;
/*
* Dynamically observed state about the execution of this opcode. These are
* hints about the script for use during compilation.
*/
bool arrayWriteHole: 1; /* SETELEM which has written to an array hole. */
bool getStringElement:1; /* GETELEM which has accessed string properties. */
bool accessGetter: 1; /* Property read on a shape with a getter hook. */
/* Stack depth before this opcode. */
uint32 stackDepth;
@ -972,6 +964,7 @@ class ScriptAnalysis
/* Accessors for bytecode information. */
Bytecode& getCode(uint32 offset) {
JS_ASSERT(script->compartment()->activeAnalysis);
JS_ASSERT(offset < script->length);
JS_ASSERT(codeArray[offset]);
return *codeArray[offset];
@ -979,6 +972,7 @@ class ScriptAnalysis
Bytecode& getCode(const jsbytecode *pc) { return getCode(pc - script->code); }
Bytecode* maybeCode(uint32 offset) {
JS_ASSERT(script->compartment()->activeAnalysis);
JS_ASSERT(offset < script->length);
return codeArray[offset];
}

View File

@ -426,6 +426,42 @@ JSCompartment::wrap(JSContext *cx, AutoIdVector &props)
return true;
}
#if defined JS_METHODJIT && defined JS_MONOIC
/*
* Check if the pool containing the code for jit should be destroyed, per the
* heuristics in JSCompartment::sweep.
*/
static inline bool
ScriptPoolDestroyed(JSContext *cx, mjit::JITScript *jit,
uint32 releaseInterval, uint32 &counter)
{
JSC::ExecutablePool *pool = jit->code.m_executablePool;
if (pool->m_gcNumber != cx->runtime->gcNumber) {
/*
* The m_destroy flag may have been set in a previous GC for a pool which had
* references we did not remove (e.g. from the compartment's ExecutableAllocator)
* and is still around. Forget we tried to destroy it in such cases.
*/
pool->m_destroy = false;
pool->m_gcNumber = cx->runtime->gcNumber;
if (--counter == 0) {
pool->m_destroy = true;
counter = releaseInterval;
}
}
return pool->m_destroy;
}
static inline void
ScriptTryDestroyCode(JSContext *cx, JSScript *script, bool normal,
uint32 releaseInterval, uint32 &counter)
{
mjit::JITScript *jit = normal ? script->jitNormal : script->jitCtor;
if (jit && ScriptPoolDestroyed(cx, jit, releaseInterval, counter))
mjit::ReleaseScriptCode(cx, script, !normal);
}
#endif // JS_METHODJIT && JS_MONOIC
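
To make the counter scheme in ScriptPoolDestroyed() concrete, here is a standalone walkthrough (simplified: it ignores the per-GC m_gcNumber bookkeeping). The counter starts at 1, so the first pool visited in a sweep is flagged, then every releaseInterval-th pool after it, which releases roughly 1/releaseInterval of all pools per GC.

#include <cstdint>
#include <cstdio>

// Reduced version of the --counter logic above.
static bool poolFlagged(uint32_t releaseInterval, uint32_t &counter) {
    if (--counter == 0) {
        counter = releaseInterval;
        return true;                // this pool's JIT code would be released
    }
    return false;
}

int main() {
    uint32_t releaseInterval = 8;   // as computed by the GC sweep code later in this diff
    uint32_t counter = 1;           // "so that the first pool will be destroyed"
    int flagged = 0;
    for (int pool = 0; pool < 32; pool++)
        flagged += poolFlagged(releaseInterval, counter);
    printf("%d of 32 pools flagged\n", flagged);   // prints 4 of 32, i.e. 1/8
    return 0;
}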
/*
* This method marks pointers that cross compartment boundaries. It should be
* called only for per-compartment GCs, since full GCs naturally follow pointers
@ -470,7 +506,7 @@ JSCompartment::markTypes(JSTracer *trc)
}
void
JSCompartment::sweep(JSContext *cx, bool releaseTypes)
JSCompartment::sweep(JSContext *cx, uint32 releaseInterval)
{
/* Remove dead wrappers from the table. */
for (WrapperMap::Enum e(crossCompartmentWrappers); !e.empty(); e.popFront()) {
@ -509,13 +545,47 @@ JSCompartment::sweep(JSContext *cx, bool releaseTypes)
traceMonitor()->sweep(cx);
#endif
#ifdef JS_METHODJIT
/* Purge ICs in the compartment. These can reference GC things. */
# if defined JS_METHODJIT && defined JS_POLYIC
/*
* Purge all PICs in the compartment. These can reference type data and
* need to know which types are pending collection.
*/
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
mjit::PurgeICs(cx, script);
if (script->hasJITCode())
mjit::ic::PurgePICs(cx, script);
}
# endif
bool discardScripts = !active && (releaseInterval != 0 || hasDebugModeCodeToDrop);
#if defined JS_METHODJIT && defined JS_MONOIC
/*
* The release interval is the frequency with which we should try to destroy
* executable pools by releasing all JIT code in them, zero to never destroy pools.
* Initialize counter so that the first pool will be destroyed, and eventually drive
* the amount of JIT code in never-used compartments to zero. Don't discard anything
* for compartments which currently have active stack frames.
*/
uint32 counter = 1;
if (discardScripts)
hasDebugModeCodeToDrop = false;
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
if (script->hasJITCode()) {
mjit::ic::SweepCallICs(cx, script, discardScripts);
if (discardScripts) {
ScriptTryDestroyCode(cx, script, true, releaseInterval, counter);
ScriptTryDestroyCode(cx, script, false, releaseInterval, counter);
}
}
}
#endif
#ifdef JS_METHODJIT
if (types.inferenceEnabled)
mjit::ClearAllFrames(this);
#endif
@ -548,35 +618,23 @@ JSCompartment::sweep(JSContext *cx, bool releaseTypes)
* enabled in the compartment.
*/
if (types.inferenceEnabled) {
if (active)
releaseTypes = false;
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
if (script->types) {
types::TypeScript::Sweep(cx, script);
/*
* Periodically release observed types for all scripts.
* This is always safe to do when there are no frames for
* the compartment on the stack.
* On each 1/8 lifetime, release observed types for all scripts.
* This is always safe to do when there are no frames for the
* compartment on the stack.
*/
if (releaseTypes) {
if (discardScripts) {
script->types->destroy();
script->types = NULL;
script->typesPurged = true;
}
}
}
} else {
#ifdef JS_METHODJIT
/* :XXX: bug 685358 only releasing jitcode if there are no frames on the stack */
if (!active) {
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
mjit::ReleaseScriptCode(cx, script);
}
}
#endif
}
types.sweep(cx);
@ -626,6 +684,20 @@ JSCompartment::purge(JSContext *cx)
if (hasTraceMonitor())
traceMonitor()->needFlush = JS_TRUE;
#endif
#if defined JS_METHODJIT && defined JS_MONOIC
/*
* MICs do not refer to data which can be GC'ed and do not generate stubs
* which might need to be discarded, but are sensitive to shape regeneration.
*/
if (cx->runtime->gcRegenShapes) {
for (CellIterUnderGC i(this, FINALIZE_SCRIPT); !i.done(); i.next()) {
JSScript *script = i.get<JSScript>();
if (script->hasJITCode())
mjit::ic::PurgeMICs(cx, script);
}
}
#endif
}
MathCache *

View File

@ -532,7 +532,7 @@ struct JS_FRIEND_API(JSCompartment) {
bool wrap(JSContext *cx, js::AutoIdVector &props);
void markTypes(JSTracer *trc);
void sweep(JSContext *cx, bool releaseTypes);
void sweep(JSContext *cx, uint32 releaseInterval);
void purge(JSContext *cx);
void setGCLastBytes(size_t lastBytes, JSGCInvocationKind gckind);

View File

@ -2166,6 +2166,8 @@ JS_GetFunctionCallback(JSContext *cx)
JS_PUBLIC_API(void)
JS_DumpBytecode(JSContext *cx, JSScript *script)
{
JS_ASSERT(!cx->runtime->gcRunning);
#if defined(DEBUG)
AutoArenaAllocator mark(&cx->tempPool);
Sprinter sprinter;

View File

@ -93,15 +93,6 @@ JS_SplicePrototype(JSContext *cx, JSObject *obj, JSObject *proto)
* does not nuke type information for the object.
*/
CHECK_REQUEST(cx);
if (!obj->hasSingletonType()) {
/*
* We can see non-singleton objects when trying to splice prototypes
* due to mutable __proto__ (ugh).
*/
return JS_SetPrototype(cx, obj, proto);
}
return obj->splicePrototype(cx, proto);
}

View File

@ -1266,7 +1266,7 @@ StackFrame::getValidCalleeObject(JSContext *cx, Value *vp)
* track of the method, so we associate it with the first barriered
* object found starting from thisp on the prototype chain.
*/
JSObject *newfunobj = CloneFunctionObject(cx, fun);
JSObject *newfunobj = CloneFunctionObject(cx, fun, fun->getParent(), true);
if (!newfunobj)
return false;
newfunobj->setMethodObj(*first_barriered_thisp);

View File

@ -463,24 +463,6 @@ CloneFunctionObject(JSContext *cx, JSFunction *fun, JSObject *parent,
return js_CloneFunctionObject(cx, fun, parent, proto);
}
inline JSObject *
CloneFunctionObject(JSContext *cx, JSFunction *fun)
{
/*
* Variant which makes an exact clone of fun, preserving parent and proto.
* Calling the above version CloneFunctionObject(cx, fun, fun->getParent())
* is not equivalent: API clients, including XPConnect, can reparent
* objects so that fun->getGlobal() != fun->getProto()->getGlobal().
* See ReparentWrapperIfFound.
*/
JS_ASSERT(fun->getParent() && fun->getProto());
if (fun->hasSingletonType())
return fun;
return js_CloneFunctionObject(cx, fun, fun->getParent(), fun->getProto());
}
extern JSObject * JS_FASTCALL
js_AllocFlatClosure(JSContext *cx, JSFunction *fun, JSObject *scopeChain);

View File

@ -647,8 +647,12 @@ js_GCThingIsMarked(void *thing, uintN color = BLACK)
return reinterpret_cast<Cell *>(thing)->isMarked(color);
}
/* Lifetime for type sets attached to scripts containing observed types. */
static const int64 JIT_SCRIPT_RELEASE_TYPES_INTERVAL = 60 * 1000 * 1000;
/*
* 1/8 life for JIT code. After this number of microseconds has passed, 1/8 of all
* JIT code is discarded in inactive compartments, regardless of how often that
* code runs.
*/
static const int64 JIT_SCRIPT_EIGHTH_LIFETIME = 60 * 1000 * 1000;
JSBool
js_InitGC(JSRuntime *rt, uint32 maxbytes)
@ -690,7 +694,7 @@ js_InitGC(JSRuntime *rt, uint32 maxbytes)
*/
rt->setGCLastBytes(8192, GC_NORMAL);
rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
rt->gcJitReleaseTime = PRMJ_Now() + JIT_SCRIPT_EIGHTH_LIFETIME;
return true;
}
@ -2124,12 +2128,20 @@ static void
SweepCrossCompartmentWrappers(JSContext *cx)
{
JSRuntime *rt = cx->runtime;
bool releaseTypes = false;
/*
* Figure out how much JIT code should be released from inactive compartments.
* If multiple eighth-lives have passed, compound the release interval linearly;
* if enough time has passed, all inactive JIT code will be released.
*/
uint32 releaseInterval = 0;
int64 now = PRMJ_Now();
if (now >= rt->gcJitReleaseTime) {
releaseTypes = true;
rt->gcJitReleaseTime = now + JIT_SCRIPT_RELEASE_TYPES_INTERVAL;
releaseInterval = 8;
while (now >= rt->gcJitReleaseTime) {
if (--releaseInterval == 1)
rt->gcJitReleaseTime = now;
rt->gcJitReleaseTime += JIT_SCRIPT_EIGHTH_LIFETIME;
}
}
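
A worked example (standalone, same constant and same loop shape) of how the code above turns elapsed time into a release interval: when the deadline has only just passed the interval comes out at 7, it shrinks by one for each additional full eighth-lifetime the sweep is overdue, and it bottoms out at 1, which releases all inactive JIT code.

#include <cstdint>
#include <cstdio>

int main() {
    const int64_t EIGHTH = 60 * 1000 * 1000;    // JIT_SCRIPT_EIGHTH_LIFETIME, microseconds
    int64_t releaseTime = 0;                    // stands in for rt->gcJitReleaseTime
    int64_t now = (5 * EIGHTH) / 2;             // 2.5 eighth-lifetimes overdue

    uint32_t releaseInterval = 0;
    if (now >= releaseTime) {
        releaseInterval = 8;
        while (now >= releaseTime) {
            if (--releaseInterval == 1)
                releaseTime = now;
            releaseTime += EIGHTH;
        }
    }
    // Prints 5: roughly one fifth of the inactive pools are released this GC.
    printf("releaseInterval = %u\n", releaseInterval);
    return 0;
}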
/*
@ -2140,7 +2152,7 @@ SweepCrossCompartmentWrappers(JSContext *cx)
* (4) Sweep the method JIT ICs and release infrequently used JIT code.
*/
for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
(*c)->sweep(cx, releaseTypes);
(*c)->sweep(cx, releaseInterval);
}
static void

View File

@ -691,17 +691,16 @@ public:
JSScript *script;
jsbytecode *callpc;
Type type;
TypeSet *types;
TypeConstraintPropagateThis(JSScript *script, jsbytecode *callpc, Type type, TypeSet *types)
: TypeConstraint("propagatethis"), script(script), callpc(callpc), type(type), types(types)
TypeConstraintPropagateThis(JSScript *script, jsbytecode *callpc, Type type)
: TypeConstraint("propagatethis"), script(script), callpc(callpc), type(type)
{}
void newType(JSContext *cx, TypeSet *source, Type type);
};
void
TypeSet::addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type, TypeSet *types)
TypeSet::addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type)
{
/* Don't add constraints when the call will be 'new' (see addCallProperty). */
jsbytecode *callpc = script->analysis()->getCallPC(pc);
@ -709,7 +708,7 @@ TypeSet::addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type
if (JSOp(*callpc) == JSOP_NEW)
return;
add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool, script, callpc, type, types));
add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool, script, callpc, type));
}
/* Subset constraint which filters out primitive types. */
@ -1064,10 +1063,10 @@ TypeConstraintCallProp::newType(JSContext *cx, TypeSet *source, Type type)
UntrapOpcode untrap(cx, script, callpc);
/*
* For CALLPROP, we need to update not just the pushed types but also the
* 'this' types of possible callees. If we can't figure out that set of
* callees, monitor the call to make sure discovered callees get their
* 'this' types updated.
* For CALLPROP and CALLELEM, we need to update not just the pushed types
* but also the 'this' types of possible callees. If we can't figure out
* that set of callees, monitor the call to make sure discovered callees
* get their 'this' types updated.
*/
if (UnknownPropertyAccess(script, type)) {
@ -1087,8 +1086,7 @@ TypeConstraintCallProp::newType(JSContext *cx, TypeSet *source, Type type)
object->getFromPrototypes(cx, id, types);
/* Bypass addPropagateThis, we already have the callpc. */
types->add(cx, ArenaNew<TypeConstraintPropagateThis>(cx->compartment->pool,
script, callpc, type,
(TypeSet *) NULL));
script, callpc, type));
}
}
}
@ -1232,8 +1230,8 @@ TypeConstraintPropagateThis::newType(JSContext *cx, TypeSet *source, Type type)
/*
* The callee is unknown, make sure the call is monitored so we pick up
* possible this/callee correlations. This only comes into play for
* CALLPROP, for other calls we are past the type barrier and a
* TypeConstraintCall will also monitor the call.
* CALLPROP and CALLELEM, for other calls we are past the type barrier
* already and a TypeConstraintCall will also monitor the call.
*/
cx->compartment->types.monitorBytecode(cx, script, callpc - script->code);
return;
@ -1260,11 +1258,7 @@ TypeConstraintPropagateThis::newType(JSContext *cx, TypeSet *source, Type type)
if (!callee->script()->ensureHasTypes(cx, callee))
return;
TypeSet *thisTypes = TypeScript::ThisTypes(callee->script());
if (this->types)
this->types->addSubset(cx, thisTypes);
else
thisTypes->addType(cx, this->type);
TypeScript::ThisTypes(callee->script())->addType(cx, this->type);
}
void
@ -1897,8 +1891,10 @@ TypeCompartment::init(JSContext *cx)
{
PodZero(this);
#ifndef JS_CPU_ARM
if (cx && cx->getRunOptions() & JSOPTION_TYPE_INFERENCE)
inferenceEnabled = true;
#endif
}
TypeObject *
@ -3225,14 +3221,6 @@ ScriptAnalysis::resolveNameAccess(JSContext *cx, jsid id, bool addDependency)
return access;
}
/*
* The script's bindings do not contain a name for the function itself,
* so don't resolve name accesses on lambdas in DeclEnv objects on the
* scope chain.
*/
if (atom == CallObjectLambdaName(script->function()))
return access;
if (!script->nesting()->parent)
return access;
script = script->nesting()->parent;
@ -3688,18 +3676,19 @@ ScriptAnalysis::analyzeTypesBytecode(JSContext *cx, unsigned offset,
TypeSet *seen = script->analysis()->bytecodeTypes(pc);
poppedTypes(pc, 1)->addGetProperty(cx, script, pc, seen, JSID_VOID);
if (op == JSOP_CALLELEM)
poppedTypes(pc, 1)->addCallProperty(cx, script, pc, JSID_VOID);
seen->addSubset(cx, &pushed[0]);
if (op == JSOP_CALLELEM) {
if (op == JSOP_CALLELEM)
poppedTypes(pc, 1)->addFilterPrimitives(cx, &pushed[1], TypeSet::FILTER_NULL_VOID);
pushed[0].addPropagateThis(cx, script, pc, Type::UndefinedType(), &pushed[1]);
}
if (CheckNextTest(pc))
pushed[0].addType(cx, Type::UndefinedType());
break;
}
case JSOP_SETELEM:
case JSOP_SETHOLE:
poppedTypes(pc, 1)->addSetElement(cx, script, pc, poppedTypes(pc, 2), poppedTypes(pc, 0));
poppedTypes(pc, 0)->addSubset(cx, &pushed[0]);
break;
@ -6032,13 +6021,6 @@ TypeScript::Sweep(JSContext *cx, JSScript *script)
#ifdef JS_METHODJIT
mjit::ReleaseScriptCode(cx, script);
#endif
/*
* Use counts for scripts are reset on GC. After discarding code we need to
* let it warm back up to get information like which opcodes are setting
* array holes or accessing getter properties.
*/
script->resetUseCount();
}
void

View File

@ -452,8 +452,7 @@ class TypeSet
void addCall(JSContext *cx, TypeCallsite *site);
void addArith(JSContext *cx, TypeSet *target, TypeSet *other = NULL);
void addTransformThis(JSContext *cx, JSScript *script, TypeSet *target);
void addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc,
Type type, TypeSet *types = NULL);
void addPropagateThis(JSContext *cx, JSScript *script, jsbytecode *pc, Type type);
void addFilterPrimitives(JSContext *cx, TypeSet *target, FilterKind filter);
void addSubsetBarrier(JSContext *cx, JSScript *script, jsbytecode *pc, TypeSet *target);
void addLazyArguments(JSContext *cx, TypeSet *target);

View File

@ -1599,14 +1599,10 @@ static inline void
TypeCheckNextBytecode(JSContext *cx, JSScript *script, unsigned n, const FrameRegs &regs)
{
#ifdef DEBUG
if (*regs.pc != JSOP_TRAP &&
if (cx->typeInferenceEnabled() &&
*regs.pc != JSOP_TRAP &&
n == analyze::GetBytecodeLength(regs.pc)) {
if (script->hasAnalysis() && !regs.fp()->hasImacropc()) {
jsbytecode *nextpc = regs.pc + GetBytecodeLength(cx, script, regs.pc);
JS_ASSERT(regs.sp == regs.fp()->base() + script->analysis()->getCode(nextpc).stackDepth);
}
if (cx->typeInferenceEnabled())
TypeScript::CheckBytecode(cx, script, regs.pc, regs.sp);
TypeScript::CheckBytecode(cx, script, regs.pc, regs.sp);
}
#endif
}
@ -3907,13 +3903,13 @@ BEGIN_CASE(JSOP_GETELEM)
}
}
if (JSID_IS_STRING(id) && script->hasAnalysis() && !regs.fp()->hasImacropc())
script->analysis()->getCode(regs.pc).getStringElement = true;
if (!obj->getProperty(cx, id, &rval))
goto error;
copyFrom = &rval;
if (!JSID_IS_INT(id))
TypeScript::MonitorUnknown(cx, script, regs.pc);
end_getelem:
regs.sp--;
regs.sp[-1] = *copyFrom;
@ -3951,11 +3947,14 @@ BEGIN_CASE(JSOP_CALLELEM)
regs.sp[-1] = thisv;
}
if (!JSID_IS_INT(id))
TypeScript::MonitorUnknown(cx, script, regs.pc);
TypeScript::Monitor(cx, script, regs.pc, regs.sp[-2]);
}
END_CASE(JSOP_CALLELEM)
BEGIN_CASE(JSOP_SETELEM)
BEGIN_CASE(JSOP_SETHOLE)
{
JSObject *obj;
FETCH_OBJECT(cx, -3, obj);
@ -3973,12 +3972,12 @@ BEGIN_CASE(JSOP_SETELEM)
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(cx, i + 1);
*regs.pc = JSOP_SETHOLE;
}
obj->setDenseArrayElementWithType(cx, i, regs.sp[-1]);
goto end_setelem;
} else {
if (script->hasAnalysis() && !regs.fp()->hasImacropc())
script->analysis()->getCode(regs.pc).arrayWriteHole = true;
*regs.pc = JSOP_SETHOLE;
}
}
} while (0);
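
Stepping back, what this hunk restores is a self-patching bytecode hint: the first time a SETELEM writes past a dense array's initialized length, the opcode at regs.pc is overwritten with JSOP_SETHOLE, so later consumers such as the tracer and the analysis see the hint on every subsequent execution at no extra cost. A toy dispatch loop (a sketch of the idea, not the real interpreter) follows.

#include <cstdint>
#include <cstdio>
#include <vector>

enum Op : uint8_t { OP_SETELEM, OP_SETHOLE, OP_STOP };

int main() {
    std::vector<uint8_t> bytecode = { OP_SETELEM, OP_STOP };
    const unsigned initializedLength = 4;   // pretend dense-array length
    const unsigned writeIndex = 7;          // a write past the initialized length

    for (int run = 0; run < 2; run++) {
        printf("run %d starts with %s\n", run,
               bytecode[0] == OP_SETHOLE ? "JSOP_SETHOLE" : "JSOP_SETELEM");
        uint8_t *pc = bytecode.data();
        while (*pc != OP_STOP) {
            switch (*pc) {
              case OP_SETELEM:
              case OP_SETHOLE:
                if (writeIndex >= initializedLength)
                    *pc = OP_SETHOLE;       // record the hole write in the bytecode itself
                break;
            }
            pc++;
        }
    }
    return 0;
}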
@ -4687,7 +4686,6 @@ BEGIN_CASE(JSOP_DEFFUN)
obj = CloneFunctionObject(cx, fun, obj2, true);
if (!obj)
goto error;
JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
}
/*
@ -4821,8 +4819,6 @@ BEGIN_CASE(JSOP_DEFLOCALFUN)
}
}
JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
uint32 slot = GET_SLOTNO(regs.pc);
TRACE_2(DefLocalFunSetSlot, slot, obj);
@ -4941,8 +4937,6 @@ BEGIN_CASE(JSOP_LAMBDA)
} while (0);
JS_ASSERT(obj->getProto());
JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
PUSH_OBJECT(*obj);
}
END_CASE(JSOP_LAMBDA)
@ -4955,7 +4949,6 @@ BEGIN_CASE(JSOP_LAMBDA_FC)
JSObject *obj = js_NewFlatClosure(cx, fun, JSOP_LAMBDA_FC, JSOP_LAMBDA_FC_LENGTH);
if (!obj)
goto error;
JS_ASSERT_IF(script->hasGlobal(), obj->getProto() == fun->getProto());
PUSH_OBJECT(*obj);
}

View File

@ -5688,8 +5688,6 @@ js_NativeGetInline(JSContext *cx, JSObject *receiver, JSObject *obj, JSObject *p
if (slot != SHAPE_INVALID_SLOT) {
*vp = pobj->nativeGetSlot(slot);
JS_ASSERT(!vp->isMagic());
JS_ASSERT_IF(!pobj->hasSingletonType() && shape->hasDefaultGetterOrIsMethod(),
js::types::TypeHasProperty(cx, pobj->type(), shape->propid, *vp));
} else {
vp->setUndefined();
}
@ -5701,14 +5699,6 @@ js_NativeGetInline(JSContext *cx, JSObject *receiver, JSObject *obj, JSObject *p
return true;
}
jsbytecode *pc;
JSScript *script = cx->stack.currentScript(&pc);
if (script && script->hasAnalysis() && !cx->fp()->hasImacropc()) {
analyze::Bytecode *code = script->analysis()->maybeCode(pc);
if (code)
code->accessGetter = true;
}
sample = cx->runtime->propertyRemovals;
if (!shape->get(cx, receiver, obj, pobj, vp))
return false;
@ -5721,6 +5711,9 @@ js_NativeGetInline(JSContext *cx, JSObject *receiver, JSObject *obj, JSObject *p
pobj->nativeSetSlot(slot, *vp);
}
/* Record values produced by shapes without a default getter. */
AddTypePropertyId(cx, obj, shape->propid, *vp);
return true;
}
@ -6021,7 +6014,7 @@ CloneFunctionForSetMethod(JSContext *cx, Value *vp)
* need to be cloned again.
*/
if (fun == funobj) {
funobj = CloneFunctionObject(cx, fun);
funobj = CloneFunctionObject(cx, fun, fun->parent, true);
if (!funobj)
return false;
vp->setObject(*funobj);

View File

@ -144,6 +144,8 @@ JSObject::getProperty(JSContext *cx, JSObject *receiver, jsid id, js::Value *vp)
} else {
if (!js_GetProperty(cx, this, receiver, id, vp))
return false;
JS_ASSERT_IF(!hasSingletonType() && nativeContains(cx, js_CheckForStringIndex(id)),
js::types::TypeHasProperty(cx, type(), id, *vp));
}
return true;
}
@ -265,7 +267,7 @@ JSObject::methodReadBarrier(JSContext *cx, const js::Shape &shape, js::Value *vp
JS_ASSERT(fun == funobj);
JS_ASSERT(fun->isNullClosure());
funobj = CloneFunctionObject(cx, fun);
funobj = CloneFunctionObject(cx, fun, funobj->getParent(), true);
if (!funobj)
return NULL;
funobj->setMethodObj(*this);

View File

@ -3966,6 +3966,7 @@ Decompile(SprintStack *ss, jsbytecode *pc, intN nb, JSOp nextop)
break;
case JSOP_SETELEM:
case JSOP_SETHOLE:
rval = POP_STR();
op = JSOP_NOP; /* turn off parens */
xval = POP_STR();

View File

@ -592,5 +592,8 @@ OPDEF(JSOP_UNBRANDTHIS, 229,"unbrandthis", NULL, 1, 0, 0, 0, JOF_BYTE)
OPDEF(JSOP_SHARPINIT, 230,"sharpinit", NULL, 3, 0, 0, 0, JOF_UINT16|JOF_SHARPSLOT)
/* Substituted for JSOP_SETELEM to indicate opcodes which have written holes in dense arrays. */
OPDEF(JSOP_SETHOLE, 231, "sethole", NULL, 1, 3, 1, 3, JOF_BYTE |JOF_ELEM|JOF_SET|JOF_DETECTING)
/* Pop the stack, convert to a jsid (int or string), and push back. */
OPDEF(JSOP_TOID, 231, "toid", NULL, 1, 1, 1, 0, JOF_BYTE)
OPDEF(JSOP_TOID, 232, "toid", NULL, 1, 1, 1, 0, JOF_BYTE)

View File

@ -6875,7 +6875,8 @@ LeaveTree(TraceMonitor *tm, TracerState& state, VMSideExit* lr)
* Since this doesn't re-enter the recorder, the post-state snapshot
* is invalid. Fix it up here.
*/
if (op == JSOP_SETELEM && JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
if ((op == JSOP_SETELEM || op == JSOP_SETHOLE) &&
JSOp(regs->pc[JSOP_SETELEM_LENGTH]) == JSOP_POP) {
regs->sp -= js_CodeSpec[JSOP_SETELEM].nuses;
regs->sp += js_CodeSpec[JSOP_SETELEM].ndefs;
regs->pc += JSOP_SETELEM_LENGTH;
@ -13427,7 +13428,7 @@ TraceRecorder::setElem(int lval_spindex, int idx_spindex, int v_spindex)
}
jsbytecode* pc = cx->regs().pc;
if (*pc == JSOP_SETELEM && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
if ((*pc == JSOP_SETELEM || *pc == JSOP_SETHOLE) && pc[JSOP_SETELEM_LENGTH] != JSOP_POP)
set(&lval, v_ins);
return ARECORD_CONTINUE;
@ -13439,6 +13440,12 @@ TraceRecorder::record_JSOP_SETELEM()
return setElem(-3, -2, -1);
}
JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::record_JSOP_SETHOLE()
{
return setElem(-3, -2, -1);
}
static JSBool FASTCALL
CheckSameGlobal(JSObject *obj, JSObject *globalObj)
{
@ -17065,7 +17072,7 @@ LoopProfile::profileOperation(JSContext* cx, JSOp op)
if (op == JSOP_NEW)
increment(OP_NEW);
if (op == JSOP_GETELEM || op == JSOP_SETELEM) {
if (op == JSOP_GETELEM || op == JSOP_SETELEM || op == JSOP_SETHOLE) {
Value& lval = cx->regs().sp[op == JSOP_GETELEM ? -2 : -3];
if (lval.isObject() && js_IsTypedArray(&lval.toObject()))
increment(OP_TYPED_ARRAY);

View File

@ -125,6 +125,15 @@ class Assembler : public ValueAssembler
DataLabelPtr label;
};
/* Need a temp reg that is not ArgReg1. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
#elif defined(JS_CPU_ARM)
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#endif
/* :TODO: OOM */
Label startLabel;
Vector<CallPatch, 64, SystemAllocPolicy> callPatches;
@ -544,14 +553,14 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
}
}
void storeArg(uint32 i, ImmPtr imm) {
void storeArg(uint32 i, Imm32 imm) {
JS_ASSERT(callIsAligned);
RegisterID to;
if (Registers::regForArg(callConvention, i, &to)) {
move(imm, to);
availInCall.takeRegUnchecked(to);
} else {
storePtr(imm, addressOfArg(i));
store32(imm, addressOfArg(i));
}
}
@ -606,7 +615,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
Call callWithVMFrame(bool inlining, type stub, jsbytecode *pc, \
DataLabelPtr *pinlined, uint32 fd) { \
return fallibleVMCall(inlining, JS_FUNC_TO_DATA_PTR(void *, stub), \
pc, NULL, pinlined, fd); \
pc, pinlined, fd); \
}
STUB_CALL_TYPE(JSObjStub);
@ -616,7 +625,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
#undef STUB_CALL_TYPE
void setupFrameDepth(int32 frameDepth) {
void setupInfallibleVMFrame(int32 frameDepth) {
// |frameDepth < 0| implies ic::SplatApplyArgs has been called which
// means regs.sp has already been set in the VMFrame.
if (frameDepth >= 0) {
@ -624,13 +633,9 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
// regs->sp = sp
addPtr(Imm32(sizeof(StackFrame) + frameDepth * sizeof(jsval)),
JSFrameReg,
Registers::ClobberInCall);
storePtr(Registers::ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
ClobberInCall);
storePtr(ClobberInCall, FrameAddress(offsetof(VMFrame, regs.sp)));
}
}
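
In other words, the store above sets regs.sp to fp + sizeof(StackFrame) + frameDepth * sizeof(jsval). A back-of-the-envelope illustration with made-up sizes (the real sizeof(StackFrame) is platform dependent; 96 here is purely illustrative):

#include <cstdint>
#include <cstdio>

int main() {
    const uintptr_t sizeofStackFrame = 96;  // illustrative only, not the real sizeof(StackFrame)
    const uintptr_t sizeofJsval = 8;        // boxed value size assumed here
    const uintptr_t fp = 0x10000;           // value held in JSFrameReg
    const uintptr_t frameDepth = 3;         // three slots live above the frame header

    uintptr_t sp = fp + sizeofStackFrame + frameDepth * sizeofJsval;
    // regs.sp = 0x10000 + 96 + 24 = 0x10078
    printf("regs.sp = %#lx\n", (unsigned long)sp);
    return 0;
}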
void setupInfallibleVMFrame(int32 frameDepth) {
setupFrameDepth(frameDepth);
// The JIT has moved Arg1 already, and we've guaranteed to not clobber
// it. Move ArgReg0 into place now. setupFallibleVMFrame will not
@ -638,7 +643,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
move(MacroAssembler::stackPointerRegister, Registers::ArgReg0);
}
void setupFallibleVMFrame(bool inlining, jsbytecode *pc, CallSite *inlined,
void setupFallibleVMFrame(bool inlining, jsbytecode *pc,
DataLabelPtr *pinlined, int32 frameDepth) {
setupInfallibleVMFrame(frameDepth);
@ -650,30 +655,15 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
if (inlining) {
/* inlined -> regs->inlined :( */
if (inlined) {
storePtr(ImmPtr(inlined), FrameAddress(VMFrame::offsetOfInlined));
} else {
DataLabelPtr ptr = storePtrWithPatch(ImmPtr(NULL),
FrameAddress(VMFrame::offsetOfInlined));
if (pinlined)
*pinlined = ptr;
}
DataLabelPtr ptr = storePtrWithPatch(ImmPtr(NULL),
FrameAddress(VMFrame::offsetOfInlined));
if (pinlined)
*pinlined = ptr;
}
restoreStackBase();
}
void setupFallibleABICall(bool inlining, jsbytecode *pc, CallSite *inlined, int32 frameDepth) {
setupFrameDepth(frameDepth);
/* Store fp/pc/inlined */
storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
storePtr(ImmPtr(pc), FrameAddress(offsetof(VMFrame, regs.pc)));
if (inlining)
storePtr(ImmPtr(inlined), FrameAddress(VMFrame::offsetOfInlined));
}
void restoreStackBase() {
#if defined(JS_CPU_X86)
/*
@ -699,8 +689,8 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
// parameter) that needs the entire VMFrame to be coherent, meaning that
// |pc|, |inlined| and |fp| are guaranteed to be up-to-date.
Call fallibleVMCall(bool inlining, void *ptr, jsbytecode *pc,
CallSite *inlined, DataLabelPtr *pinlined, int32 frameDepth) {
setupFallibleVMFrame(inlining, pc, inlined, pinlined, frameDepth);
DataLabelPtr *pinlined, int32 frameDepth) {
setupFallibleVMFrame(inlining, pc, pinlined, frameDepth);
Call call = wrapVMCall(ptr);
// Restore the frame pointer from the VM.
@ -877,7 +867,6 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
const js::Shape *shape,
RegisterID typeReg, RegisterID dataReg)
{
JS_ASSERT(shape->hasSlot());
if (shape->isMethod())
loadValueAsComponents(ObjectValue(shape->methodObject()), typeReg, dataReg);
else if (obj->isFixedSlot(shape->slot))
@ -1152,7 +1141,7 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
* in the specified set. Updates mismatches with any failure jumps. Assumes
* no data registers are live.
*/
bool generateTypeCheck(JSContext *cx, Address address, RegisterID reg,
bool generateTypeCheck(JSContext *cx, Address address,
types::TypeSet *types, Vector<Jump> *mismatches)
{
if (types->unknown())
@ -1200,6 +1189,9 @@ static const JSC::MacroAssembler::RegisterID JSParamReg_Argc = JSC::SparcRegist
if (count != 0) {
if (!mismatches->append(testObject(Assembler::NotEqual, address)))
return false;
Registers tempRegs(Registers::AvailRegs);
RegisterID reg = tempRegs.takeAnyReg().reg();
loadPayload(address, reg);
Jump notSingleton = branchTest32(Assembler::Zero,

View File

@ -175,41 +175,6 @@ class LinkerHelper : public JSC::LinkBuffer
}
};
class NativeStubLinker : public LinkerHelper
{
public:
#ifdef JS_CPU_X64
typedef JSC::MacroAssembler::DataLabelPtr FinalJump;
#else
typedef JSC::MacroAssembler::Jump FinalJump;
#endif
NativeStubLinker(Assembler &masm, JITScript *jit, jsbytecode *pc, CallSite *inlined, FinalJump done)
: LinkerHelper(masm, JSC::METHOD_CODE), jit(jit), pc(pc), inlined(inlined), done(done)
{}
bool init(JSContext *cx);
void patchJump(JSC::CodeLocationLabel target) {
#ifdef JS_CPU_X64
patch(done, target);
#else
link(done, target);
#endif
}
private:
JITScript *jit;
jsbytecode *pc;
CallSite *inlined;
FinalJump done;
};
bool
NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
int32 initialFrameDepth, int32 vpOffset,
MaybeRegisterID typeReg, MaybeRegisterID dataReg);
/*
* On ARM, we periodically flush a constant pool into the instruction stream
* where constants are found using PC-relative addressing. This is necessary
@ -228,43 +193,67 @@ class AutoReserveICSpace {
typedef Assembler::Label Label;
Assembler &masm;
#ifdef DEBUG
Label startLabel;
bool didCheck;
bool *overflowSpace;
int flushCount;
#endif
public:
AutoReserveICSpace(Assembler &masm, bool *overflowSpace)
: masm(masm), didCheck(false), overflowSpace(overflowSpace)
{
AutoReserveICSpace(Assembler &masm) : masm(masm) {
masm.ensureSpace(reservedSpace);
flushCount = masm.flushCount();
#ifdef DEBUG
didCheck = false;
startLabel = masm.label();
/* Assert that the constant pool is not flushed until we reach a safe point. */
masm.allowPoolFlush(false);
JaegerSpew(JSpew_Insns, " -- BEGIN CONSTANT-POOL-FREE REGION -- \n");
#endif
}
/* Allow manual IC space checks so that non-patchable code at the end of an IC section can be
* free to use constant pools. */
void check() {
#ifdef DEBUG
JS_ASSERT(!didCheck);
didCheck = true;
if (masm.flushCount() != flushCount)
*overflowSpace = true;
Label endLabel = masm.label();
int spaceUsed = masm.differenceBetween(startLabel, endLabel);
/* Spew the space used, to help tuning of reservedSpace. */
JaegerSpew(JSpew_Insns,
" -- END CONSTANT-POOL-FREE REGION: %u bytes used of %u reserved. -- \n",
spaceUsed, reservedSpace);
/* Assert that we didn't emit more code than we protected. */
JS_ASSERT(spaceUsed >= 0);
JS_ASSERT(size_t(spaceUsed) <= reservedSpace);
/* Allow the pool to be flushed. */
masm.allowPoolFlush(true);
#endif
}
~AutoReserveICSpace() {
#ifdef DEBUG
/* Automatically check the IC space if we didn't already do it manually. */
if (!didCheck) {
check();
}
#endif
}
};
# define RESERVE_IC_SPACE(__masm) AutoReserveICSpace<256> arics(__masm, &this->overflowICSpace)
# define RESERVE_IC_SPACE(__masm) AutoReserveICSpace<128> arics(__masm)
# define CHECK_IC_SPACE() arics.check()
/* The OOL path can need a lot of space because we save and restore a lot of registers. The actual
* sequence varies. However, dumping the literal pool before an OOL block is probably a good idea
* anyway, as we branch directly to the start of the block from the fast path. */
# define RESERVE_OOL_SPACE(__masm) AutoReserveICSpace<2048> arics_ool(__masm, &this->overflowICSpace)
# define RESERVE_OOL_SPACE(__masm) AutoReserveICSpace<256> arics_ool(__masm)
/* Allow the OOL patch to be checked before object destruction. Often, non-patchable epilogues or
* rejoining sequences are emitted, and it isn't necessary to protect these from literal pools. */

View File

@ -133,7 +133,6 @@ mjit::Compiler::Compiler(JSContext *cx, JSScript *outerScript, bool isConstructi
inlining_(false),
hasGlobalReallocation(false),
oomInVector(false),
overflowICSpace(false),
gcNumber(cx->runtime->gcNumber),
applyTricks(NoApplyTricks),
pcLengths(NULL)
@ -895,11 +894,6 @@ mjit::Compiler::finishThisUp(JITScript **jitp)
if (cx->runtime->gcNumber != gcNumber)
return Compile_Retry;
if (overflowICSpace) {
JaegerSpew(JSpew_Scripts, "dumped a constant pool while generating an IC\n");
return Compile_Abort;
}
for (size_t i = 0; i < branchPatches.length(); i++) {
Label label = labelOf(branchPatches[i].pc, branchPatches[i].inlineIndex);
branchPatches[i].jump.linkTo(label, &masm);
@ -2033,10 +2027,8 @@ mjit::Compiler::generateMethod()
END_CASE(JSOP_TOID)
BEGIN_CASE(JSOP_SETELEM)
BEGIN_CASE(JSOP_SETHOLE)
{
typeCheckPopped(0);
typeCheckPopped(1);
typeCheckPopped(2);
jsbytecode *next = &PC[JSOP_SETELEM_LENGTH];
bool pop = (JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next));
if (!jsop_setelem(pop))
@ -2387,13 +2379,18 @@ mjit::Compiler::generateMethod()
END_CASE(JSOP_BINDNAME)
BEGIN_CASE(JSOP_SETPROP)
{
jsbytecode *next = &PC[JSOP_SETPROP_LENGTH];
bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
if (!jsop_setprop(script->getAtom(fullAtomIndex(PC)), true, pop))
return Compile_Error;
}
END_CASE(JSOP_SETPROP)
BEGIN_CASE(JSOP_SETNAME)
BEGIN_CASE(JSOP_SETMETHOD)
{
typeCheckPopped(0);
if (op != JSOP_SETNAME)
typeCheckPopped(1);
jsbytecode *next = &PC[JSOP_SETPROP_LENGTH];
jsbytecode *next = &PC[JSOP_SETNAME_LENGTH];
bool pop = JSOp(*next) == JSOP_POP && !analysis->jumpTarget(next);
if (!jsop_setprop(script->getAtom(fullAtomIndex(PC)), true, pop))
return Compile_Error;
@ -2791,22 +2788,6 @@ mjit::Compiler::generateMethod()
frame.extra(fe).types = analysis->pushedTypes(lastPC - script->code, i);
}
}
#ifdef DEBUG
if ((js_CodeSpec[op].format & JOF_TYPESET) &&
js_GetOpcode(cx, script, PC) != JSOP_POP) {
FrameEntry *fe = frame.getStack(opinfo->stackDepth - nuses);
Jump j = frame.typeCheckEntry(fe, frame.extra(fe).types);
stubcc.linkExit(j, Uses(0));
stubcc.leave();
jsbytecode *oldPC = PC;
PC = lastPC;
OOL_STUBCALL(stubs::TypeCheckPushed, REJOIN_FALLTHROUGH);
PC = oldPC;
stubcc.rejoin(Changes(0));
}
#endif
}
if (script->pcCounters) {
@ -3233,7 +3214,7 @@ mjit::Compiler::emitStubCall(void *ptr, DataLabelPtr *pinline)
masm.bumpStubCounter(script, PC, Registers::tempCallReg());
Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
ptr, outerPC(), NULL, pinline, frame.totalDepth());
ptr, outerPC(), pinline, frame.totalDepth());
JaegerSpew(JSpew_Insns, " ---- END STUB CALL ---- \n");
return cl;
}
@ -4076,7 +4057,7 @@ mjit::Compiler::addCallSite(const InternalCallSite &site)
}
void
mjit::Compiler::inlineStubCall(void *stub, RejoinState rejoin, Uses uses)
mjit::Compiler::inlineStubCall(void *stub, RejoinState rejoin)
{
DataLabelPtr inlinePatch;
Call cl = emitStubCall(stub, &inlinePatch);
@ -4086,7 +4067,7 @@ mjit::Compiler::inlineStubCall(void *stub, RejoinState rejoin, Uses uses)
if (loop && loop->generatingInvariants()) {
Jump j = masm.jump();
Label l = masm.label();
loop->addInvariantCall(j, l, false, false, callSites.length(), uses);
loop->addInvariantCall(j, l, false, false, callSites.length());
}
addCallSite(site);
}
@ -4219,7 +4200,6 @@ mjit::Compiler::jsop_getprop_slow(JSAtom *atom, bool usePropCache)
prepareStubCall(Uses(1));
if (usePropCache) {
INLINE_STUBCALL(stubs::GetProp, rejoin);
testPushedType(rejoin, -1, /* ool = */ false);
} else {
masm.move(ImmPtr(atom), Registers::ArgReg1);
INLINE_STUBCALL(stubs::GetPropNoCache, rejoin);
@ -4235,7 +4215,6 @@ mjit::Compiler::jsop_callprop_slow(JSAtom *atom)
prepareStubCall(Uses(1));
masm.move(ImmPtr(atom), Registers::ArgReg1);
INLINE_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -1, /* ool = */ false);
frame.pop();
pushSyncedEntry(0);
pushSyncedEntry(1);
@ -4331,8 +4310,6 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
stubcc.linkExit(notObject, Uses(1));
stubcc.leave();
OOL_STUBCALL(stubs::GetProp, rejoin);
if (rejoin == REJOIN_GETTER)
testPushedType(rejoin, -1);
}
RegisterID reg = frame.tempRegForData(top);
frame.pop();
@ -4353,8 +4330,6 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
stubcc.linkExit(notObject, Uses(1));
stubcc.leave();
OOL_STUBCALL(stubs::GetProp, rejoin);
if (rejoin == REJOIN_GETTER)
testPushedType(rejoin, -1);
}
RegisterID reg = frame.copyDataIntoReg(top);
frame.pop();
@ -4417,8 +4392,6 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
stubcc.linkExit(notObject, Uses(1));
stubcc.leave();
OOL_STUBCALL(stubs::GetProp, rejoin);
if (rejoin == REJOIN_GETTER)
testPushedType(rejoin, -1);
}
RegisterID reg = frame.tempRegForData(top);
frame.pop();
@ -4474,19 +4447,6 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
shapeReg = frame.allocReg();
}
/*
* If this access has been on a shape with a getter hook, make preparations
* so that we can generate a stub to call the hook directly (rather than be
* forced to make a stub call). Sync the stack up front and kill all
* registers so that PIC stubs can contain calls, and always generate a
* type barrier if inference is enabled (known property types do not
* reflect properties with getter hooks).
*/
pic.canCallHook = pic.forcedTypeBarrier =
usePropCache && JSOp(*PC) == JSOP_GETPROP && analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
pic.shapeReg = shapeReg;
pic.atom = atom;
@ -4507,8 +4467,6 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
passICAddress(&pic);
pic.slowPathCall = OOL_STUBCALL(usePropCache ? ic::GetProp : ic::GetPropNoCache, rejoin);
CHECK_OOL_SPACE();
if (rejoin == REJOIN_GETTER)
testPushedType(rejoin, -1);
/* Load the base slot address. */
Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
@ -4537,12 +4495,9 @@ mjit::Compiler::jsop_getprop(JSAtom *atom, JSValueType knownType,
labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
#endif
CHECK_IC_SPACE();
pic.objReg = objReg;
frame.pushRegs(shapeReg, objReg, knownType);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
stubcc.rejoin(Changes(1));
pics.append(pic);
@ -4592,10 +4547,6 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)
pic.shapeReg = shapeReg;
pic.atom = atom;
pic.canCallHook = pic.forcedTypeBarrier = analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
/*
* Store the type and object back. Don't bother keeping them in registers,
* since a sync will be needed for the upcoming call.
@ -4630,8 +4581,6 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)
pic.slowPathCall = OOL_STUBCALL(ic::CallProp, REJOIN_FALLTHROUGH);
CHECK_OOL_SPACE();
testPushedType(REJOIN_FALLTHROUGH, -1);
/* Load the base slot address. */
Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
objReg);
@ -4659,13 +4608,10 @@ mjit::Compiler::jsop_callprop_generic(JSAtom *atom)
labels.setInlineShapeJump(masm, pic.shapeGuard, inlineShapeJump);
#endif
CHECK_IC_SPACE();
/* Adjust the frame. */
frame.pop();
frame.pushRegs(shapeReg, objReg, knownPushedType(0));
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg, false, false,
/* force = */ pic.canCallHook);
BarrierState barrier = testBarrier(pic.shapeReg, pic.objReg);
pushSyncedEntry(1);
@ -4764,10 +4710,6 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
objReg = frame.copyDataIntoReg(top);
}
pic.canCallHook = pic.forcedTypeBarrier = analysis->getCode(PC).accessGetter;
if (pic.canCallHook)
frame.syncAndKillEverything();
/* Guard on shape. */
masm.loadShape(objReg, shapeReg);
pic.shapeGuard = masm.label();
@ -4786,8 +4728,6 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
pic.slowPathCall = OOL_STUBCALL(ic::CallProp, REJOIN_FALLTHROUGH);
CHECK_OOL_SPACE();
testPushedType(REJOIN_FALLTHROUGH, -1);
/* Load the base slot address. */
Label dslotsLoadLabel = masm.loadPtrWithPatchToLEA(Address(objReg, offsetof(JSObject, slots)),
objReg);
@ -4800,8 +4740,6 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
pic.fastPathRejoin = masm.label();
pic.objReg = objReg;
CHECK_IC_SPACE();
/*
* 1) Dup the |this| object.
* 2) Store the property value below the |this| value.
@ -4811,8 +4749,7 @@ mjit::Compiler::jsop_callprop_obj(JSAtom *atom)
*/
frame.dup();
frame.storeRegs(-2, shapeReg, objReg, knownPushedType(0));
BarrierState barrier = testBarrier(shapeReg, objReg, false, false,
/* force = */ pic.canCallHook);
BarrierState barrier = testBarrier(shapeReg, objReg);
/*
* Assert correctness of hardcoded offsets.
@ -5079,7 +5016,6 @@ mjit::Compiler::jsop_callprop_dispatch(JSAtom *atom)
stubcc.leave();
stubcc.masm.move(ImmPtr(atom), Registers::ArgReg1);
OOL_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -1);
frame.dup();
// THIS THIS
@ -5110,7 +5046,6 @@ mjit::Compiler::jsop_callprop(JSAtom *atom)
stubcc.leave();
stubcc.masm.move(ImmPtr(atom), Registers::ArgReg1);
OOL_STUBCALL(stubs::CallProp, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -1);
}
// THIS
@ -5381,7 +5316,6 @@ mjit::Compiler::jsop_name(JSAtom *atom, JSValueType type, bool isCall)
passICAddress(&pic);
pic.slowPathCall = OOL_STUBCALL(isCall ? ic::CallName : ic::Name, rejoin);
CHECK_OOL_SPACE();
testPushedType(rejoin, 0);
}
pic.fastPathRejoin = masm.label();
@ -5389,8 +5323,6 @@ mjit::Compiler::jsop_name(JSAtom *atom, JSValueType type, bool isCall)
ScopeNameLabels &labels = pic.scopeNameLabels();
labels.setInlineJump(masm, pic.fastPathStart, inlineJump);
CHECK_IC_SPACE();
/*
* We can't optimize away the PIC for the NAME access itself, but if we've
* only seen a single value pushed by this access, mark it as such and
@ -5469,7 +5401,6 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
passICAddress(&pic);
pic.slowPathCall = OOL_STUBCALL(ic::XName, REJOIN_GETTER);
CHECK_OOL_SPACE();
testPushedType(REJOIN_GETTER, -1);
}
pic.fastPathRejoin = masm.label();
@ -5480,8 +5411,6 @@ mjit::Compiler::jsop_xname(JSAtom *atom)
ScopeNameLabels &labels = pic.scopeNameLabels();
labels.setInlineJumpOffset(masm.differenceBetween(pic.fastPathStart, inlineJump));
CHECK_IC_SPACE();
frame.pop();
frame.pushRegs(pic.shapeReg, pic.objReg, knownPushedType(0));
@ -5566,8 +5495,7 @@ void
mjit::Compiler::jsop_name(JSAtom *atom, JSValueType type, bool isCall)
{
prepareStubCall(Uses(0));
INLINE_STUBCALL(isCall ? stubs::CallName : stubs::Name, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, 0, /* ool = */ false);
INLINE_STUBCALL(isCall ? stubs::CallName : stubs::Name, REJOIN_FALLTHROUGH);
frame.pushSynced(type);
if (isCall)
frame.pushSynced(JSVAL_TYPE_UNKNOWN);
@ -5943,7 +5871,6 @@ mjit::Compiler::jsop_getgname_slow(uint32 index)
{
prepareStubCall(Uses(0));
INLINE_STUBCALL(stubs::GetGlobalName, REJOIN_GETTER);
testPushedType(REJOIN_GETTER, 0, /* ool = */ false);
frame.pushSynced(JSVAL_TYPE_UNKNOWN);
}
@ -6057,10 +5984,6 @@ mjit::Compiler::jsop_getgname(uint32 index)
passMICAddress(ic);
ic.slowPathCall = OOL_STUBCALL(ic::GetGlobalName, REJOIN_GETTER);
CHECK_IC_SPACE();
testPushedType(REJOIN_GETTER, 0);
/* Garbage value. */
uint32 slot = 1 << 24;
@ -6332,7 +6255,6 @@ mjit::Compiler::jsop_getelem_slow()
{
prepareStubCall(Uses(2));
INLINE_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -2, /* ool = */ false);
frame.popn(2);
pushSyncedEntry(0);
}
@ -6917,54 +6839,39 @@ mjit::Compiler::constructThis()
JSFunction *fun = script->function();
do {
if (!cx->typeInferenceEnabled() || fun->getType(cx)->unknownProperties())
break;
if (cx->typeInferenceEnabled() && !fun->getType(cx)->unknownProperties()) {
jsid id = ATOM_TO_JSID(cx->runtime->atomState.classPrototypeAtom);
types::TypeSet *protoTypes = fun->getType(cx)->getProperty(cx, id, false);
JSObject *proto = protoTypes->getSingleton(cx, true);
if (!proto)
break;
if (proto) {
JSObject *templateObject = js_CreateThisForFunctionWithProto(cx, fun, proto);
if (!templateObject)
return false;
/*
* Generate an inline path to create a 'this' object with the given
* prototype. Only do this if the type is actually known as a possible
* 'this' type of the script.
*/
types::TypeObject *type = proto->getNewType(cx, fun);
if (!type)
return false;
if (!types::TypeScript::ThisTypes(script)->hasType(types::Type::ObjectType(type)))
break;
/*
* The template incorporates a shape and/or fixed slots from any
* newScript on its type, so make sure recompilation is triggered
* should this information change later.
*/
if (templateObject->type()->newScript)
types::TypeSet::WatchObjectStateChange(cx, templateObject->type());
JSObject *templateObject = js_CreateThisForFunctionWithProto(cx, fun, proto);
if (!templateObject)
return false;
RegisterID result = frame.allocReg();
Jump emptyFreeList = masm.getNewObject(cx, result, templateObject);
/*
* The template incorporates a shape and/or fixed slots from any
* newScript on its type, so make sure recompilation is triggered
* should this information change later.
*/
if (templateObject->type()->newScript)
types::TypeSet::WatchObjectStateChange(cx, templateObject->type());
stubcc.linkExit(emptyFreeList, Uses(0));
stubcc.leave();
RegisterID result = frame.allocReg();
Jump emptyFreeList = masm.getNewObject(cx, result, templateObject);
stubcc.masm.move(ImmPtr(proto), Registers::ArgReg1);
OOL_STUBCALL(stubs::CreateThis, REJOIN_RESUME);
stubcc.linkExit(emptyFreeList, Uses(0));
stubcc.leave();
frame.setThis(result);
stubcc.masm.move(ImmPtr(proto), Registers::ArgReg1);
OOL_STUBCALL(stubs::CreateThis, REJOIN_RESUME);
frame.setThis(result);
stubcc.rejoin(Changes(1));
return true;
} while (false);
stubcc.rejoin(Changes(1));
return true;
}
}
// Load the callee.
frame.pushCallee();
@ -7092,7 +6999,6 @@ mjit::Compiler::jsop_callelem_slow()
{
prepareStubCall(Uses(2));
INLINE_STUBCALL(stubs::CallElem, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -2, /* ool = */ false);
frame.popn(2);
pushSyncedEntry(0);
pushSyncedEntry(1);
@ -7251,12 +7157,7 @@ mjit::Compiler::updateJoinVarTypes()
if (newv->slot < TotalSlots(script)) {
VarType &vt = a->varTypes[newv->slot];
vt.types = analysis->getValueTypes(newv->value);
JSValueType newType = vt.types->getKnownTypeTag(cx);
if (newType != vt.type) {
FrameEntry *fe = frame.getSlotEntry(newv->slot);
frame.forgetLoopReg(fe);
}
vt.type = newType;
vt.type = vt.types->getKnownTypeTag(cx);
}
newv++;
}
@ -7335,6 +7236,11 @@ mjit::Compiler::hasTypeBarriers(jsbytecode *pc)
if (!cx->typeInferenceEnabled())
return false;
#if 0
/* Stress test. */
return js_CodeSpec[*pc].format & JOF_TYPESET;
#endif
return analysis->typeBarriers(cx, pc) != NULL;
}
@ -7534,7 +7440,7 @@ mjit::Compiler::addTypeTest(types::TypeSet *types, RegisterID typeReg, RegisterI
mjit::Compiler::BarrierState
mjit::Compiler::testBarrier(RegisterID typeReg, RegisterID dataReg,
bool testUndefined, bool testReturn, bool force)
bool testUndefined, bool testReturn)
{
BarrierState state;
state.typeReg = typeReg;
@ -7556,12 +7462,18 @@ mjit::Compiler::testBarrier(RegisterID typeReg, RegisterID dataReg,
JS_ASSERT(!testUndefined);
if (!analysis->getCode(PC).monitoredTypesReturn)
return state;
} else if (!hasTypeBarriers(PC) && !force) {
} else if (!hasTypeBarriers(PC)) {
if (testUndefined && !types->hasType(types::Type::UndefinedType()))
state.jump.setJump(masm.testUndefined(Assembler::Equal, typeReg));
return state;
}
#if 0
/* Stress test. */
state.jump.setJump(masm.testInt32(Assembler::NotEqual, typeReg));
return state;
#endif
types->addFreeze(cx);
/* Cannot have type barriers when the result of the operation is already unknown. */
@ -7600,59 +7512,3 @@ mjit::Compiler::finishBarrier(const BarrierState &barrier, RejoinState rejoin, u
OOL_STUBCALL(stubs::TypeBarrierHelper, rejoin);
stubcc.rejoin(Changes(0));
}
void
mjit::Compiler::testPushedType(RejoinState rejoin, int which, bool ool)
{
if (!cx->typeInferenceEnabled() || !(js_CodeSpec[*PC].format & JOF_TYPESET))
return;
types::TypeSet *types = analysis->bytecodeTypes(PC);
if (types->unknown())
return;
Assembler &masm = ool ? stubcc.masm : this->masm;
JS_ASSERT(which <= 0);
Address address = (which == 0) ? frame.addressOfTop() : frame.addressOf(frame.peek(which));
Registers tempRegs(Registers::AvailRegs);
RegisterID scratch = tempRegs.takeAnyReg().reg();
Vector<Jump> mismatches(cx);
if (!masm.generateTypeCheck(cx, address, scratch, types, &mismatches)) {
oomInVector = true;
return;
}
Jump j = masm.jump();
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.move(Imm32(which), Registers::ArgReg1);
if (ool)
OOL_STUBCALL(stubs::StubTypeHelper, rejoin);
else
INLINE_STUBCALL(stubs::StubTypeHelper, rejoin);
j.linkTo(masm.label(), &masm);
}
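/*
 * Editor's illustrative sketch, not part of this changeset: the control flow
 * that the testPushedType() helper above emits through the macro assembler,
 * written as self-contained C++. Every name below (Tag, TypeSet,
 * stubTypeHelper, checkPushedType) is invented for the illustration. A value
 * pushed by a JOF_TYPESET op is compared against the type set observed for
 * that bytecode; on a mismatch a slow helper records the new type, and
 * execution then rejoins the fast path.
 */
#include <cstdio>
#include <set>
enum class Tag { Int32, Double, String, Object, Undefined };
struct TypeSet {
    std::set<Tag> observed;                    // types seen at this bytecode
    bool has(Tag t) const { return observed.count(t) != 0; }
    void add(Tag t) { observed.insert(t); }
};
/* Stand-in for the StubTypeHelper call on the mismatch path. */
static void stubTypeHelper(TypeSet &types, Tag seen, int which)
{
    std::printf("type mismatch at slot %d, widening observed set\n", which);
    types.add(seen);
}
/* Fast path: all mismatch jumps fall through. Slow path: call the stub. */
static void checkPushedType(TypeSet &types, Tag pushed, int which)
{
    if (types.has(pushed))
        return;
    stubTypeHelper(types, pushed, which);
}
int main()
{
    TypeSet types;
    types.add(Tag::Int32);
    checkPushedType(types, Tag::Int32, 0);     // matches: no stub call
    checkPushedType(types, Tag::String, 0);    // mismatch: stub records it
    return 0;
}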
#ifdef DEBUG
void
mjit::Compiler::typeCheckPopped(int which)
{
if (!cx->typeInferenceEnabled())
return;
FrameEntry *fe = frame.peek(-1 - which);
Jump j = frame.typeCheckEntry(fe, analysis->poppedTypes(PC, which));
stubcc.linkExit(j, Uses(0));
stubcc.leave();
stubcc.masm.move(Imm32(which), Registers::ArgReg1);
OOL_STUBCALL(stubs::TypeCheckPopped, REJOIN_RESUME);
stubcc.rejoin(Changes(0));
}
#endif /* DEBUG */


@ -184,7 +184,7 @@ class Compiler : public BaseCompiler
};
struct BaseICInfo {
BaseICInfo(JSOp op) : op(op), canCallHook(false), forcedTypeBarrier(false)
BaseICInfo(JSOp op) : op(op)
{ }
Label fastPathStart;
Label fastPathRejoin;
@ -192,16 +192,12 @@ class Compiler : public BaseCompiler
Call slowPathCall;
DataLabelPtr paramAddr;
JSOp op;
bool canCallHook;
bool forcedTypeBarrier;
void copyTo(ic::BaseIC &to, JSC::LinkBuffer &full, JSC::LinkBuffer &stub) {
to.fastPathStart = full.locationOf(fastPathStart);
to.fastPathRejoin = full.locationOf(fastPathRejoin);
to.slowPathStart = stub.locationOf(slowPathStart);
to.slowPathCall = stub.locationOf(slowPathCall);
to.canCallHook = canCallHook;
to.forcedTypeBarrier = forcedTypeBarrier;
to.op = op;
JS_ASSERT(to.op == op);
}
@ -472,7 +468,6 @@ class Compiler : public BaseCompiler
bool inlining_;
bool hasGlobalReallocation;
bool oomInVector; // True if we have OOM'd appending to a vector.
bool overflowICSpace; // True if we added a constant pool in a reserved space.
uint32 gcNumber;
enum { NoApplyTricks, LazyArgsObj } applyTricks;
PCLengthEntry *pcLengths;
@ -491,7 +486,7 @@ class Compiler : public BaseCompiler
Label labelOf(jsbytecode *target, uint32 inlineIndex);
void addCallSite(const InternalCallSite &callSite);
void addReturnSite();
void inlineStubCall(void *stub, RejoinState rejoin, Uses uses);
void inlineStubCall(void *stub, RejoinState rejoin);
bool debugMode() { return debugMode_; }
bool inlining() { return inlining_; }
@ -558,11 +553,6 @@ class Compiler : public BaseCompiler
CompileStatus addInlineFrame(JSScript *script, uint32 depth, uint32 parent, jsbytecode *parentpc);
CompileStatus scanInlineCalls(uint32 index, uint32 depth);
CompileStatus checkAnalysis(JSScript *script);
#ifdef DEBUG
void typeCheckPopped(int which);
#else
void typeCheckPopped(int which) {}
#endif
struct BarrierState {
MaybeJump jump;
@ -575,12 +565,9 @@ class Compiler : public BaseCompiler
BarrierState pushAddressMaybeBarrier(Address address, JSValueType type, bool reuseBase,
bool testUndefined = false);
BarrierState testBarrier(RegisterID typeReg, RegisterID dataReg,
bool testUndefined = false, bool testReturn = false,
bool force = false);
bool testUndefined = false, bool testReturn = false);
void finishBarrier(const BarrierState &barrier, RejoinState rejoin, uint32 which);
void testPushedType(RejoinState rejoin, int which, bool ool = true);
/* Non-emitting helpers. */
void pushSyncedEntry(uint32 pushed);
uint32 fullAtomIndex(jsbytecode *pc);
@ -788,20 +775,16 @@ class Compiler : public BaseCompiler
// Given a stub call, emits the call into the inline assembly path. rejoin
// indicates how to rejoin should this call trigger expansion/discarding.
#define INLINE_STUBCALL(stub, rejoin) \
inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
#define INLINE_STUBCALL_USES(stub, rejoin, uses) \
inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)
inlineStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin)
// Given a stub call, emits the call into the out-of-line assembly path.
// Unlike the INLINE_STUBCALL variant, this returns the Call offset.
#define OOL_STUBCALL(stub, rejoin) \
stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0))
#define OOL_STUBCALL_USES(stub, rejoin, uses) \
stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, uses)
stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin)
// Same as OOL_STUBCALL, but specifies a slot depth.
#define OOL_STUBCALL_LOCAL_SLOTS(stub, rejoin, slots) \
stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, Uses(0), (slots))
stubcc.emitStubCall(JS_FUNC_TO_DATA_PTR(void *, (stub)), rejoin, (slots))
} /* namespace js */
} /* namespace mjit */


@ -634,7 +634,7 @@ mjit::Compiler::jsop_not()
default:
{
prepareStubCall(Uses(1));
INLINE_STUBCALL_USES(stubs::ValueToBoolean, REJOIN_NONE, Uses(1));
INLINE_STUBCALL(stubs::ValueToBoolean, REJOIN_NONE);
RegisterID reg = Registers::ReturnReg;
frame.takeReg(reg);
@ -1746,7 +1746,6 @@ mjit::Compiler::jsop_getelem_dense(bool isPacked)
stubcc.leave();
OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -2);
frame.popn(2);
@ -1838,7 +1837,6 @@ mjit::Compiler::jsop_getelem_args()
stubcc.leave();
OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -2);
frame.popn(2);
frame.pushRegs(typeReg, dataReg, knownPushedType(0));
@ -1970,7 +1968,6 @@ mjit::Compiler::jsop_getelem_typed(int atype)
stubcc.leave();
OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
testPushedType(REJOIN_FALLTHROUGH, -2);
frame.popn(2);
@ -2152,17 +2149,11 @@ mjit::Compiler::jsop_getelem(bool isCall)
ic.slowPathCall = OOL_STUBCALL(stubs::GetElem, REJOIN_FALLTHROUGH);
#endif
testPushedType(REJOIN_FALLTHROUGH, -2);
ic.fastPathRejoin = masm.label();
ic.forcedTypeBarrier = analysis->getCode(PC).getStringElement;
CHECK_IC_SPACE();
frame.popn(2);
frame.pushRegs(ic.typeReg, ic.objReg, knownPushedType(0));
BarrierState barrier = testBarrier(ic.typeReg, ic.objReg, false, false,
/* force = */ ic.forcedTypeBarrier);
BarrierState barrier = testBarrier(ic.typeReg, ic.objReg, false);
if (isCall)
frame.pushSynced(knownPushedType(1));
@ -2353,9 +2344,9 @@ mjit::Compiler::jsop_stricteq(JSOp op)
prepareStubCall(Uses(2));
if (op == JSOP_STRICTEQ)
INLINE_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
INLINE_STUBCALL(stubs::StrictEq, REJOIN_NONE);
else
INLINE_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
INLINE_STUBCALL(stubs::StrictNe, REJOIN_NONE);
frame.popn(2);
frame.pushSynced(JSVAL_TYPE_BOOLEAN);
@ -2407,9 +2398,9 @@ mjit::Compiler::jsop_stricteq(JSOp op)
if (needStub) {
stubcc.leave();
if (op == JSOP_STRICTEQ)
OOL_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
OOL_STUBCALL(stubs::StrictEq, REJOIN_NONE);
else
OOL_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
OOL_STUBCALL(stubs::StrictNe, REJOIN_NONE);
}
frame.popn(2);
@ -2422,9 +2413,9 @@ mjit::Compiler::jsop_stricteq(JSOp op)
prepareStubCall(Uses(2));
if (op == JSOP_STRICTEQ)
INLINE_STUBCALL_USES(stubs::StrictEq, REJOIN_NONE, Uses(2));
INLINE_STUBCALL(stubs::StrictEq, REJOIN_NONE);
else
INLINE_STUBCALL_USES(stubs::StrictNe, REJOIN_NONE, Uses(2));
INLINE_STUBCALL(stubs::StrictNe, REJOIN_NONE);
frame.popn(2);
frame.pushSynced(JSVAL_TYPE_BOOLEAN);


@ -1107,77 +1107,6 @@ FrameState::storeTo(FrameEntry *fe, Address address, bool popped)
unpinReg(address.base);
}
#ifdef DEBUG
JSC::MacroAssembler::Jump
FrameState::typeCheckEntry(const FrameEntry *fe, types::TypeSet *types) const
{
if (fe->isCopy())
fe = fe->copyOf();
Address addr1 = addressOfTop();
Address addr2 = Address(JSFrameReg, addr1.offset + sizeof(Value));
Registers tempRegs(Registers::AvailRegs);
RegisterID scratch = tempRegs.takeAnyReg().reg();
masm.storePtr(scratch, addr1);
do {
if (fe->isConstant()) {
masm.storeValue(fe->getValue(), addr2);
break;
}
if (fe->data.inFPRegister()) {
masm.storeDouble(fe->data.fpreg(), addr2);
break;
}
if (fe->isType(JSVAL_TYPE_DOUBLE)) {
JS_ASSERT(fe->data.inMemory());
masm.loadDouble(addressOf(fe), Registers::FPConversionTemp);
masm.storeDouble(Registers::FPConversionTemp, addr2);
break;
}
if (fe->data.inRegister())
masm.storePayload(fe->data.reg(), addr2);
else
JS_ASSERT(fe->data.inMemory());
if (fe->isTypeKnown())
masm.storeTypeTag(ImmType(fe->getKnownType()), addr2);
else if (fe->type.inRegister())
masm.storeTypeTag(fe->type.reg(), addr2);
else
JS_ASSERT(fe->type.inMemory());
if (fe->data.inMemory()) {
masm.loadPayload(addressOf(fe), scratch);
masm.storePayload(scratch, addr2);
}
if (fe->type.inMemory()) {
masm.loadTypeTag(addressOf(fe), scratch);
masm.storeTypeTag(scratch, addr2);
}
} while (false);
Vector<Jump> mismatches(cx);
masm.generateTypeCheck(cx, addr2, scratch, types, &mismatches);
masm.loadPtr(addr1, scratch);
Jump j = masm.jump();
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.loadPtr(addr1, scratch);
Jump mismatch = masm.jump();
j.linkTo(masm.label(), &masm);
return mismatch;
}
#endif /* DEBUG */
void
FrameState::loadThisForReturn(RegisterID typeReg, RegisterID dataReg, RegisterID tempReg)
{
@ -1395,7 +1324,11 @@ FrameState::sync(Assembler &masm, Uses uses) const
Registers avail(freeRegs.freeMask & Registers::AvailRegs);
Registers temp(Registers::TempAnyRegs);
for (FrameEntry *fe = a->sp - 1; fe >= entries; fe--) {
FrameEntry *bottom = (cx->typeInferenceEnabled() || cx->compartment->debugMode())
? entries
: a->sp - uses.nuses;
for (FrameEntry *fe = a->sp - 1; fe >= bottom; fe--) {
if (!fe->isTracked())
continue;
@ -1445,7 +1378,7 @@ FrameState::sync(Assembler &masm, Uses uses) const
/* Fall back to a slower sync algorithm if load required. */
if ((!fe->type.synced() && backing->type.inMemory()) ||
(!fe->data.synced() && backing->data.inMemory())) {
syncFancy(masm, avail, fe, entries);
syncFancy(masm, avail, fe, bottom);
return;
}
#endif
@ -1526,7 +1459,11 @@ FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
uint32 maxvisits = tracker.nentries;
for (FrameEntry *fe = a->sp - 1; fe >= entries && maxvisits; fe--) {
FrameEntry *bottom = (cx->typeInferenceEnabled() || cx->compartment->debugMode())
? entries
: a->sp - uses.nuses;
for (FrameEntry *fe = a->sp - 1; fe >= bottom && maxvisits; fe--) {
if (!fe->isTracked())
continue;
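/*
 * Editor's sketch, not part of this changeset: the "bottom" cut-off computed
 * in the sync()/syncAndKill() variants above, as a stand-alone helper with
 * invented names. When neither type inference nor debug mode can observe the
 * values being popped, the slots consumed by 'uses' never need to be written
 * back to memory, so the downward walk from sp-1 can stop early.
 */
#include <cassert>
#include <cstddef>
struct FrameEntry { bool tracked; };
static const FrameEntry *
syncBottom(const FrameEntry *entries, const FrameEntry *sp, unsigned nuses,
           bool typeInference, bool debugMode)
{
    if (typeInference || debugMode)
        return entries;                  // conservative: sync the whole stack
    assert(sp - entries >= static_cast<std::ptrdiff_t>(nuses));
    return sp - nuses;                   // skip the slots about to be popped
}
int main()
{
    FrameEntry stack[8] = {};
    const FrameEntry *entries = stack, *sp = stack + 8;
    assert(syncBottom(entries, sp, 2, false, false) == sp - 2);
    assert(syncBottom(entries, sp, 2, true, false) == entries);
    return 0;
}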
@ -2935,7 +2872,7 @@ FrameState::clearTemporaries()
}
Vector<TemporaryCopy> *
FrameState::getTemporaryCopies(Uses uses)
FrameState::getTemporaryCopies()
{
/* :XXX: handle OOM */
Vector<TemporaryCopy> *res = NULL;
@ -2946,7 +2883,7 @@ FrameState::getTemporaryCopies(Uses uses)
if (fe->isCopied()) {
for (uint32 i = fe->trackerIndex() + 1; i < tracker.nentries; i++) {
FrameEntry *nfe = tracker[i];
if (!deadEntry(nfe, uses.nuses) && nfe->isCopy() && nfe->copyOf() == fe) {
if (!deadEntry(nfe) && nfe->isCopy() && nfe->copyOf() == fe) {
if (!res)
res = cx->new_< Vector<TemporaryCopy> >(cx);
res->append(TemporaryCopy(addressOf(nfe), addressOf(fe)));


@ -618,19 +618,9 @@ class FrameState
*/
inline FrameEntry *peek(int32 depth);
#ifdef DEBUG
/*
* Check that a frame entry matches a type, returning a jump taken on
* mismatch. Does not affect register state or sync state of any entries.
*/
Jump typeCheckEntry(const FrameEntry *fe, types::TypeSet *types) const;
#endif
/*
* Fully stores a FrameEntry at an arbitrary address. popHint specifies
* how hard the register allocator should try to keep the FE in registers.
* If scratchData and scratchType are specified, the frame entry and
* register state will not be modified.
*/
void storeTo(FrameEntry *fe, Address address, bool popHint = false);
@ -957,11 +947,8 @@ class FrameState
void clearTemporaries();
inline FrameEntry *getTemporary(uint32 which);
/*
* Return NULL or a new vector with all current copies of temporaries,
* excluding those about to be popped per 'uses'.
*/
Vector<TemporaryCopy> *getTemporaryCopies(Uses uses);
/* Return NULL or a new vector with all current copies of temporaries. */
Vector<TemporaryCopy> *getTemporaryCopies();
inline void syncAndForgetFe(FrameEntry *fe, bool markSynced = false);
inline void forgetLoopReg(FrameEntry *fe);


@ -617,46 +617,50 @@ js_InternalThrow(VMFrame &f)
StackFrame *fp = cx->fp();
JSScript *script = fp->script();
/*
* Fall back to EnterMethodJIT and finish the frame in the interpreter.
* We may wipe out all JIT code on the stack without patching ncode values
* to jump to the interpreter, and thus can only enter JIT code via
* EnterMethodJIT (which overwrites its entry frame's ncode).
* See ClearAllFrames.
*/
cx->compartment->jaegerCompartment()->setLastUnfinished(Jaeger_Unfinished);
if (cx->typeInferenceEnabled() || !fp->jit()) {
/*
* Fall back to EnterMethodJIT and finish the frame in the interpreter.
* With type inference enabled, we may wipe out all JIT code on the
* stack without patching ncode values to jump to the interpreter, and
* thus can only enter JIT code via EnterMethodJIT (which overwrites
* its entry frame's ncode). See ClearAllFrames.
*/
cx->compartment->jaegerCompartment()->setLastUnfinished(Jaeger_Unfinished);
if (!script->ensureRanAnalysis(cx)) {
js_ReportOutOfMemory(cx);
return NULL;
}
analyze::AutoEnterAnalysis enter(cx);
cx->regs().pc = pc;
cx->regs().sp = fp->base() + script->analysis()->getCode(pc).stackDepth;
/*
* Interpret the ENTERBLOCK and EXCEPTION opcodes, so that we don't go
* back into the interpreter with a pending exception. This will cause
* it to immediately rethrow.
*/
if (cx->isExceptionPending()) {
JS_ASSERT(js_GetOpcode(cx, script, pc) == JSOP_ENTERBLOCK);
JSObject *obj = script->getObject(GET_SLOTNO(pc));
Value *vp = cx->regs().sp + OBJ_BLOCK_COUNT(cx, obj);
SetValueRangeToUndefined(cx->regs().sp, vp);
cx->regs().sp = vp;
JS_ASSERT(js_GetOpcode(cx, script, pc + JSOP_ENTERBLOCK_LENGTH) == JSOP_EXCEPTION);
cx->regs().sp[0] = cx->getPendingException();
cx->clearPendingException();
cx->regs().sp++;
cx->regs().pc = pc + JSOP_ENTERBLOCK_LENGTH + JSOP_EXCEPTION_LENGTH;
}
*f.oldregs = f.regs;
if (!script->ensureRanAnalysis(cx)) {
js_ReportOutOfMemory(cx);
return NULL;
}
analyze::AutoEnterAnalysis enter(cx);
cx->regs().pc = pc;
cx->regs().sp = fp->base() + script->analysis()->getCode(pc).stackDepth;
/*
* Interpret the ENTERBLOCK and EXCEPTION opcodes, so that we don't go
* back into the interpreter with a pending exception. This will cause
* it to immediately rethrow.
*/
if (cx->isExceptionPending()) {
JS_ASSERT(js_GetOpcode(cx, script, pc) == JSOP_ENTERBLOCK);
JSObject *obj = script->getObject(GET_SLOTNO(pc));
Value *vp = cx->regs().sp + OBJ_BLOCK_COUNT(cx, obj);
SetValueRangeToUndefined(cx->regs().sp, vp);
cx->regs().sp = vp;
JS_ASSERT(js_GetOpcode(cx, script, pc + JSOP_ENTERBLOCK_LENGTH) == JSOP_EXCEPTION);
cx->regs().sp[0] = cx->getPendingException();
cx->clearPendingException();
cx->regs().sp++;
cx->regs().pc = pc + JSOP_ENTERBLOCK_LENGTH + JSOP_EXCEPTION_LENGTH;
}
*f.oldregs = f.regs;
return NULL;
return script->nativeCodeForPC(fp->isConstructing(), pc);
}
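/*
 * Editor's sketch, not part of this changeset: the stack fixup performed in
 * js_InternalThrow above before handing a pending exception back to the
 * interpreter, with invented stand-in types and opcode lengths. ENTERBLOCK
 * reserves the block's locals as undefined, EXCEPTION pushes the pending
 * exception, and the pc is advanced past both ops so the interpreter resumes
 * in the handler instead of immediately rethrowing.
 */
#include <cassert>
#include <cstddef>
#include <vector>
struct Value { int payload; bool undefined; };
struct Regs {
    std::vector<Value> stack;            // operand stack; sp == stack.end()
    std::size_t pc;                      // bytecode offset
};
static void
enterBlockThenException(Regs &regs, std::size_t blockLocals, Value pending,
                        std::size_t enterBlockLength, std::size_t exceptionLength)
{
    regs.stack.insert(regs.stack.end(), blockLocals, Value{0, true}); // ENTERBLOCK
    regs.stack.push_back(pending);                                    // EXCEPTION
    regs.pc += enterBlockLength + exceptionLength;                    // resume after
}
int main()
{
    Regs regs{ {}, 0 };
    enterBlockThenException(regs, 3, Value{42, false}, 5, 1);   // lengths invented
    assert(regs.stack.size() == 4 && !regs.stack.back().undefined);
    assert(regs.pc == 6);
    return 0;
}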
void JS_FASTCALL
@ -1157,7 +1161,7 @@ RunTracer(VMFrame &f)
#if defined JS_TRACER
# if defined JS_MONOIC
void * JS_FASTCALL
void *JS_FASTCALL
stubs::InvokeTracer(VMFrame &f, ic::TraceICInfo *ic)
{
return RunTracer(f, *ic);
@ -1165,7 +1169,7 @@ stubs::InvokeTracer(VMFrame &f, ic::TraceICInfo *ic)
# else
void * JS_FASTCALL
void *JS_FASTCALL
stubs::InvokeTracer(VMFrame &f)
{
return RunTracer(f);
@ -1328,34 +1332,22 @@ js_InternalInterpret(void *returnData, void *returnType, void *returnReg, js::VM
break;
case REJOIN_NATIVE:
case REJOIN_NATIVE_LOWERED:
case REJOIN_NATIVE_GETTER: {
case REJOIN_NATIVE_LOWERED: {
/*
* We don't rejoin until after the native stub finishes execution, in
* which case the return value will be in memory. For lowered natives,
* the return value will be in the 'this' value's slot. For getters,
* the result is at nextsp[0] (see ic::CallProp).
* the return value will be in the 'this' value's slot.
*/
if (rejoin == REJOIN_NATIVE_LOWERED) {
if (rejoin == REJOIN_NATIVE_LOWERED)
nextsp[-1] = nextsp[0];
} else if (rejoin == REJOIN_NATIVE_GETTER) {
if (js_CodeSpec[op].format & JOF_CALLOP) {
/*
* If we went through jsop_callprop_obj then the 'this' value
* is still in its original slot and hasn't been shifted yet,
* so fix that now. Yuck.
*/
if (nextsp[-2].isObject())
nextsp[-1] = nextsp[-2];
nextsp[-2] = nextsp[0];
} else {
nextsp[-1] = nextsp[0];
}
}
/* Release this reference on the orphaned native stub. */
RemoveOrphanedNative(cx, fp);
/*
* Note: there is no need to monitor the result of the native, the
* native stub will always do a type check before finishing.
*/
f.regs.pc = nextpc;
break;
}
@ -1574,16 +1566,6 @@ js_InternalInterpret(void *returnData, void *returnType, void *returnReg, js::VM
nextDepth = analysis->getCode(f.regs.pc).stackDepth;
f.regs.sp = fp->base() + nextDepth;
/*
* Monitor the result of the previous op when finishing a JOF_TYPESET op.
* The result may not have been marked if we bailed out while inside a stub
* for the op.
*/
if (f.regs.pc == nextpc && (js_CodeSpec[op].format & JOF_TYPESET)) {
int which = (js_CodeSpec[op].format & JOF_CALLOP) ? -2 : -1; /* Yuck. */
types::TypeScript::Monitor(cx, script, pc, f.regs.sp[which]);
}
/* Mark the entry frame as unfinished, and update the regs to resume at. */
JaegerStatus status = skipTrap ? Jaeger_UnfinishedAtTrap : Jaeger_Unfinished;
cx->compartment->jaegerCompartment()->setLastUnfinished(status);


@ -195,7 +195,7 @@ LoopState::addJoin(unsigned index, bool script)
}
void
LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, Uses uses)
LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex)
{
RestoreInvariantCall call;
call.jump = jump;
@ -203,7 +203,7 @@ LoopState::addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsign
call.ool = ool;
call.entry = entry;
call.patchIndex = patchIndex;
call.temporaryCopies = frame.getTemporaryCopies(uses);
call.temporaryCopies = frame.getTemporaryCopies();
restoreInvariantCalls.append(call);
}
@ -259,7 +259,7 @@ LoopState::flushLoop(StubCompiler &stubcc)
if (call.entry) {
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure),
pc, NULL, NULL, 0);
pc, NULL, 0);
} else {
/* f.regs are already coherent, don't write new values to them. */
masm.infallibleVMCall(JS_FUNC_TO_DATA_PTR(void *, stubs::InvariantFailure), -1);
@ -1820,6 +1820,7 @@ LoopState::analyzeLoopBody(unsigned frame)
skipAnalysis = true;
break;
case JSOP_SETHOLE:
case JSOP_SETELEM: {
SSAValue objValue = analysis->poppedValue(pc, 2);
SSAValue elemValue = analysis->poppedValue(pc, 1);
@ -1843,7 +1844,7 @@ LoopState::analyzeLoopBody(unsigned frame)
continue;
if (!addModifiedProperty(object, JSID_VOID))
return;
if (analysis->getCode(pc).arrayWriteHole && !addGrowArray(object))
if (op == JSOP_SETHOLE && !addGrowArray(object))
return;
}


@ -253,7 +253,7 @@ class LoopState : public MacroAssemblerTypedefs
bool generatingInvariants() { return !skipAnalysis; }
/* Add a call with trailing jump/label, after which invariants need to be restored. */
void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex, Uses uses);
void addInvariantCall(Jump jump, Label label, bool ool, bool entry, unsigned patchIndex);
uint32 headOffset() { return lifetime->head; }
uint32 getLoopRegs() { return loopRegs.freeMask; }


@ -118,7 +118,7 @@ struct Registers {
#elif defined(JS_CPU_X64)
static const RegisterID JSFrameReg = JSC::X86Registers::ebx;
#elif defined(JS_CPU_ARM)
static const RegisterID JSFrameReg = JSC::ARMRegisters::r10;
static const RegisterID JSFrameReg = JSC::ARMRegisters::r11;
#elif defined(JS_CPU_SPARC)
static const RegisterID JSFrameReg = JSC::SparcRegisters::l0;
#endif
@ -130,13 +130,11 @@ struct Registers {
static const RegisterID ArgReg1 = JSC::X86Registers::edx;
# if defined(JS_CPU_X64)
static const RegisterID ArgReg2 = JSC::X86Registers::r8;
static const RegisterID ArgReg3 = JSC::X86Registers::r9;
# endif
# else
static const RegisterID ArgReg0 = JSC::X86Registers::edi;
static const RegisterID ArgReg1 = JSC::X86Registers::esi;
static const RegisterID ArgReg2 = JSC::X86Registers::edx;
static const RegisterID ArgReg3 = JSC::X86Registers::ecx;
# endif
#elif JS_CPU_ARM
static const RegisterID ReturnReg = JSC::ARMRegisters::r0;
@ -227,8 +225,9 @@ struct Registers {
| (1 << JSC::ARMRegisters::r6)
| (1 << JSC::ARMRegisters::r7)
// r8 is reserved as a scratch register for the assembler.
| (1 << JSC::ARMRegisters::r9);
// r10 is reserved for JSFrameReg.
| (1 << JSC::ARMRegisters::r9)
| (1 << JSC::ARMRegisters::r10);
// r11 is reserved for JSFrameReg.
// r13 is SP and must always point to VMFrame whilst in generated code.
// r14 is LR and is used for return sequences.
// r15 is PC (program counter).
@ -389,15 +388,6 @@ struct Registers {
# error "Unsupported platform"
#endif
/* Temp reg that can be clobbered when setting up a fallible fast or ABI call. */
#if defined(JS_CPU_X86) || defined(JS_CPU_X64)
static const RegisterID ClobberInCall = JSC::X86Registers::ecx;
#elif defined(JS_CPU_ARM)
static const RegisterID ClobberInCall = JSC::ARMRegisters::r2;
#elif defined(JS_CPU_SPARC)
static const RegisterID ClobberInCall = JSC::SparcRegisters::l1;
#endif
static const uint32 AvailFPRegs = TempFPRegs;
static inline uint32 maskReg(FPRegisterID reg) {
@ -422,20 +412,6 @@ struct Registers {
return regs.takeAnyReg().reg();
}
/* Get a register which is not live before a normal ABI call with at most four args. */
static inline Registers tempCallRegMask() {
Registers regs(AvailRegs);
#ifndef JS_CPU_X86
regs.takeReg(ArgReg0);
regs.takeReg(ArgReg1);
regs.takeReg(ArgReg2);
#if defined(JS_CPU_SPARC) || defined(JS_CPU_X64)
regs.takeReg(ArgReg3);
#endif
#endif
return regs;
}
Registers(uint32 freeMask)
: freeMask(freeMask)
{ }
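/*
 * Editor's sketch, not part of this changeset: the bitmask idiom behind the
 * Registers helpers above (takeReg, takeAnyReg, masks such as AvailRegs),
 * reduced to a stand-alone class with invented names. Each register is one
 * bit of freeMask; reserving a register clears its bit.
 */
#include <cassert>
#include <cstdint>
struct RegMask {
    uint32_t freeMask;
    explicit RegMask(uint32_t mask) : freeMask(mask) {}
    bool hasReg(int reg) const { return (freeMask & (1u << reg)) != 0; }
    void takeReg(int reg) {              // reserve a specific register
        assert(hasReg(reg));
        freeMask &= ~(1u << reg);
    }
    int takeAnyReg() {                   // reserve the lowest free register
        assert(freeMask != 0);
        int reg = 0;
        while (!hasReg(reg))
            ++reg;
        freeMask &= ~(1u << reg);
        return reg;
    }
};
int main()
{
    RegMask avail(0x000000ffu);          // pretend r0..r7 are available
    avail.takeReg(0);                    // e.g. exclude ABI argument registers
    avail.takeReg(1);
    int scratch = avail.takeAnyReg();    // lowest register not yet reserved
    return scratch == 2 ? 0 : 1;
}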


@ -482,7 +482,7 @@ JS_STATIC_ASSERT(VMFrame::offsetOfFp == (4*7));
JS_STATIC_ASSERT(offsetof(VMFrame, scratch) == (4*3));
JS_STATIC_ASSERT(offsetof(VMFrame, previous) == (4*2));
JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r10);
JS_STATIC_ASSERT(JSFrameReg == JSC::ARMRegisters::r11);
JS_STATIC_ASSERT(JSReturnReg_Type == JSC::ARMRegisters::r5);
JS_STATIC_ASSERT(JSReturnReg_Data == JSC::ARMRegisters::r4);
@ -547,8 +547,8 @@ SYMBOL_STRING(JaegerTrampoline) ":" "\n"
/* Preserve 'code' (r2) in an arbitrary callee-saved register. */
" mov r4, r2" "\n"
/* Preserve 'fp' (r1) in r10 (JSFrameReg). */
" mov r10, r1" "\n"
/* Preserve 'fp' (r1) in r11 (JSFrameReg). */
" mov r11, r1" "\n"
" mov r0, sp" "\n"
" blx " SYMBOL_STRING_VMFRAME(SetVMFrameRegs) "\n"
@ -564,7 +564,7 @@ asm (
FUNCTION_HEADER_EXTRA
".globl " SYMBOL_STRING(JaegerTrampolineReturn) "\n"
SYMBOL_STRING(JaegerTrampolineReturn) ":" "\n"
" strd r4, r5, [r10, #24]" "\n" /* fp->rval type,data */
" strd r4, r5, [r11, #24]" "\n" /* fp->rval type,data */
/* Tidy up. */
" mov r0, sp" "\n"
@ -610,8 +610,8 @@ FUNCTION_HEADER_EXTRA
SYMBOL_STRING(JaegerInterpolineScripted) ":" "\n"
/* The only difference between JaegerInterpoline and JaegerInterpolineScripted is that the
* scripted variant has to walk up to the previous StackFrame first. */
" ldr r10, [r10, #(4*4)]" "\n" /* Load f->prev_ */
" str r10, [sp, #(4*7)]" "\n" /* Update f->regs->fp_ */
" ldr r11, [r11, #(4*4)]" "\n" /* Load f->prev_ */
" str r11, [sp, #(4*7)]" "\n" /* Update f->regs->fp_ */
/* Fall through into JaegerInterpoline. */
FUNCTION_HEADER_EXTRA
@ -623,8 +623,8 @@ SYMBOL_STRING(JaegerInterpoline) ":" "\n"
" mov r0, r4" "\n" /* returnData */
" blx " SYMBOL_STRING_RELOC(js_InternalInterpret) "\n"
" cmp r0, #0" "\n"
" ldr r10, [sp, #(4*7)]" "\n" /* Load (StackFrame*)f->regs->fp_ */
" ldrd r4, r5, [r10, #(4*6)]" "\n" /* Load rval payload and type. */
" ldr ip, [sp, #(4*7)]" "\n" /* Load (StackFrame*)f->regs->fp_ */
" ldrd r4, r5, [ip, #(4*6)]" "\n" /* Load rval payload and type. */
" ldr r1, [sp, #(4*3)]" "\n" /* Load scratch. */
" it ne" "\n"
" bxne r0" "\n"
@ -1087,6 +1087,8 @@ static inline void Destroy(T &t)
mjit::JITScript::~JITScript()
{
code.release();
if (pcLengths)
Foreground::free_(pcLengths);
@ -1113,25 +1115,26 @@ mjit::JITScript::~JITScript()
(*pExecPool)->release();
}
for (unsigned i = 0; i < nativeCallStubs.length(); i++) {
JSC::ExecutablePool *pool = nativeCallStubs[i].pool;
if (pool)
pool->release();
}
ic::CallICInfo *callICs_ = callICs();
for (uint32 i = 0; i < nCallICs; i++)
callICs_[i].purge();
for (uint32 i = 0; i < nCallICs; i++) {
callICs_[i].releasePools();
if (callICs_[i].fastGuardedObject)
callICs_[i].purgeGuardedObject();
}
// Fixup any ICs still referring to this JIT.
while (!JS_CLIST_IS_EMPTY(&callers)) {
JS_STATIC_ASSERT(offsetof(ic::CallICInfo, links) == 0);
ic::CallICInfo *ic = (ic::CallICInfo *) callers.next;
ic->purge();
uint8 *start = (uint8 *)ic->funGuard.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
repatch.repatch(ic->funGuard, NULL);
repatch.relink(ic->funJump, ic->slowPathStart);
ic->purgeGuardedObject();
}
#endif
code.release();
}
size_t
@ -1311,25 +1314,4 @@ JITScript::trace(JSTracer *trc)
MarkObject(trc, *rootedObjects()[i], "mjit rooted object");
}
void
mjit::PurgeICs(JSContext *cx, JSScript *script)
{
#ifdef JS_MONOIC
if (script->jitNormal) {
script->jitNormal->purgeMICs();
script->jitNormal->sweepCallICs(cx);
}
if (script->jitCtor) {
script->jitCtor->purgeMICs();
script->jitCtor->sweepCallICs(cx);
}
#endif
#ifdef JS_POLYIC
if (script->jitNormal)
script->jitNormal->purgePICs();
if (script->jitCtor)
script->jitCtor->purgePICs();
#endif
}
/* static */ const double mjit::Assembler::oneDouble = 1.0;


@ -258,12 +258,10 @@ enum RejoinState {
/*
* As for REJOIN_FALLTHROUGH, but holds a reference on the compartment's
* orphaned native pools which needs to be reclaimed by InternalInterpret.
* The return value needs to be adjusted if REJOIN_NATIVE_LOWERED, and
* REJOIN_NATIVE_GETTER is for ABI calls made for property accesses.
* The return value needs to be adjusted if REJOIN_NATIVE_LOWERED.
*/
REJOIN_NATIVE,
REJOIN_NATIVE_LOWERED,
REJOIN_NATIVE_GETTER,
/*
* Dummy rejoin stored in VMFrames to indicate they return into a native
@ -555,31 +553,6 @@ struct PCLengthEntry {
double picsLength; /* amount of PIC stub code generated */
};
/*
* Pools and patch locations for managing stubs for non-FASTCALL C++ calls made
* from native call and PropertyOp stubs. Ownership of these may be transferred
* into the orphanedNativePools for the compartment.
*/
struct NativeCallStub {
/* pc/inlined location of the stub. */
jsbytecode *pc;
CallSite *inlined;
/* Pool for the stub, NULL if it has been removed from the script. */
JSC::ExecutablePool *pool;
/*
* Fallthrough jump returning to jitcode which may be patched during
* recompilation. On x64 this is an indirect jump to avoid issues with far
* jumps on relative branches.
*/
#ifdef JS_CPU_X64
JSC::CodeLocationDataLabelPtr jump;
#else
JSC::CodeLocationJump jump;
#endif
};
struct JITScript {
typedef JSC::MacroAssemblerCodeRef CodeRef;
CodeRef code; /* pool & code addresses */
@ -638,9 +611,6 @@ struct JITScript {
ExecPoolVector execPools;
#endif
// Additional ExecutablePools for native call and getter stubs.
Vector<NativeCallStub, 0, SystemAllocPolicy> nativeCallStubs;
NativeMapEntry *nmap() const;
js::mjit::InlineFrame *inlineFrames() const;
js::mjit::CallSite *callSites() const;
@ -666,9 +636,8 @@ struct JITScript {
return jcheck >= jitcode && jcheck < jitcode + code.m_size;
}
void purgeGetterPICs();
void sweepCallICs(JSContext *cx);
void nukeScriptDependentICs();
void sweepCallICs(JSContext *cx, bool purgeAll);
void purgeMICs();
void purgePICs();
@ -686,8 +655,6 @@ struct JITScript {
char *polyICSectionsLimit() const;
};
void PurgeICs(JSContext *cx, JSScript *script);
/*
* Execute the given mjit code. This is a low-level call and callers must
* provide the same guarantees as JaegerShot/CheckStackAndEnterMethodJIT.


@ -555,120 +555,6 @@ SlowNewFromIC(VMFrame &f, ic::CallICInfo *ic)
return NULL;
}
void
CallICInfo::purge()
{
uint8 *start = (uint8 *)funGuard.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
repatch.repatch(funGuard, NULL);
repatch.relink(funJump, slowPathStart);
releasePools();
fastGuardedNative = NULL;
if (fastGuardedObject) {
hasJsFunCheck = false;
fastGuardedObject = NULL;
JS_REMOVE_LINK(&links);
}
}
bool
NativeStubLinker::init(JSContext *cx)
{
JSC::ExecutablePool *pool = LinkerHelper::init(cx);
if (!pool)
return false;
NativeCallStub stub;
stub.pc = pc;
stub.inlined = inlined;
stub.pool = pool;
stub.jump = locationOf(done);
if (!jit->nativeCallStubs.append(stub)) {
pool->release();
return false;
}
return true;
}
/*
* Generate epilogue code to run after a stub ABI call to a native or getter.
* This checks for an exception, and either type checks the result against the
* observed types for the opcode or loads the result into a register pair
* (it will go through a type barrier afterwards).
*/
bool
mjit::NativeStubEpilogue(VMFrame &f, Assembler &masm, NativeStubLinker::FinalJump *result,
int32 initialFrameDepth, int32 vpOffset,
MaybeRegisterID typeReg, MaybeRegisterID dataReg)
{
/* Reload fp, which may have been clobbered by restoreStackBase(). */
masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
Address resultAddress(JSFrameReg, vpOffset);
Vector<Jump> mismatches(f.cx);
if (f.cx->typeInferenceEnabled()) {
if (!typeReg.isSet()) {
/*
* Test the result of this native against the known result type set
* for the call. We don't assume knowledge about the types that
* natives can return, except when generating specialized paths in
* FastBuiltins.
*/
types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
if (!masm.generateTypeCheck(f.cx, resultAddress, Registers::ReturnReg, types, &mismatches))
THROWV(false);
}
}
/*
* Can no longer trigger recompilation in this stub, clear the stub
* rejoin on the VMFrame.
*/
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
if (typeReg.isSet())
masm.loadValueAsComponents(resultAddress, typeReg.reg(), dataReg.reg());
/*
* The final jump is an indirect jump on x64, so that we'll always be able
* to repatch it to the interpoline later.
*/
Label finished = masm.label();
#ifdef JS_CPU_X64
JSC::MacroAssembler::DataLabelPtr done = masm.moveWithPatch(ImmPtr(NULL), Registers::ValueReg);
masm.jump(Registers::ValueReg);
#else
Jump done = masm.jump();
#endif
/* Generate a call for type check failures on the native result. */
if (!mismatches.empty()) {
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
f.regs.pc, f.regs.inlined(), NULL, initialFrameDepth);
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.jump().linkTo(finished, &masm);
}
/* Move JaegerThrowpoline into register for very far jump on x64. */
hasException.linkTo(masm.label(), &masm);
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.throwInJIT();
*result = done;
return true;
}
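/*
 * Editor's sketch, not part of this changeset: the control flow that the
 * NativeStubEpilogue() shown above emits after a native or getter ABI call,
 * flattened into plain C++ with invented stand-ins. matchesObserved plays the
 * role of the generateTypeCheck() mismatch jumps and recordBarrier the
 * TypeBarrierReturn stub called when they fire; the real code emits all of
 * this as jitcode ending in a jump back into the method JIT.
 */
#include <functional>
#include <stdexcept>
struct NativeResult { bool ok; int typeTag; };
static int
nativeStubEpilogue(const NativeResult &result,
                   const std::function<bool(int)> &matchesObserved,
                   const std::function<void(int)> &recordBarrier)
{
    if (!result.ok)                              // hasException path
        throw std::runtime_error("native call failed");
    if (!matchesObserved(result.typeTag))        // type-set mismatch path
        recordBarrier(result.typeTag);
    return result.typeTag;                       // 'done': rejoin jitcode
}
int main()
{
    int seen = -1;
    NativeResult r{true, 7};
    nativeStubEpilogue(r,
                       [](int tag) { return tag == 0; },   // only tag 0 observed
                       [&seen](int tag) { seen = tag; });  // barrier records 7
    return seen == 7 ? 0 : 1;
}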
/*
* Calls have an inline path and an out-of-line path. The inline path is used
* in the fastest case: the method has JIT'd code, and |argc == nargs|.
@ -771,27 +657,29 @@ class CallCompiler : public BaseCompiler
masm.loadPtr(Address(t0, offset), t0);
Jump hasCode = masm.branchPtr(Assembler::Above, t0, ImmPtr(JS_UNJITTABLE_SCRIPT));
/*
* Write the rejoin state to indicate this is a compilation call
* made from an IC (the recompiler cannot detect calls made from
* ICs automatically).
*/
masm.storePtr(ImmPtr((void *) ic.frameSize.rejoinState(f.pc(), false)),
FrameAddress(offsetof(VMFrame, stubRejoin)));
if (cx->typeInferenceEnabled()) {
/*
* Write the rejoin state to indicate this is a compilation call
* made from an IC (the recompiler cannot detect calls made from
* ICs automatically).
*/
masm.storePtr(ImmPtr((void *) ic.frameSize.rejoinState(f.pc(), false)),
FrameAddress(offsetof(VMFrame, stubRejoin)));
}
masm.bumpStubCounter(f.script(), f.pc(), Registers::tempCallReg());
/* Try and compile. On success we get back the nmap pointer. */
void *compilePtr = JS_FUNC_TO_DATA_PTR(void *, stubs::CompileFunction);
DataLabelPtr inlined;
if (ic.frameSize.isStatic()) {
masm.move(Imm32(ic.frameSize.staticArgc()), Registers::ArgReg1);
masm.fallibleVMCall(cx->typeInferenceEnabled(),
compilePtr, f.regs.pc, f.regs.inlined(), NULL,
ic.frameSize.staticLocalSlots());
compilePtr, f.regs.pc, &inlined, ic.frameSize.staticLocalSlots());
} else {
masm.load32(FrameAddress(offsetof(VMFrame, u.call.dynamicArgc)), Registers::ArgReg1);
masm.fallibleVMCall(cx->typeInferenceEnabled(),
compilePtr, f.regs.pc, f.regs.inlined(), NULL, -1);
compilePtr, f.regs.pc, &inlined, -1);
}
Jump notCompiled = masm.branchTestPtr(Assembler::Zero, Registers::ReturnReg,
@ -829,6 +717,11 @@ class CallCompiler : public BaseCompiler
JaegerSpew(JSpew_PICs, "generated CALL stub %p (%lu bytes)\n", cs.executableAddress(),
(unsigned long) masm.size());
if (f.regs.inlined()) {
JSC::LinkBuffer code((uint8 *) cs.executableAddress(), masm.size(), JSC::METHOD_CODE);
code.patch(inlined, f.regs.inlined());
}
Repatcher repatch(from);
JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
repatch.relink(oolJump, cs);
@ -965,6 +858,10 @@ class CallCompiler : public BaseCompiler
if (ic.fastGuardedNative || ic.hasJsFunCheck)
return true;
/* Don't generate native MICs within inlined frames, we can't recompile them yet. */
if (f.regs.inlined())
return true;
/* Native MIC needs to warm up first. */
if (!ic.hit) {
ic.hit = true;
@ -977,29 +874,51 @@ class CallCompiler : public BaseCompiler
/* Guard on the function object identity, for now. */
Jump funGuard = masm.branchPtr(Assembler::NotEqual, ic.funObjReg, ImmPtr(obj));
/*
* Write the rejoin state for the recompiler to use if this call
* triggers recompilation. Natives use a different stack address to
* store the return value than FASTCALLs, and without additional
* information we cannot tell which one is active on a VMFrame.
*/
masm.storePtr(ImmPtr((void *) ic.frameSize.rejoinState(f.pc(), true)),
FrameAddress(offsetof(VMFrame, stubRejoin)));
if (cx->typeInferenceEnabled()) {
/*
* Write the rejoin state for the recompiler to use if this call
* triggers recompilation. Natives use a different stack address to
* store the return value than FASTCALLs, and without additional
* information we cannot tell which one is active on a VMFrame.
*/
masm.storePtr(ImmPtr((void *) ic.frameSize.rejoinState(f.pc(), true)),
FrameAddress(offsetof(VMFrame, stubRejoin)));
}
/* N.B. After this call, the frame will have a dynamic frame size. */
if (ic.frameSize.isDynamic()) {
masm.bumpStubCounter(f.script(), f.pc(), Registers::tempCallReg());
masm.fallibleVMCall(cx->typeInferenceEnabled(),
JS_FUNC_TO_DATA_PTR(void *, ic::SplatApplyArgs),
f.pc(), NULL, NULL, initialFrameDepth);
f.regs.pc, NULL, initialFrameDepth);
}
Registers tempRegs = Registers::tempCallRegMask();
Registers tempRegs(Registers::AvailRegs);
#ifndef JS_CPU_X86
tempRegs.takeReg(Registers::ArgReg0);
tempRegs.takeReg(Registers::ArgReg1);
tempRegs.takeReg(Registers::ArgReg2);
#endif
RegisterID t0 = tempRegs.takeAnyReg().reg();
masm.bumpStubCounter(f.script(), f.pc(), t0);
int32 storeFrameDepth = ic.frameSize.isStatic() ? initialFrameDepth : -1;
masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.pc(), f.regs.inlined(), storeFrameDepth);
/* Store pc. */
masm.storePtr(ImmPtr(f.regs.pc),
FrameAddress(offsetof(VMFrame, regs.pc)));
/* Store inlined. */
masm.storePtr(ImmPtr(f.regs.inlined()),
FrameAddress(VMFrame::offsetOfInlined));
/* Store sp (if not already set by ic::SplatApplyArgs). */
if (ic.frameSize.isStatic()) {
uint32 spOffset = sizeof(StackFrame) + initialFrameDepth * sizeof(Value);
masm.addPtr(Imm32(spOffset), JSFrameReg, t0);
masm.storePtr(t0, FrameAddress(offsetof(VMFrame, regs.sp)));
}
/* Store fp. */
masm.storePtr(JSFrameReg, FrameAddress(VMFrame::offsetOfFp));
/* Grab cx. */
#ifdef JS_CPU_X86
@ -1040,7 +959,7 @@ class CallCompiler : public BaseCompiler
masm.setupABICall(Registers::NormalCall, 3);
masm.storeArg(2, vpReg);
if (ic.frameSize.isStatic())
masm.storeArg(1, ImmPtr((void *) ic.frameSize.staticArgc()));
masm.storeArg(1, Imm32(ic.frameSize.staticArgc()));
else
masm.storeArg(1, argcReg.reg());
masm.storeArg(0, cxReg);
@ -1058,21 +977,83 @@ class CallCompiler : public BaseCompiler
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, native), false);
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, initialFrameDepth, vpOffset, MaybeRegisterID(), MaybeRegisterID()))
return false;
NativeStubLinker linker(masm, f.jit(), f.pc(), f.regs.inlined(), done);
if (!linker.init(f.cx))
/* Reload fp, which may have been clobbered by restoreStackBase(). */
masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
Jump hasException = masm.branchTest32(Assembler::Zero, Registers::ReturnReg,
Registers::ReturnReg);
Vector<Jump> mismatches(f.cx);
if (cx->typeInferenceEnabled()) {
types::AutoEnterTypeInference enter(f.cx);
/*
* Test the result of this native against the known result type
* set for the call. We don't assume knowledge about the types that
* natives can return, except when generating specialized paths in
* FastBuiltins. We don't need to record dependencies on the result
* type set, as the compiler will already have done so when making
* the call IC.
*/
Address address(JSFrameReg, vpOffset);
types::TypeSet *types = f.script()->analysis()->bytecodeTypes(f.pc());
if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
THROWV(true);
/*
* Can no longer trigger recompilation in this stub, clear the stub
* rejoin on the VMFrame.
*/
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
}
/*
* The final jump is an indirect jump on x64, so that we'll always be able
* to repatch it to the interpoline later.
*/
Label finished = masm.label();
#ifdef JS_CPU_X64
void *slowJoin = ic.slowPathStart.labelAtOffset(ic.slowJoinOffset).executableAddress();
DataLabelPtr done = masm.moveWithPatch(ImmPtr(slowJoin), Registers::ValueReg);
masm.jump(Registers::ValueReg);
#else
Jump done = masm.jump();
#endif
/* Generate a call for type check failures on the native result. */
if (!mismatches.empty()) {
for (unsigned i = 0; i < mismatches.length(); i++)
mismatches[i].linkTo(masm.label(), &masm);
masm.addPtr(Imm32(vpOffset), JSFrameReg, Registers::ArgReg1);
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::TypeBarrierReturn),
f.regs.pc, NULL, initialFrameDepth);
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.jump().linkTo(finished, &masm);
}
/* Move JaegerThrowpoline into register for very far jump on x64. */
hasException.linkTo(masm.label(), &masm);
if (cx->typeInferenceEnabled())
masm.storePtr(ImmPtr(NULL), FrameAddress(offsetof(VMFrame, stubRejoin)));
masm.throwInJIT();
LinkerHelper linker(masm, JSC::METHOD_CODE);
JSC::ExecutablePool *ep = poolForSize(linker, CallICInfo::Pool_NativeStub);
if (!ep)
THROWV(true);
ic.fastGuardedNative = obj;
if (!linker.verifyRange(jit)) {
disable(jit);
return true;
}
linker.patchJump(ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
ic.nativeJump = linker.locationOf(done);
ic.fastGuardedNative = obj;
#ifndef JS_CPU_X64
linker.link(done, ic.slowPathStart.labelAtOffset(ic.slowJoinOffset));
#endif
linker.link(funGuard, ic.slowPathStart);
JSC::CodeLocationLabel start = linker.finalize();
@ -1339,20 +1320,17 @@ ic::GenerateArgumentCheckStub(VMFrame &f)
Assembler masm;
Vector<Jump> mismatches(f.cx);
Registers tempRegs(Registers::AvailRegs);
RegisterID scratch = tempRegs.takeAnyReg().reg();
if (!f.fp()->isConstructing()) {
types::TypeSet *types = types::TypeScript::ThisTypes(script);
Address address(JSFrameReg, StackFrame::offsetOfThis(fun));
if (!masm.generateTypeCheck(f.cx, address, scratch, types, &mismatches))
if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
return;
}
for (unsigned i = 0; i < fun->nargs; i++) {
types::TypeSet *types = types::TypeScript::ArgTypes(script, i);
Address address(JSFrameReg, StackFrame::offsetOfFormalArg(fun, i));
if (!masm.generateTypeCheck(f.cx, address, scratch, types, &mismatches))
if (!masm.generateTypeCheck(f.cx, address, types, &mismatches))
return;
}
@ -1420,10 +1398,49 @@ JITScript::purgeMICs()
}
void
JITScript::sweepCallICs(JSContext *cx)
ic::PurgeMICs(JSContext *cx, JSScript *script)
{
/* MICs are purged during GC to handle changing shapes. */
JS_ASSERT(cx->runtime->gcRegenShapes);
if (script->jitNormal)
script->jitNormal->purgeMICs();
if (script->jitCtor)
script->jitCtor->purgeMICs();
}
void
JITScript::nukeScriptDependentICs()
{
if (!nCallICs)
return;
Repatcher repatcher(this);
ic::CallICInfo *callICs_ = callICs();
for (uint32 i = 0; i < nCallICs; i++) {
ic::CallICInfo &ic = callICs_[i];
if (!ic.fastGuardedObject)
continue;
repatcher.repatch(ic.funGuard, NULL);
repatcher.relink(ic.funJump, ic.slowPathStart);
ic.releasePool(CallICInfo::Pool_ClosureStub);
ic.fastGuardedObject = NULL;
ic.hasJsFunCheck = false;
}
}
void
JITScript::sweepCallICs(JSContext *cx, bool purgeAll)
{
Repatcher repatcher(this);
/*
* If purgeAll is set, purge stubs in the script except those covered by PurgePICs
* (which is always called during GC). We want to remove references which can keep
* alive pools that we are trying to destroy (see JSCompartment::sweep).
*/
ic::CallICInfo *callICs_ = callICs();
for (uint32 i = 0; i < nCallICs; i++) {
ic::CallICInfo &ic = callICs_[i];
@ -1434,19 +1451,20 @@ JITScript::sweepCallICs(JSContext *cx)
* precisely GC call ICs while keeping the identity guard safe.
*/
bool fastFunDead = ic.fastGuardedObject &&
IsAboutToBeFinalized(cx, ic.fastGuardedObject);
(purgeAll || IsAboutToBeFinalized(cx, ic.fastGuardedObject));
bool nativeDead = ic.fastGuardedNative &&
IsAboutToBeFinalized(cx, ic.fastGuardedNative);
(purgeAll || IsAboutToBeFinalized(cx, ic.fastGuardedNative));
/*
* There are two conditions where we need to relink:
* (1) The native is dead, since it always has a stub.
* (2) The fastFun is dead *and* there is a closure stub.
* There are three conditions where we need to relink:
* (1) purgeAll is true.
* (2) The native is dead, since it always has a stub.
* (3) The fastFun is dead *and* there is a closure stub.
*
* Note although both objects can be non-NULL, there can only be one
* of [closure, native] stub per call IC.
*/
if (nativeDead || (fastFunDead && ic.hasJsFunCheck)) {
if (purgeAll || nativeDead || (fastFunDead && ic.hasJsFunCheck)) {
repatcher.relink(ic.funJump, ic.slowPathStart);
ic.hit = false;
}
@ -1456,13 +1474,65 @@ JITScript::sweepCallICs(JSContext *cx)
ic.purgeGuardedObject();
}
if (nativeDead)
if (nativeDead) {
ic.releasePool(CallICInfo::Pool_NativeStub);
ic.fastGuardedNative = NULL;
}
if (purgeAll) {
ic.releasePool(CallICInfo::Pool_ScriptStub);
JSC::CodeLocationJump oolJump = ic.slowPathStart.jumpAtOffset(ic.oolJumpOffset);
JSC::CodeLocationLabel icCall = ic.slowPathStart.labelAtOffset(ic.icCallOffset);
repatcher.relink(oolJump, icCall);
}
}
/* The arguments type check IC can refer to type objects which might be swept. */
if (argsCheckPool)
resetArgsCheck();
if (purgeAll) {
/* Purge ICs generating stubs into execPools. */
uint32 released = 0;
ic::EqualityICInfo *equalityICs_ = equalityICs();
for (uint32 i = 0; i < nEqualityICs; i++) {
ic::EqualityICInfo &ic = equalityICs_[i];
if (!ic.generated)
continue;
JSC::FunctionPtr fptr(JS_FUNC_TO_DATA_PTR(void *, ic::Equality));
repatcher.relink(ic.stubCall, fptr);
repatcher.relink(ic.jumpToStub, ic.stubEntry);
ic.generated = false;
released++;
}
ic::SetGlobalNameIC *setGlobalNames_ = setGlobalNames();
for (uint32 i = 0; i < nSetGlobalNames; i ++) {
ic::SetGlobalNameIC &ic = setGlobalNames_[i];
if (!ic.hasExtraStub)
continue;
repatcher.relink(ic.fastPathStart.jumpAtOffset(ic.inlineShapeJump), ic.slowPathStart);
ic.hasExtraStub = false;
released++;
}
JS_ASSERT(released == execPools.length());
for (uint32 i = 0; i < released; i++)
execPools[i]->release();
execPools.clear();
}
}
void
ic::SweepCallICs(JSContext *cx, JSScript *script, bool purgeAll)
{
if (script->jitNormal)
script->jitNormal->sweepCallICs(cx, purgeAll);
if (script->jitCtor)
script->jitCtor->sweepCallICs(cx, purgeAll);
}
#endif /* JS_MONOIC */


@ -216,6 +216,7 @@ struct CallICInfo {
enum PoolIndex {
Pool_ScriptStub,
Pool_ClosureStub,
Pool_NativeStub,
Total_Pools
};
@ -239,6 +240,17 @@ struct CallICInfo {
/* Inline to OOL jump, redirected by stubs. */
JSC::CodeLocationJump funJump;
/*
* Native stub fallthrough jump which may be patched during recompilation.
* On x64 this is an indirect jump to avoid issues with far jumps on
* relative branches.
*/
#ifdef JS_CPU_X64
JSC::CodeLocationDataLabelPtr nativeJump;
#else
JSC::CodeLocationJump nativeJump;
#endif
/* Offset to inline scripted call, from funGuard. */
uint32 hotJumpOffset : 16;
uint32 joinPointOffset : 16;
@ -269,12 +281,13 @@ struct CallICInfo {
fastGuardedNative = NULL;
hit = false;
hasJsFunCheck = false;
PodArrayZero(pools);
pools[0] = pools[1] = pools[2] = NULL;
}
inline void releasePools() {
releasePool(Pool_ScriptStub);
releasePool(Pool_ClosureStub);
releasePool(Pool_NativeStub);
}
inline void releasePool(PoolIndex index) {
@ -291,8 +304,6 @@ struct CallICInfo {
fastGuardedObject = NULL;
JS_REMOVE_LINK(&links);
}
void purge();
};
void * JS_FASTCALL New(VMFrame &f, ic::CallICInfo *ic);
@ -303,6 +314,9 @@ JSBool JS_FASTCALL SplatApplyArgs(VMFrame &f);
void GenerateArgumentCheckStub(VMFrame &f);
void PurgeMICs(JSContext *cx, JSScript *script);
void SweepCallICs(JSContext *cx, JSScript *script, bool purgeAll);
} /* namespace ic */
} /* namespace mjit */
} /* namespace js */


@ -151,11 +151,9 @@ class PICStubCompiler : public BaseCompiler
uint32 gcNumber;
public:
bool canCallHook;
PICStubCompiler(const char *type, VMFrame &f, JSScript *script, ic::PICInfo &pic, void *stub)
: BaseCompiler(f.cx), type(type), f(f), script(script), pic(pic), stub(stub),
gcNumber(f.cx->runtime->gcNumber), canCallHook(pic.canCallHook)
gcNumber(f.cx->runtime->gcNumber)
{ }
bool isCallOp() const {
@ -804,17 +802,10 @@ struct GetPropertyHelper {
LookupStatus testForGet() {
if (!shape->hasDefaultGetter()) {
if (shape->isMethod()) {
if (!ic.isCallOp())
return ic.disable(cx, "method valued shape");
} else {
if (shape->hasGetterValue())
return ic.disable(cx, "getter value shape");
if (shape->hasSlot() && holder != obj)
return ic.disable(cx, "slotful getter hook through prototype");
if (!ic.canCallHook)
return ic.disable(cx, "can't call getter hook");
}
if (!shape->isMethod())
return ic.disable(cx, "getter");
if (!ic.isCallOp())
return ic.disable(cx, "method valued shape");
} else if (!shape->hasSlot()) {
return ic.disable(cx, "no slot");
}
@ -1012,8 +1003,6 @@ class GetPropCompiler : public PICStubCompiler
return status;
if (getprop.obj != getprop.holder)
return disable("proto walk on String.prototype");
if (!getprop.shape->hasDefaultGetterOrIsMethod())
return disable("getter hook on String.prototype");
if (hadGC())
return Lookup_Uncacheable;
@ -1155,91 +1144,6 @@ class GetPropCompiler : public PICStubCompiler
return Lookup_Cacheable;
}
void generateGetterStub(Assembler &masm, const Shape *shape,
Label start, const Vector<Jump, 8> &shapeMismatches)
{
/*
* Getter hook needs to be called from the stub. The state is fully
* synced and no registers are live except the result registers.
*/
JS_ASSERT(pic.canCallHook);
PropertyOp getter = shape->getterOp();
masm.storePtr(ImmPtr((void *) REJOIN_NATIVE_GETTER),
FrameAddress(offsetof(VMFrame, stubRejoin)));
Registers tempRegs = Registers::tempCallRegMask();
if (tempRegs.hasReg(Registers::ClobberInCall))
tempRegs.takeReg(Registers::ClobberInCall);
/* Get a register to hold obj while we set up the rest of the frame. */
RegisterID holdObjReg = pic.objReg;
if (tempRegs.hasReg(pic.objReg)) {
tempRegs.takeReg(pic.objReg);
} else {
holdObjReg = tempRegs.takeAnyReg().reg();
masm.move(pic.objReg, holdObjReg);
}
RegisterID t0 = tempRegs.takeAnyReg().reg();
masm.bumpStubCounter(f.script(), f.pc(), t0);
/*
* Initialize vp, which is either a slot in the object (the holder,
* actually, which must equal the object here) or undefined.
* Use vp == sp (which for CALLPROP will actually be the original
* sp + 1), to avoid clobbering stack values.
*/
int32 vpOffset = (char *) f.regs.sp - (char *) f.fp();
if (shape->hasSlot()) {
masm.loadObjProp(obj, holdObjReg, shape,
Registers::ClobberInCall, t0);
masm.storeValueFromComponents(Registers::ClobberInCall, t0, Address(JSFrameReg, vpOffset));
} else {
masm.storeValue(UndefinedValue(), Address(JSFrameReg, vpOffset));
}
int32 initialFrameDepth = f.regs.sp - f.fp()->slots();
masm.setupFallibleABICall(cx->typeInferenceEnabled(), f.pc(), f.regs.inlined(), initialFrameDepth);
/* Grab cx. */
#ifdef JS_CPU_X86
RegisterID cxReg = tempRegs.takeAnyReg().reg();
#else
RegisterID cxReg = Registers::ArgReg0;
#endif
masm.loadPtr(FrameAddress(offsetof(VMFrame, cx)), cxReg);
/* Grab vp. */
RegisterID vpReg = t0;
masm.addPtr(Imm32(vpOffset), JSFrameReg, vpReg);
masm.restoreStackBase();
masm.setupABICall(Registers::NormalCall, 4);
masm.storeArg(3, vpReg);
masm.storeArg(2, ImmPtr((void *) JSID_BITS(SHAPE_USERID(shape))));
masm.storeArg(1, holdObjReg);
masm.storeArg(0, cxReg);
masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, getter), false);
NativeStubLinker::FinalJump done;
if (!NativeStubEpilogue(f, masm, &done, 0, vpOffset, pic.shapeReg, pic.objReg))
return;
NativeStubLinker linker(masm, f.jit(), f.pc(), f.regs.inlined(), done);
if (!linker.init(f.cx))
THROW();
if (!linker.verifyRange(f.jit())) {
disable("code memory is out of range");
return;
}
linker.patchJump(pic.fastPathRejoin);
linkerEpilogue(linker, start, shapeMismatches);
}
LookupStatus generateStub(JSObject *holder, const Shape *shape)
{
Vector<Jump, 8> shapeMismatches(cx);
@ -1294,13 +1198,6 @@ class GetPropCompiler : public PICStubCompiler
pic.secondShapeGuard = 0;
}
if (!shape->hasDefaultGetterOrIsMethod()) {
generateGetterStub(masm, shape, start, shapeMismatches);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
return Lookup_Cacheable;
}
/* Load the value out of the object. */
masm.loadObjProp(holder, holderReg, shape, pic.shapeReg, pic.objReg);
Jump done = masm.jump();
@ -1316,22 +1213,12 @@ class GetPropCompiler : public PICStubCompiler
return disable("code memory is out of range");
}
// The final exit jumps to the store-back in the inline stub.
buffer.link(done, pic.fastPathRejoin);
linkerEpilogue(buffer, start, shapeMismatches);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
return Lookup_Cacheable;
}
void linkerEpilogue(LinkerHelper &buffer, Label start, const Vector<Jump, 8> &shapeMismatches)
{
// The guard exit jumps to the original slow case.
for (Jump *pj = shapeMismatches.begin(); pj != shapeMismatches.end(); ++pj)
buffer.link(*pj, pic.slowPathStart);
// The final exit jumps to the store-back in the inline stub.
buffer.link(done, pic.fastPathRejoin);
CodeLocationLabel cs = buffer.finalize();
JaegerSpew(JSpew_PICs, "generated %s stub at %p\n", type, cs.executableAddress());
@ -1340,10 +1227,15 @@ class GetPropCompiler : public PICStubCompiler
pic.stubsGenerated++;
pic.updateLastPath(buffer, start);
if (setStubShapeOffset)
pic.getPropLabels().setStubShapeJump(masm, start, stubShapeJumpLabel);
if (pic.stubsGenerated == MAX_PIC_STUBS)
disable("max stubs reached");
if (obj->isDenseArray())
disable("dense array");
return Lookup_Cacheable;
}
void patchPreviousToHere(CodeLocationLabel cs)
@ -1359,14 +1251,8 @@ class GetPropCompiler : public PICStubCompiler
shapeGuardJumpOffset = pic.getPropLabels().getStubShapeJumpOffset();
else
shapeGuardJumpOffset = pic.shapeGuard + pic.getPropLabels().getInlineShapeJumpOffset();
int secondGuardOffset = getLastStubSecondShapeGuard();
JaegerSpew(JSpew_PICs, "Patching previous (%d stubs) (start %p) (offset %d) (second %d)\n",
(int) pic.stubsGenerated, label.executableAddress(),
shapeGuardJumpOffset, secondGuardOffset);
repatcher.relink(label.jumpAtOffset(shapeGuardJumpOffset), cs);
if (secondGuardOffset)
if (int secondGuardOffset = getLastStubSecondShapeGuard())
repatcher.relink(label.jumpAtOffset(secondGuardOffset), cs);
}
@ -1381,11 +1267,8 @@ class GetPropCompiler : public PICStubCompiler
if (hadGC())
return Lookup_Uncacheable;
if (obj == getprop.holder &&
getprop.shape->hasDefaultGetterOrIsMethod() &&
!pic.inlinePathPatched) {
if (obj == getprop.holder && !pic.inlinePathPatched)
return patchInline(getprop.holder, getprop.shape);
}
return generateStub(getprop.holder, getprop.shape);
}
@ -1936,9 +1819,11 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic)
THROW();
JSString *str = f.regs.sp[-1].toString();
f.regs.sp[-1].setInt32(str->length());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
return;
} else if (f.regs.sp[-1].isMagic(JS_LAZY_ARGUMENTS)) {
f.regs.sp[-1].setInt32(f.regs.fp()->numActualArgs());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
return;
} else if (!f.regs.sp[-1].isPrimitive()) {
JSObject *obj = &f.regs.sp[-1].toObject();
@ -1963,12 +1848,15 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic)
JSString *str = obj->getPrimitiveThis().toString();
f.regs.sp[-1].setInt32(str->length());
}
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-1]);
return;
}
}
atom = f.cx->runtime->atomState.lengthAtom;
}
bool usePropCache = pic->usePropCache;
/*
* ValueToObject can trigger recompilations if it lazily initializes any
* of the primitive classes (Boolean, Number, String). :XXX: if these
@ -1995,6 +1883,16 @@ ic::GetProp(VMFrame &f, ic::PICInfo *pic)
if (!obj->getProperty(f.cx, ATOM_TO_JSID(atom), &v))
THROW();
/*
* Ignore undefined reads for the 'prototype' property in constructors,
* which will be at the start of the script and are never holes due to fun_resolve.
* Any undefined value was explicitly stored here, and is known by inference.
* :FIXME: looking under the usePropCache abstraction, which is only unset for
* reads of the prototype.
*/
if (usePropCache)
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), v);
f.regs.sp[-1] = v;
}
@ -2119,11 +2017,6 @@ ic::CallProp(VMFrame &f, ic::PICInfo *pic)
NATIVE_GET(cx, &objv.toObject(), obj2, shape, JSGET_NO_METHOD_BARRIER, &rval,
THROW());
}
/*
* Adjust the stack to reflect the height after the GETPROP, here and
* below. Getter hook ICs depend on this to know which value of sp they
* are updating for consistent rejoins; don't modify this!
*/
regs.sp++;
regs.sp[-2] = rval;
regs.sp[-1] = lval;
@ -2164,6 +2057,8 @@ ic::CallProp(VMFrame &f, ic::PICInfo *pic)
}
#endif
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), regs.sp[-2]);
if (monitor.recompiled())
return;
@ -2213,6 +2108,8 @@ ic::XName(VMFrame &f, ic::PICInfo *pic)
if (!cc.retrieve(&rval, NULL, PICInfo::XNAME))
THROW();
f.regs.sp[-1] = rval;
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
}
void JS_FASTCALL
@ -2230,6 +2127,8 @@ ic::Name(VMFrame &f, ic::PICInfo *pic)
if (!cc.retrieve(&rval, NULL, PICInfo::NAME))
THROW();
f.regs.sp[0] = rval;
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
}
static void JS_FASTCALL
@ -2255,6 +2154,8 @@ ic::CallName(VMFrame &f, ic::PICInfo *pic)
f.regs.sp[0] = rval;
f.regs.sp[1] = thisval;
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), rval);
}
static void JS_FASTCALL
@ -2431,12 +2332,6 @@ GetElementIC::attachGetProp(VMFrame &f, JSContext *cx, JSObject *obj, const Valu
if (status != Lookup_Cacheable)
return status;
// With TI enabled, string property stubs can only be added to an opcode if
// the value read will go through a type barrier afterwards. TI only
// accounts for integer-valued properties accessed by GETELEM/CALLELEM.
if (cx->typeInferenceEnabled() && !forcedTypeBarrier)
return disable(cx, "string element access may not have type barrier");
Assembler masm;
// Guard on the string's type and identity.
@ -2900,6 +2795,9 @@ ic::CallElement(VMFrame &f, ic::GetElementIC *ic)
// If the result can be cached, the value was already retrieved.
JS_ASSERT(!f.regs.sp[-2].isMagic());
f.regs.sp[-1].setObject(*thisObj);
if (!JSID_IS_INT(id))
types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
return;
}
}
@ -2919,6 +2817,9 @@ ic::CallElement(VMFrame &f, ic::GetElementIC *ic)
{
f.regs.sp[-1] = thisv;
}
if (!JSID_IS_INT(id))
types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
}
void JS_FASTCALL
@ -2960,12 +2861,18 @@ ic::GetElement(VMFrame &f, ic::GetElementIC *ic)
// If the result can be cached, the value was already retrieved.
JS_ASSERT(!f.regs.sp[-2].isMagic());
if (!JSID_IS_INT(id))
types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
return;
}
}
if (!obj->getProperty(cx, id, &f.regs.sp[-2]))
THROW();
if (!JSID_IS_INT(id))
types::TypeScript::MonitorUnknown(f.cx, f.script(), f.pc());
types::TypeScript::Monitor(f.cx, f.script(), f.pc(), f.regs.sp[-2]);
}
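
The GetElement and CallElement slow paths above apply one monitoring rule: reads keyed by a non-integer id fall outside what type inference models, so an unknown result is recorded before the fetched value itself is monitored. A minimal self-contained sketch of that rule follows; the function and type names are hypothetical stand-ins, not the real types::TypeScript API.

#include <cstdio>
#include <string>

// Hypothetical stand-ins for types::TypeScript::MonitorUnknown / Monitor.
static void monitorUnknown()
{
    std::puts("monitor: unknown result type");
}

static void monitorValue(const std::string &v)
{
    std::printf("monitor: value %s\n", v.c_str());
}

// Mirrors the pattern after the GETELEM/CALLELEM slow paths above: non-integer
// ids are not modeled by inference, so they force an "unknown" record first.
static void monitorElemRead(bool idIsInt, const std::string &result)
{
    if (!idIsInt)
        monitorUnknown();
    monitorValue(result);
}

int main()
{
    monitorElemRead(true, "42");      // integer-keyed (dense array style) read
    monitorElemRead(false, "\"x\"");  // string-keyed read also records "unknown"
    return 0;
}
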
#define APPLY_STRICTNESS(f, s) \
@ -3232,25 +3139,6 @@ ic::SetElement(VMFrame &f, ic::SetElementIC *ic)
template void JS_FASTCALL ic::SetElement<true>(VMFrame &f, SetElementIC *ic);
template void JS_FASTCALL ic::SetElement<false>(VMFrame &f, SetElementIC *ic);
void
JITScript::purgeGetterPICs()
{
Repatcher repatcher(this);
PICInfo *pics_ = pics();
for (uint32 i = 0; i < nPICs; i++) {
PICInfo &pic = pics_[i];
switch (pic.kind) {
case PICInfo::CALL: /* fall-through */
case PICInfo::GET:
GetPropCompiler::reset(repatcher, pic);
pic.reset();
break;
default:
break;
}
}
}
void
JITScript::purgePICs()
{
@ -3294,5 +3182,14 @@ JITScript::purgePICs()
setElems_[i].purge(repatcher);
}
void
ic::PurgePICs(JSContext *cx, JSScript *script)
{
if (script->jitNormal)
script->jitNormal->purgePICs();
if (script->jitCtor)
script->jitCtor->purgePICs();
}
#endif /* JS_POLYIC */


@ -93,12 +93,6 @@ struct BaseIC : public MacroAssemblerTypedefs {
bool hit : 1;
bool slowCallPatched : 1;
// Whether getter/setter hooks can be called from IC stubs.
bool canCallHook : 1;
// Whether a type barrier is in place for the result of the op.
bool forcedTypeBarrier : 1;
// Number of stubs generated.
uint32 stubsGenerated : 5;
@ -108,7 +102,6 @@ struct BaseIC : public MacroAssemblerTypedefs {
void reset() {
hit = false;
slowCallPatched = false;
forcedTypeBarrier = false;
stubsGenerated = 0;
secondShapeGuard = 0;
}
@ -558,6 +551,7 @@ struct PICInfo : public BasePolyIC {
};
#ifdef JS_POLYIC
void PurgePICs(JSContext *cx, JSScript *script);
void JS_FASTCALL GetProp(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL GetPropNoCache(VMFrame &f, ic::PICInfo *);
void JS_FASTCALL SetProp(VMFrame &f, ic::PICInfo *);


@ -100,20 +100,6 @@ SetRejoinState(StackFrame *fp, const CallSite &site, void **location)
}
}
static inline bool
CallsiteMatches(uint8 *codeStart, const CallSite &site, void *location)
{
if (codeStart + site.codeOffset == location)
return true;
#ifdef JS_CPU_ARM
if (codeStart + site.codeOffset + 4 == location)
return true;
#endif
return false;
}
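
The CallsiteMatches helper above encodes one rule: a recorded return address matches a call site at codeStart + site.codeOffset, and on ARM it may also point one instruction (4 bytes) past the call. A self-contained sketch of that rule, with plain integers standing in for JIT code addresses and all values hypothetical:

#include <cassert>
#include <cstdint>

// Sketch of the matching rule: exact offset match everywhere, plus a 4-byte
// slack on ARM where the recorded return address can point past the call.
static bool callsiteMatches(std::uintptr_t codeStart, std::uint32_t codeOffset,
                            std::uintptr_t location, bool isArm)
{
    if (codeStart + codeOffset == location)
        return true;
    return isArm && codeStart + codeOffset + 4 == location;
}

int main()
{
    assert(callsiteMatches(0x1000, 0x20, 0x1020, false));   // exact match
    assert(callsiteMatches(0x1000, 0x20, 0x1024, true));    // ARM slack of 4
    assert(!callsiteMatches(0x1000, 0x20, 0x1024, false));  // no slack elsewhere
    return 0;
}
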
void
Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
{
@ -121,7 +107,7 @@ Recompiler::patchCall(JITScript *jit, StackFrame *fp, void **location)
CallSite *callSites_ = jit->callSites();
for (uint32 i = 0; i < jit->nCallSites; i++) {
if (CallsiteMatches(codeStart, callSites_[i], *location)) {
if (callSites_[i].codeOffset + codeStart == *location) {
JS_ASSERT(callSites_[i].inlineIndex == analyze::CrossScriptSSA::OUTER_FRAME);
SetRejoinState(fp, callSites_[i], location);
return;
@ -136,73 +122,68 @@ Recompiler::patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *
jsbytecode *pc, CallSite *inlined, RejoinState rejoin)
{
/*
* There is a native call or getter IC at pc which triggered recompilation.
* The recompilation could have been triggered either by the native call
* itself, or by a SplatApplyArgs preparing for the native call. Either
* way, we don't want to patch up the call, but will instead steal the pool
* for the IC so it doesn't get freed with the JITScript, and patch up the
* jump at the end to go to the interpoline.
* There is a native IC at pc which triggered a recompilation. The recompilation
* could have been triggered either by the native call itself, or by a SplatApplyArgs
* preparing for the native call. Either way, we don't want to patch up the call,
* but will instead steal the pool for the native IC so it doesn't get freed
* with the old script, and patch up the jump at the end to go to the interpoline.
*/
fp->setRejoin(StubRejoin(rejoin));
/* :XXX: We might crash later if this fails. */
compartment->jaegerCompartment()->orphanedNativeFrames.append(fp);
DebugOnly<bool> found = false;
/*
* Find and patch all native call stubs attached to the given PC. There may
* be multiple ones for getter stubs attached to e.g. a GETELEM.
*/
for (unsigned i = 0; i < jit->nativeCallStubs.length(); i++) {
NativeCallStub &stub = jit->nativeCallStubs[i];
if (stub.pc != pc || stub.inlined != inlined)
continue;
found = true;
/* Check for pools that were already patched. */
if (!stub.pool)
continue;
/* Patch the native fallthrough to go to the interpoline. */
{
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
/* Win64 needs stack adjustment */
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
#else
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
#endif
uint8 *start = (uint8 *)stub.jump.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
#ifdef JS_CPU_X64
repatch.repatch(stub.jump, interpoline);
#else
repatch.relink(stub.jump, JSC::CodeLocationLabel(interpoline));
#endif
unsigned i;
ic::CallICInfo *callICs = jit->callICs();
for (i = 0; i < jit->nCallICs; i++) {
CallSite *call = callICs[i].call;
if (inlined) {
/*
* The IC and regs.inlined will have two different call sites for
* the same point in the script. The IC site refers to the scripted
* return and regs.inlined has the prologue site (which was in use
* when the native stub was generated).
*/
if (call->inlineIndex == inlined->inlineIndex && call->pcOffset == inlined->pcOffset)
break;
} else if (call->inlineIndex == uint32(-1) &&
call->pcOffset == uint32(pc - jit->script->code)) {
break;
}
}
JS_ASSERT(i < jit->nCallICs);
ic::CallICInfo &ic = callICs[i];
JS_ASSERT(ic.fastGuardedNative);
/* :XXX: We leak the pool if this fails. Oh well. */
compartment->jaegerCompartment()->orphanedNativePools.append(stub.pool);
JSC::ExecutablePool *&pool = ic.pools[ic::CallICInfo::Pool_NativeStub];
/* Mark as stolen in case there are multiple calls on the stack. */
stub.pool = NULL;
if (!pool) {
/* Already stole this stub. */
return;
}
JS_ASSERT(found);
if (inlined) {
/*
* Purge all ICs in the script which can make native calls, to make
* sure the stolen stub is not reentered. This is only necessary if we
* are expanding inline frames, as in other circumstances the jitcode
* is about to be discarded.
*/
jit->purgeGetterPICs();
ic::CallICInfo *callICs_ = jit->callICs();
for (uint32 i = 0; i < jit->nCallICs; i++)
callICs_[i].purge();
/* Patch the native fallthrough to go to the interpoline. */
{
#if (defined(JS_NO_FASTCALL) && defined(JS_CPU_X86)) || defined(_WIN64)
/* Win64 needs stack adjustment */
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpolinePatched);
#else
void *interpoline = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
#endif
uint8 *start = (uint8 *)ic.nativeJump.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
#ifdef JS_CPU_X64
repatch.repatch(ic.nativeJump, interpoline);
#else
repatch.relink(ic.nativeJump, JSC::CodeLocationLabel(interpoline));
#endif
}
/* :XXX: We leak the pool if this fails. Oh well. */
compartment->jaegerCompartment()->orphanedNativePools.append(pool);
/* Mark as stolen in case there are multiple calls on the stack. */
pool = NULL;
}
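
The pool-stealing step in patchNative above reduces to: if the native stub's pool has not already been taken, append it to the compartment's orphan list so the code outlives the discarded JITScript, then clear the IC's pointer so a second frame on the stack cannot free or re-steal it. A minimal sketch under hypothetical types (Pool and NativeIC are stand-ins, not the real JSC/IC classes):

#include <cassert>
#include <vector>

// Hypothetical stand-ins: an executable pool owned by a native-call IC, the
// compartment's orphan list, and the "steal" step performed by patchNative.
struct Pool { };

struct NativeIC {
    Pool *pool;   // null once the stub has been stolen
};

static void stealNativePool(NativeIC &ic, std::vector<Pool *> &orphaned)
{
    if (!ic.pool)
        return;                   // already stolen by an earlier call on the stack
    orphaned.push_back(ic.pool);  // keep the stub code alive past the JITScript
    ic.pool = nullptr;            // mark as stolen so it is not freed or re-stolen
}

int main()
{
    Pool pool;
    NativeIC ic = { &pool };
    std::vector<Pool *> orphaned;

    stealNativePool(ic, orphaned);
    stealNativePool(ic, orphaned);  // second call is a no-op

    assert(orphaned.size() == 1 && ic.pool == nullptr);
    return 0;
}
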
void
@ -214,16 +195,15 @@ Recompiler::patchFrame(JSCompartment *compartment, VMFrame *f, JSScript *script)
* where the call occurred, regardless of any frames which were pushed
* inside the call.
*/
JS_ASSERT(!f->regs.inlined());
StackFrame *fp = f->fp();
void **addr = f->returnAddressLocation();
RejoinState rejoin = (RejoinState) f->stubRejoin;
if (rejoin == REJOIN_NATIVE ||
rejoin == REJOIN_NATIVE_LOWERED ||
rejoin == REJOIN_NATIVE_GETTER) {
rejoin == REJOIN_NATIVE_LOWERED) {
/* Native call. */
if (fp->script() == script) {
patchNative(compartment, fp->jit(), fp, f->regs.pc, NULL, rejoin);
patchNative(compartment, fp->jit(), fp,
f->regs.pc, NULL, rejoin);
f->stubRejoin = REJOIN_NATIVE_PATCHED;
}
} else if (rejoin == REJOIN_NATIVE_PATCHED) {
@ -312,22 +292,15 @@ Recompiler::expandInlineFrames(JSCompartment *compartment,
/* Check if the VMFrame returns into the inlined frame. */
if (f->stubRejoin && f->fp() == fp) {
RejoinState rejoin = (RejoinState) f->stubRejoin;
JS_ASSERT(rejoin != REJOIN_NATIVE_PATCHED);
if (rejoin == REJOIN_NATIVE ||
rejoin == REJOIN_NATIVE_LOWERED ||
rejoin == REJOIN_NATIVE_GETTER) {
/* The VMFrame is calling a native. */
patchNative(compartment, fp->jit(), innerfp, innerpc, inlined, rejoin);
f->stubRejoin = REJOIN_NATIVE_PATCHED;
} else {
/* The VMFrame is calling CompileFunction. */
innerfp->setRejoin(StubRejoin(rejoin));
*frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
f->stubRejoin = 0;
}
/* The VMFrame is calling CompileFunction. */
JS_ASSERT(f->stubRejoin != REJOIN_NATIVE &&
f->stubRejoin != REJOIN_NATIVE_LOWERED &&
f->stubRejoin != REJOIN_NATIVE_PATCHED);
innerfp->setRejoin(StubRejoin((RejoinState) f->stubRejoin));
*frameAddr = JS_FUNC_TO_DATA_PTR(void *, JaegerInterpoline);
f->stubRejoin = 0;
}
if (CallsiteMatches(codeStart, *inlined, *frameAddr)) {
if (*frameAddr == codeStart + inlined->codeOffset) {
/* The VMFrame returns directly into the expanded frame. */
SetRejoinState(innerfp, *inlined, frameAddr);
}
@ -524,7 +497,12 @@ Recompiler::cleanup(JITScript *jit)
JS_STATIC_ASSERT(offsetof(ic::CallICInfo, links) == 0);
ic::CallICInfo *ic = (ic::CallICInfo *) jit->callers.next;
ic->purge();
uint8 *start = (uint8 *)ic->funGuard.executableAddress();
JSC::RepatchBuffer repatch(JSC::JITCode(start - 32, 64));
repatch.repatch(ic->funGuard, NULL);
repatch.relink(ic->funJump, ic->slowPathStart);
ic->purgeGuardedObject();
}
}


@ -100,7 +100,7 @@ private:
static void patchCall(JITScript *jit, StackFrame *fp, void **location);
static void patchNative(JSCompartment *compartment, JITScript *jit, StackFrame *fp,
jsbytecode *pc, CallSite *inlined, RejoinState rejoin);
jsbytecode *pc, CallSite *inline_, RejoinState rejoin);
static StackFrame *
expandInlineFrameChain(StackFrame *outer, InlineFrame *inner);


@ -375,6 +375,7 @@ NameOp(VMFrame &f, JSObject *obj, bool callname)
if (op2 == JSOP_TYPEOF) {
f.regs.sp++;
f.regs.sp[-1].setUndefined();
TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-1]);
return obj;
}
ReportAtomNotDefined(cx, atom);
@ -401,6 +402,8 @@ NameOp(VMFrame &f, JSObject *obj, bool callname)
AddTypePropertyId(cx, obj, id, Type::UndefinedType());
}
TypeScript::Monitor(cx, f.script(), f.pc(), rval);
*f.regs.sp++ = rval;
if (callname)
@ -440,6 +443,7 @@ stubs::GetElem(VMFrame &f)
if (!str)
THROW();
f.regs.sp[-2].setString(str);
TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-2]);
return;
}
}
@ -447,6 +451,7 @@ stubs::GetElem(VMFrame &f)
if (lref.isMagic(JS_LAZY_ARGUMENTS)) {
if (rref.isInt32() && size_t(rref.toInt32()) < regs.fp()->numActualArgs()) {
regs.sp[-2] = regs.fp()->canonicalActualArg(rref.toInt32());
TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-2]);
return;
}
MarkArgumentsCreated(cx, f.script());
@ -503,8 +508,12 @@ stubs::GetElem(VMFrame &f)
THROW();
copyFrom = &rval;
if (!JSID_IS_INT(id))
TypeScript::MonitorUnknown(cx, f.script(), f.pc());
end_getelem:
f.regs.sp[-2] = *copyFrom;
TypeScript::Monitor(cx, f.script(), f.pc(), f.regs.sp[-2]);
}
static inline bool
@ -550,6 +559,9 @@ stubs::CallElem(VMFrame &f)
{
regs.sp[-1] = thisv;
}
if (!JSID_IS_INT(id))
TypeScript::MonitorUnknown(cx, f.script(), f.pc());
TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-2]);
}
template<JSBool strict>
@ -585,12 +597,18 @@ stubs::SetElem(VMFrame &f)
break;
if ((jsuint)i >= obj->getArrayLength())
obj->setArrayLength(cx, i + 1);
/*
* Note: this stub is used for ENUMELEM, so watch out
* before overwriting the op.
*/
if (JSOp(*f.pc()) == JSOP_SETELEM)
*f.pc() = JSOP_SETHOLE;
}
obj->setDenseArrayElementWithType(cx, i, rval);
goto end_setelem;
} else {
if (f.script()->hasAnalysis())
f.script()->analysis()->getCode(f.pc()).arrayWriteHole = true;
if (JSOp(*f.pc()) == JSOP_SETELEM)
*f.pc() = JSOP_SETHOLE;
}
}
} while (0);
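
The dense-array branch above specializes the bytecode in place: once a write past the initialized length (a hole write) is observed, JSOP_SETELEM is rewritten to JSOP_SETHOLE so later executions take the hole-aware path, while ENUMELEM, which shares this stub, is left untouched. A self-contained sketch of that rewrite with hypothetical opcode values:

#include <cassert>

// Hypothetical opcode values; the real JSOP_* constants live in the engine.
enum Op { OP_SETELEM = 1, OP_SETHOLE = 2, OP_ENUMELEM = 3 };

// Once a hole write is observed, specialize the plain store opcode in place so
// later executions take the hole-aware path. ENUMELEM shares this stub and
// must keep its opcode, exactly as the comment in the stub above warns.
static void noteHoleWrite(unsigned char *pc)
{
    if (*pc == OP_SETELEM)
        *pc = OP_SETHOLE;
}

int main()
{
    unsigned char bytecode[2] = { OP_SETELEM, OP_ENUMELEM };
    noteHoleWrite(&bytecode[0]);
    noteHoleWrite(&bytecode[1]);
    assert(bytecode[0] == OP_SETHOLE);
    assert(bytecode[1] == OP_ENUMELEM);
    return 0;
}
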
@ -779,7 +797,6 @@ stubs::DefFun(VMFrame &f, JSFunction *fun)
obj = CloneFunctionObject(cx, fun, obj2, true);
if (!obj)
THROW();
JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
}
/*
@ -1430,8 +1447,6 @@ stubs::DefLocalFun(VMFrame &f, JSFunction *fun)
}
}
JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
return obj;
}
@ -1546,7 +1561,6 @@ stubs::Lambda(VMFrame &f, JSFunction *fun)
if (!obj)
THROWV(NULL);
JS_ASSERT_IF(f.script()->compileAndGo, obj->getGlobal() == fun->getGlobal());
return obj;
}
@ -1561,6 +1575,7 @@ InlineGetProp(VMFrame &f)
if (vp->isMagic(JS_LAZY_ARGUMENTS)) {
JS_ASSERT(js_GetOpcode(cx, f.script(), f.pc()) == JSOP_LENGTH);
regs.sp[-1] = Int32Value(regs.fp()->numActualArgs());
TypeScript::Monitor(cx, f.script(), f.pc(), regs.sp[-1]);
return true;
}
@ -1609,6 +1624,8 @@ InlineGetProp(VMFrame &f)
}
} while(0);
TypeScript::Monitor(cx, f.script(), f.pc(), rval);
regs.sp[-1] = rval;
return true;
}
@ -1727,6 +1744,7 @@ stubs::CallProp(VMFrame &f, JSAtom *origAtom)
THROW();
}
#endif
TypeScript::Monitor(cx, f.script(), f.pc(), rval);
}
void JS_FASTCALL
@ -2378,19 +2396,6 @@ stubs::TypeBarrierHelper(VMFrame &f, uint32 which)
TypeScript::Monitor(f.cx, f.script(), f.pc(), result);
}
void JS_FASTCALL
stubs::StubTypeHelper(VMFrame &f, int32 which)
{
const Value &result = f.regs.sp[which];
if (f.script()->hasAnalysis() && f.script()->analysis()->ranInference()) {
AutoEnterTypeInference enter(f.cx);
f.script()->analysis()->breakTypeBarriers(f.cx, f.pc() - f.script()->code, false);
}
TypeScript::Monitor(f.cx, f.script(), f.pc(), result);
}
/*
* Variant of TypeBarrierHelper for checking types after making a native call.
* The stack is already correct, and no fixup should be performed.
@ -2408,6 +2413,25 @@ stubs::NegZeroHelper(VMFrame &f)
TypeScript::MonitorOverflow(f.cx, f.script(), f.pc());
}
void JS_FASTCALL
stubs::CallPropSwap(VMFrame &f)
{
/*
* CALLPROP operations on strings are implemented in terms of GETPROP.
* If we rejoin from such a GETPROP, we come here at the end of the
* CALLPROP to fix up the stack. Right now the stack looks like:
*
* STRING PROP
*
* We need it to be:
*
* PROP STRING
*/
Value v = f.regs.sp[-1];
f.regs.sp[-1] = f.regs.sp[-2];
f.regs.sp[-2] = v;
}
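
The comment above spells out the fixup CallPropSwap performs: after a string CALLPROP that rejoined through the GETPROP path, the top two stack slots hold STRING then PROP and must be exchanged. A self-contained sketch of the same exchange on a stand-in two-slot stack (plain strings, not the engine's Value type):

#include <algorithm>
#include <cassert>
#include <string>

int main()
{
    // Two-slot stand-in for the top of the VM stack after the GETPROP path of
    // a string CALLPROP: slot 0 is sp[-2] (STRING), slot 1 is sp[-1] (PROP).
    std::string stack[2] = { "STRING", "PROP" };

    // Same fixup as stubs::CallPropSwap above: exchange the two values so the
    // fetched property ends up below the string it will be called on.
    std::swap(stack[0], stack[1]);

    assert(stack[0] == "PROP");
    assert(stack[1] == "STRING");
    return 0;
}
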
void JS_FASTCALL
stubs::CheckArgumentTypes(VMFrame &f)
{
@ -2458,34 +2482,6 @@ stubs::AssertArgumentTypes(VMFrame &f)
TypeFailure(f.cx, "Missing type for arg %d: %s", i, TypeString(type));
}
}
void JS_FASTCALL
stubs::TypeCheckPushed(VMFrame &f)
{
TypeScript::CheckBytecode(f.cx, f.script(), f.pc(), f.regs.sp);
}
void JS_FASTCALL
stubs::TypeCheckPopped(VMFrame &f, int32 which)
{
JSScript *script = f.script();
jsbytecode *pc = f.pc();
if (!script->hasAnalysis() || !script->analysis()->ranInference())
return;
AutoEnterTypeInference enter(f.cx);
const js::Value &val = f.regs.sp[-1 - which];
TypeSet *types = script->analysis()->poppedTypes(pc, which);
Type type = GetValueType(f.cx, val);
if (!types->hasType(type)) {
/* Display fine-grained debug information first */
fprintf(stderr, "Missing type at #%u:%05u popped %u: %s\n",
script->id(), unsigned(pc - script->code), which, TypeString(type));
TypeFailure(f.cx, "Missing type popped %u", which);
}
}
#endif
/*


@ -210,14 +210,11 @@ void JS_FASTCALL TypeBarrierHelper(VMFrame &f, uint32 which);
void JS_FASTCALL TypeBarrierReturn(VMFrame &f, Value *vp);
void JS_FASTCALL NegZeroHelper(VMFrame &f);
void JS_FASTCALL StubTypeHelper(VMFrame &f, int32 which);
void JS_FASTCALL CallPropSwap(VMFrame &f);
void JS_FASTCALL CheckArgumentTypes(VMFrame &f);
#ifdef DEBUG
void JS_FASTCALL AssertArgumentTypes(VMFrame &f);
void JS_FASTCALL TypeCheckPushed(VMFrame &f);
void JS_FASTCALL TypeCheckPopped(VMFrame &f, int32 which);
#endif
void JS_FASTCALL MissedBoundsCheckEntry(VMFrame &f);


@ -170,19 +170,19 @@ typedef JSC::MacroAssembler::Imm32 Imm32;
typedef JSC::MacroAssembler::DataLabelPtr DataLabelPtr;
JSC::MacroAssembler::Call
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses)
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin)
{
return emitStubCall(ptr, rejoin, uses, frame.totalDepth());
return emitStubCall(ptr, rejoin, frame.totalDepth());
}
JSC::MacroAssembler::Call
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32 slots)
StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, int32 slots)
{
JaegerSpew(JSpew_Insns, " ---- BEGIN SLOW CALL CODE ---- \n");
masm.bumpStubCounter(cc.script, cc.PC, Registers::tempCallReg());
DataLabelPtr inlinePatch;
Call cl = masm.fallibleVMCall(cx->typeInferenceEnabled(),
ptr, cc.outerPC(), NULL, &inlinePatch, slots);
ptr, cc.outerPC(), &inlinePatch, slots);
JaegerSpew(JSpew_Insns, " ---- END SLOW CALL CODE ---- \n");
/* Add the call site for debugging and recompilation. */
@ -199,7 +199,7 @@ StubCompiler::emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32 slots
/* MissedBoundsCheck* are not actually called, so f.regs needs to be written before InvariantFailure. */
bool entry = (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckEntry))
|| (ptr == JS_FUNC_TO_DATA_PTR(void *, stubs::MissedBoundsCheckHead));
cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length(), uses);
cc.loop->addInvariantCall(j, l, true, entry, cc.callSites.length());
}
cc.addCallSite(site);


@ -137,8 +137,8 @@ class StubCompiler
bool jumpInScript(Jump j, jsbytecode *target);
unsigned crossJump(Jump j, Label l);
Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses);
Call emitStubCall(void *ptr, RejoinState rejoin, Uses uses, int32 slots);
Call emitStubCall(void *ptr, RejoinState rejoin);
Call emitStubCall(void *ptr, RejoinState rejoin, int32 slots);
void patchJoin(unsigned i, bool script, Assembler::Address address, AnyRegisterID reg);
};


@ -122,7 +122,7 @@ TrampolineCompiler::generateForceReturn(Assembler &masm)
masm.loadPtr(FrameAddress(VMFrame::offsetOfFp), JSFrameReg);
/* Perform the frame epilogue. */
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::AnyFrameEpilogue), NULL, NULL, NULL, 0);
masm.fallibleVMCall(true, JS_FUNC_TO_DATA_PTR(void *, stubs::AnyFrameEpilogue), NULL, NULL, 0);
/* Store any known return value */
masm.loadValueAsComponents(UndefinedValue(), JSReturnReg_Type, JSReturnReg_Data);