Mirror of https://github.com/mozilla/gecko-dev.git

commit c449e0ada7
Merge mozilla-central and tracemonkey. (a=blockers)
@@ -184,9 +184,9 @@ nsEventListenerInfo::GetDebugObject(nsISupports** aRetVal)
         nsCOMPtr<jsdIValue> jsdValue;
         jsd->WrapJSValue(v, getter_AddRefs(jsdValue));
         *aRetVal = jsdValue.forget().get();
         return NS_OK;
       }
     }
     stack->Pop(&cx);
   }
 }
 #endif
@@ -146,7 +146,9 @@ public:
         ASSERT_VALID_CODE_POINTER(m_value);
     }

-    void* executableAddress() const { return m_value; }
+    void* executableAddress() const {
+        return m_value;
+    }
 #if WTF_CPU_ARM_THUMB2
     // To use this pointer as a data address remove the decoration.
     void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; }
js/src/jit-test/tests/basic/bug599854.js (new file, 19 lines)
@@ -0,0 +1,19 @@
+function assertEqArray(actual, expected) {
+    if (actual.length != expected.length) {
+        throw new Error(
+            "array lengths not equal: got " +
+            uneval(actual) + ", expected " + uneval(expected));
+    }
+
+    for (var i = 0; i < actual.length; ++i) {
+        if (actual[i] != expected[i]) {
+            throw new Error(
+                "arrays not equal at element " + i + ": got " +
+                uneval(actual) + ", expected " + uneval(expected));
+        }
+    }
+}
+
+assertEqArray(/(?:(?:(")(c)")?)*/.exec('"c"'), [ '"c"', '"', "c" ]);
+assertEqArray(/(?:(?:a*?(")(c)")?)*/.exec('"c"'), [ '"c"', '"', "c" ]);
+assertEqArray(/<script\s*(?![^>]*type=['"]?(?:dojo\/|text\/html\b))(?:[^>]*?(?:src=(['"]?)([^>]*?)\1[^>]*)?)*>([\s\S]*?)<\/script>/gi.exec('<script type="text/javascript" src="..."></script>'), ['<script type="text/javascript" src="..."></script>', '"', "...", ""]);
js/src/jit-test/tests/basic/bug606882-1.js (new file, 3 lines)
@@ -0,0 +1,3 @@
+// don't crash
+
+"ABC".match("A+(?:X?(?:|(?:))(?:(?:B)?C+w?w?)?)*");
js/src/jit-test/tests/basic/bug606882-2.js (new file, 16 lines)
@@ -0,0 +1,16 @@
+// don't crash
+var book = 'Ps';
+var pattern = "(?:"
+            + "(?:"
+            + "(?:"
+            + "(?:-|)"
+            + "\\s?"
+            + ")"
+            + "|"
+            + ")"
+            + " ?"
+            + "\\d+"
+            + "\\w?"
+            + ")*";
+var re = new RegExp(pattern);
+'8:5-8'.match(re);
js/src/jit-test/tests/basic/testMethodWriteBarrier4.js (new file, 12 lines)
@@ -0,0 +1,12 @@
+var z = 0;
+function f() {
+    this.b = function() {};
+    this.b = undefined;
+    if (z++ > HOTLOOP)
+        this.b();
+}
+
+try {
+    for (var i = 0; i < HOTLOOP + 2; i++)
+        new f();
+} catch (exc) {}
@@ -5706,6 +5706,32 @@ JS_SetErrorReporter(JSContext *cx, JSErrorReporter er)

 /************************************************************************/

+/*
+ * Dates.
+ */
+JS_PUBLIC_API(JSObject *)
+JS_NewDateObject(JSContext *cx, int year, int mon, int mday, int hour, int min, int sec)
+{
+    CHECK_REQUEST(cx);
+    return js_NewDateObject(cx, year, mon, mday, hour, min, sec);
+}
+
+JS_PUBLIC_API(JSObject *)
+JS_NewDateObjectMsec(JSContext *cx, jsdouble msec)
+{
+    CHECK_REQUEST(cx);
+    return js_NewDateObjectMsec(cx, msec);
+}
+
+JS_PUBLIC_API(JSBool)
+JS_ObjectIsDate(JSContext *cx, JSObject *obj)
+{
+    JS_ASSERT(obj);
+    return obj->isDate();
+}
+
+/************************************************************************/
+
 /*
  * Regular Expressions.
  */
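The hunk above adds the first public date API to the JSAPI. A minimal embedder sketch of how the three new entry points combine (illustrative only, not part of the patch; `ReifyTimestamp` is a hypothetical helper name, and a live JSContext inside a request is assumed):

    JSObject *
    ReifyTimestamp(JSContext *cx, jsdouble msecSinceEpoch)
    {
        JSObject *date = JS_NewDateObjectMsec(cx, msecSinceEpoch);
        if (!date)
            return NULL;                       /* creation failed; error reported on cx */
        JS_ASSERT(JS_ObjectIsDate(cx, date));  /* the predicate is infallible */
        return date;
    }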
@@ -3596,6 +3596,24 @@ JS_SetErrorReporter(JSContext *cx, JSErrorReporter er);

 /************************************************************************/

+/*
+ * Dates.
+ */
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewDateObject(JSContext *cx, int year, int mon, int mday, int hour, int min, int sec);
+
+extern JS_PUBLIC_API(JSObject *)
+JS_NewDateObjectMsec(JSContext *cx, jsdouble msec);
+
+/*
+ * Infallible predicate to test whether obj is a date object.
+ */
+extern JS_PUBLIC_API(JSBool)
+JS_ObjectIsDate(JSContext *cx, JSObject *obj);
+
+/************************************************************************/
+
 /*
  * Regular Expressions.
  */
@@ -136,8 +136,6 @@ static const size_t MAX_SLOW_NATIVE_EXTRA_SLOTS = 16;
 /* Forward declarations of tracer types. */
 class VMAllocator;
 class FrameInfoCache;
-struct REHashFn;
-struct REHashKey;
 struct FrameInfo;
 struct VMSideExit;
 struct TreeFragment;
@@ -145,8 +143,6 @@ struct TracerState;
 template<typename T> class Queue;
 typedef Queue<uint16> SlotList;
 class TypeMap;
-struct REFragment;
-typedef nanojit::HashMap<REHashKey, REFragment*, REHashFn> REHashMap;
 class LoopProfile;

 #if defined(JS_JIT_SPEW) || defined(DEBUG)
@@ -239,11 +239,6 @@ struct TraceMonitor {
      */
     JSBool needFlush;

-    /*
-     * Fragment map for the regular expression compiler.
-     */
-    REHashMap* reFragments;
-
     // Cached temporary typemap to avoid realloc'ing every time we create one.
     // This must be used in only one place at a given time. It must be cleared
     // before use.
@@ -257,8 +252,8 @@ struct TraceMonitor {
     nanojit::Seq<nanojit::Fragment*>* branches;
     uint32 lastFragID;
     /*
-     * profAlloc has a lifetime which spans exactly from js_InitJIT to
-     * js_FinishJIT.
+     * profAlloc has a lifetime which spans exactly from InitJIT to
+     * FinishJIT.
      */
     VMAllocator* profAlloc;
     FragStatsMap* profTab;
@@ -141,6 +141,9 @@ PurgeCallICs(JSContext *cx, JSScript *start)
 JS_FRIEND_API(JSBool)
 js_SetDebugMode(JSContext *cx, JSBool debug)
 {
+    if (!cx->compartment)
+        return JS_TRUE;
+
     cx->compartment->debugMode = debug;
 #ifdef JS_METHODJIT
     for (JSScript *script = (JSScript *)cx->compartment->scripts.next;
@@ -1924,13 +1927,14 @@ JS_StopProfiling()
 static JSBool
 StartProfiling(JSContext *cx, uintN argc, jsval *vp)
 {
-    JS_SET_RVAL(cx, vp, BOOLEAN_TO_JSVAL(Probes::startProfiling()));
+    JS_SET_RVAL(cx, vp, BOOLEAN_TO_JSVAL(JS_StartProfiling()));
     return true;
 }

 static JSBool
 StopProfiling(JSContext *cx, uintN argc, jsval *vp)
 {
     JS_StopProfiling();
+    JS_SET_RVAL(cx, vp, JSVAL_VOID);
     return true;
 }
@@ -2162,7 +2162,7 @@ SweepCrossCompartmentWrappers(JSContext *cx)
     JSRuntime *rt = cx->runtime;
     /*
      * Figure out how much JIT code should be released from inactive compartments.
-     * If multiple eighth-lifes have passed, compound the release interval linearly;
+     * If multiple eighth-lives have passed, compound the release interval linearly;
      * if enough time has passed, all inactive JIT code will be released.
      */
     uint32 releaseInterval = 0;
@@ -2177,10 +2177,8 @@ SweepCrossCompartmentWrappers(JSContext *cx)
     }

     /* Remove dead wrappers from the compartment map. */
-    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c) {
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); ++c)
         (*c)->sweep(cx, releaseInterval);
-    }
-
 }

 static void
@@ -2188,31 +2186,29 @@ SweepCompartments(JSContext *cx, JSGCInvocationKind gckind)
 {
     JSRuntime *rt = cx->runtime;
     JSCompartmentCallback callback = rt->compartmentCallback;
-    JSCompartment **read = rt->compartments.begin();
+
+    /* Skip the atomsCompartment. */
+    JSCompartment **read = rt->compartments.begin() + 1;
     JSCompartment **end = rt->compartments.end();
     JSCompartment **write = read;
-
-    /* Delete atomsCompartment only during runtime shutdown */
-    rt->atomsCompartment->marked = true;
+    JS_ASSERT(rt->compartments.length() >= 1);
+    JS_ASSERT(*rt->compartments.begin() == rt->atomsCompartment);

     while (read < end) {
-        JSCompartment *compartment = (*read++);
-        if (compartment->marked) {
-            compartment->marked = false;
-            *write++ = compartment;
-        } else {
+        JSCompartment *compartment = *read++;
+
+        /* Unmarked compartments containing marked objects don't get deleted, except LAST_CONTEXT GC is performed. */
+        if ((!compartment->marked && compartment->arenaListsAreEmpty()) || gckind == GC_LAST_CONTEXT) {
             JS_ASSERT(compartment->freeLists.isEmpty());
-            if (compartment->arenaListsAreEmpty() || gckind == GC_LAST_CONTEXT) {
-                if (callback)
-                    (void) callback(cx, compartment, JSCOMPARTMENT_DESTROY);
-                if (compartment->principals)
-                    JSPRINCIPALS_DROP(cx, compartment->principals);
-                js_delete(compartment);
-            } else {
-                compartment->marked = false;
-                *write++ = compartment;
-            }
+            if (callback)
+                (void) callback(cx, compartment, JSCOMPARTMENT_DESTROY);
+            if (compartment->principals)
+                JSPRINCIPALS_DROP(cx, compartment->principals);
+            js_delete(compartment);
+            continue;
         }
+        compartment->marked = false;
+        *write++ = compartment;
     }
     rt->compartments.resize(write - rt->compartments.begin());
 }
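The rewritten SweepCompartments loop above is the classic read/write in-place compaction idiom: surviving compartments are copied toward the front and the vector is resized to the write pointer. A generic sketch of the same idiom, under the assumption of a caller-supplied drop predicate (illustrative, not SpiderMonkey code):

    /* Compact [begin, end) in place, dropping entries that match shouldDrop. */
    template <typename T, typename Pred>
    T **
    CompactInPlace(T **begin, T **end, Pred shouldDrop)
    {
        T **write = begin;
        for (T **read = begin; read < end; ++read) {
            if (shouldDrop(*read))
                continue;          /* dropped entries are not copied forward */
            *write++ = *read;      /* kept entries compact toward the front */
        }
        return write;              /* new logical end; caller resizes the container */
    }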
@@ -2354,7 +2350,6 @@ MarkAndSweepCompartment(JSContext *cx, JSCompartment *comp, JSGCInvocationKind g
      * state. We finalize objects before other GC things to ensure that
      * object's finalizer can access them even if they will be freed.
      */
-
     comp->sweep(cx, 0);

     comp->finalizeObjectArenaLists(cx);
@@ -2464,14 +2459,13 @@ MarkAndSweep(JSContext *cx, JSGCInvocationKind gckind GCTIMER_PARAM)
      * state. We finalize objects before other GC things to ensure that
      * object's finalizer can access them even if they will be freed.
      */
-
-    for (JSCompartment **comp = rt->compartments.begin(); comp != rt->compartments.end(); comp++)
-        (*comp)->finalizeObjectArenaLists(cx);
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
+        (*c)->finalizeObjectArenaLists(cx);

     TIMESTAMP(sweepObjectEnd);

-    for (JSCompartment **comp = rt->compartments.begin(); comp != rt->compartments.end(); comp++)
-        (*comp)->finalizeStringArenaLists(cx);
+    for (JSCompartment **c = rt->compartments.begin(); c != rt->compartments.end(); c++)
+        (*c)->finalizeStringArenaLists(cx);

     TIMESTAMP(sweepStringEnd);
@@ -2180,12 +2180,14 @@ IteratorMore(JSContext *cx, JSObject *iterobj, bool *cond, Value *rval)
 {
     if (iterobj->getClass() == &js_IteratorClass) {
         NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
-        *cond = (ni->props_cursor < ni->props_end);
-    } else {
-        if (!js_IteratorMore(cx, iterobj, rval))
-            return false;
-        *cond = rval->isTrue();
+        if (ni->isKeyIter()) {
+            *cond = (ni->props_cursor < ni->props_end);
+            return true;
+        }
     }
+    if (!js_IteratorMore(cx, iterobj, rval))
+        return false;
+    *cond = rval->isTrue();
     return true;
 }
@@ -2194,19 +2196,15 @@ IteratorNext(JSContext *cx, JSObject *iterobj, Value *rval)
 {
     if (iterobj->getClass() == &js_IteratorClass) {
         NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
-        JS_ASSERT(ni->props_cursor < ni->props_end);
         if (ni->isKeyIter()) {
-            jsid id = *ni->currentKey();
+            JS_ASSERT(ni->props_cursor < ni->props_end);
+            jsid id = *ni->current();
             if (JSID_IS_ATOM(id)) {
                 rval->setString(JSID_TO_STRING(id));
-                ni->incKeyCursor();
+                ni->incCursor();
                 return true;
             }
             /* Take the slow path if we have to stringify a numeric property name. */
-        } else {
-            *rval = *ni->currentValue();
-            ni->incValueCursor();
-            return true;
         }
     }
     return js_IteratorNext(cx, iterobj, rval);
@@ -118,10 +118,7 @@ Class js_IteratorClass = {
 void
 NativeIterator::mark(JSTracer *trc)
 {
-    if (isKeyIter())
-        MarkIdRange(trc, beginKey(), endKey(), "props");
-    else
-        MarkValueRange(trc, beginValue(), endValue(), "props");
+    MarkIdRange(trc, begin(), end(), "props");
     if (obj)
         MarkObject(trc, *obj, "obj");
 }
@@ -172,46 +169,10 @@ NewKeyValuePair(JSContext *cx, jsid id, const Value &val, Value *rval)
     return true;
 }

-struct KeyEnumeration
-{
-    typedef AutoIdVector ResultVector;
-
-    static JS_ALWAYS_INLINE bool
-    append(JSContext *, AutoIdVector &keys, JSObject *, jsid id, uintN flags)
-    {
-        JS_ASSERT((flags & JSITER_FOREACH) == 0);
-        return keys.append(id);
-    }
-};
-
-struct ValueEnumeration
-{
-    typedef AutoValueVector ResultVector;
-
-    static JS_ALWAYS_INLINE bool
-    append(JSContext *cx, AutoValueVector &vals, JSObject *obj, jsid id, uintN flags)
-    {
-        JS_ASSERT(flags & JSITER_FOREACH);
-
-        if (!vals.growBy(1))
-            return false;
-
-        /* Do the lookup on the original object instead of the prototype. */
-        Value *vp = vals.end() - 1;
-        if (!obj->getProperty(cx, id, vp))
-            return false;
-        if ((flags & JSITER_KEYVALUE) && !NewKeyValuePair(cx, id, *vp, vp))
-            return false;
-
-        return true;
-    }
-};
-
-template <class EnumPolicy>
 static inline bool
 Enumerate(JSContext *cx, JSObject *obj, JSObject *pobj, jsid id,
           bool enumerable, bool sharedPermanent, uintN flags, IdSet& ht,
-          typename EnumPolicy::ResultVector *props)
+          AutoIdVector *props)
 {
     IdSet::AddPtr p = ht.lookupForAdd(id);
     JS_ASSERT_IF(obj == pobj && !obj->isProxy(), !p);
@@ -244,15 +205,14 @@ Enumerate(JSContext *cx, JSObject *obj, JSObject *pobj, jsid id,
     }

     if (enumerable || (flags & JSITER_HIDDEN))
-        return EnumPolicy::append(cx, *props, obj, id, flags);
+        return props->append(id);

     return true;
 }

-template <class EnumPolicy>
 static bool
 EnumerateNativeProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN flags, IdSet &ht,
-                          typename EnumPolicy::ResultVector *props)
+                          AutoIdVector *props)
 {
     size_t initialLength = props->length();

@@ -262,8 +222,8 @@ EnumerateNativeProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN fl

         if (!JSID_IS_DEFAULT_XML_NAMESPACE(shape.id) &&
             !shape.isAlias() &&
-            !Enumerate<EnumPolicy>(cx, obj, pobj, shape.id, shape.enumerable(),
-                                   shape.isSharedPermanent(), flags, ht, props))
+            !Enumerate(cx, obj, pobj, shape.id, shape.enumerable(),
+                       shape.isSharedPermanent(), flags, ht, props))
         {
             return false;
         }
@@ -273,13 +233,12 @@ EnumerateNativeProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN fl
     return true;
 }

-template <class EnumPolicy>
 static bool
 EnumerateDenseArrayProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uintN flags,
-                              IdSet &ht, typename EnumPolicy::ResultVector *props)
+                              IdSet &ht, AutoIdVector *props)
 {
-    if (!Enumerate<EnumPolicy>(cx, obj, pobj, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom), false, true,
-                               flags, ht, props)) {
+    if (!Enumerate(cx, obj, pobj, ATOM_TO_JSID(cx->runtime->atomState.lengthAtom), false, true,
+                   flags, ht, props)) {
         return false;
     }

@@ -289,7 +248,7 @@ EnumerateDenseArrayProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uint
     for (size_t i = 0; i < capacity; ++i, ++vp) {
         if (!vp->isMagic(JS_ARRAY_HOLE)) {
             /* Dense arrays never get so large that i would not fit into an integer id. */
-            if (!Enumerate<EnumPolicy>(cx, obj, pobj, INT_TO_JSID(i), true, false, flags, ht, props))
+            if (!Enumerate(cx, obj, pobj, INT_TO_JSID(i), true, false, flags, ht, props))
                 return false;
         }
     }
@@ -298,9 +257,8 @@ EnumerateDenseArrayProperties(JSContext *cx, JSObject *obj, JSObject *pobj, uint
     return true;
 }

-template <class EnumPolicy>
 static bool
-Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultVector *props)
+Snapshot(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector *props)
 {
     /*
      * FIXME: Bug 575997 - We won't need to initialize this hash table if
@@ -319,10 +277,10 @@ Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultV
             !(clasp->flags & JSCLASS_NEW_ENUMERATE)) {
             if (!clasp->enumerate(cx, pobj))
                 return false;
-            if (!EnumerateNativeProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+            if (!EnumerateNativeProperties(cx, obj, pobj, flags, ht, props))
                 return false;
         } else if (pobj->isDenseArray()) {
-            if (!EnumerateDenseArrayProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+            if (!EnumerateDenseArrayProperties(cx, obj, pobj, flags, ht, props))
                 return false;
         } else {
             if (pobj->isProxy()) {
@@ -335,7 +293,7 @@ Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultV
                     return false;
                 }
                 for (size_t n = 0, len = proxyProps.length(); n < len; n++) {
-                    if (!Enumerate<EnumPolicy>(cx, obj, pobj, proxyProps[n], true, false, flags, ht, props))
+                    if (!Enumerate(cx, obj, pobj, proxyProps[n], true, false, flags, ht, props))
                         return false;
                 }
                 /* Proxy objects enumerate the prototype on their own, so we are done here. */
@@ -346,7 +304,7 @@ Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultV
             if (!pobj->enumerate(cx, op, &state, NULL))
                 return false;
             if (state.isMagic(JS_NATIVE_ENUMERATE)) {
-                if (!EnumerateNativeProperties<EnumPolicy>(cx, obj, pobj, flags, ht, props))
+                if (!EnumerateNativeProperties(cx, obj, pobj, flags, ht, props))
                     return false;
             } else {
                 while (true) {
@@ -355,7 +313,7 @@ Snapshot(JSContext *cx, JSObject *obj, uintN flags, typename EnumPolicy::ResultV
                         return false;
                     if (state.isNull())
                         break;
-                    if (!Enumerate<EnumPolicy>(cx, obj, pobj, id, true, false, flags, ht, props))
+                    if (!Enumerate(cx, obj, pobj, id, true, false, flags, ht, props))
                         return false;
                 }
             }
@@ -390,7 +348,7 @@ VectorToIdArray(JSContext *cx, AutoIdVector &props, JSIdArray **idap)
 JS_FRIEND_API(bool)
 GetPropertyNames(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector *props)
 {
-    return Snapshot<KeyEnumeration>(cx, obj, flags & (JSITER_OWNONLY | JSITER_HIDDEN), props);
+    return Snapshot(cx, obj, flags & (JSITER_OWNONLY | JSITER_HIDDEN), props);
 }

 }
@@ -472,7 +430,7 @@ NewIteratorObject(JSContext *cx, uintN flags)
 }

 NativeIterator *
-NativeIterator::allocateKeyIterator(JSContext *cx, uint32 slength, const AutoIdVector &props)
+NativeIterator::allocateIterator(JSContext *cx, uint32 slength, const AutoIdVector &props)
 {
     size_t plength = props.length();
     NativeIterator *ni = (NativeIterator *)
@@ -486,21 +444,6 @@ NativeIterator::allocateKeyIterator(JSContext *cx, uint32 slength, const AutoIdV
     return ni;
 }

-NativeIterator *
-NativeIterator::allocateValueIterator(JSContext *cx, const AutoValueVector &props)
-{
-    size_t plength = props.length();
-    NativeIterator *ni = (NativeIterator *)
-        cx->malloc(sizeof(NativeIterator) + plength * sizeof(Value));
-    if (!ni)
-        return NULL;
-    ni->props_array = ni->props_cursor = (Value *) (ni + 1);
-    ni->props_end = (Value *)ni->props_array + plength;
-    if (plength)
-        memcpy(ni->props_array, props.begin(), plength * sizeof(Value));
-    return ni;
-}
-
 inline void
 NativeIterator::init(JSObject *obj, uintN flags, uint32 slength, uint32 key)
 {
@@ -534,7 +477,7 @@ VectorToKeyIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &key
     if (!iterobj)
         return false;

-    NativeIterator *ni = NativeIterator::allocateKeyIterator(cx, slength, keys);
+    NativeIterator *ni = NativeIterator::allocateIterator(cx, slength, keys);
     if (!ni)
         return false;
     ni->init(obj, flags, slength, key);
@@ -572,7 +515,7 @@ VectorToKeyIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &pro
 }

 bool
-VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, AutoValueVector &vals,
+VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVector &keys,
                       Value *vp)
 {
     JS_ASSERT(flags & JSITER_FOREACH);
@@ -581,7 +524,7 @@ VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, AutoValueVector
     if (!iterobj)
         return false;

-    NativeIterator *ni = NativeIterator::allocateValueIterator(cx, vals);
+    NativeIterator *ni = NativeIterator::allocateIterator(cx, 0, keys);
     if (!ni)
         return false;
     ni->init(obj, flags, 0, 0);
@@ -599,20 +542,7 @@ EnumeratedIdVectorToIterator(JSContext *cx, JSObject *obj, uintN flags, AutoIdVe
     if (!(flags & JSITER_FOREACH))
         return VectorToKeyIterator(cx, obj, flags, props, vp);

-    /* For for-each iteration, we need to look up the value of each id. */
-
-    size_t plength = props.length();
-
-    AutoValueVector vals(cx);
-    if (!vals.reserve(plength))
-        return NULL;
-
-    for (size_t i = 0; i < plength; ++i) {
-        if (!ValueEnumeration::append(cx, vals, obj, props[i], flags))
-            return false;
-    }
-
-    return VectorToValueIterator(cx, obj, flags, vals, vp);
+    return VectorToValueIterator(cx, obj, flags, props, vp);
 }

 typedef Vector<uint32, 8> ShapeVector;
@@ -718,16 +648,15 @@ GetIterator(JSContext *cx, JSObject *obj, uintN flags, Value *vp)

     /* NB: for (var p in null) succeeds by iterating over no properties. */

+    AutoIdVector keys(cx);
     if (flags & JSITER_FOREACH) {
-        AutoValueVector vals(cx);
-        if (JS_LIKELY(obj != NULL) && !Snapshot<ValueEnumeration>(cx, obj, flags, &vals))
+        if (JS_LIKELY(obj != NULL) && !Snapshot(cx, obj, flags, &keys))
             return false;
         JS_ASSERT(shapes.empty());
-        if (!VectorToValueIterator(cx, obj, flags, vals, vp))
+        if (!VectorToValueIterator(cx, obj, flags, keys, vp))
             return false;
     } else {
-        AutoIdVector keys(cx);
-        if (JS_LIKELY(obj != NULL) && !Snapshot<KeyEnumeration>(cx, obj, flags, &keys))
+        if (JS_LIKELY(obj != NULL) && !Snapshot(cx, obj, flags, &keys))
             return false;
         if (!VectorToKeyIterator(cx, obj, flags, keys, shapes.length(), key, vp))
             return false;
@@ -906,8 +835,8 @@ SuppressDeletedPropertyHelper(JSContext *cx, JSObject *obj, IdPredicate predicat
         /* This only works for identified suppressed keys, not values. */
         if (ni->isKeyIter() && ni->obj == obj && ni->props_cursor < ni->props_end) {
             /* Check whether id is still to come. */
-            jsid *props_cursor = ni->currentKey();
-            jsid *props_end = ni->endKey();
+            jsid *props_cursor = ni->current();
+            jsid *props_end = ni->end();
             for (jsid *idp = props_cursor; idp < props_end; ++idp) {
                 if (predicate(*idp)) {
                     /*
@@ -945,10 +874,10 @@ SuppressDeletedPropertyHelper(JSContext *cx, JSObject *obj, IdPredicate predicat
                      * If it is the next property to be enumerated, just skip it.
                      */
                     if (idp == props_cursor) {
-                        ni->incKeyCursor();
+                        ni->incCursor();
                     } else {
                         memmove(idp, idp + 1, (props_end - (idp + 1)) * sizeof(jsid));
-                        ni->props_end = ni->endKey() - 1;
+                        ni->props_end = ni->end() - 1;
                     }
                     if (predicate.matchesAtMostOne())
                         break;
@@ -997,14 +926,15 @@ JSBool
 js_IteratorMore(JSContext *cx, JSObject *iterobj, Value *rval)
 {
     /* Fast path for native iterators */
+    NativeIterator *ni = NULL;
     if (iterobj->getClass() == &js_IteratorClass) {
-        /*
-         * Implement next directly as all the methods of native iterator are
-         * read-only and permanent.
-         */
-        NativeIterator *ni = iterobj->getNativeIterator();
-        rval->setBoolean(ni->props_cursor < ni->props_end);
-        return true;
+        /* Key iterators are handled by fast-paths. */
+        ni = iterobj->getNativeIterator();
+        bool more = ni->props_cursor < ni->props_end;
+        if (ni->isKeyIter() || !more) {
+            rval->setBoolean(more);
+            return true;
+        }
     }

     /* We might still have a pending value. */
@@ -1014,18 +944,28 @@ js_IteratorMore(JSContext *cx, JSObject *iterobj, Value *rval)
     }

     /* Fetch and cache the next value from the iterator. */
-    jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
-    if (!js_GetMethod(cx, iterobj, id, JSGET_METHOD_BARRIER, rval))
-        return false;
-    if (!ExternalInvoke(cx, iterobj, *rval, 0, NULL, rval)) {
-        /* Check for StopIteration. */
-        if (!cx->isExceptionPending() || !js_ValueIsStopIteration(cx->getPendingException()))
+    if (!ni) {
+        jsid id = ATOM_TO_JSID(cx->runtime->atomState.nextAtom);
+        if (!js_GetMethod(cx, iterobj, id, JSGET_METHOD_BARRIER, rval))
             return false;
+        if (!ExternalInvoke(cx, iterobj, *rval, 0, NULL, rval)) {
+            /* Check for StopIteration. */
+            if (!cx->isExceptionPending() || !js_ValueIsStopIteration(cx->getPendingException()))
+                return false;

-        cx->clearPendingException();
-        cx->iterValue.setMagic(JS_NO_ITER_VALUE);
-        rval->setBoolean(false);
-        return true;
+            cx->clearPendingException();
+            cx->iterValue.setMagic(JS_NO_ITER_VALUE);
+            rval->setBoolean(false);
+            return true;
+        }
+    } else {
+        JS_ASSERT(!ni->isKeyIter());
+        jsid id = *ni->current();
+        ni->incCursor();
+        if (!ni->obj->getProperty(cx, id, rval))
+            return false;
+        if ((ni->flags & JSITER_KEYVALUE) && !NewKeyValuePair(cx, id, *rval, rval))
+            return false;
     }

     /* Cache the value returned by iterobj.next() so js_IteratorNext() can find it. */
@@ -1045,30 +985,27 @@ js_IteratorNext(JSContext *cx, JSObject *iterobj, Value *rval)
          * read-only and permanent.
          */
         NativeIterator *ni = iterobj->getNativeIterator();
-        JS_ASSERT(ni->props_cursor < ni->props_end);
         if (ni->isKeyIter()) {
-            *rval = IdToValue(*ni->currentKey());
-            ni->incKeyCursor();
-        } else {
-            *rval = *ni->currentValue();
-            ni->incValueCursor();
-        }
+            JS_ASSERT(ni->props_cursor < ni->props_end);
+            *rval = IdToValue(*ni->current());
+            ni->incCursor();

-        if (rval->isString() || !ni->isKeyIter())
-            return true;
+            if (rval->isString())
+                return true;

-        JSString *str;
-        jsint i;
-        if (rval->isInt32() && (jsuint(i = rval->toInt32()) < INT_STRING_LIMIT)) {
-            str = JSString::intString(i);
-        } else {
-            str = js_ValueToString(cx, *rval);
-            if (!str)
-                return false;
-        }
+            JSString *str;
+            jsint i;
+            if (rval->isInt32() && (jsuint(i = rval->toInt32()) < INT_STRING_LIMIT)) {
+                str = JSString::intString(i);
+            } else {
+                str = js_ValueToString(cx, *rval);
+                if (!str)
+                    return false;
+            }

-        rval->setString(str);
-        return true;
+            rval->setString(str);
+            return true;
+        }
     }

     JS_ASSERT(!cx->iterValue.isMagic(JS_NO_ITER_VALUE));
@@ -69,9 +69,9 @@ namespace js {

 struct NativeIterator {
     JSObject  *obj;
-    void      *props_array;
-    void      *props_cursor;
-    void      *props_end;
+    jsid      *props_array;
+    jsid      *props_cursor;
+    jsid      *props_end;
     uint32    *shapes_array;
     uint32    shapes_length;
     uint32    shapes_key;
@@ -80,58 +80,29 @@ struct NativeIterator {

     bool isKeyIter() const { return (flags & JSITER_FOREACH) == 0; }

-    inline jsid *beginKey() const {
-        JS_ASSERT(isKeyIter());
-        return (jsid *)props_array;
+    inline jsid *begin() const {
+        return props_array;
     }

-    inline jsid *endKey() const {
-        JS_ASSERT(isKeyIter());
-        return (jsid *)props_end;
+    inline jsid *end() const {
+        return props_end;
     }

     size_t numKeys() const {
-        return endKey() - beginKey();
+        return end() - begin();
     }

-    jsid *currentKey() const {
-        JS_ASSERT(isKeyIter());
-        return reinterpret_cast<jsid *>(props_cursor);
+    jsid *current() const {
+        JS_ASSERT(props_cursor < props_end);
+        return props_cursor;
     }

-    void incKeyCursor() {
-        JS_ASSERT(isKeyIter());
-        props_cursor = reinterpret_cast<jsid *>(props_cursor) + 1;
+    void incCursor() {
+        props_cursor = props_cursor + 1;
     }

-    inline js::Value *beginValue() const {
-        JS_ASSERT(!isKeyIter());
-        return (js::Value *)props_array;
-    }
-
-    inline js::Value *endValue() const {
-        JS_ASSERT(!isKeyIter());
-        return (js::Value *)props_end;
-    }
-
-    size_t numValues() const {
-        return endValue() - beginValue();
-    }
-
-    js::Value *currentValue() const {
-        JS_ASSERT(!isKeyIter());
-        return reinterpret_cast<js::Value *>(props_cursor);
-    }
-
-    void incValueCursor() {
-        JS_ASSERT(!isKeyIter());
-        props_cursor = reinterpret_cast<js::Value *>(props_cursor) + 1;
-    }
-
-    static NativeIterator *allocateKeyIterator(JSContext *cx, uint32 slength,
-                                               const js::AutoIdVector &props);
-    static NativeIterator *allocateValueIterator(JSContext *cx,
-                                                 const js::AutoValueVector &props);
+    static NativeIterator *allocateIterator(JSContext *cx, uint32 slength,
+                                            const js::AutoIdVector &props);
     void init(JSObject *obj, uintN flags, uint32 slength, uint32 key);

     void mark(JSTracer *trc);
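With the Value-typed accessors removed, both key and value iterations now walk a single jsid array through begin()/end()/current()/incCursor(). A small sketch of consuming that cursor (hypothetical helper, mirroring the fast paths shown earlier; note that it advances the cursor as it counts):

    static size_t
    CountRemainingProps(NativeIterator *ni)
    {
        size_t n = 0;
        while (ni->props_cursor < ni->props_end) {
            ni->current();      /* asserts cursor < end and yields the jsid */
            ni->incCursor();    /* bump the cursor by one jsid */
            n++;
        }
        return n;
    }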
@@ -150,7 +121,7 @@ bool
 VectorToKeyIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoIdVector &props, js::Value *vp);

 bool
-VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoValueVector &props, js::Value *vp);
+VectorToValueIterator(JSContext *cx, JSObject *obj, uintN flags, js::AutoIdVector &props, js::Value *vp);

 /*
  * Creates either a key or value iterator, depending on flags. For a value
@@ -149,15 +149,27 @@ JSParseNode::become(JSParseNode *pn2)
         pn2->pn_used = false;
     }

-    /* If this is a function node fix up the pn_funbox->node back-pointer. */
-    if (PN_TYPE(pn2) == TOK_FUNCTION && pn2->pn_arity == PN_FUNC)
-        pn2->pn_funbox->node = this;
-
     pn_type = pn2->pn_type;
     pn_op = pn2->pn_op;
     pn_arity = pn2->pn_arity;
     pn_parens = pn2->pn_parens;
     pn_u = pn2->pn_u;

+    /*
+     * If any pointers are pointing to pn2, change them to point to this
+     * instead, since pn2 will be cleared and probably recycled.
+     */
+    if (PN_TYPE(this) == TOK_FUNCTION && pn_arity == PN_FUNC) {
+        /* Function node: fix up the pn_funbox->node back-pointer. */
+        JS_ASSERT(pn_funbox->node == pn2);
+        pn_funbox->node = this;
+    } else if (pn_arity == PN_LIST && !pn_head) {
+        /* Empty list: fix up the pn_tail pointer. */
+        JS_ASSERT(pn_count == 0);
+        JS_ASSERT(pn_tail == &pn2->pn_head);
+        pn_tail = &pn_head;
+    }
+
     pn2->clear();
 }
@@ -3501,7 +3513,7 @@ Parser::condition()
     JSParseNode *pn;

     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_COND);
-    pn = parenExpr(NULL, NULL);
+    pn = parenExpr();
     if (!pn)
         return NULL;
     MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_COND);
@@ -5034,7 +5046,7 @@ Parser::switchStatement()
     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_SWITCH);

     /* pn1 points to the switch's discriminant. */
-    JSParseNode *pn1 = parenExpr(NULL, NULL);
+    JSParseNode *pn1 = parenExpr();
     if (!pn1)
         return NULL;

@@ -5653,7 +5665,7 @@ Parser::withStatement()
     if (!pn)
         return NULL;
     MUST_MATCH_TOKEN(TOK_LP, JSMSG_PAREN_BEFORE_WITH);
-    JSParseNode *pn2 = parenExpr(NULL, NULL);
+    JSParseNode *pn2 = parenExpr();
     if (!pn2)
         return NULL;
     MUST_MATCH_TOKEN(TOK_RP, JSMSG_PAREN_AFTER_WITH);
|
||||
|
||||
switch (pn->pn_arity) {
|
||||
case PN_LIST:
|
||||
for (JSParseNode *pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next)
|
||||
transplant(pn2);
|
||||
for (JSParseNode *pn2 = pn->pn_head; pn2; pn2 = pn2->pn_next) {
|
||||
if (!transplant(pn2))
|
||||
return false;
|
||||
}
|
||||
if (pn->pn_pos >= root->pn_pos)
|
||||
AdjustBlockId(pn, adjust, tc);
|
||||
break;
|
||||
|
||||
case PN_TERNARY:
|
||||
transplant(pn->pn_kid1);
|
||||
transplant(pn->pn_kid2);
|
||||
transplant(pn->pn_kid3);
|
||||
if (!transplant(pn->pn_kid1) ||
|
||||
!transplant(pn->pn_kid2) ||
|
||||
!transplant(pn->pn_kid3))
|
||||
return false;
|
||||
break;
|
||||
|
||||
case PN_BINARY:
|
||||
transplant(pn->pn_left);
|
||||
if (!transplant(pn->pn_left))
|
||||
return false;
|
||||
|
||||
/* Binary TOK_COLON nodes can have left == right. See bug 492714. */
|
||||
if (pn->pn_right != pn->pn_left)
|
||||
transplant(pn->pn_right);
|
||||
if (pn->pn_right != pn->pn_left) {
|
||||
if (!transplant(pn->pn_right))
|
||||
return false;
|
||||
}
|
||||
break;
|
||||
|
||||
case PN_UNARY:
|
||||
transplant(pn->pn_kid);
|
||||
if (!transplant(pn->pn_kid))
|
||||
return false;
|
||||
break;
|
||||
|
||||
case PN_FUNC:
|
||||
@@ -6995,7 +7014,8 @@ CompExprTransplanter::transplant(JSParseNode *pn)
     }

       case PN_NAME:
-        transplant(pn->maybeExpr());
+        if (!transplant(pn->maybeExpr()))
+            return false;
         if (pn->pn_arity == PN_FUNC)
             --funcLevel;

@@ -7033,21 +7053,22 @@ CompExprTransplanter::transplant(JSParseNode *pn)
             JS_ASSERT(!tc->decls.lookup(atom));

             if (dn->pn_pos < root->pn_pos || dn->isPlaceholder()) {
-                JSAtomListElement *ale = tc->lexdeps.add(tc->parser, dn->pn_atom);
+                JSAtomListElement *ale = tc->lexdeps.add(tc->parser, atom);
                 if (!ale)
                     return false;

                 if (dn->pn_pos >= root->pn_pos) {
                     tc->parent->lexdeps.remove(tc->parser, atom);
                 } else {
-                    JSDefinition *dn2 = (JSDefinition *)NameNode::create(dn->pn_atom, tc);
+                    JSDefinition *dn2 = (JSDefinition *)NameNode::create(atom, tc);
                     if (!dn2)
                         return false;

-                    dn2->pn_type = dn->pn_type;
-                    dn2->pn_pos = root->pn_pos;
+                    dn2->pn_type = TOK_NAME;
+                    dn2->pn_op = JSOP_NOP;
                     dn2->pn_defn = true;
                     dn2->pn_dflags |= PND_PLACEHOLDER;
+                    dn2->pn_pos = root->pn_pos;

                     JSParseNode **pnup = &dn->dn_uses;
                     JSParseNode *pnu;
@@ -7073,7 +7094,8 @@ CompExprTransplanter::transplant(JSParseNode *pn)
         break;

       case PN_NAMESET:
-        transplant(pn->pn_tree);
+        if (!transplant(pn->pn_tree))
+            return false;
         break;
     }
     return true;
@@ -7280,20 +7302,22 @@ Parser::comprehensionTail(JSParseNode *kid, uintN blockid,
  * generator function that is immediately called to evaluate to the generator
  * iterator that is the value of this generator expression.
  *
- * Callers pass a blank unary node via pn, which generatorExpr fills in as the
- * yield expression, which ComprehensionTail in turn wraps in a TOK_SEMI-type
- * expression-statement node that constitutes the body of the |for| loop(s) in
- * the generator function.
+ * |kid| must be the expression before the |for| keyword; we return an
+ * application of a generator function that includes the |for| loops and
+ * |if| guards, with |kid| as the operand of a |yield| expression as the
+ * innermost loop body.
  *
  * Note how unlike Python, we do not evaluate the expression to the right of
  * the first |in| in the chain of |for| heads. Instead, a generator expression
  * is merely sugar for a generator function expression and its application.
  */
 JSParseNode *
-Parser::generatorExpr(JSParseNode *pn, JSParseNode *kid)
+Parser::generatorExpr(JSParseNode *kid)
 {
-    /* Initialize pn, connecting it to kid. */
-    JS_ASSERT(pn->pn_arity == PN_UNARY);
+    /* Create a |yield| node for |kid|. */
+    JSParseNode *pn = UnaryNode::create(tc);
+    if (!pn)
+        return NULL;
     pn->pn_type = TOK_YIELD;
     pn->pn_op = JSOP_YIELD;
     pn->pn_parens = true;
@@ -7395,10 +7419,7 @@ Parser::argumentList(JSParseNode *listNode)
 #endif
 #if JS_HAS_GENERATOR_EXPRS
         if (tokenStream.matchToken(TOK_FOR)) {
-            JSParseNode *pn = UnaryNode::create(tc);
-            if (!pn)
-                return JS_FALSE;
-            argNode = generatorExpr(pn, argNode);
+            argNode = generatorExpr(argNode);
             if (!argNode)
                 return JS_FALSE;
             if (listNode->pn_count > 1 ||
@@ -8713,7 +8734,7 @@ Parser::primaryExpr(TokenKind tt, JSBool afterDot)
       {
         JSBool genexp;

-        pn = parenExpr(NULL, &genexp);
+        pn = parenExpr(&genexp);
         if (!pn)
             return NULL;
         pn->pn_parens = true;
@@ -8954,7 +8975,7 @@ Parser::primaryExpr(TokenKind tt, JSBool afterDot)
 }

 JSParseNode *
-Parser::parenExpr(JSParseNode *pn1, JSBool *genexp)
+Parser::parenExpr(JSBool *genexp)
 {
     TokenPtr begin;
     JSParseNode *pn;
@@ -8979,12 +9000,7 @@ Parser::parenExpr(JSParseNode *pn1, JSBool *genexp)
                                      js_generator_str);
             return NULL;
         }
-        if (!pn1) {
-            pn1 = UnaryNode::create(tc);
-            if (!pn1)
-                return NULL;
-        }
-        pn = generatorExpr(pn1, pn);
+        pn = generatorExpr(pn);
         if (!pn)
             return NULL;
         pn->pn_pos.begin = begin;
@@ -1151,7 +1151,7 @@ private:
     JSParseNode *unaryExpr();
     JSParseNode *memberExpr(JSBool allowCallSyntax);
     JSParseNode *primaryExpr(js::TokenKind tt, JSBool afterDot);
-    JSParseNode *parenExpr(JSParseNode *pn1, JSBool *genexp);
+    JSParseNode *parenExpr(JSBool *genexp = NULL);

     /*
      * Additional JS parsers.
@@ -1166,7 +1166,7 @@ private:
     JSParseNode *condition();
     JSParseNode *comprehensionTail(JSParseNode *kid, uintN blockid,
                                    js::TokenKind type = js::TOK_SEMI, JSOp op = JSOP_NOP);
-    JSParseNode *generatorExpr(JSParseNode *pn, JSParseNode *kid);
+    JSParseNode *generatorExpr(JSParseNode *kid);
     JSBool argumentList(JSParseNode *listNode);
     JSParseNode *bracketedExpr();
     JSParseNode *letBlock(JSBool statement);
@@ -133,27 +133,84 @@ using namespace js;
 using namespace js::gc;
 using namespace js::tjit;

+/*
+ * This macro is just like JS_NOT_REACHED but it exists in non-debug builds
+ * too.  Its presence indicates shortcomings in jstracer's handling of some
+ * OOM situations:
+ * - OOM failures in constructors, which lack a return value to pass back a
+ *   failure code (though it can and should be done indirectly).
+ * - OOM failures in the "infallible" allocators used for Nanojit.
+ *
+ * FIXME: bug 624590 is open to fix these problems.
+ */
+#define OUT_OF_MEMORY_ABORT(msg)    JS_Assert(msg, __FILE__, __LINE__);
+
 /* Implement embedder-specific nanojit members. */

+/*
+ * Nanojit requires infallible allocations most of the time.  We satisfy this
+ * by reserving some space in each allocator which is used as a fallback if
+ * js_calloc() fails.  Ideally this reserve space should be big enough to
+ * allow for all infallible requests made to the allocator until the next OOM
+ * check occurs, but it turns out that's impossible to guarantee (though it
+ * should be unlikely).  So we abort if the reserve runs out; this is better
+ * than allowing memory errors to occur.
+ *
+ * The space calculations are as follows... between OOM checks, each
+ * VMAllocator can do (ie. has been seen to do) the following maximum
+ * allocations on 64-bits:
+ *
+ * - dataAlloc: 31 minimum-sized chunks (MIN_CHUNK_SZB) in assm->compile()
+ *   (though arbitrarily more could occur due to LabelStateMap additions done
+ *   when handling labels):  62,248 bytes.  This one is the most likely to
+ *   overflow.
+ *
+ * - traceAlloc: 1 minimum-sized chunk:  2,008 bytes.
+ *
+ * - tempAlloc: 1 LIR code chunk (CHUNK_SZB) and 5 minimum-sized chunks for
+ *   sundry small allocations:  18,048 bytes.
+ *
+ * The reserve sizes are chosen by exceeding this by a reasonable amount.
+ * Reserves for 32-bits are slightly more than half, because most of the
+ * allocated space is used to hold pointers.
+ *
+ * FIXME: Bug 624590 is open to get rid of all this.
+ */
+static const size_t DataReserveSize  = 12500 * sizeof(uintptr_t);
+static const size_t TraceReserveSize =  5000 * sizeof(uintptr_t);
+static const size_t TempReserveSize  =  1000 * sizeof(uintptr_t);
+
 void*
-nanojit::Allocator::allocChunk(size_t nbytes)
+nanojit::Allocator::allocChunk(size_t nbytes, bool fallible)
 {
     VMAllocator *vma = (VMAllocator*)this;
-    JS_ASSERT(!vma->outOfMemory());
+    /*
+     * Nb: it's conceivable that request 1 might fail (in which case
+     * mOutOfMemory will be set) and then request 2 succeeds.  The subsequent
+     * OOM check will still fail, which is what we want, and the success of
+     * request 2 makes it less likely that the reserve space will overflow.
+     */
     void *p = js_calloc(nbytes);
-    if (!p) {
-        JS_ASSERT(nbytes < sizeof(vma->mReserve));
+    if (p) {
+        vma->mSize += nbytes;
+    } else {
         vma->mOutOfMemory = true;
-        p = (void*) &vma->mReserve[0];
+        if (!fallible) {
+            p = (void *)vma->mReserveCurr;
+            vma->mReserveCurr += nbytes;
+            if (vma->mReserveCurr > vma->mReserveLimit)
+                OUT_OF_MEMORY_ABORT("nanojit::Allocator::allocChunk: out of memory");
+            memset(p, 0, nbytes);
+            vma->mSize += nbytes;
+        }
     }
-    vma->mSize += nbytes;
     return p;
 }

 void
 nanojit::Allocator::freeChunk(void *p) {
     VMAllocator *vma = (VMAllocator*)this;
-    if (p != &vma->mReserve[0])
+    if (p < vma->mReserve || uintptr_t(p) >= vma->mReserveLimit)
         js_free(p);
 }
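The comment block above describes the reserve-fallback scheme this hunk introduces. A self-contained toy model of the same pattern, outside SpiderMonkey (names are illustrative assumptions, not the patch's code):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    struct ReserveAllocator {
        bool      outOfMemory;
        char     *reserve;
        uintptr_t reserveCurr, reserveLimit;

        ReserveAllocator(char *r, size_t sz)
          : outOfMemory(false), reserve(r),
            reserveCurr(uintptr_t(r)), reserveLimit(uintptr_t(r + sz)) {}

        void *alloc(size_t nbytes, bool fallible) {
            if (void *p = calloc(1, nbytes))
                return p;                    /* normal path */
            outOfMemory = true;              /* remembered for the next OOM check */
            if (fallible)
                return NULL;                 /* caller promised to cope with failure */
            void *p = (void *)reserveCurr;   /* bump-allocate from the reserve */
            reserveCurr += nbytes;
            if (reserveCurr > reserveLimit)
                abort();                     /* reserve overflow: hard stop */
            memset(p, 0, nbytes);            /* match calloc's zeroed memory */
            return p;
        }
    };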
@@ -162,6 +219,7 @@ nanojit::Allocator::postReset() {
     VMAllocator *vma = (VMAllocator*)this;
     vma->mOutOfMemory = false;
     vma->mSize = 0;
+    vma->mReserveCurr = uintptr_t(vma->mReserve);
 }

 int
@@ -501,12 +559,6 @@ InitJITStatsClass(JSContext *cx, JSObject *glob)
 static avmplus::AvmCore s_core = avmplus::AvmCore();
 static avmplus::AvmCore* core = &s_core;

-static void OutOfMemoryAbort()
-{
-    JS_NOT_REACHED("out of memory");
-    abort();
-}
-
 #ifdef JS_JIT_SPEW
 static void
 DumpPeerStability(TraceMonitor* tm, const void* ip, JSObject* globalObj, uint32 globalShape, uint32 argc);
@@ -626,8 +678,8 @@ InitJITLogController()
 /*
  * All the allocations done by this profile data-collection and
  * display machinery, are done in TraceMonitor::profAlloc.  That is
- * emptied out at the end of js_FinishJIT.  It has a lifetime from
- * js_InitJIT to js_FinishJIT, which exactly matches the span
+ * emptied out at the end of FinishJIT.  It has a lifetime from
+ * InitJIT to FinishJIT, which exactly matches the span
  * js_FragProfiling_init to js_FragProfiling_showResults.
  */
 template<class T>
@@ -1399,7 +1451,7 @@ FrameInfoCache::FrameInfoCache(VMAllocator *allocator)
   : allocator(allocator)
 {
     if (!set.init())
-        OutOfMemoryAbort();
+        OUT_OF_MEMORY_ABORT("FrameInfoCache::FrameInfoCache(): out of memory");
 }

 #define PC_HASH_COUNT 1024
@@ -2285,7 +2337,7 @@ TraceRecorder::TraceRecorder(JSContext* cx, VMSideExit* anchor, VMFragment* frag
      */

     if (!guardedShapeTable.init())
-        abort();
+        OUT_OF_MEMORY_ABORT("TraceRecorder::TraceRecorder: out of memory");

 #ifdef JS_JIT_SPEW
     debug_only_print0(LC_TMMinimal, "\n");
@@ -2465,7 +2517,6 @@ TraceRecorder::finishAbort(const char* reason)
 {
     JS_ASSERT(!traceMonitor->profile);
     JS_ASSERT(traceMonitor->recorder == this);
-    JS_ASSERT(!fragment->code());

     AUDIT(recorderAborted);
 #ifdef DEBUG
@@ -2782,18 +2833,16 @@ TraceMonitor::flush()
     oracle->clear();
     loopProfiles->clear();

-    Allocator& alloc = *dataAlloc;
-
     for (size_t i = 0; i < MONITOR_N_GLOBAL_STATES; ++i) {
         globalStates[i].globalShape = -1;
-        globalStates[i].globalSlots = new (alloc) SlotList(&alloc);
+        globalStates[i].globalSlots = new (*dataAlloc) SlotList(dataAlloc);
     }

-    assembler = new (alloc) Assembler(*codeAlloc, alloc, alloc, core, &LogController, avmplus::AvmCore::config);
+    assembler = new (*dataAlloc) Assembler(*codeAlloc, *dataAlloc, *dataAlloc, core,
+                                           &LogController, avmplus::AvmCore::config);
     verbose_only( branches = NULL; )

     PodArrayZero(vmfragments);
-    reFragments = new (alloc) REHashMap(alloc);
     tracedScripts.clear();

     needFlush = JS_FALSE;
@@ -4518,10 +4567,10 @@ TraceRecorder::compile()
 #endif

     Assembler *assm = traceMonitor->assembler;
-    JS_ASSERT(assm->error() == nanojit::None);
+    JS_ASSERT(!assm->error());
     assm->compile(fragment, tempAlloc(), /*optimize*/true verbose_only(, lirbuf->printer));

-    if (assm->error() != nanojit::None) {
+    if (assm->error()) {
         assm->setError(nanojit::None);
         debug_only_print0(LC_TMTracer, "Blacklisted: error during compilation\n");
         Blacklist((jsbytecode*)tree->ip);
@@ -5707,7 +5756,8 @@ RecordTree(JSContext* cx, TreeFragment* first, JSScript* outerScript, jsbytecode

     if (tm->outOfMemory() ||
         OverfullJITCache(cx, tm) ||
-        !tm->tracedScripts.put(cx->fp()->script())) {
+        !tm->tracedScripts.put(cx->fp()->script()))
+    {
         if (!OverfullJITCache(cx, tm))
             js_ReportOutOfMemory(cx);
         Backoff(cx, (jsbytecode*) f->root->ip);
@@ -7632,7 +7682,7 @@ InitJIT(TraceMonitor *tm)
     }
     /* Set up fragprofiling, if required. */
     if (LogController.lcbits & LC_FragProfile) {
-        tm->profAlloc = js_new<VMAllocator>();
+        tm->profAlloc = js_new<VMAllocator>((char*)NULL, 0); /* no reserve needed in debug builds */
         JS_ASSERT(tm->profAlloc);
         tm->profTab = new (*tm->profAlloc) FragStatsMap(*tm->profAlloc);
     }
@@ -7684,9 +7734,13 @@ InitJIT(TraceMonitor *tm)

     tm->flushEpoch = 0;

-    CHECK_ALLOC(tm->dataAlloc, js_new<VMAllocator>());
-    CHECK_ALLOC(tm->traceAlloc, js_new<VMAllocator>());
-    CHECK_ALLOC(tm->tempAlloc, js_new<VMAllocator>());
+    char *dataReserve, *traceReserve, *tempReserve;
+    CHECK_ALLOC(dataReserve, (char *)js_malloc(DataReserveSize));
+    CHECK_ALLOC(traceReserve, (char *)js_malloc(TraceReserveSize));
+    CHECK_ALLOC(tempReserve, (char *)js_malloc(TempReserveSize));
+    CHECK_ALLOC(tm->dataAlloc, js_new<VMAllocator>(dataReserve, DataReserveSize));
+    CHECK_ALLOC(tm->traceAlloc, js_new<VMAllocator>(traceReserve, TraceReserveSize));
+    CHECK_ALLOC(tm->tempAlloc, js_new<VMAllocator>(tempReserve, TempReserveSize));
     CHECK_ALLOC(tm->codeAlloc, js_new<CodeAlloc>());
     CHECK_ALLOC(tm->frameCache, js_new<FrameInfoCache>(tm->dataAlloc));
     CHECK_ALLOC(tm->storage, js_new<TraceNativeStorage>());
@@ -7782,12 +7836,6 @@ FinishJIT(TraceMonitor *tm)
                 FragProfiling_FragFinalizer(p, tm);
             }
         }
-        REHashMap::Iter iter(*(tm->reFragments));
-        while (iter.next()) {
-            VMFragment* frag = (VMFragment*)iter.value();
-            FragProfiling_FragFinalizer(frag, tm);
-        }
-
         FragProfiling_showResults(tm);
         js_delete(tm->profAlloc);

@@ -7891,7 +7939,7 @@ OverfullJITCache(JSContext *cx, TraceMonitor* tm)
  *
  * Presently, the code in this file doesn't check the outOfMemory condition
  * often enough, and frequently misuses the unchecked results of
- * lirbuffer insertions on the asssumption that it will notice the
+ * lirbuffer insertions on the assumption that it will notice the
  * outOfMemory flag "soon enough" when it returns to the monitorRecording
  * function.  This turns out to be a false assumption if we use outOfMemory
  * to signal condition 2: we regularly provoke "passing our intended
@@ -7909,11 +7957,7 @@ OverfullJITCache(JSContext *cx, TraceMonitor* tm)
  *
  */
     jsuint maxsz = JS_THREAD_DATA(cx)->maxCodeCacheBytes;
-    VMAllocator *dataAlloc = tm->dataAlloc;
-    VMAllocator *traceAlloc = tm->traceAlloc;
-    CodeAlloc *codeAlloc = tm->codeAlloc;
-
-    return (codeAlloc->size() + dataAlloc->size() + traceAlloc->size() > maxsz);
+    return (tm->codeAlloc->size() + tm->dataAlloc->size() + tm->traceAlloc->size() > maxsz);
 }

 JS_FORCES_STACK JS_FRIEND_API(void)
@@ -14729,29 +14773,38 @@ TraceRecorder::record_JSOP_MOREITER()
     LIns* iterobj_ins = get(&iterobj_val);
     LIns* cond_ins;

-    /* JSOP_FOR* already guards on this, but in certain rare cases we might record misformed loop traces. */
+    /*
+     * JSOP_FOR* already guards on this, but in certain rare cases we might
+     * record misformed loop traces. Note that it's not necessary to guard on
+     * ni->flags (nor do we in unboxNextValue), because the different
+     * iteration type will guarantee a different entry typemap.
+     */
     if (iterobj->hasClass(&js_IteratorClass)) {
         guardClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);

-        LIns *ni_ins = w.ldpObjPrivate(iterobj_ins);
-        LIns *cursor_ins = w.ldpIterCursor(ni_ins);
-        LIns *end_ins = w.ldpIterEnd(ni_ins);
+        NativeIterator *ni = (NativeIterator *) iterobj->getPrivate();
+        if (ni->isKeyIter()) {
+            LIns *ni_ins = w.ldpObjPrivate(iterobj_ins);
+            LIns *cursor_ins = w.ldpIterCursor(ni_ins);
+            LIns *end_ins = w.ldpIterEnd(ni_ins);

-        cond_ins = w.ltp(cursor_ins, end_ins);
+            cond_ins = w.ltp(cursor_ins, end_ins);
+            stack(0, cond_ins);
+            return ARECORD_CONTINUE;
+        }
     } else {
         guardNotClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
-
-        enterDeepBailCall();
-
-        LIns* vp_ins = w.allocp(sizeof(Value));
-        LIns* args[] = { vp_ins, iterobj_ins, cx_ins };
-        pendingGuardCondition = w.call(&IteratorMore_ci, args);
-
-        leaveDeepBailCall();
-
-        cond_ins = is_boxed_true(AllocSlotsAddress(vp_ins));
     }

+    enterDeepBailCall();
+
+    LIns* vp_ins = w.allocp(sizeof(Value));
+    LIns* args[] = { vp_ins, iterobj_ins, cx_ins };
+    pendingGuardCondition = w.call(&IteratorMore_ci, args);
+
+    leaveDeepBailCall();
+
+    cond_ins = is_boxed_true(AllocSlotsAddress(vp_ins));
     stack(0, cond_ins);

     return ARECORD_CONTINUE;
@@ -14819,9 +14872,9 @@ TraceRecorder::unboxNextValue(LIns* &v_ins)

         /* Emit code to stringify the id if necessary. */
         Address cursorAddr = IterPropsAddress(cursor_ins);
-        if (!(((NativeIterator *) iterobj->getPrivate())->flags & JSITER_FOREACH)) {
+        if (ni->isKeyIter()) {
             /* Read the next id from the iterator. */
-            jsid id = *ni->currentKey();
+            jsid id = *ni->current();
             LIns *id_ins = w.name(w.ldp(cursorAddr), "id");

             /*
@@ -14850,24 +14903,18 @@ TraceRecorder::unboxNextValue(LIns* &v_ins)

             /* Increment the cursor by one jsid and store it back. */
             cursor_ins = w.addp(cursor_ins, w.nameImmw(sizeof(jsid)));
-        } else {
-            /* Read the next value from the iterator. */
-            Value v = *ni->currentValue();
-            v_ins = unbox_value(v, cursorAddr, snapshot(BRANCH_EXIT));
-
-            /* Increment the cursor by one Value and store it back. */
-            cursor_ins = w.addp(cursor_ins, w.nameImmw(sizeof(Value)));
+            w.stpIterCursor(cursor_ins, ni_ins);
+            return ARECORD_CONTINUE;
         }
-
-        w.stpIterCursor(cursor_ins, ni_ins);
     } else {
         guardNotClass(iterobj_ins, &js_IteratorClass, snapshot(BRANCH_EXIT), LOAD_NORMAL);
-
-        Address iterValueAddr = CxAddress(iterValue);
-        v_ins = unbox_value(cx->iterValue, iterValueAddr, snapshot(BRANCH_EXIT));
-        storeMagic(JS_NO_ITER_VALUE, iterValueAddr);
     }

+    Address iterValueAddr = CxAddress(iterValue);
+    v_ins = unbox_value(cx->iterValue, iterValueAddr, snapshot(BRANCH_EXIT));
+    storeMagic(JS_NO_ITER_VALUE, iterValueAddr);
+
     return ARECORD_CONTINUE;
 }
@@ -411,9 +411,15 @@ class VMAllocator : public nanojit::Allocator
 {

 public:
-    VMAllocator() : mOutOfMemory(false), mSize(0)
+    VMAllocator(char* reserve, size_t reserveSize)
+      : mOutOfMemory(false), mSize(0), mReserve(reserve),
+        mReserveCurr(uintptr_t(reserve)), mReserveLimit(uintptr_t(reserve + reserveSize))
     {}

+    ~VMAllocator() {
+        js_free(mReserve);
+    }
+
     size_t size() {
         return mSize;
     }
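The new constructor/destructor pair makes VMAllocator own its reserve buffer outright. A sketch of the resulting construction pattern, matching the InitJIT hunk earlier in this file (error handling via CHECK_ALLOC is elided; illustrative only):

    char *reserve = (char *)js_malloc(DataReserveSize);
    VMAllocator *dataAlloc = js_new<VMAllocator>(reserve, DataReserveSize);
    /* ... use the allocator ... */
    js_delete(dataAlloc);   /* ~VMAllocator() js_free()s the reserve */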
@ -464,43 +470,10 @@ public:
|
||||
bool mOutOfMemory;
|
||||
size_t mSize;
|
||||
|
||||
/*
|
||||
* FIXME: Area the LIR spills into if we encounter an OOM mid-way
|
||||
* through compilation; we must check mOutOfMemory before we run out
|
||||
* of mReserve, otherwise we're in undefined territory. This area
|
||||
* used to be one page, now 16 to be "safer". This is a temporary
|
||||
* and quite unsatisfactory approach to handling OOM in Nanojit.
|
||||
*/
|
||||
uintptr_t mReserve[0x10000];
|
||||
};
|
||||
|
||||
struct REHashKey {
|
||||
size_t re_length;
|
||||
uint16 re_flags;
|
||||
const jschar* re_chars;
|
||||
|
||||
REHashKey(size_t re_length, uint16 re_flags, const jschar *re_chars)
|
||||
: re_length(re_length)
|
||||
, re_flags(re_flags)
|
||||
, re_chars(re_chars)
|
||||
{}
|
||||
|
||||
bool operator==(const REHashKey& other) const
|
||||
{
|
||||
return ((this->re_length == other.re_length) &&
|
||||
(this->re_flags == other.re_flags) &&
|
||||
!memcmp(this->re_chars, other.re_chars,
|
||||
this->re_length * sizeof(jschar)));
|
||||
}
|
||||
};
|
||||
|
||||
struct REHashFn {
|
||||
static size_t hash(const REHashKey& k) {
|
||||
return
|
||||
k.re_length +
|
||||
k.re_flags +
|
||||
nanojit::murmurhash(k.re_chars, k.re_length * sizeof(jschar));
|
||||
}
|
||||
/* See nanojit::Allocator::allocChunk() for details on these. */
|
||||
char* mReserve;
|
||||
uintptr_t mReserveCurr;
|
||||
uintptr_t mReserveLimit;
|
||||
};
|
||||
|
||||
struct FrameInfo {
|
||||
|
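The three mReserve fields above replace the old fixed in-object reserve array: the allocator now receives a heap-allocated reserve it can carve infallible chunks from once malloc has failed. The pointed-to allocChunk() comment owns the details; the following is only a minimal sketch of that shape, with hypothetical names and logic, not the actual jstracer.cpp implementation:

```cpp
#include <cassert>
#include <cstdlib>
#include <cstdint>

// Hypothetical stand-in for the VMAllocator reserve fields above.
struct ReserveAllocator {
    bool      mOutOfMemory;
    char*     mReserve;
    uintptr_t mReserveCurr;
    uintptr_t mReserveLimit;

    void* allocChunk(size_t nbytes, bool fallible) {
        if (void* p = malloc(nbytes))      // try the normal heap first
            return p;
        if (fallible)
            return NULL;                   // caller has promised to cope
        // Infallible path: flag OOM and bump-allocate out of the reserve
        // so the current compilation can reach a safe abort point.
        mOutOfMemory = true;
        void* p = (void*) mReserveCurr;
        mReserveCurr += nbytes;
        assert(mReserveCurr <= mReserveLimit);
        return p;
    }
};
```
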
@ -552,41 +552,26 @@ Reify(JSContext *cx, JSCompartment *origin, Value *vp)
     * N.B. the order of closing/creating iterators is important due to the
     * implicit cx->enumerators state.
     */

    if (ni->isKeyIter()) {
        size_t length = ni->numKeys();
        AutoIdVector keys(cx);
        if (length > 0) {
            if (!keys.resize(length))
                return false;
            for (size_t i = 0; i < length; ++i) {
                keys[i] = ni->beginKey()[i];
                if (!origin->wrapId(cx, &keys[i]))
                    return false;
            }
        }

        close.clear();
        return js_CloseIterator(cx, iterObj) &&
               VectorToKeyIterator(cx, obj, ni->flags, keys, vp);
    }

    size_t length = ni->numValues();
    AutoValueVector vals(cx);
    size_t length = ni->numKeys();
    bool isKeyIter = ni->isKeyIter();
    AutoIdVector keys(cx);
    if (length > 0) {
        if (!vals.resize(length))
        if (!keys.resize(length))
            return false;
        for (size_t i = 0; i < length; ++i) {
            vals[i] = ni->beginValue()[i];
            if (!origin->wrap(cx, &vals[i]))
            keys[i] = ni->begin()[i];
            if (!origin->wrapId(cx, &keys[i]))
                return false;
        }

    }

    close.clear();
    return js_CloseIterator(cx, iterObj) &&
           VectorToValueIterator(cx, obj, ni->flags, vals, vp);
    if (!js_CloseIterator(cx, iterObj))
        return false;

    if (isKeyIter)
        return VectorToKeyIterator(cx, obj, ni->flags, keys, vp);
    return VectorToValueIterator(cx, obj, ni->flags, keys, vp);
}

bool

@ -64,7 +64,7 @@ using namespace std;
/* Allocator SPI implementation. */

void*
nanojit::Allocator::allocChunk(size_t nbytes)
nanojit::Allocator::allocChunk(size_t nbytes, bool /*fallible*/)
{
    void *p = malloc(nbytes);
    if (!p)

@ -1499,12 +1499,14 @@ mjit::Compiler::generateMethod()
          END_CASE(JSOP_STRICTNE)

          BEGIN_CASE(JSOP_ITER)
            iter(PC[1]);
            if (!iter(PC[1]))
                return Compile_Error;
          END_CASE(JSOP_ITER)

          BEGIN_CASE(JSOP_MOREITER)
            /* This MUST be fused with IFNE or IFNEX. */
            iterMore();
            /* At the byte level, this is always fused with IFNE or IFNEX. */
            if (!iterMore())
                return Compile_Error;
            break;
          END_CASE(JSOP_MOREITER)

@ -3964,7 +3966,7 @@ mjit::Compiler::jsop_propinc(JSOp op, VoidStubAtom stub, uint32 index)
    return true;
}

void
bool
mjit::Compiler::iter(uintN flags)
{
    FrameEntry *fe = frame.peek(-1);
@ -3979,7 +3981,7 @@ mjit::Compiler::iter(uintN flags)
        INLINE_STUBCALL(stubs::Iter);
        frame.pop();
        frame.pushSynced();
        return;
        return true;
    }

    if (!fe->isTypeKnown()) {
@ -4065,6 +4067,8 @@ mjit::Compiler::iter(uintN flags)
    frame.pushTypedPayload(JSVAL_TYPE_OBJECT, ioreg);

    stubcc.rejoin(Changes(1));

    return true;
}

/*
@ -4092,7 +4096,7 @@ mjit::Compiler::iterNext()
    RegisterID T3 = frame.allocReg();
    RegisterID T4 = frame.allocReg();

    /* Test if for-each. */
    /* Test for a value iterator, which could come through an Iterator object. */
    masm.load32(Address(T1, offsetof(NativeIterator, flags)), T3);
    notFast = masm.branchTest32(Assembler::NonZero, T3, Imm32(JSITER_FOREACH));
    stubcc.linkExit(notFast, Uses(1));
@ -4129,7 +4133,7 @@ mjit::Compiler::iterNext()
bool
mjit::Compiler::iterMore()
{
    FrameEntry *fe= frame.peek(-1);
    FrameEntry *fe = frame.peek(-1);
    RegisterID reg = frame.tempRegForData(fe);

    frame.pinReg(reg);
@ -4143,6 +4147,11 @@ mjit::Compiler::iterMore()
    /* Get private from iter obj. */
    masm.loadObjPrivate(reg, T1);

    /* Test that the iterator supports fast iteration. */
    notFast = masm.branchTest32(Assembler::NonZero, Address(T1, offsetof(NativeIterator, flags)),
                                Imm32(JSITER_FOREACH));
    stubcc.linkExitForBranch(notFast);

    /* Get props_cursor, test */
    RegisterID T2 = frame.allocReg();
    frame.syncAndForgetEverything();

@ -396,7 +396,7 @@ class Compiler : public BaseCompiler
    /* Emitting helpers. */
    void restoreFrameRegs(Assembler &masm);
    bool emitStubCmpOp(BoolStub stub, jsbytecode *target, JSOp fused);
    void iter(uintN flags);
    bool iter(uintN flags);
    void iterNext();
    bool iterMore();
    void iterEnd();

@ -920,9 +920,9 @@ UpdateTraceHintSingle(Repatcher &repatcher, JSC::CodeLocationJump jump, JSC::Cod
}

static void
DisableTraceHint(VMFrame &f, ic::TraceICInfo &ic)
DisableTraceHint(JITScript *jit, ic::TraceICInfo &ic)
{
    Repatcher repatcher(f.jit());
    Repatcher repatcher(jit);
    UpdateTraceHintSingle(repatcher, ic.traceHint, ic.jumpTarget);

    if (ic.hasSlowTraceHint)
@ -1021,7 +1021,7 @@ RunTracer(VMFrame &f)
#if JS_MONOIC
    ic.loopCounterStart = *loopCounter;
    if (blacklist)
        DisableTraceHint(f, ic);
        DisableTraceHint(entryFrame->jit(), ic);
#endif

    // Even though ExecuteTree() bypasses the interpreter, it should propagate

@ -1 +1 @@
4ca71b4e30e696851c0a7a934a0e73426cf8c2c7
f6016c7c7cd415a26dad9cf39d34141b8b482d43

@ -68,28 +68,35 @@ namespace nanojit
        postReset();
    }

    void* Allocator::allocSlow(size_t nbytes)
    void* Allocator::allocSlow(size_t nbytes, bool fallible)
    {
        NanoAssert((nbytes & 7) == 0);
        fill(nbytes);
        NanoAssert(current_top + nbytes <= current_limit);
        void* p = current_top;
        current_top += nbytes;
        return p;
        if (fill(nbytes, fallible)) {
            NanoAssert(current_top + nbytes <= current_limit);
            void* p = current_top;
            current_top += nbytes;
            return p;
        }
        return NULL;
    }

    void Allocator::fill(size_t nbytes)
    bool Allocator::fill(size_t nbytes, bool fallible)
    {
        const size_t minChunk = 2000;
        if (nbytes < minChunk)
            nbytes = minChunk;
        if (nbytes < MIN_CHUNK_SZB)
            nbytes = MIN_CHUNK_SZB;
        size_t chunkbytes = sizeof(Chunk) + nbytes - sizeof(int64_t);
        void* mem = allocChunk(chunkbytes);
        Chunk* chunk = (Chunk*) mem;
        chunk->prev = current_chunk;
        current_chunk = chunk;
        current_top = (char*)chunk->data;
        current_limit = (char*)mem + chunkbytes;
        void* mem = allocChunk(chunkbytes, fallible);
        if (mem) {
            Chunk* chunk = (Chunk*) mem;
            chunk->prev = current_chunk;
            current_chunk = chunk;
            current_top = (char*)chunk->data;
            current_limit = (char*)mem + chunkbytes;
            return true;
        } else {
            NanoAssert(fallible);
            return false;
        }
    }
}

@ -46,30 +46,53 @@ namespace nanojit
     * Allocator is a bump-pointer allocator with an SPI for getting more
     * memory from embedder-implemented allocator, such as malloc()/free().
     *
     * allocations never return NULL. The implementation of allocChunk()
     * alloc() never returns NULL. The implementation of allocChunk()
     * is expected to perform a longjmp or exception when an allocation can't
     * proceed.
     * proceed. fallibleAlloc() (and fallibleAllocChunk()) may return NULL.
     * They should be used for large allocations whose failure can be handled
     * without too much difficulty.
     */
    class Allocator {
    public:
        Allocator();
        ~Allocator();

        // Usable space in the minimum chunk size; there are also a few bytes
        // used for administration.
        static const size_t MIN_CHUNK_SZB = 2000;

        void reset();

        /** alloc memory, never return null. */
        void* alloc(size_t nbytes) {
            void* p;
            nbytes = (nbytes + 7) & ~7; // round up
            if (current_top + nbytes <= current_limit) {
                void *p = current_top;
                p = current_top;
                current_top += nbytes;
                return p;
            } else {
                p = allocSlow(nbytes, /* fallible = */false);
                NanoAssert(p);
            }
            return allocSlow(nbytes);
            return p;
        }

        /** alloc memory, maybe return null. */
        void* fallibleAlloc(size_t nbytes) {
            void* p;
            nbytes = (nbytes + 7) & ~7; // round up
            if (current_top + nbytes <= current_limit) {
                p = current_top;
                current_top += nbytes;
            } else {
                p = allocSlow(nbytes, /* fallible = */true);
            }
            return p;
        }

    protected:
        void* allocSlow(size_t nbytes);
        void fill(size_t minbytes);
        void* allocSlow(size_t nbytes, bool fallible = false);
        bool fill(size_t minbytes, bool fallible);

        class Chunk {
        public:
@ -84,7 +107,7 @@ namespace nanojit
        // allocator SPI

        /** allocate another block from a host provided allocator */
        void* allocChunk(size_t nbytes);
        void* allocChunk(size_t nbytes, bool fallible);

        /** free back to the same allocator */
        void freeChunk(void*);

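The point of the split above is a two-tier contract: alloc() keeps the historical never-NULL guarantee, while fallibleAlloc() is reserved for large allocations whose failure the caller can absorb. A usage sketch under that contract — the function, sizes, and the template stand-in for the allocator are illustrative, not from the tree:

```cpp
#include <cstring>

// Illustrative caller of the two entry points; Alloc stands in for the
// nanojit::Allocator interface sketched above.
template <class Alloc>
void example(Alloc& alloc, size_t n)
{
    // Small bookkeeping data: the infallible path, no NULL check needed.
    char* small = (char*) alloc.alloc(64);
    memset(small, 0, 64);

    // Potentially large table: the fallible path; degrade gracefully,
    // e.g. by skipping the optimization that wanted the table.
    int** table = (int**) alloc.fallibleAlloc(sizeof(int*) * n);
    if (!table)
        return;
    memset(table, 0, sizeof(int*) * n);
}
```
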
@ -2061,9 +2061,9 @@ namespace nanojit
        storesSinceLastLoad(ACCSET_NONE),
        alloc(alloc),
        knownCmpValues(alloc),
        suspended(false)
        suspended(false),
        initOOM(false)
    {

        m_findNL[NLImmISmall] = &CseFilter::findImmISmall;
        m_findNL[NLImmILarge] = &CseFilter::findImmILarge;
        m_findNL[NLImmQ] = PTR_SIZE(NULL, &CseFilter::findImmQ);
@ -2082,15 +2082,26 @@ namespace nanojit
        m_capNL[NL3] = 16;
        m_capNL[NLCall] = 64;

        // The largish allocations are fallible, the small ones are
        // infallible. See the comment on initOOM's declaration for why.

        for (NLKind nlkind = NLFirst; nlkind <= NLLast; nlkind = nextNLKind(nlkind)) {
            m_listNL[nlkind] = new (alloc) LIns*[m_capNL[nlkind]];
            m_listNL[nlkind] = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capNL[nlkind]);
            if (!m_listNL[nlkind]) {
                initOOM = true;
                return;
            }
            m_usedNL[nlkind] = 1; // Force memset in clearAll().
        }

        // Note that this allocates the CONST and MULTIPLE tables as well.
        for (CseAcc a = 0; a < CSE_NUM_USED_ACCS; a++) {
            m_capL[a] = 16;
            m_listL[a] = new (alloc) LIns*[m_capL[a]];
            m_listL[a] = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capL[a]);
            if (!m_listL[a]) {
                initOOM = true;
                return;
            }
            m_usedL[a] = 1; // Force memset(0) in first clearAll().
        }

@ -2210,43 +2221,61 @@ namespace nanojit
        return hashfinish(hash);
    }

    void CseFilter::growNL(NLKind nlkind)
    bool CseFilter::growNL(NLKind nlkind)
    {
        NanoAssert(nlkind != NLImmISmall);
        const uint32_t oldcap = m_capNL[nlkind];
        m_capNL[nlkind] <<= 1;
        LIns** oldlist = m_listNL[nlkind];
        m_listNL[nlkind] = new (alloc) LIns*[m_capNL[nlkind]];
        VMPI_memset(m_listNL[nlkind], 0, m_capNL[nlkind] * sizeof(LIns*));
        find_t find = m_findNL[nlkind];
        for (uint32_t i = 0; i < oldcap; i++) {
            LIns* ins = oldlist[i];
            if (!ins) continue;
            uint32_t j = (this->*find)(ins);
            NanoAssert(!m_listNL[nlkind][j]);
            m_listNL[nlkind][j] = ins;
        // We make this allocation fallible because it's potentially large and
        // easy to recover from. If it fails, we won't add any more
        // instructions to the table and some CSE opportunities may be missed.
        LIns** tmp = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capNL[nlkind]);
        if (tmp) {
            LIns** oldlist = m_listNL[nlkind];
            m_listNL[nlkind] = tmp;
            VMPI_memset(m_listNL[nlkind], 0, m_capNL[nlkind] * sizeof(LIns*));
            find_t find = m_findNL[nlkind];
            for (uint32_t i = 0; i < oldcap; i++) {
                LIns* ins = oldlist[i];
                if (!ins) continue;
                uint32_t j = (this->*find)(ins);
                NanoAssert(!m_listNL[nlkind][j]);
                m_listNL[nlkind][j] = ins;
            }
            return true;
        } else {
            m_capNL[nlkind] = oldcap;
            return false;
        }
    }

    void CseFilter::growL(CseAcc cseAcc)
    bool CseFilter::growL(CseAcc cseAcc)
    {
        const uint32_t oldcap = m_capL[cseAcc];
        m_capL[cseAcc] <<= 1;
        LIns** oldlist = m_listL[cseAcc];
        m_listL[cseAcc] = new (alloc) LIns*[m_capL[cseAcc]];
        VMPI_memset(m_listL[cseAcc], 0, m_capL[cseAcc] * sizeof(LIns*));
        find_t find = &CseFilter::findLoad;
        for (uint32_t i = 0; i < oldcap; i++) {
            LIns* ins = oldlist[i];
            if (!ins) continue;
            uint32_t j = (this->*find)(ins);
            NanoAssert(!m_listL[cseAcc][j]);
            m_listL[cseAcc][j] = ins;
        LIns** tmp = (LIns**)alloc.fallibleAlloc(sizeof(LIns*) * m_capL[cseAcc]);
        if (tmp) {
            LIns** oldlist = m_listL[cseAcc];
            m_listL[cseAcc] = tmp;
            VMPI_memset(m_listL[cseAcc], 0, m_capL[cseAcc] * sizeof(LIns*));
            find_t find = &CseFilter::findLoad;
            for (uint32_t i = 0; i < oldcap; i++) {
                LIns* ins = oldlist[i];
                if (!ins) continue;
                uint32_t j = (this->*find)(ins);
                NanoAssert(!m_listL[cseAcc][j]);
                m_listL[cseAcc][j] = ins;
            }
            return true;
        } else {
            m_capL[cseAcc] = oldcap;
            return false;
        }
    }

    void CseFilter::addNLImmISmall(LIns* ins, uint32_t k)
    {
        NanoAssert(!initOOM);
        if (suspended) return;
        NLKind nlkind = NLImmISmall;
        NanoAssert(k < m_capNL[nlkind]);
@ -2257,24 +2286,36 @@ namespace nanojit

    void CseFilter::addNL(NLKind nlkind, LIns* ins, uint32_t k)
    {
        NanoAssert(!initOOM);
        if (suspended) return;
        NanoAssert(!m_listNL[nlkind][k]);
        m_usedNL[nlkind]++;
        m_listNL[nlkind][k] = ins;
        if ((m_usedNL[nlkind] * 4) >= (m_capNL[nlkind] * 3)) { // load factor of 0.75
            growNL(nlkind);
            bool ok = growNL(nlkind);
            if (!ok) {
                // OOM: undo the insertion.
                m_usedNL[nlkind]--;
                m_listNL[nlkind][k] = NULL;
            }
        }
    }

    void CseFilter::addL(LIns* ins, uint32_t k)
    {
        NanoAssert(!initOOM);
        if (suspended) return;
        CseAcc cseAcc = miniAccSetToCseAcc(ins->miniAccSet(), ins->loadQual());
        NanoAssert(!m_listL[cseAcc][k]);
        m_usedL[cseAcc]++;
        m_listL[cseAcc][k] = ins;
        if ((m_usedL[cseAcc] * 4) >= (m_capL[cseAcc] * 3)) { // load factor of 0.75
            growL(cseAcc);
            bool ok = growL(cseAcc);
            if (!ok) {
                // OOM: undo the insertion.
                m_usedL[cseAcc]--;
                m_listL[cseAcc][k] = NULL;
            }
        }
    }

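Both add paths above follow the same recipe: insert at the probe index, bump the count, and grow once occupancy reaches 3/4 of capacity — `used * 4 >= cap * 3` is just `used / cap >= 0.75` without the division (e.g. with 16 slots, 12 * 4 = 48 >= 16 * 3 = 48 triggers growth at the 12th entry). If the fallible regrow fails, the insertion is backed out so the table never exceeds its load factor. A condensed, hypothetical sketch of that pattern:

```cpp
#include <cstdint>
#include <cstddef>

// Condensed sketch of the insert-then-grow-or-undo pattern from addNL()/addL();
// 'table', 'used' and 'cap' stand in for the m_listNL/m_usedNL/m_capNL fields.
struct Entry;

bool addWithUndo(Entry** table, uint32_t& used, uint32_t cap, uint32_t k,
                 Entry* ins, bool (*grow)())
{
    table[k] = ins;
    used++;
    if (used * 4 >= cap * 3) {     // load factor reached 0.75
        if (!grow()) {             // OOM while rehashing:
            used--;                //   undo the insertion so the over-full
            table[k] = NULL;       //   table is never probed again
            return false;
        }
    }
    return true;
}
```
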
@ -2054,8 +2054,9 @@ namespace nanojit
        uint32_t findCall(LIns* ins);
        uint32_t findLoad(LIns* ins);

        void growNL(NLKind kind);
        void growL(CseAcc cseAcc);
        // These return false if they failed to grow due to OOM.
        bool growNL(NLKind kind);
        bool growL(CseAcc cseAcc);

        void addNLImmISmall(LIns* ins, uint32_t k);
        // 'k' is the index found by findXYZ().
@ -2069,6 +2070,17 @@ namespace nanojit
    public:
        CseFilter(LirWriter *out, uint8_t embNumUsedAccs, Allocator&);

        // CseFilter does some largish fallible allocations at start-up. If
        // they fail, the constructor sets this field to 'true'. It should be
        // checked after creation, and if set the CseFilter cannot be used.
        // (But the check can be skipped if allocChunk() always succeeds.)
        //
        // FIXME: This fallibility is a sop to TraceMonkey's implementation of
        // infallible malloc -- by avoiding some largish infallible
        // allocations, it reduces the size of the reserve space needed.
        // Bug 624590 is open to fix this.
        bool initOOM;

        LIns* insImmI(int32_t imm);
#ifdef NANOJIT_64BIT
        LIns* insImmQ(uint64_t q);
@ -2116,13 +2128,13 @@ namespace nanojit
        LIns *state, *param1, *sp, *rp;
        LIns* savedRegs[NumSavedRegs+1]; // Allocate an extra element in case NumSavedRegs == 0

    protected:
        friend class LirBufWriter;

        /** Each chunk is just a raw area of LIns instances, with no header
            and no more than 8-byte alignment. The chunk size is somewhat arbitrary. */
        static const size_t CHUNK_SZB = 8000;

    protected:
        friend class LirBufWriter;

        /** Get CHUNK_SZB more memory for LIR instructions. */
        void chunkAlloc();
        void moveToNewChunk(uintptr_t addrOfLastLInsOnCurrentChunk);

@ -30,6 +30,7 @@ script regress-569306.js
script regress-569464.js
script regress-571014.js
script regress-573875.js
script regress-576847.js
script regress-577648-1.js
script regress-577648-2.js
script regress-583429.js
@ -80,3 +81,4 @@ script regress-621814.js
script regress-620750.js
script regress-624199.js
script regress-624547.js
script regress-626436.js

19
js/src/tests/js1_8_5/regress/regress-576847.js
Normal file
@ -0,0 +1,19 @@
/*
 * Any copyright is dedicated to the Public Domain.
 * http://creativecommons.org/licenses/publicdomain/
 */

/* Don't crash. */
try {
    eval("function f(){}(((f)for(x in function(){}))())");
    var threwTypeError = false;
} catch (x) {
    var threwTypeError = x instanceof TypeError;
}
assertEq(threwTypeError, true);

/* Properly bind f. */
assertEq(eval("function f() {}; var i = (f for (f in [1])); uneval([n for (n in i)])"),
         '["0"]');

reportCompare(true, true);

7
js/src/tests/js1_8_5/regress/regress-626436.js
Normal file
@ -0,0 +1,7 @@
// Any copyright is dedicated to the Public Domain.
// http://creativecommons.org/licenses/publicdomain/
// Contributors: Christian Holler <decoder@own-hero.net>, Jesse Ruderman <jruderman@gmail.com>

(1 ? 2 : delete(0 ? 0 : {})).x;

reportCompare(0, 0, 'ok');

@ -108,8 +108,11 @@ Writer::init(LogControl *logc_)
    if (logc->lcbits & LC_TMRecorder)
        lir = new (alloc) VerboseWriter(*alloc, lir, lirbuf->printer, logc);
#endif
    if (avmplus::AvmCore::config.cseopt)
        lir = cse = new (alloc) CseFilter(lir, TM_NUM_USED_ACCS, *alloc);
    if (avmplus::AvmCore::config.cseopt) {
        cse = new (alloc) CseFilter(lir, TM_NUM_USED_ACCS, *alloc);
        if (!cse->initOOM)
            lir = cse;  // Skip CseFilter if we OOM'd when creating it.
    }
    lir = new (alloc) ExprFilter(lir);
    lir = new (alloc) FuncFilter(lir);
#ifdef DEBUG

@ -1500,7 +1500,7 @@ mozJSComponentLoader::ImportInto(const nsACString & aLocation,

    JSAutoEnterCompartment ac;
    if (!ac.enter(mContext, mod->global))
        return NULL;
        return NS_ERROR_FAILURE;

    if (!JS_GetProperty(mContext, mod->global,
                        "EXPORTED_SYMBOLS", &symbols)) {

@ -56,7 +56,6 @@ public:
    nsAutoJSValHolder()
      : mRt(NULL)
      , mVal(JSVAL_NULL)
      , mGCThing(NULL)
      , mHeld(JS_FALSE)
    {
        // nothing to do
@ -78,11 +77,11 @@ public:

    /**
     * Hold by rooting on the runtime.
     * Note that mGCThing may be JSVAL_NULL, which is not a problem.
     * Note that mVal may be JSVAL_NULL, which is not a problem.
     */
    JSBool Hold(JSRuntime* aRt) {
        if (!mHeld) {
            if (js_AddGCThingRootRT(aRt, &mGCThing, "nsAutoJSValHolder")) {
            if (js_AddRootRT(aRt, &mVal, "nsAutoJSValHolder")) {
                mRt = aRt;
                mHeld = JS_TRUE;
            } else {
@ -93,7 +92,7 @@ public:
    }

    /**
     * Manually release, nullifying mVal, mGCThing, and mRt, but returning
     * Manually release, nullifying mVal, and mRt, but returning
     * the original jsval.
     */
    jsval Release() {
@ -102,12 +101,11 @@ public:
        jsval oldval = mVal;

        if (mHeld) {
            js_RemoveRoot(mRt, &mGCThing); // infallible
            js_RemoveRoot(mRt, &mVal); // infallible
            mHeld = JS_FALSE;
        }

        mVal = JSVAL_NULL;
        mGCThing = NULL;
        mRt = NULL;

        return oldval;
@ -154,16 +152,12 @@ public:
        }
#endif
        mVal = aOther;
        mGCThing = JSVAL_IS_GCTHING(aOther)
                 ? JSVAL_TO_GCTHING(aOther)
                 : NULL;
        return *this;
    }

private:
    JSRuntime* mRt;
    jsval mVal;
    void* mGCThing;
    JSBool mHeld;
};

@ -127,7 +127,7 @@ namespace XPCWrapper {
JSObject *
Unwrap(JSContext *cx, JSObject *wrapper)
{
    if (wrapper->isProxy()) {
    if (wrapper->isWrapper()) {
        if (xpc::AccessCheck::isScriptAccessOnly(cx, wrapper))
            return nsnull;
        return wrapper->unwrap();

@ -83,6 +83,8 @@ GetCurrentOuter(JSContext *cx, JSObject *obj)
JSObject *
WrapperFactory::WaiveXray(JSContext *cx, JSObject *obj)
{
    obj = obj->unwrap();

    // We have to make sure that if we're wrapping an outer window, that
    // the .wrappedJSObject also wraps the outer window.
    obj = GetCurrentOuter(cx, obj);
@ -101,7 +103,9 @@ WrapperFactory::WaiveXray(JSContext *cx, JSObject *obj)
        if (proto && !(proto = WaiveXray(cx, proto)))
            return nsnull;

        js::SwitchToCompartment sc(cx, obj->compartment());
        JSAutoEnterCompartment ac;
        if (!ac.enter(cx, obj))
            return nsnull;
        wobj = JSWrapper::New(cx, obj, proto, obj->getGlobal(), &WaiveXrayWrapperWrapper);
        if (!wobj)
            return nsnull;
@ -128,7 +132,10 @@ JSObject *
WrapperFactory::DoubleWrap(JSContext *cx, JSObject *obj, uintN flags)
{
    if (flags & WrapperFactory::WAIVE_XRAY_WRAPPER_FLAG) {
        js::SwitchToCompartment sc(cx, obj->compartment());
        JSAutoEnterCompartment ac;
        if (!ac.enter(cx, obj))
            return nsnull;

        return WaiveXray(cx, obj);
    }
    return obj;
@ -345,7 +352,7 @@ WrapperFactory::WaiveXrayAndWrap(JSContext *cx, jsval *vp)
    if (JSVAL_IS_PRIMITIVE(*vp))
        return JS_WrapValue(cx, vp);

    JSObject *obj = JSVAL_TO_OBJECT(*vp)->unwrap();
    JSObject *obj = JSVAL_TO_OBJECT(*vp);

    obj = WaiveXray(cx, obj);
    if (!obj)

@ -723,6 +723,10 @@ RECURSE:

    if (*stack.currentFrame->args.instructionPtr == OP_KET || stack.currentFrame->args.subjectPtr == stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
        DPRINTF(("non-repeating ket or empty match\n"));
        if (stack.currentFrame->args.subjectPtr == stack.currentFrame->locals.subjectPtrAtStartOfInstruction && stack.currentFrame->args.groupMatched) {
            DPRINTF(("empty string while group already matched; bailing"));
            RRETURN_NO_MATCH;
        }
        stack.currentFrame->args.instructionPtr += 1 + LINK_SIZE;
        NEXT_OPCODE;
    }
@ -1266,7 +1270,7 @@ RECURSE:
    if (minimize) {
        stack.currentFrame->locals.repeatOthercase = othercase;
        for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
            RECURSIVE_MATCH(28, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
            RECURSIVE_MATCH(28, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
            if (isMatch)
                RRETURN;
            if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
@ -1308,7 +1312,7 @@ RECURSE:

    if (minimize) {
        for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
            RECURSIVE_MATCH(30, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
            RECURSIVE_MATCH(30, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
            if (isMatch)
                RRETURN;
            if (stack.currentFrame->locals.fi >= stack.currentFrame->locals.max || stack.currentFrame->args.subjectPtr >= md.endSubject)
@ -1328,7 +1332,7 @@ RECURSE:
        stack.currentFrame->args.subjectPtr += 2;
    }
    while (stack.currentFrame->args.subjectPtr >= stack.currentFrame->locals.subjectPtrAtStartOfInstruction) {
        RECURSIVE_MATCH(31, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
        RECURSIVE_MATCH(31, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
        if (isMatch)
            RRETURN;
        stack.currentFrame->args.subjectPtr -= 2;
@ -1424,7 +1428,7 @@ RECURSE:

    if (minimize) {
        for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
            RECURSIVE_MATCH(38, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
            RECURSIVE_MATCH(38, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
            if (isMatch)
                RRETURN;
            int d = *stack.currentFrame->args.subjectPtr++;
@ -1452,7 +1456,7 @@ RECURSE:
        ++stack.currentFrame->args.subjectPtr;
    }
    for (;;) {
        RECURSIVE_MATCH(40, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
        RECURSIVE_MATCH(40, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
        if (isMatch)
            RRETURN;
        if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)
@ -1478,7 +1482,7 @@ RECURSE:

    if (minimize) {
        for (stack.currentFrame->locals.fi = min;; stack.currentFrame->locals.fi++) {
            RECURSIVE_MATCH(42, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
            RECURSIVE_MATCH(42, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
            if (isMatch)
                RRETURN;
            int d = *stack.currentFrame->args.subjectPtr++;
@ -1502,7 +1506,7 @@ RECURSE:
        ++stack.currentFrame->args.subjectPtr;
    }
    for (;;) {
        RECURSIVE_MATCH(44, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, false);
        RECURSIVE_MATCH(44, stack.currentFrame->args.instructionPtr, stack.currentFrame->args.bracketChain, stack.currentFrame->args.groupMatched);
        if (isMatch)
            RRETURN;
        if (stack.currentFrame->args.subjectPtr-- == stack.currentFrame->locals.subjectPtrAtStartOfInstruction)

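The `groupMatched` flag threaded through every RECURSIVE_MATCH above exists for one reason: a quantified group that can match the empty string must not be retried forever. Once the group has matched and the subject pointer has not advanced, the "empty string while group already matched" branch bails instead of recursing again. A distilled sketch of the guard, outside the interpreter machinery — a hypothetical helper, not pcre code:

```cpp
#include <cstddef>

// Distilled empty-match guard: iterate a quantified group only while it
// consumes input; an empty match after a prior match ends the loop instead
// of recursing forever (think /(?:a?)*/ applied to "b").
size_t runStarQuantifier(const char*& subject, const char* end,
                         bool (*matchGroup)(const char*&, const char*))
{
    size_t iterations = 0;
    for (;;) {
        const char* before = subject;
        if (!matchGroup(subject, end))
            return iterations;          // '*' is satisfied by zero or more
        if (subject == before && iterations > 0)
            return iterations;          // empty match after a match: bail
        ++iterations;
    }
}
```
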
@ -531,14 +531,17 @@ public:
        case PatternTerm::TypeParenthesesSubpattern:
            // Note: for fixed once parentheses we will ensure at least the minimum is available; others are on their own.
            term.frameLocation = currentCallFrameSize;
            if ((term.quantityCount == 1) && !term.parentheses.isCopy) {
                if (term.quantityType == QuantifierFixedCount) {
                    currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
                    currentInputPosition += term.parentheses.disjunction->m_minimumSize;
                } else {
            if (term.quantityCount == 1 && !term.parentheses.isCopy) {
                if (term.quantityType != QuantifierFixedCount)
                    currentCallFrameSize += RegexStackSpaceForBackTrackInfoParenthesesOnce;
                    currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
                }
                currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
                // If quantity is fixed, then pre-check its minimum size.
                if (term.quantityType == QuantifierFixedCount)
                    currentInputPosition += term.parentheses.disjunction->m_minimumSize;
                term.inputPosition = currentInputPosition;
            } else if (term.parentheses.isTerminal) {
                currentCallFrameSize += RegexStackSpaceForBackTrackInfoParenthesesTerminal;
                currentCallFrameSize = setupDisjunctionOffsets(term.parentheses.disjunction, currentCallFrameSize, currentInputPosition);
                term.inputPosition = currentInputPosition;
            } else {
                term.inputPosition = currentInputPosition;
@ -592,6 +595,33 @@ public:
        setupDisjunctionOffsets(m_pattern.m_body, 0, 0);
    }

    // This optimization identifies sets of parentheses that we will never need to backtrack.
    // In these cases we do not need to store state from prior iterations.
    // We can presently avoid backtracking for:
    //   * a set of parens at the end of the regular expression (last term in any of the alternatives of the main body disjunction).
    //   * where the parens are non-capturing, and quantified unbounded greedy (*).
    //   * where the parens do not contain any capturing subpatterns.
    void checkForTerminalParentheses()
    {
        // This check is much too crude; should be just checking whether the candidate
        // node contains nested capturing subpatterns, not the whole expression!
        if (m_pattern.m_numSubpatterns)
            return;

        js::Vector<PatternAlternative*, 0, js::SystemAllocPolicy>& alternatives = m_pattern.m_body->m_alternatives;
        for (unsigned i = 0; i < alternatives.length(); ++i) {
            js::Vector<PatternTerm, 0, js::SystemAllocPolicy>& terms = alternatives[i]->m_terms;
            if (terms.length()) {
                PatternTerm& term = terms.back();
                if (term.type == PatternTerm::TypeParenthesesSubpattern
                    && term.quantityType == QuantifierGreedy
                    && term.quantityCount == UINT_MAX
                    && !term.capture())
                    term.parentheses.isTerminal = true;
            }
        }
    }

private:
    RegexPattern& m_pattern;
    PatternAlternative* m_alternative;
@ -624,6 +654,7 @@ int compileRegex(const UString& patternString, RegexPattern& pattern)
        JS_ASSERT(numSubpatterns == pattern.m_numSubpatterns);
    }

    constructor.checkForTerminalParentheses();
    constructor.setupOffsets();

    return 0;

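For a concrete sense of what checkForTerminalParentheses() accepts under the three criteria listed above (plus the crude whole-pattern capture check the comment apologizes for): only the last term of a top-level alternative can earn the isTerminal flag, it must be quantified with an unbounded greedy `*`, and the entire pattern must be capture-free. A few illustrative patterns — made-up examples, not cases from the test suite:

```cpp
// Patterns the terminal-parentheses check would flag...
const char* isTerminal[] = {
    "a(?:bc)*",        // non-capturing, greedy unbounded '*', last term
    "x+(?:y|z)*",      // same shape behind another term
};
// ...and patterns it would leave alone.
const char* notTerminal[] = {
    "a(?:bc)*d",       // the parens are not the last term
    "a(bc)*",          // the parens themselves capture
    "(x)y(?:bc)*",     // crude check: any capture in the pattern disqualifies it
};
```
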
@ -917,12 +917,7 @@ class RegexGenerator : private MacroAssembler {
        PatternDisjunction* disjunction = term.parentheses.disjunction;
        ASSERT(term.quantityCount == 1);

        if (term.parentheses.isCopy) {
            m_shouldFallBack = true;
            return;
        }

        unsigned preCheckedCount = ((term.quantityCount == 1) && (term.quantityType == QuantifierFixedCount)) ? disjunction->m_minimumSize : 0;
        unsigned preCheckedCount = (term.quantityType == QuantifierFixedCount) ? disjunction->m_minimumSize : 0;

        unsigned parenthesesFrameLocation = term.frameLocation;
        unsigned alternativeFrameLocation = parenthesesFrameLocation;
@ -941,12 +936,12 @@ class RegexGenerator : private MacroAssembler {
        Jump nonGreedySkipParentheses;
        Label nonGreedyTryParentheses;
        if (term.quantityType == QuantifierGreedy)
            storeToFrame(Imm32(1), parenthesesFrameLocation);
            storeToFrame(index, parenthesesFrameLocation);
        else if (term.quantityType == QuantifierNonGreedy) {
            storeToFrame(Imm32(0), parenthesesFrameLocation);
            storeToFrame(Imm32(-1), parenthesesFrameLocation);
            nonGreedySkipParentheses = jump();
            nonGreedyTryParentheses = label();
            storeToFrame(Imm32(1), parenthesesFrameLocation);
            storeToFrame(index, parenthesesFrameLocation);
        }

        // store the match start index
@ -964,29 +959,21 @@ class RegexGenerator : private MacroAssembler {
        TermGenerationState parenthesesState(disjunction, state.checkedTotal);
        generateParenthesesDisjunction(state.term(), parenthesesState, alternativeFrameLocation);

        // store the match end index
        if (term.invertOrCapture) {
            int inputOffset = state.inputOffset();
            if (inputOffset) {
                move(index, indexTemporary);
                add32(Imm32(state.inputOffset()), indexTemporary);
                store32(indexTemporary, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
            } else
                store32(index, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
        }
        Jump success = jump();
        Jump success = (term.quantityType == QuantifierFixedCount) ?
            jump() :
            branch32(NotEqual, index, Address(stackPointerRegister, (parenthesesFrameLocation * sizeof(void*))));

        // A failure AFTER the parens jumps here
        Label backtrackFromAfterParens(this);

        if (term.quantityType == QuantifierGreedy) {
            // If this is zero we have now tested with both with and without the parens.
            // If this is -1 we have now tested with both with and without the parens.
            loadFromFrame(parenthesesFrameLocation, indexTemporary);
            state.jumpToBacktrack(branchTest32(Zero, indexTemporary), this);
            state.jumpToBacktrack(branch32(Equal, indexTemporary, Imm32(-1)), this);
        } else if (term.quantityType == QuantifierNonGreedy) {
            // If this is zero we have now tested with both with and without the parens.
            // If this is -1 we have now tested without the parens, now test with.
            loadFromFrame(parenthesesFrameLocation, indexTemporary);
            branchTest32(Zero, indexTemporary).linkTo(nonGreedyTryParentheses, this);
            branch32(Equal, indexTemporary, Imm32(-1)).linkTo(nonGreedyTryParentheses, this);
        }

        parenthesesState.plantJumpToBacktrackIfExists(this);
@ -1000,7 +987,7 @@ class RegexGenerator : private MacroAssembler {
        }

        if (term.quantityType == QuantifierGreedy)
            storeToFrame(Imm32(0), parenthesesFrameLocation);
            storeToFrame(Imm32(-1), parenthesesFrameLocation);
        else
            state.jumpToBacktrack(jump(), this);

@ -1008,6 +995,17 @@ class RegexGenerator : private MacroAssembler {
        if (term.quantityType == QuantifierNonGreedy)
            nonGreedySkipParentheses.link(this);
        success.link(this);

        // store the match end index
        if (term.invertOrCapture) {
            int inputOffset = state.inputOffset();
            if (inputOffset) {
                move(index, indexTemporary);
                add32(Imm32(state.inputOffset()), indexTemporary);
                store32(indexTemporary, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
            } else
                store32(index, Address(output, ((term.parentheses.subpatternId << 1) + 1) * sizeof(int)));
        }
    }
}

@ -1018,25 +1016,6 @@ class RegexGenerator : private MacroAssembler {
        ASSERT(parenthesesTerm.type == PatternTerm::TypeParenthesesSubpattern);
        ASSERT(parenthesesTerm.quantityCount != 1); // Handled by generateParenthesesSingle.

        // Capturing not yet implemented!
        if (parenthesesTerm.invertOrCapture) {
            m_shouldFallBack = true;
            return;
        }

        // Quantification limit not yet implemented!
        if (parenthesesTerm.quantityCount != 0xffffffff) {
            m_shouldFallBack = true;
            return;
        }

        // Need to reset nested subpatterns between iterations...
        // for the minute this crude check rejects all patterns with any subpatterns!
        if (m_pattern.m_numSubpatterns) {
            m_shouldFallBack = true;
            return;
        }

        TermGenerationState parenthesesState(disjunction, state.checkedTotal);

        Label matchAgain(this);
@ -1058,7 +1037,11 @@ class RegexGenerator : private MacroAssembler {
            generateTerm(parenthesesState);

        // If we get here, we matched! If the index advanced then try to match more since limit isn't supported yet.
        branch32(GreaterThan, index, Address(stackPointerRegister, (parenthesesTerm.frameLocation * sizeof(void*))), matchAgain);
        branch32(NotEqual, index, Address(stackPointerRegister, (parenthesesTerm.frameLocation * sizeof(void*))), matchAgain);

        // If we get here we matched, but we matched "" - cannot accept this alternative as is, so either backtrack,
        // or fall through to try the next alternative if no backtrack is available.
        parenthesesState.plantJumpToBacktrackIfExists(this);

        parenthesesState.linkAlternativeBacktracks(this);
        // We get here if the alternative fails to match - fall through to the next iteration, or out of the loop.
@ -1191,17 +1174,12 @@ class RegexGenerator : private MacroAssembler {
            break;

        case PatternTerm::TypeParenthesesSubpattern:
            if (term.quantityCount == 1) {
            if (term.quantityCount == 1 && !term.parentheses.isCopy)
                generateParenthesesSingle(state);
                break;
            } else if (state.isLastTerm() && state.isMainDisjunction()) { // Is this the last term of the main disjunction?
                // If this has a greedy quantifier, then it will never need to backtrack!
                if (term.quantityType == QuantifierGreedy) {
                    generateParenthesesGreedyNoBacktrack(state);
                    break;
                }
            }
            m_shouldFallBack = true;
            else if (term.parentheses.isTerminal)
                generateParenthesesGreedyNoBacktrack(state);
            else
                m_shouldFallBack = true;
            break;

        case PatternTerm::TypeParentheticalAssertion:

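The through-line of the generator changes above is a new frame-slot protocol: instead of a 0/1 "tried the parens" flag, the slot now holds the input index at which the group started matching, with -1 meaning "not tried yet". That lets the success path compare the current index against the stored start and treat an empty match as a reason to backtrack rather than as progress. A distilled sketch of the protocol — hypothetical helper, not generated code:

```cpp
// Distilled frame-slot protocol for once-quantified parens: the slot stores
// the input index at entry to the group, or kNotTried before/after giving up.
static const int kNotTried = -1;

bool groupMadeProgress(int frameSlot, int index)
{
    if (frameSlot == kNotTried)
        return false;           // both paths (with and without parens) spent
    return index != frameSlot;  // an empty match leaves the index unchanged
}
```
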
@ -74,7 +74,8 @@ public:

    int execute(const UChar* input, unsigned start, unsigned length, int* output)
    {
        return JS_EXTENSION((reinterpret_cast<RegexJITCode>(m_ref.m_code.executableAddress()))(input, start, length, output));
        void *code = m_ref.m_code.executableAddress();
        return JS_EXTENSION((reinterpret_cast<RegexJITCode>(code))(input, start, length, output));
    }

private:

@ -39,6 +39,7 @@ namespace JSC { namespace Yarr {
#define RegexStackSpaceForBackTrackInfoAlternative 1 // One per alternative.
#define RegexStackSpaceForBackTrackInfoParentheticalAssertion 1
#define RegexStackSpaceForBackTrackInfoParenthesesOnce 1 // Only for !fixed quantifiers.
#define RegexStackSpaceForBackTrackInfoParenthesesTerminal 1
#define RegexStackSpaceForBackTrackInfoParentheses 4

struct PatternDisjunction;
@ -137,6 +138,7 @@ struct PatternTerm {
        unsigned subpatternId;
        unsigned lastSubpatternId;
        bool isCopy;
        bool isTerminal;
    } parentheses;
};
QuantifierType quantityType;
@ -168,6 +170,7 @@ struct PatternTerm {
    parentheses.disjunction = disjunction;
    parentheses.subpatternId = subpatternId;
    parentheses.isCopy = false;
    parentheses.isTerminal = false;
    quantityType = QuantifierFixedCount;
    quantityCount = 1;
}