Mirror of https://github.com/mozilla/gecko-dev.git, synced 2025-03-09 04:25:38 +00:00
Bug 807853 - Add (but do not yet use) parallel compilation mode to ion r=dvander,terrence
parent 98609c8753
commit ab059eb318
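Before the diff itself, a minimal sketch of the recurring pattern this patch threads through the code generator: a two-valued execution mode consulted at every site that could call into the VM. The enum and mode names appear throughout the hunks below; everything else here (the function name, the bodies) is invented for illustration and is not the actual SpiderMonkey source.

    // Sketch only: the mode-dispatch shape used by the CodeGenerator changes below.
    enum ExecutionMode { SequentialExecution, ParallelExecution };

    bool emitSomeOp(ExecutionMode mode) {
        switch (mode) {
          case SequentialExecution:
            // Ordinary path: push arguments and call into the VM.
            return true;
          case ParallelExecution:
            // VM calls are not allowed inside a parallel section, so the real
            // code jumps to an out-of-line parallel abort instead.
            return true;
          default:
            return false;  // "Bad execution mode"
        }
    }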
@@ -138,6 +138,7 @@ CPPSRCS = \
 		FoldConstants.cpp \
 		Intl.cpp \
 		NameFunctions.cpp \
+		ParallelDo.cpp \
 		ParallelArray.cpp \
 		ParseMaps.cpp \
 		ParseNode.cpp \
@@ -317,7 +318,9 @@ CPPSRCS += MIR.cpp \
 		ValueNumbering.cpp \
 		RangeAnalysis.cpp \
 		VMFunctions.cpp \
+		ParallelFunctions.cpp \
 		AliasAnalysis.cpp \
+		ParallelArrayAnalysis.cpp \
 		UnreachableCodeElimination.cpp \
 		$(NULL)
 endif #ENABLE_ION
@@ -16,6 +16,7 @@
 #include "builtin/TestingFunctions.h"
 #include "methodjit/MethodJIT.h"
+#include "vm/ForkJoin.h"

 #include "vm/Stack-inl.h"

@@ -878,6 +879,14 @@ DisplayName(JSContext *cx, unsigned argc, jsval *vp)
     return true;
 }

+JSBool
+js::testingFunc_inParallelSection(JSContext *cx, unsigned argc, jsval *vp)
+{
+    JS_ASSERT(!ForkJoinSlice::InParallelSection());
+    JS_SET_RVAL(cx, vp, JSVAL_FALSE);
+    return true;
+}
+
 static JSFunctionSpecWithHelp TestingFunctions[] = {
     JS_FN_HELP("gc", ::GC, 0, 0,
 "gc([obj] | 'compartment')",
@@ -1009,6 +1018,10 @@ static JSFunctionSpecWithHelp TestingFunctions[] = {
 "  inferred name based on where the function was defined. This can be\n"
 "  different from the 'name' property on the function."),

+    JS_FN_HELP("inParallelSection", testingFunc_inParallelSection, 0, 0,
+"inParallelSection()",
+"  True if this code is executing within a parallel section."),
+
     JS_FS_HELP_END
 };
@@ -11,6 +11,9 @@ namespace js {
 bool
 DefineTestingFunctions(JSContext *cx, JSHandleObject obj);

+JSBool
+testingFunc_inParallelSection(JSContext *cx, unsigned argc, jsval *vp);
+
 } /* namespace js */

 #endif /* TestingFunctions_h__ */
@@ -610,6 +610,13 @@ class Rooted : public RootedBase<T>
 #endif
     }

+    void init(PerThreadData *ptArg) {
+#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
+        PerThreadDataFriendFields *pt = PerThreadDataFriendFields::get(ptArg);
+        commonInit(pt->thingGCRooters);
+#endif
+    }
+
   public:
     Rooted(JSContext *cx
            MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
@@ -636,6 +643,31 @@ class Rooted : public RootedBase<T>
         init(cx);
     }

+    Rooted(PerThreadData *pt
+           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : ptr(RootMethods<T>::initial())
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        init(pt);
+    }
+
+    Rooted(PerThreadData *pt, T initial
+           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : ptr(initial)
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        init(pt);
+    }
+
+    template <typename S>
+    Rooted(PerThreadData *pt, const Unrooted<S> &initial
+           MOZ_GUARD_OBJECT_NOTIFIER_PARAM)
+      : ptr(static_cast<S>(initial))
+    {
+        MOZ_GUARD_OBJECT_NOTIFIER_INIT;
+        init(pt);
+    }
+
     ~Rooted() {
 #if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
         JS_ASSERT(*stack == this);
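The new constructors let code root GC things on a thread that has a PerThreadData but no JSContext, which is exactly the situation of the parallel worker threads targeted by this patch. A hedged usage sketch — only Rooted and PerThreadData come from the diff; the helper, its arguments, and its body are hypothetical:

    // Hypothetical worker-thread helper: rooting without a JSContext.
    void processOnWorker(PerThreadData *pt, JSObject *input)
    {
        Rooted<JSObject*> obj(pt, input);  // registers via pt->thingGCRooters
        // ... operations that may trigger marking now see |obj| as a root ...
    }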
@@ -80,6 +80,8 @@ MarkExactStackRoots(JSTracer *trc)
     for (unsigned i = 0; i < THING_ROOT_LIMIT; i++) {
         for (ContextIter cx(trc->runtime); !cx.done(); cx.next())
             MarkExactStackRootList(trc, cx->thingGCRooters[i], ThingRootKind(i));
+
+        MarkExactStackRootList(trc, trc->runtime->mainThread->thingGCRooters[i], ThingRootKind(i));
     }
 }
 #endif /* JSGC_USE_EXACT_ROOTING */
@@ -196,6 +196,19 @@ SuppressCheckRoots(Vector<Rooter, 0, SystemAllocPolicy> &rooters)
     return false;
 }

+static void
+GatherRooters(Vector<Rooter, 0, SystemAllocPolicy> &rooters,
+              Rooted<void*> **thingGCRooters,
+              unsigned thingRootKind)
+{
+    Rooted<void*> *rooter = thingGCRooters[thingRootKind];
+    while (rooter) {
+        Rooter r = { rooter, ThingRootKind(thingRootKind) };
+        JS_ALWAYS_TRUE(rooters.append(r));
+        rooter = rooter->previous();
+    }
+}
+
 void
 JS::CheckStackRoots(JSContext *cx)
 {
@@ -243,16 +256,13 @@ JS::CheckStackRoots(JSContext *cx)
 #endif

     // Gather up all of the rooters
-    Vector< Rooter, 0, SystemAllocPolicy> rooters;
+    Vector<Rooter, 0, SystemAllocPolicy> rooters;
     for (unsigned i = 0; i < THING_ROOT_LIMIT; i++) {
         for (ContextIter cx(rt); !cx.done(); cx.next()) {
-            Rooted<void*> *rooter = cx->thingGCRooters[i];
-            while (rooter) {
-                Rooter r = { rooter, ThingRootKind(i) };
-                JS_ALWAYS_TRUE(rooters.append(r));
-                rooter = rooter->previous();
-            }
+            GatherRooters(rooters, cx->thingGCRooters, i);
         }
+
+        GatherRooters(rooters, rt->mainThread.thingGCRooters, i);
     }

     if (SuppressCheckRoots(rooters))
@@ -239,6 +239,8 @@ ConvertFrames(JSContext *cx, IonActivation *activation, IonBailoutIterator &it)
 #ifdef DEBUG
     // Use count is reset after invalidation. Log use count on bailouts to
     // determine if we have a critical sequence of bailout.
+    //
+    // Note: frame conversion only occurs in sequential mode
     if (it.script()->ion == it.ionScript()) {
         IonSpew(IonSpew_Bailouts, " Current script use count is %u",
                 it.script()->getUseCount());
@@ -18,7 +18,9 @@
 #include "jsnum.h"
 #include "jsmath.h"
 #include "jsinterpinlines.h"
+#include "ParallelFunctions.h"
+#include "ExecutionModeInlines.h"
 #include "vm/ForkJoin.h"

 #include "vm/StringObject-inl.h"

@@ -476,6 +478,17 @@ CodeGenerator::visitLambda(LLambda *lir)
     masm.newGCThing(output, fun, ool->entry());
     masm.initGCThing(output, fun);

+    emitLambdaInit(output, scopeChain, fun);
+
+    masm.bind(ool->rejoin());
+    return true;
+}
+
+void
+CodeGenerator::emitLambdaInit(const Register &output,
+                              const Register &scopeChain,
+                              JSFunction *fun)
+{
     // Initialize nargs and flags. We do this with a single uint32 to avoid
     // 16-bit writes.
     union {
@@ -494,8 +507,22 @@ CodeGenerator::visitLambda(LLambda *lir)
                   Address(output, JSFunction::offsetOfNativeOrScript()));
     masm.storePtr(scopeChain, Address(output, JSFunction::offsetOfEnvironment()));
     masm.storePtr(ImmGCPtr(fun->displayAtom()), Address(output, JSFunction::offsetOfAtom()));
+}

-    masm.bind(ool->rejoin());
+bool
+CodeGenerator::visitParLambda(LParLambda *lir)
+{
+    Register resultReg = ToRegister(lir->output());
+    Register parSliceReg = ToRegister(lir->parSlice());
+    Register scopeChainReg = ToRegister(lir->scopeChain());
+    Register tempReg1 = ToRegister(lir->getTemp0());
+    Register tempReg2 = ToRegister(lir->getTemp1());
+    JSFunction *fun = lir->mir()->fun();
+
+    JS_ASSERT(scopeChainReg != resultReg);
+
+    emitParAllocateGCThing(resultReg, parSliceReg, tempReg1, tempReg2, fun);
+    emitLambdaInit(resultReg, scopeChainReg, fun);
     return true;
 }

@@ -763,6 +790,52 @@ CodeGenerator::visitFunctionEnvironment(LFunctionEnvironment *lir)
     return true;
 }

+bool
+CodeGenerator::visitParSlice(LParSlice *lir)
+{
+    const Register tempReg = ToRegister(lir->getTempReg());
+
+    masm.setupUnalignedABICall(0, tempReg);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParForkJoinSlice));
+    JS_ASSERT(ToRegister(lir->output()) == ReturnReg);
+    return true;
+}
+
+bool
+CodeGenerator::visitParWriteGuard(LParWriteGuard *lir)
+{
+    JS_ASSERT(gen->info().executionMode() == ParallelExecution);
+
+    const Register tempReg = ToRegister(lir->getTempReg());
+    masm.setupUnalignedABICall(2, tempReg);
+    masm.passABIArg(ToRegister(lir->parSlice()));
+    masm.passABIArg(ToRegister(lir->object()));
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParWriteGuard));
+
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+
+    // branch to the OOL failure code if false is returned
+    masm.branchTestBool(Assembler::Zero, ReturnReg, ReturnReg, bail);
+
+    return true;
+}
+
+bool
+CodeGenerator::visitParDump(LParDump *lir)
+{
+    ValueOperand value = ToValue(lir, 0);
+    masm.reserveStack(sizeof(Value));
+    masm.storeValue(value, Address(StackPointer, 0));
+    masm.movePtr(StackPointer, CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParDumpValue));
+    masm.freeStack(sizeof(Value));
+    return true;
+}
+
 bool
 CodeGenerator::visitTypeBarrier(LTypeBarrier *lir)
 {
@@ -986,8 +1059,26 @@ static const VMFunction GetIntrinsicValueInfo =
 bool
 CodeGenerator::visitCallGetIntrinsicValue(LCallGetIntrinsicValue *lir)
 {
-    pushArg(ImmGCPtr(lir->mir()->name()));
-    return callVM(GetIntrinsicValueInfo, lir);
+    // When compiling parallel kernels, always bail.
+    switch (gen->info().executionMode()) {
+      case SequentialExecution: {
+        pushArg(ImmGCPtr(lir->mir()->name()));
+        return callVM(GetIntrinsicValueInfo, lir);
+      }
+
+      case ParallelExecution: {
+        Label *bail;
+        if (!ensureOutOfLineParallelAbort(&bail))
+            return false;
+
+        masm.jump(bail);
+        return true;
+      }
+
+      default:
+        JS_NOT_REACHED("Bad execution mode");
+        return false;
+    }
 }

 typedef bool (*InvokeFunctionFn)(JSContext *, HandleFunction, uint32_t, Value *, Value *);
@@ -1031,7 +1122,8 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
     Register objreg = ToRegister(call->getTempObject());
     Register nargsreg = ToRegister(call->getNargsReg());
     uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
-    Label invoke, thunk, makeCall, end;
+    ExecutionMode executionMode = gen->info().executionMode();
+    Label uncompiled, thunk, makeCall, end;

     // Known-target case is handled by LCallKnown.
     JS_ASSERT(!call->hasSingleTarget());
@@ -1049,15 +1141,14 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
         return false;

     // Guard that calleereg is an interpreted function with a JSScript:
-    masm.branchIfFunctionHasNoScript(calleereg, &invoke);
+    masm.branchIfFunctionHasNoScript(calleereg, &uncompiled);

     // Knowing that calleereg is a non-native function, load the JSScript.
     masm.loadPtr(Address(calleereg, offsetof(JSFunction, u.i.script_)), objreg);
-    ExecutionMode executionMode = gen->info().executionMode();
     masm.loadPtr(Address(objreg, ionOffset(executionMode)), objreg);

     // Guard that the IonScript has been compiled.
-    masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &invoke);
+    masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &uncompiled);

     // Nestle the StackPointer up to the argument vector.
     masm.freeStack(unusedStack);
@@ -1100,9 +1191,18 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
     masm.jump(&end);

     // Handle uncompiled or native functions.
-    masm.bind(&invoke);
-    if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
-        return false;
+    masm.bind(&uncompiled);
+    switch (executionMode) {
+      case SequentialExecution:
+        if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
+            return false;
+        break;
+
+      case ParallelExecution:
+        if (!emitParCallToUncompiledScript(calleereg))
+            return false;
+        break;
+    }

     masm.bind(&end);

@@ -1115,10 +1215,30 @@ CodeGenerator::visitCallGeneric(LCallGeneric *call)
         masm.bind(&notPrimitive);
     }

+    if (!checkForParallelBailout())
+        return false;
+
     dropArguments(call->numStackArgs() + 1);
     return true;
 }

+// Generates a call to ParCallToUncompiledScript() and then bails out.
+// |calleeReg| should contain the JSFunction*.
+bool
+CodeGenerator::emitParCallToUncompiledScript(Register calleeReg)
+{
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+
+    masm.movePtr(calleeReg, CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCallToUncompiledScript));
+    masm.jump(bail);
+    return true;
+}
+
 bool
 CodeGenerator::visitCallKnown(LCallKnown *call)
 {
@@ -1127,7 +1247,8 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
     Register objreg = ToRegister(call->getTempObject());
     uint32_t unusedStack = StackOffsetOfPassedArg(call->argslot());
     RootedFunction target(cx, call->getSingleTarget());
-    Label end, invoke;
+    ExecutionMode executionMode = gen->info().executionMode();
+    Label end, uncompiled;

     // Native single targets are handled by LCallNative.
     JS_ASSERT(!target->isNative());
@@ -1140,10 +1261,13 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
     if (target->isInterpretedLazy() && !target->getOrCreateScript(cx))
         return false;

-    // If the function is known to be uncompilable, only emit the call to InvokeFunction.
-    ExecutionMode executionMode = gen->info().executionMode();
+    // If the function is known to be uncompilable, just emit the call to
+    // Invoke in sequential mode, else mark as cannot compile.
+    RootedScript targetScript(cx, target->nonLazyScript());
+    if (GetIonScript(targetScript, executionMode) == ION_DISABLED_SCRIPT) {
+        if (executionMode == ParallelExecution)
+            return false;

         if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
             return false;

@@ -1163,7 +1287,7 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
     masm.loadPtr(Address(objreg, ionOffset(executionMode)), objreg);

     // Guard that the IonScript has been compiled.
-    masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &invoke);
+    masm.branchPtr(Assembler::BelowOrEqual, objreg, ImmWord(ION_COMPILING_SCRIPT), &uncompiled);

     // Load the start of the target IonCode.
     masm.loadPtr(Address(objreg, IonScript::offsetOfMethod()), objreg);
@@ -1190,12 +1314,24 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
     masm.jump(&end);

     // Handle uncompiled functions.
-    masm.bind(&invoke);
-    if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
-        return false;
+    masm.bind(&uncompiled);
+    switch (executionMode) {
+      case SequentialExecution:
+        if (!emitCallInvokeFunction(call, calleereg, call->numActualArgs(), unusedStack))
+            return false;
+        break;
+
+      case ParallelExecution:
+        if (!emitParCallToUncompiledScript(calleereg))
+            return false;
+        break;
+    }

     masm.bind(&end);

+    if (!checkForParallelBailout())
+        return false;
+
     // If the return value of the constructing function is Primitive,
     // replace the return value with the Object from CreateThis.
     if (call->mir()->isConstructing()) {
@@ -1209,6 +1345,22 @@ CodeGenerator::visitCallKnown(LCallKnown *call)
     return true;
 }

+bool
+CodeGenerator::checkForParallelBailout()
+{
+    // In parallel mode, if we call another ion-compiled function and
+    // it returns JS_ION_ERROR, that indicates a bailout that we have
+    // to propagate up the stack.
+    ExecutionMode executionMode = gen->info().executionMode();
+    if (executionMode == ParallelExecution) {
+        Label *bail;
+        if (!ensureOutOfLineParallelAbort(&bail))
+            return false;
+        masm.branchTestMagic(Assembler::Equal, JSReturnOperand, bail);
+    }
+    return true;
+}
+
 bool
 CodeGenerator::emitCallInvokeFunction(LApplyArgsGeneric *apply, Register extraStackSize)
 {
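The protocol that checkForParallelBailout() implements can be modeled in a few lines. Below is a self-contained toy model, not the real code: only the name JS_ION_ERROR and the "magic return value tested after every parallel-mode call" convention come from the patch; the types and functions are invented for illustration.

    #include <cstdio>

    struct Value { bool isIonError; };            // stands in for a JS Value
    static const Value JS_ION_ERROR_VALUE = { true };

    Value callee(bool mustBail) {
        if (mustBail)
            return JS_ION_ERROR_VALUE;            // what visitOutOfLineParallelAbort produces
        return Value{ false };
    }

    bool caller(bool mustBail) {
        Value rval = callee(mustBail);
        if (rval.isIonError)                      // checkForParallelBailout's branchTestMagic
            return false;                         // propagate the bailout up the stack
        return true;
    }

    int main() { std::printf("%d %d\n", caller(false), caller(true)); }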
@@ -1579,6 +1731,142 @@ CodeGenerator::visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool)
     return true;
 }

+// Out-of-line path to report over-recursed error and fail.
+class ParCheckOverRecursedFailure : public OutOfLineCodeBase<CodeGenerator>
+{
+    LParCheckOverRecursed *lir_;
+
+  public:
+    ParCheckOverRecursedFailure(LParCheckOverRecursed *lir)
+      : lir_(lir)
+    { }
+
+    bool accept(CodeGenerator *codegen) {
+        return codegen->visitParCheckOverRecursedFailure(this);
+    }
+
+    LParCheckOverRecursed *lir() const {
+        return lir_;
+    }
+};
+
+bool
+CodeGenerator::visitParCheckOverRecursed(LParCheckOverRecursed *lir)
+{
+    // See above: unlike visitCheckOverRecursed(), this code runs in
+    // parallel mode and hence uses the ionStackLimit from the current
+    // thread state. Also, we must check the interrupt flags because
+    // on interrupt or abort, only the stack limit for the main thread
+    // is reset, not the worker threads. See comment in vm/ForkJoin.h
+    // for more details.
+
+    Register parSliceReg = ToRegister(lir->parSlice());
+    Register tempReg = ToRegister(lir->getTempReg());
+
+    masm.loadPtr(Address(parSliceReg, offsetof(ForkJoinSlice, perThreadData)), tempReg);
+    masm.loadPtr(Address(tempReg, offsetof(PerThreadData, ionStackLimit)), tempReg);
+
+    // Conditional forward (unlikely) branch to failure.
+    ParCheckOverRecursedFailure *ool = new ParCheckOverRecursedFailure(lir);
+    if (!addOutOfLineCode(ool))
+        return false;
+    masm.branchPtr(Assembler::BelowOrEqual, StackPointer, tempReg, ool->entry());
+    masm.parCheckInterruptFlags(tempReg, ool->entry());
+    masm.bind(ool->rejoin());
+
+    return true;
+}
+
+bool
+CodeGenerator::visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool)
+{
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+
+    // Avoid saving/restoring the temp register since we will put the
+    // ReturnReg into it below and we don't want to clobber that
+    // during PopRegsInMask():
+    LParCheckOverRecursed *lir = ool->lir();
+    Register tempReg = ToRegister(lir->getTempReg());
+    RegisterSet saveSet(lir->safepoint()->liveRegs());
+    saveSet.maybeTake(tempReg);
+
+    masm.PushRegsInMask(saveSet);
+    masm.movePtr(ToRegister(lir->parSlice()), CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckOverRecursed));
+    masm.movePtr(ReturnReg, tempReg);
+    masm.PopRegsInMask(saveSet);
+    masm.branchTestBool(Assembler::Zero, tempReg, tempReg, bail);
+    masm.jump(ool->rejoin());
+
+    return true;
+}
+
+// Out-of-line path to report over-recursed error and fail.
+class OutOfLineParCheckInterrupt : public OutOfLineCodeBase<CodeGenerator>
+{
+  public:
+    LParCheckInterrupt *const lir;
+
+    OutOfLineParCheckInterrupt(LParCheckInterrupt *lir)
+      : lir(lir)
+    { }
+
+    bool accept(CodeGenerator *codegen) {
+        return codegen->visitOutOfLineParCheckInterrupt(this);
+    }
+};
+
+bool
+CodeGenerator::visitParCheckInterrupt(LParCheckInterrupt *lir)
+{
+    // First check for slice->shared->interrupt_.
+    OutOfLineParCheckInterrupt *ool = new OutOfLineParCheckInterrupt(lir);
+    if (!addOutOfLineCode(ool))
+        return false;
+
+    // We must check two flags:
+    // - runtime->interrupt
+    // - runtime->parallelAbort
+    // See vm/ForkJoin.h for discussion on why we use this design.
+
+    Register tempReg = ToRegister(lir->getTempReg());
+    masm.parCheckInterruptFlags(tempReg, ool->entry());
+    masm.bind(ool->rejoin());
+    return true;
+}
+
+bool
+CodeGenerator::visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool)
+{
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+
+    // Avoid saving/restoring the temp register since we will put the
+    // ReturnReg into it below and we don't want to clobber that
+    // during PopRegsInMask():
+    LParCheckInterrupt *lir = ool->lir;
+    Register tempReg = ToRegister(lir->getTempReg());
+    RegisterSet saveSet(lir->safepoint()->liveRegs());
+    saveSet.maybeTake(tempReg);
+
+    masm.PushRegsInMask(saveSet);
+    masm.movePtr(ToRegister(ool->lir->parSlice()), CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCheckInterrupt));
+    masm.movePtr(ReturnReg, tempReg);
+    masm.PopRegsInMask(saveSet);
+    masm.branchTestBool(Assembler::Zero, tempReg, tempReg, bail);
+    masm.jump(ool->rejoin());
+
+    return true;
+}
+
 IonScriptCounts *
 CodeGenerator::maybeCreateScriptCounts()
 {
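visitParCheckOverRecursed() reaches through two raw offsets, offsetof(ForkJoinSlice, perThreadData) and offsetof(PerThreadData, ionStackLimit). The minimal layout those offsetof() uses presuppose looks roughly like this — field names come from the diff, but the field types and everything else are assumptions; the real definitions live in vm/ForkJoin.h and the runtime headers:

    // Assumed layout, for orientation only.
    struct PerThreadData {
        uintptr_t ionStackLimit;       // per-thread JIT stack limit; only the
                                       // main thread's copy is reset on interrupt
        // ...
    };

    struct ForkJoinSlice {
        PerThreadData *perThreadData;  // this worker's thread-local data
        // ...
    };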
@@ -1728,6 +2016,9 @@ CodeGenerator::generateBody()
                 return false;
         }

+        if (!callTraceLIR(i, *iter))
+            return false;
+
         if (!iter->accept(this))
             return false;
     }
@@ -1765,6 +2056,8 @@ static const VMFunction NewInitArrayInfo =
 bool
 CodeGenerator::visitNewArrayCallVM(LNewArray *lir)
 {
+    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
+
     Register objReg = ToRegister(lir->output());

     JS_ASSERT(!lir->isCall());
@@ -1813,21 +2106,14 @@ CodeGenerator::visitNewSlots(LNewSlots *lir)
 bool
 CodeGenerator::visitNewArray(LNewArray *lir)
 {
+    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
     Register objReg = ToRegister(lir->output());
     JSObject *templateObject = lir->mir()->templateObject();
     uint32_t count = lir->mir()->count();

     JS_ASSERT(count < JSObject::NELEMENTS_LIMIT);

-    size_t maxArraySlots =
-        gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;
-
-    // Allocate space using the VMCall
-    // when mir hints it needs to get allocated immediatly,
-    // but only when data doesn't fit the available array slots.
-    bool allocating = lir->mir()->isAllocating() && count > maxArraySlots;
-
-    if (templateObject->hasSingletonType() || allocating)
+    if (lir->mir()->shouldUseVM())
         return visitNewArrayCallVM(lir);

     OutOfLineNewArray *ool = new OutOfLineNewArray(lir);
@@ -1875,6 +2161,8 @@ static const VMFunction NewInitObjectInfo = FunctionInfo<NewInitObjectFn>(NewInitObject);
 bool
 CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
 {
+    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
+
     Register objReg = ToRegister(lir->output());

     JS_ASSERT(!lir->isCall());
@@ -1894,11 +2182,11 @@ CodeGenerator::visitNewObjectVMCall(LNewObject *lir)
 bool
 CodeGenerator::visitNewObject(LNewObject *lir)
 {
+    JS_ASSERT(gen->info().executionMode() == SequentialExecution);
     Register objReg = ToRegister(lir->output());

     JSObject *templateObject = lir->mir()->templateObject();

-    if (templateObject->hasSingletonType() || templateObject->hasDynamicSlots())
+    if (lir->mir()->shouldUseVM())
         return visitNewObjectVMCall(lir);

     OutOfLineNewObject *ool = new OutOfLineNewObject(lir);
@@ -1955,7 +2243,7 @@ CodeGenerator::visitNewCallObject(LNewCallObject *lir)
 {
     Register obj = ToRegister(lir->output());

-    JSObject *templateObj = lir->mir()->templateObj();
+    JSObject *templateObj = lir->mir()->templateObject();

     // If we have a template object, we can inline call object creation.
     OutOfLineCode *ool;
@@ -1984,6 +2272,68 @@ CodeGenerator::visitNewCallObject(LNewCallObject *lir)
     return true;
 }

+bool
+CodeGenerator::visitParNewCallObject(LParNewCallObject *lir)
+{
+    Register resultReg = ToRegister(lir->output());
+    Register parSliceReg = ToRegister(lir->parSlice());
+    Register tempReg1 = ToRegister(lir->getTemp0());
+    Register tempReg2 = ToRegister(lir->getTemp1());
+    JSObject *templateObj = lir->mir()->templateObj();
+
+    emitParAllocateGCThing(resultReg, parSliceReg, tempReg1, tempReg2, templateObj);
+
+    // NB: !lir->slots()->isRegister() implies that there is no slots
+    // array at all, and the memory is already zeroed when copying
+    // from the template object
+
+    if (lir->slots()->isRegister()) {
+        Register slotsReg = ToRegister(lir->slots());
+        JS_ASSERT(slotsReg != resultReg);
+        masm.storePtr(slotsReg, Address(resultReg, JSObject::offsetOfSlots()));
+    }
+
+    return true;
+}
+
+bool
+CodeGenerator::visitParNewDenseArray(LParNewDenseArray *lir)
+{
+    Register parSliceReg = ToRegister(lir->parSlice());
+    Register lengthReg = ToRegister(lir->length());
+    Register tempReg0 = ToRegister(lir->getTemp0());
+    Register tempReg1 = ToRegister(lir->getTemp1());
+    Register tempReg2 = ToRegister(lir->getTemp2());
+    JSObject *templateObj = lir->mir()->templateObject();
+
+    // Allocate the array into tempReg2. Don't use resultReg because it
+    // may alias parSliceReg etc.
+    emitParAllocateGCThing(tempReg2, parSliceReg, tempReg0, tempReg1, templateObj);
+
+    // Invoke a C helper to allocate the elements. For convenience,
+    // this helper also returns the array back to us, or NULL, which
+    // obviates the need to preserve the register across the call. In
+    // reality, we should probably just have the C helper also
+    // *allocate* the array, but that would require that it initialize
+    // the various fields of the object, and I didn't want to
+    // duplicate the code in initGCThing() that already does such an
+    // admirable job.
+    masm.setupUnalignedABICall(3, CallTempReg3);
+    masm.passABIArg(parSliceReg);
+    masm.passABIArg(tempReg2);
+    masm.passABIArg(lengthReg);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParExtendArray));
+
+    Register resultReg = ToRegister(lir->output());
+    JS_ASSERT(resultReg == ReturnReg);
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+    masm.branchTestPtr(Assembler::Zero, resultReg, resultReg, bail);
+
+    return true;
+}
+
 typedef JSObject *(*NewStringObjectFn)(JSContext *, HandleString);
 static const VMFunction NewStringObjectInfo = FunctionInfo<NewStringObjectFn>(NewStringObject);
@@ -2018,6 +2368,100 @@ typedef bool(*InitPropFn)(JSContext *cx, HandleObject obj,
 static const VMFunction InitPropInfo =
     FunctionInfo<InitPropFn>(InitProp);

+bool
+CodeGenerator::visitParNew(LParNew *lir)
+{
+    Register objReg = ToRegister(lir->output());
+    Register parSliceReg = ToRegister(lir->parSlice());
+    Register tempReg1 = ToRegister(lir->getTemp0());
+    Register tempReg2 = ToRegister(lir->getTemp1());
+    JSObject *templateObject = lir->mir()->templateObject();
+    emitParAllocateGCThing(objReg, parSliceReg, tempReg1, tempReg2,
+                           templateObject);
+    return true;
+}
+
+class OutOfLineParNewGCThing : public OutOfLineCodeBase<CodeGenerator>
+{
+  public:
+    gc::AllocKind allocKind;
+    Register objReg;
+
+    OutOfLineParNewGCThing(gc::AllocKind allocKind, Register objReg)
+      : allocKind(allocKind), objReg(objReg)
+    {}
+
+    bool accept(CodeGenerator *codegen) {
+        return codegen->visitOutOfLineParNewGCThing(this);
+    }
+};
+
+bool
+CodeGenerator::emitParAllocateGCThing(const Register &objReg,
+                                      const Register &parSliceReg,
+                                      const Register &tempReg1,
+                                      const Register &tempReg2,
+                                      JSObject *templateObj)
+{
+    gc::AllocKind allocKind = templateObj->getAllocKind();
+    OutOfLineParNewGCThing *ool = new OutOfLineParNewGCThing(allocKind, objReg);
+    if (!ool || !addOutOfLineCode(ool))
+        return false;
+
+    masm.parNewGCThing(objReg, parSliceReg, tempReg1, tempReg2,
+                       templateObj, ool->entry());
+    masm.bind(ool->rejoin());
+    masm.initGCThing(objReg, templateObj);
+    return true;
+}
+
+bool
+CodeGenerator::visitOutOfLineParNewGCThing(OutOfLineParNewGCThing *ool)
+{
+    // As a fallback for allocation in par. exec. mode, we invoke the
+    // C helper ParNewGCThing(), which calls into the GC code. If it
+    // returns NULL, we bail. If returns non-NULL, we rejoin the
+    // original instruction.
+
+    // This saves all caller-save registers, regardless of whether
+    // they are live. This is wasteful but a simplification, given
+    // that for some of the LIR that this is used with
+    // (e.g., LParLambda) there are values in those registers
+    // that must not be clobbered but which are not technically
+    // considered live.
+    RegisterSet saveSet(RegisterSet::Volatile());
+
+    // Also preserve the temps we're about to overwrite,
+    // but don't bother to save the objReg.
+    saveSet.addUnchecked(CallTempReg0);
+    saveSet.addUnchecked(CallTempReg1);
+    saveSet.maybeTake(AnyRegister(ool->objReg));
+
+    masm.PushRegsInMask(saveSet);
+    masm.move32(Imm32(ool->allocKind), CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParNewGCThing));
+    masm.movePtr(ReturnReg, ool->objReg);
+    masm.PopRegsInMask(saveSet);
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+    masm.branchTestPtr(Assembler::Zero, ool->objReg, ool->objReg, bail);
+    masm.jump(ool->rejoin());
+    return true;
+}
+
+bool
+CodeGenerator::visitParBailout(LParBailout *lir)
+{
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+    masm.jump(bail);
+    return true;
+}
+
 bool
 CodeGenerator::visitInitProp(LInitProp *lir)
 {
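Note how emitParAllocateGCThing() is the shared building block of this patch's parallel allocation story: visitParLambda, visitParNewCallObject, visitParNewDenseArray, and visitParNew all funnel through it, so the inline bump-allocation fast path and the ParNewGCThing() out-of-line fallback are written exactly once.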
@@ -2371,6 +2815,34 @@ CodeGenerator::visitBinaryV(LBinaryV *lir)
     }
 }

+bool
+CodeGenerator::visitParCompareS(LParCompareS *lir)
+{
+    JSOp op = lir->mir()->jsop();
+    Register left = ToRegister(lir->left());
+    Register right = ToRegister(lir->right());
+
+    JS_ASSERT((op == JSOP_EQ || op == JSOP_STRICTEQ) ||
+              (op == JSOP_NE || op == JSOP_STRICTNE));
+
+    masm.setupUnalignedABICall(2, CallTempReg2);
+    masm.passABIArg(left);
+    masm.passABIArg(right);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParCompareStrings));
+    masm.and32(Imm32(0xF), ReturnReg); // The C functions return an enum whose size is undef
+
+    // Check for cases that we do not currently handle in par exec
+    Label *bail;
+    if (!ensureOutOfLineParallelAbort(&bail))
+        return false;
+    masm.branch32(Assembler::Equal, ReturnReg, Imm32(ParCompareUnknown), bail);
+
+    if (op == JSOP_NE || op == JSOP_STRICTNE)
+        masm.xor32(Imm32(1), ReturnReg);
+
+    return true;
+}
+
 typedef bool (*StringCompareFn)(JSContext *, HandleString, HandleString, JSBool *);
 static const VMFunction stringsEqualInfo =
     FunctionInfo<StringCompareFn>(ion::StringsEqual<true>);
@@ -3130,17 +3602,25 @@ CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool)
         value = TypedOrValueRegister(valueType, ToAnyRegister(store->value()));
     }

+    // We can bump the initialized length inline if index ==
+    // initializedLength and index < capacity. Otherwise, we have to
+    // consider fallback options. In fallback cases, we branch to one
+    // of two labels because (at least in parallel mode) we can
+    // recover from index < capacity but not index !=
+    // initializedLength.
+    Label indexNotInitLen;
+    Label indexWouldExceedCapacity;
+
     // If index == initializedLength, try to bump the initialized length inline.
     // If index > initializedLength, call a stub. Note that this relies on the
     // condition flags sticking from the incoming branch.
-    Label callStub;
-    masm.j(Assembler::NotEqual, &callStub);
+    masm.j(Assembler::NotEqual, &indexNotInitLen);

     Int32Key key = ToInt32Key(index);

     // Check array capacity.
     masm.branchKey(Assembler::BelowOrEqual, Address(elements, ObjectElements::offsetOfCapacity()),
-                   key, &callStub);
+                   key, &indexWouldExceedCapacity);

     // Update initialized length. The capacity guard above ensures this won't overflow,
     // due to NELEMENTS_LIMIT.
@@ -3168,22 +3648,82 @@ CodeGenerator::visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool)
         masm.jump(ool->rejoinStore());
     }

-    masm.bind(&callStub);
-    saveLive(ins);
+    switch (gen->info().executionMode()) {
+      case SequentialExecution:
+        masm.bind(&indexNotInitLen);
+        masm.bind(&indexWouldExceedCapacity);
+        saveLive(ins);

-    pushArg(Imm32(current->mir()->strict()));
-    pushArg(value);
-    if (index->isConstant())
-        pushArg(*index->toConstant());
-    else
-        pushArg(TypedOrValueRegister(MIRType_Int32, ToAnyRegister(index)));
-    pushArg(object);
-    if (!callVM(SetObjectElementInfo, ins))
-        return false;
+        pushArg(Imm32(current->mir()->strict()));
+        pushArg(value);
+        if (index->isConstant())
+            pushArg(*index->toConstant());
+        else
+            pushArg(TypedOrValueRegister(MIRType_Int32, ToAnyRegister(index)));
+        pushArg(object);
+        if (!callVM(SetObjectElementInfo, ins))
+            return false;

-    restoreLive(ins);
-    masm.jump(ool->rejoin());
-    return true;
+        restoreLive(ins);
+        masm.jump(ool->rejoin());
+        return true;

+      case ParallelExecution:
+        Label *bail;
+        if (!ensureOutOfLineParallelAbort(&bail))
+            return false;
+
+        //////////////////////////////////////////////////////////////
+        // If the problem is that we do not have sufficient capacity,
+        // try to reallocate the elements array and then branch back
+        // to perform the actual write. Note that we do not want to
+        // force the reg alloc to assign any particular register, so
+        // we make space on the stack and pass the arguments that way.
+        // (Also, outside of the VM call mechanism, it's very hard to
+        // pass in a Value to a C function!).
+        masm.bind(&indexWouldExceedCapacity);
+
+        // The use of registers here is somewhat subtle. We need to
+        // save and restore the volatile registers but we also need to
+        // preserve the ReturnReg. Normally we'd just add a constraint
+        // to the regalloc, but since this is the slow path of a hot
+        // instruction we don't want to do that. So instead we push
+        // the volatile registers but we don't save the register
+        // `object`. We will copy the ReturnReg into `object`. The
+        // function we are calling (`ParPush`) agrees to either return
+        // `object` unchanged or NULL. This way after we restore the
+        // registers, we can examine `object` to know whether an error
+        // occurred.
+        RegisterSet saveSet(ins->safepoint()->liveRegs());
+        saveSet.maybeTake(object);
+
+        masm.PushRegsInMask(saveSet);
+        masm.reserveStack(sizeof(ParPushArgs));
+        masm.storePtr(object, Address(StackPointer, offsetof(ParPushArgs, object)));
+        masm.storeConstantOrRegister(value, Address(StackPointer,
+                                                    offsetof(ParPushArgs, value)));
+        masm.movePtr(StackPointer, CallTempReg0);
+        masm.setupUnalignedABICall(1, CallTempReg1);
+        masm.passABIArg(CallTempReg0);
+        masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParPush));
+        masm.freeStack(sizeof(ParPushArgs));
+        masm.movePtr(ReturnReg, object);
+        masm.PopRegsInMask(saveSet);
+        masm.branchTestPtr(Assembler::Zero, object, object, bail);
+        masm.jump(ool->rejoin());
+
+        //////////////////////////////////////////////////////////////
+        // If the problem is that we are trying to write an index that
+        // is not the initialized length, that would result in a
+        // sparse array, and since we don't want to think about that
+        // case right now, we just bail out.
+        masm.bind(&indexNotInitLen);
+        masm.jump(bail);
+        return true;
+    }
+
+    JS_ASSERT(false);
+    return false;
 }

 typedef bool (*ArrayPopShiftFn)(JSContext *, HandleObject, MutableHandleValue);
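The stack-passed argument block used with ParPush() implies a small C-side contract. A sketch of the helper's side, inferred from the comment and the offsetof() uses above — treat the signature and semantics as assumptions, not the actual ParallelFunctions code:

    // Inferred shape of the helper's argument block and contract:
    struct ParPushArgs {
        JSObject *object;  // array to extend
        Value value;       // element to append
    };

    // Returns |args->object| unchanged on success, or NULL on failure, so the
    // JIT caller can test a single register after the ABI call.
    JSObject *ParPush(ParPushArgs *args);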
@@ -3699,7 +4239,8 @@ CodeGenerator::link()
                      bailouts_.length(), graph.numConstants(),
                      safepointIndices_.length(), osiIndices_.length(),
                      cacheList_.length(), safepoints_.size(),
-                     graph.mir().numScripts());
+                     graph.mir().numScripts(),
+                     executionMode == ParallelExecution ? ForkJoinSlices(cx) : 0);
     SetIonScript(script, executionMode, ionScript);

     if (!ionScript)
@@ -3738,6 +4279,9 @@ CodeGenerator::link()
     JS_ASSERT(graph.mir().numScripts() > 0);
     ionScript->copyScriptEntries(graph.mir().scripts());

+    if (executionMode == ParallelExecution)
+        ionScript->zeroParallelInvalidatedScripts();
+
     linkAbsoluteLabels();

     // The correct state for prebarriers is unknown until the end of compilation,
@@ -5090,6 +5634,19 @@ CodeGenerator::visitFunctionBoundary(LFunctionBoundary *lir)
     }
 }

+bool
+CodeGenerator::visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool)
+{
+    masm.movePtr(ImmWord((void *) current->mir()->info().script()), CallTempReg0);
+    masm.setupUnalignedABICall(1, CallTempReg1);
+    masm.passABIArg(CallTempReg0);
+    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, ParallelAbort));
+
+    masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
+    masm.jump(returnLabel_);
+    return true;
+}
+
 } // namespace ion
 } // namespace js
@@ -21,15 +21,19 @@
 namespace js {
 namespace ion {

+class OutOfLineNewParallelArray;
 class OutOfLineTestObject;
 class OutOfLineNewArray;
 class OutOfLineNewObject;
 class CheckOverRecursedFailure;
+class ParCheckOverRecursedFailure;
+class OutOfLineParCheckInterrupt;
 class OutOfLineUnboxDouble;
 class OutOfLineCache;
 class OutOfLineStoreElementHole;
 class OutOfLineTypeOfV;
 class OutOfLineLoadTypedArray;
+class OutOfLineParNewGCThing;

 class CodeGenerator : public CodeGeneratorSpecific
 {
@@ -72,6 +76,7 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitRegExpTest(LRegExpTest *lir);
     bool visitLambda(LLambda *lir);
     bool visitLambdaForSingleton(LLambdaForSingleton *lir);
+    bool visitParLambda(LParLambda *lir);
     bool visitPointer(LPointer *lir);
     bool visitSlots(LSlots *lir);
     bool visitStoreSlotV(LStoreSlotV *store);
@@ -90,6 +95,7 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitApplyArgsGeneric(LApplyArgsGeneric *apply);
     bool visitDoubleToInt32(LDoubleToInt32 *lir);
     bool visitNewSlots(LNewSlots *lir);
+    bool visitOutOfLineNewParallelArray(OutOfLineNewParallelArray *ool);
     bool visitNewArrayCallVM(LNewArray *lir);
     bool visitNewArray(LNewArray *lir);
     bool visitOutOfLineNewArray(OutOfLineNewArray *ool);
@@ -98,7 +104,11 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitOutOfLineNewObject(OutOfLineNewObject *ool);
     bool visitNewDeclEnvObject(LNewDeclEnvObject *lir);
     bool visitNewCallObject(LNewCallObject *lir);
+    bool visitParNewCallObject(LParNewCallObject *lir);
     bool visitNewStringObject(LNewStringObject *lir);
+    bool visitParNew(LParNew *lir);
+    bool visitParNewDenseArray(LParNewDenseArray *lir);
+    bool visitParBailout(LParBailout *lir);
     bool visitInitProp(LInitProp *lir);
     bool visitCreateThis(LCreateThis *lir);
     bool visitCreateThisWithProto(LCreateThisWithProto *lir);
@@ -132,6 +142,7 @@ class CodeGenerator : public CodeGeneratorSpecific
                               Register output, Register temp);
     bool visitCompareS(LCompareS *lir);
     bool visitCompareStrictS(LCompareStrictS *lir);
+    bool visitParCompareS(LParCompareS *lir);
     bool visitCompareVM(LCompareVM *lir);
     bool visitIsNullOrLikeUndefined(LIsNullOrLikeUndefined *lir);
     bool visitIsNullOrLikeUndefinedAndBranch(LIsNullOrLikeUndefinedAndBranch *lir);
@@ -141,6 +152,9 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitCharCodeAt(LCharCodeAt *lir);
     bool visitFromCharCode(LFromCharCode *lir);
     bool visitFunctionEnvironment(LFunctionEnvironment *lir);
+    bool visitParSlice(LParSlice *lir);
+    bool visitParWriteGuard(LParWriteGuard *lir);
+    bool visitParDump(LParDump *lir);
     bool visitCallGetProperty(LCallGetProperty *lir);
     bool visitCallGetElement(LCallGetElement *lir);
     bool visitCallSetElement(LCallSetElement *lir);
@@ -196,6 +210,12 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitCheckOverRecursed(LCheckOverRecursed *lir);
     bool visitCheckOverRecursedFailure(CheckOverRecursedFailure *ool);

+    bool visitParCheckOverRecursed(LParCheckOverRecursed *lir);
+    bool visitParCheckOverRecursedFailure(ParCheckOverRecursedFailure *ool);
+
+    bool visitParCheckInterrupt(LParCheckInterrupt *lir);
+    bool visitOutOfLineParCheckInterrupt(OutOfLineParCheckInterrupt *ool);
+
     bool visitUnboxDouble(LUnboxDouble *lir);
     bool visitOutOfLineUnboxDouble(OutOfLineUnboxDouble *ool);
     bool visitOutOfLineStoreElementHole(OutOfLineStoreElementHole *ool);
@@ -207,6 +227,10 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitOutOfLineGetNameCache(OutOfLineCache *ool);
     bool visitOutOfLineCallsiteCloneCache(OutOfLineCache *ool);

+    bool visitOutOfLineParNewGCThing(OutOfLineParNewGCThing *ool);
+
+    bool visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool);
+
     bool visitGetPropertyCacheV(LGetPropertyCacheV *ins) {
         return visitCache(ins);
     }
@@ -236,9 +260,23 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitCache(LInstruction *load);
     bool visitCallSetProperty(LInstruction *ins);

+    bool checkForParallelBailout();
+
     ConstantOrRegister getSetPropertyValue(LInstruction *ins);
     bool generateBranchV(const ValueOperand &value, Label *ifTrue, Label *ifFalse, FloatRegister fr);

+    bool emitParAllocateGCThing(const Register &objReg,
+                                const Register &threadContextReg,
+                                const Register &tempReg1,
+                                const Register &tempReg2,
+                                JSObject *templateObj);
+
+    bool emitParCallToUncompiledScript(Register calleeReg);
+
+    void emitLambdaInit(const Register &resultReg,
+                        const Register &scopeChainReg,
+                        JSFunction *fun);
+
     IonScriptCounts *maybeCreateScriptCounts();

     // Test whether value is truthy or not and jump to the corresponding label.
@@ -17,6 +17,8 @@
 #include "EdgeCaseAnalysis.h"
 #include "RangeAnalysis.h"
 #include "LinearScan.h"
+#include "vm/ParallelDo.h"
+#include "ParallelArrayAnalysis.h"
 #include "jscompartment.h"
 #include "vm/ThreadPool.h"
 #include "vm/ForkJoin.h"
@@ -470,6 +472,7 @@ IonScript::IonScript()
     safepointsSize_(0),
     scriptList_(0),
     scriptEntries_(0),
+    parallelInvalidatedScriptList_(0),
     refcount_(0),
     recompileInfo_(),
     slowCallCount(0)
@@ -482,7 +485,7 @@ IonScript *
 IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
                size_t bailoutEntries, size_t constants, size_t safepointIndices,
                size_t osiIndices, size_t cacheEntries, size_t safepointsSize,
-               size_t scriptEntries)
+               size_t scriptEntries, size_t parallelInvalidatedScriptEntries)
 {
     if (snapshotsSize >= MAX_BUFFER_SIZE ||
         (bailoutEntries >= MAX_BUFFER_SIZE / sizeof(uint32_t)))
@@ -502,6 +505,8 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
     size_t paddedCacheEntriesSize = AlignBytes(cacheEntries * sizeof(IonCache), DataAlignment);
     size_t paddedSafepointSize = AlignBytes(safepointsSize, DataAlignment);
     size_t paddedScriptSize = AlignBytes(scriptEntries * sizeof(RawScript), DataAlignment);
+    size_t paddedParallelInvalidatedScriptSize =
+        AlignBytes(parallelInvalidatedScriptEntries * sizeof(RawScript), DataAlignment);
     size_t bytes = paddedSnapshotsSize +
                    paddedBailoutSize +
                    paddedConstantsSize +
@@ -509,7 +514,8 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
                    paddedOsiIndicesSize +
                    paddedCacheEntriesSize +
                    paddedSafepointSize +
-                   paddedScriptSize;
+                   paddedScriptSize +
+                   paddedParallelInvalidatedScriptSize;
     uint8_t *buffer = (uint8_t *)cx->malloc_(sizeof(IonScript) + bytes);
     if (!buffer)
         return NULL;
@@ -551,6 +557,10 @@ IonScript::New(JSContext *cx, uint32_t frameSlots, uint32_t frameSize, size_t snapshotsSize,
     script->scriptEntries_ = scriptEntries;
     offsetCursor += paddedScriptSize;

+    script->parallelInvalidatedScriptList_ = offsetCursor;
+    script->parallelInvalidatedScriptEntries_ = parallelInvalidatedScriptEntries;
+    offsetCursor += parallelInvalidatedScriptEntries;
+
     script->frameSlots_ = frameSlots;
     script->frameSize_ = frameSize;
@@ -606,6 +616,13 @@ IonScript::copyScriptEntries(JSScript **scripts)
         scriptList()[i] = scripts[i];
 }

+void
+IonScript::zeroParallelInvalidatedScripts()
+{
+    memset(parallelInvalidatedScriptList(), 0,
+           parallelInvalidatedScriptEntries_ * sizeof(JSScript *));
+}
+
 void
 IonScript::copySafepointIndices(const SafepointIndex *si, MacroAssembler &masm)
 {
@@ -772,8 +789,8 @@ ion::ToggleBarriers(JSCompartment *comp, bool needs)
 namespace js {
 namespace ion {

-CodeGenerator *
-CompileBackEnd(MIRGenerator *mir)
+bool
+OptimizeMIR(MIRGenerator *mir)
 {
     IonSpewPass("BuildSSA");
     // Note: don't call AssertGraphCoherency before SplitCriticalEdges,
@@ -782,146 +799,146 @@ CompileBackEnd(MIRGenerator *mir)
     MIRGraph &graph = mir->graph();

     if (mir->shouldCancel("Start"))
-        return NULL;
+        return false;

     if (!SplitCriticalEdges(graph))
-        return NULL;
+        return false;
     IonSpewPass("Split Critical Edges");
     AssertGraphCoherency(graph);

     if (mir->shouldCancel("Split Critical Edges"))
-        return NULL;
+        return false;

     if (!RenumberBlocks(graph))
-        return NULL;
+        return false;
     IonSpewPass("Renumber Blocks");
     AssertGraphCoherency(graph);

     if (mir->shouldCancel("Renumber Blocks"))
-        return NULL;
+        return false;

     if (!BuildDominatorTree(graph))
-        return NULL;
+        return false;
     // No spew: graph not changed.

     if (mir->shouldCancel("Dominator Tree"))
-        return NULL;
+        return false;

     // This must occur before any code elimination.
     if (!EliminatePhis(mir, graph, AggressiveObservability))
-        return NULL;
+        return false;
     IonSpewPass("Eliminate phis");
     AssertGraphCoherency(graph);

     if (mir->shouldCancel("Eliminate phis"))
-        return NULL;
+        return false;

     if (!BuildPhiReverseMapping(graph))
-        return NULL;
+        return false;
     AssertExtendedGraphCoherency(graph);
     // No spew: graph not changed.

     if (mir->shouldCancel("Phi reverse mapping"))
-        return NULL;
+        return false;

     // This pass also removes copies.
     if (!ApplyTypeInformation(mir, graph))
-        return NULL;
+        return false;
     IonSpewPass("Apply types");
     AssertExtendedGraphCoherency(graph);

     if (mir->shouldCancel("Apply types"))
-        return NULL;
+        return false;

     // Alias analysis is required for LICM and GVN so that we don't move
     // loads across stores.
     if (js_IonOptions.licm || js_IonOptions.gvn) {
         AliasAnalysis analysis(mir, graph);
         if (!analysis.analyze())
-            return NULL;
+            return false;
         IonSpewPass("Alias analysis");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("Alias analysis"))
-            return NULL;
+            return false;

         // Eliminating dead resume point operands requires basic block
         // instructions to be numbered. Reuse the numbering computed during
         // alias analysis.
         if (!EliminateDeadResumePointOperands(mir, graph))
-            return NULL;
+            return false;

         if (mir->shouldCancel("Eliminate dead resume point operands"))
-            return NULL;
+            return false;
     }

     if (js_IonOptions.gvn) {
         ValueNumberer gvn(mir, graph, js_IonOptions.gvnIsOptimistic);
         if (!gvn.analyze())
-            return NULL;
+            return false;
         IonSpewPass("GVN");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("GVN"))
-            return NULL;
+            return false;
     }

     if (js_IonOptions.uce) {
         UnreachableCodeElimination uce(mir, graph);
         if (!uce.analyze())
-            return NULL;
+            return false;
         IonSpewPass("UCE");
         AssertExtendedGraphCoherency(graph);
     }

     if (mir->shouldCancel("UCE"))
-        return NULL;
+        return false;

     if (js_IonOptions.licm) {
         LICM licm(mir, graph);
         if (!licm.analyze())
-            return NULL;
+            return false;
         IonSpewPass("LICM");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("LICM"))
-            return NULL;
+            return false;
     }

     if (js_IonOptions.rangeAnalysis) {
         RangeAnalysis r(graph);
         if (!r.addBetaNobes())
-            return NULL;
+            return false;
         IonSpewPass("Beta");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("RA Beta"))
-            return NULL;
+            return false;

         if (!r.analyze())
-            return NULL;
+            return false;
         IonSpewPass("Range Analysis");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("Range Analysis"))
-            return NULL;
+            return false;

         if (!r.removeBetaNobes())
-            return NULL;
+            return false;
         IonSpewPass("De-Beta");
         AssertExtendedGraphCoherency(graph);

         if (mir->shouldCancel("RA De-Beta"))
-            return NULL;
+            return false;
     }

     if (!EliminateDeadCode(mir, graph))
-        return NULL;
+        return false;
     IonSpewPass("DCE");
     AssertExtendedGraphCoherency(graph);

     if (mir->shouldCancel("DCE"))
-        return NULL;
+        return false;

     // Passes after this point must not move instructions; these analyses
     // depend on knowing the final order in which instructions will execute.
@@ -929,12 +946,12 @@ CompileBackEnd(MIRGenerator *mir)
     if (js_IonOptions.edgeCaseAnalysis) {
         EdgeCaseAnalysis edgeCaseAnalysis(mir, graph);
         if (!edgeCaseAnalysis.analyzeLate())
-            return NULL;
+            return false;
         IonSpewPass("Edge Case Analysis (Late)");
         AssertGraphCoherency(graph);

         if (mir->shouldCancel("Edge Case Analysis (Late)"))
-            return NULL;
+            return false;
     }

     // Note: check elimination has to run after all other passes that move
@@ -942,12 +959,17 @@ CompileBackEnd(MIRGenerator *mir)
     // motion after this pass could incorrectly move a load or store before its
     // bounds check.
     if (!EliminateRedundantChecks(graph))
-        return NULL;
+        return false;
     IonSpewPass("Bounds Check Elimination");
     AssertGraphCoherency(graph);

     if (mir->shouldCancel("Bounds Check Elimination"))
-        return NULL;
+        return true;
+}
+
+CodeGenerator *
+GenerateLIR(MIRGenerator *mir)
+{
+    MIRGraph &graph = mir->graph();

     LIRGraph *lir = mir->temp().lifoAlloc()->new_<LIRGraph>(&graph);
     if (!lir)
@@ -1028,12 +1050,21 @@ CompileBackEnd(MIRGenerator *mir)
     return codegen;
 }

+CodeGenerator *
+CompileBackEnd(MIRGenerator *mir)
+{
+    if (!OptimizeMIR(mir))
+        return NULL;
+    return GenerateLIR(mir);
+}
+
 class SequentialCompileContext {
   public:
     ExecutionMode executionMode() {
         return SequentialExecution;
     }

+    MethodStatus checkScriptSize(JSContext *cx, UnrootedScript script);
     AbortReason compile(IonBuilder *builder, MIRGraph *graph,
                         ScopedJSDeletePtr<LifoAlloc> &autoDelete);
 };
@@ -1302,8 +1333,8 @@ CheckScript(UnrootedScript script)
     return true;
 }

-static MethodStatus
-CheckScriptSize(JSContext *cx, UnrootedScript script)
+MethodStatus
+SequentialCompileContext::checkScriptSize(JSContext *cx, UnrootedScript script)
 {
     if (!js_IonOptions.limitScriptSize)
         return Method_Compiled;
@@ -1345,8 +1376,10 @@ CheckScriptSize(JSContext *cx, UnrootedScript script)
     return Method_Compiled;
 }

+template <typename CompileContext>
 static MethodStatus
-Compile(JSContext *cx, JSScript *script, JSFunction *fun, jsbytecode *osrPc, bool constructing)
+Compile(JSContext *cx, HandleScript script, HandleFunction fun, jsbytecode *osrPc, bool constructing,
+        CompileContext &compileContext)
 {
     JS_ASSERT(ion::IsEnabled(cx));
     JS_ASSERT_IF(osrPc != NULL, (JSOp)*osrPc == JSOP_LOOPENTRY);
@@ -1361,36 +1394,39 @@ Compile(JSContext *cx, JSScript *script, JSFunction *fun, jsbytecode *osrPc, bool constructing)
         return Method_CantCompile;
     }

-    MethodStatus status = CheckScriptSize(cx, script);
+    MethodStatus status = compileContext.checkScriptSize(cx, script);
     if (status != Method_Compiled) {
         IonSpew(IonSpew_Abort, "Aborted compilation of %s:%d", script->filename, script->lineno);
         return status;
     }

-    if (script->ion) {
-        if (!script->ion->method())
+    ExecutionMode executionMode = compileContext.executionMode();
+    IonScript *scriptIon = GetIonScript(script, executionMode);
+    if (scriptIon) {
+        if (!scriptIon->method())
             return Method_CantCompile;
         return Method_Compiled;
     }

-    if (cx->methodJitEnabled) {
-        // If JM is enabled we use getUseCount instead of incUseCount to avoid
-        // bumping the use count twice.
-        if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
-            return Method_Skipped;
-    } else {
-        if (script->incUseCount() < js_IonOptions.usesBeforeCompileNoJaeger)
-            return Method_Skipped;
-    }
+    if (executionMode == SequentialExecution) {
+        if (cx->methodJitEnabled) {
+            // If JM is enabled we use getUseCount instead of incUseCount to avoid
+            // bumping the use count twice.
+            if (script->getUseCount() < js_IonOptions.usesBeforeCompile)
+                return Method_Skipped;
+        } else {
+            if (script->incUseCount() < js_IonOptions.usesBeforeCompileNoJaeger)
+                return Method_Skipped;
+        }
+    }

     AbortReason reason = IonCompile(cx, script, fun, osrPc, constructing, compileContext);
     if (reason == AbortReason_Disable)
         return Method_CantCompile;

     // Compilation succeeded or we invalidated right away or an inlining/alloc abort
-    return script->hasIonScript() ? Method_Compiled : Method_Skipped;
+    return HasIonScript(script, executionMode) ? Method_Compiled : Method_Skipped;
 }

 } // namespace ion
@ -1428,15 +1464,17 @@ ion::CanEnterAtBranch(JSContext *cx, JSScript *script, AbstractFramePtr fp,
|
||||
}
|
||||
|
||||
// Attempt compilation. Returns Method_Compiled if already compiled.
|
||||
JSFunction *fun = fp.isFunctionFrame() ? fp.fun() : NULL;
|
||||
MethodStatus status = Compile(cx, script, fun, pc, isConstructing);
|
||||
RootedFunction fun(cx, fp.isFunctionFrame() ? fp.fun() : NULL);
|
||||
SequentialCompileContext compileContext;
|
||||
RootedScript rscript(cx, script);
|
||||
MethodStatus status = Compile(cx, rscript, fun, pc, isConstructing, compileContext);
|
||||
if (status != Method_Compiled) {
|
||||
if (status == Method_CantCompile)
|
||||
ForbidCompilation(cx, script);
|
||||
return status;
|
||||
}
|
||||
|
||||
if (script->ion->osrPc() != pc)
|
||||
if (script->ion && script->ion->osrPc() != pc)
|
||||
return Method_Skipped;
|
||||
|
||||
return Method_Compiled;
|
||||
@ -1480,8 +1518,10 @@ ion::CanEnter(JSContext *cx, JSScript *script, AbstractFramePtr fp,
|
||||
}
|
||||
|
||||
// Attempt compilation. Returns Method_Compiled if already compiled.
|
||||
JSFunction *fun = fp.isFunctionFrame() ? fp.fun() : NULL;
|
||||
MethodStatus status = Compile(cx, script, fun, NULL, isConstructing);
|
||||
RootedFunction fun(cx, fp.isFunctionFrame() ? fp.fun() : NULL);
|
||||
SequentialCompileContext compileContext;
|
||||
RootedScript rscript(cx, script);
|
||||
MethodStatus status = Compile(cx, rscript, fun, NULL, isConstructing, compileContext);
|
||||
if (status != Method_Compiled) {
|
||||
if (status == Method_CantCompile)
|
||||
ForbidCompilation(cx, script);
|
||||
@ -1491,6 +1531,135 @@ ion::CanEnter(JSContext *cx, JSScript *script, AbstractFramePtr fp,
|
||||
return Method_Compiled;
|
||||
}
|
||||
|
||||
MethodStatus
|
||||
ParallelCompileContext::checkScriptSize(JSContext *cx, UnrootedScript script)
|
||||
{
|
||||
if (!js_IonOptions.limitScriptSize)
|
||||
return Method_Compiled;
|
||||
|
||||
// When compiling for parallel execution we don't have off-thread
|
||||
// compilation. We also up the max script size of the kernels.
|
||||
static const uint32_t MAX_SCRIPT_SIZE = 5000;
|
||||
static const uint32_t MAX_LOCALS_AND_ARGS = 256;
|
||||
|
||||
if (script->length > MAX_SCRIPT_SIZE) {
|
||||
IonSpew(IonSpew_Abort, "Script too large (%u bytes)", script->length);
|
||||
return Method_CantCompile;
|
||||
}
|
||||
|
||||
uint32_t numLocalsAndArgs = analyze::TotalSlots(script);
|
||||
if (numLocalsAndArgs > MAX_LOCALS_AND_ARGS) {
|
||||
IonSpew(IonSpew_Abort, "Too many locals and arguments (%u)", numLocalsAndArgs);
|
||||
return Method_CantCompile;
|
||||
}
|
||||
|
||||
return Method_Compiled;
|
||||
}
|
||||
|
||||
MethodStatus
|
||||
ParallelCompileContext::compileTransitively()
|
||||
{
|
||||
using parallel::SpewBeginCompile;
|
||||
using parallel::SpewEndCompile;
|
||||
|
||||
if (worklist_.empty())
|
||||
return Method_Skipped;
|
||||
|
||||
RootedFunction fun(cx_);
|
||||
RootedScript script(cx_);
|
||||
while (!worklist_.empty()) {
|
||||
fun = worklist_.back()->toFunction();
|
||||
script = fun->nonLazyScript();
|
||||
worklist_.popBack();
|
||||
|
||||
SpewBeginCompile(fun);
|
||||
|
||||
// If we had invalidations last time the parallel script run, add the
|
||||
// invalidated scripts to the worklist.
|
||||
if (script->hasParallelIonScript()) {
|
||||
IonScript *ion = script->parallelIonScript();
|
||||
JS_ASSERT(ion->parallelInvalidatedScriptEntries() > 0);
|
||||
|
||||
RootedFunction invalidFun(cx_);
|
||||
for (uint32_t i = 0; i < ion->parallelInvalidatedScriptEntries(); i++) {
|
||||
if (JSScript *invalid = ion->getAndZeroParallelInvalidatedScript(i)) {
|
||||
invalidFun = invalid->function();
|
||||
parallel::Spew(parallel::SpewCompile,
|
||||
"Adding previously invalidated function %p:%s:%u",
|
||||
fun.get(), invalid->filename, invalid->lineno);
|
||||
appendToWorklist(invalidFun);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Attempt compilation. Returns Method_Compiled if already compiled.
|
||||
MethodStatus status = Compile(cx_, script, fun, NULL, false, *this);
|
||||
if (status != Method_Compiled) {
|
||||
if (status == Method_CantCompile)
|
||||
ForbidCompilation(cx_, script, ParallelExecution);
|
||||
return SpewEndCompile(status);
|
||||
}
|
||||
|
||||
// This can GC, so afterward, script->parallelIon is not guaranteed to be valid.
|
||||
if (!cx_->compartment->ionCompartment()->enterJIT())
|
||||
return SpewEndCompile(Method_Error);
|
||||
|
||||
// Subtle: it is possible for GC to occur during compilation of
|
||||
// one of the invoked functions, which would cause the earlier
|
||||
// functions (such as the kernel itself) to be collected. In this
|
||||
// event, we give up and fallback to sequential for now.
|
||||
if (!script->hasParallelIonScript()) {
|
||||
parallel::Spew(parallel::SpewCompile,
|
||||
"Function %p:%s:%u was garbage-collected or invalidated",
|
||||
fun.get(), script->filename, script->lineno);
|
||||
return SpewEndCompile(Method_Skipped);
|
||||
}
|
||||
|
||||
SpewEndCompile(Method_Compiled);
|
||||
}
|
||||
|
||||
return Method_Compiled;
|
||||
}
|
||||
|
||||
AbortReason
|
||||
ParallelCompileContext::compile(IonBuilder *builder,
|
||||
MIRGraph *graph,
|
||||
ScopedJSDeletePtr<LifoAlloc> &autoDelete)
|
||||
{
|
||||
JS_ASSERT(!builder->script()->parallelIon);
|
||||
|
||||
RootedScript builderScript(cx_, builder->script());
|
||||
IonSpewNewFunction(graph, builderScript);
|
||||
|
||||
if (!builder->build())
|
||||
return builder->abortReason();
|
||||
builder->clearForBackEnd();
|
||||
|
||||
// For the time being, we do not enable parallel compilation.
|
||||
|
||||
if (!OptimizeMIR(builder)) {
|
||||
IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
|
||||
return AbortReason_Disable;
|
||||
}
|
||||
|
||||
if (!analyzeAndGrowWorklist(builder, *graph)) {
|
||||
return AbortReason_Disable;
|
||||
}
|
||||
|
||||
CodeGenerator *codegen = GenerateLIR(builder);
|
||||
if (!codegen) {
|
||||
IonSpew(IonSpew_Abort, "Failed during back-end compilation.");
|
||||
return AbortReason_Disable;
|
||||
}
|
||||
|
||||
bool success = codegen->link();
|
||||
js_delete(codegen);
|
||||
|
||||
IonSpewEndFunction();
|
||||
|
||||
return success ? AbortReason_NoAbort : AbortReason_Disable;
|
||||
}
|
||||
|
||||
MethodStatus
|
||||
ion::CanEnterUsingFastInvoke(JSContext *cx, HandleScript script, uint32_t numActualArgs)
|
||||
{
|
||||
@ -1953,38 +2122,60 @@ ion::Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool
|
||||
}
|
||||
|
||||
bool
|
||||
ion::Invalidate(JSContext *cx, UnrootedScript script, bool resetUses)
|
||||
ion::Invalidate(JSContext *cx, UnrootedScript script, ExecutionMode mode, bool resetUses)
|
||||
{
|
||||
AutoAssertNoGC nogc;
|
||||
JS_ASSERT(script->hasIonScript());
|
||||
|
||||
Vector<types::RecompileInfo> scripts(cx);
|
||||
if (!scripts.append(script->ionScript()->recompileInfo()))
|
||||
return false;
|
||||
|
||||
switch (mode) {
|
||||
case SequentialExecution:
|
||||
JS_ASSERT(script->hasIonScript());
|
||||
if (!scripts.append(script->ionScript()->recompileInfo()))
|
||||
return false;
|
||||
break;
|
||||
case ParallelExecution:
|
||||
JS_ASSERT(script->hasParallelIonScript());
|
||||
if (!scripts.append(script->parallelIonScript()->recompileInfo()))
|
||||
return false;
|
||||
break;
|
||||
}
|
||||
|
||||
Invalidate(cx, scripts, resetUses);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
ion::Invalidate(JSContext *cx, UnrootedScript script, bool resetUses)
|
||||
{
|
||||
return Invalidate(cx, script, SequentialExecution, resetUses);
|
||||
}
|
||||
|
||||
static void
|
||||
FinishInvalidationOf(FreeOp *fop, UnrootedScript script, IonScript **ionField)
|
||||
{
|
||||
// If this script has Ion code on the stack, invalidation() will return
|
||||
// true. In this case we have to wait until destroying it.
|
||||
if (!(*ionField)->invalidated()) {
|
||||
types::TypeCompartment &types = script->compartment()->types;
|
||||
(*ionField)->recompileInfo().compilerOutput(types)->invalidate();
|
||||
|
||||
ion::IonScript::Destroy(fop, *ionField);
|
||||
}
|
||||
|
||||
// In all cases, NULL out script->ion to avoid re-entry.
|
||||
*ionField = NULL;
|
||||
}
|
||||
|
||||
void
|
||||
ion::FinishInvalidation(FreeOp *fop, UnrootedScript script)
|
||||
{
|
||||
if (!script->hasIonScript())
|
||||
return;
|
||||
if (script->hasIonScript())
|
||||
FinishInvalidationOf(fop, script, &script->ion);
|
||||
|
||||
/*
|
||||
* If this script has Ion code on the stack, invalidation() will return
|
||||
* true. In this case we have to wait until destroying it.
|
||||
*/
|
||||
if (!script->ion->invalidated()) {
|
||||
types::TypeCompartment &types = script->compartment()->types;
|
||||
script->ion->recompileInfo().compilerOutput(types)->invalidate();
|
||||
|
||||
ion::IonScript::Destroy(fop, script->ion);
|
||||
}
|
||||
|
||||
/* In all cases, NULL out script->ion to avoid re-entry. */
|
||||
script->ion = NULL;
|
||||
if (script->hasParallelIonScript())
|
||||
FinishInvalidationOf(fop, script, &script->parallelIon);
|
||||
}
|
||||
|
||||
void
|
||||
@ -2002,22 +2193,43 @@ ion::MarkShapeFromIon(JSRuntime *rt, Shape **shapep)
|
||||
void
|
||||
ion::ForbidCompilation(JSContext *cx, UnrootedScript script)
|
||||
{
|
||||
IonSpew(IonSpew_Abort, "Disabling Ion compilation of script %s:%d",
|
||||
script->filename, script->lineno);
|
||||
ForbidCompilation(cx, script, SequentialExecution);
|
||||
}
|
||||
|
||||
void
|
||||
ion::ForbidCompilation(JSContext *cx, UnrootedScript script, ExecutionMode mode)
|
||||
{
|
||||
IonSpew(IonSpew_Abort, "Disabling Ion mode %d compilation of script %s:%d",
|
||||
mode, script->filename, script->lineno);
|
||||
|
||||
CancelOffThreadIonCompile(cx->compartment, script);
|
||||
|
||||
if (script->hasIonScript()) {
|
||||
// It is only safe to modify script->ion if the script is not currently
|
||||
// running, because IonFrameIterator needs to tell what ionScript to
|
||||
// use (either the one on the JSScript, or the one hidden in the
|
||||
// breadcrumbs Invalidation() leaves). Therefore, if invalidation
|
||||
// fails, we cannot disable the script.
|
||||
if (!Invalidate(cx, script, false))
|
||||
return;
|
||||
switch (mode) {
|
||||
case SequentialExecution:
|
||||
if (script->hasIonScript()) {
|
||||
// It is only safe to modify script->ion if the script is not currently
|
||||
// running, because IonFrameIterator needs to tell what ionScript to
|
||||
// use (either the one on the JSScript, or the one hidden in the
|
||||
// breadcrumbs Invalidation() leaves). Therefore, if invalidation
|
||||
// fails, we cannot disable the script.
|
||||
if (!Invalidate(cx, script, mode, false))
|
||||
return;
|
||||
}
|
||||
|
||||
script->ion = ION_DISABLED_SCRIPT;
|
||||
return;
|
||||
|
||||
case ParallelExecution:
|
||||
if (script->hasParallelIonScript()) {
|
||||
if (!Invalidate(cx, script, mode, false))
|
||||
return;
|
||||
}
|
||||
|
||||
script->parallelIon = ION_DISABLED_SCRIPT;
|
||||
return;
|
||||
}
|
||||
|
||||
script->ion = ION_DISABLED_SCRIPT;
|
||||
JS_NOT_REACHED("No such execution mode");
|
||||
}
|
||||
|
||||
uint32_t
|
||||
@ -2101,7 +2313,7 @@ ion::PurgeCaches(UnrootedScript script, JSCompartment *c) {
|
||||
script->ion->purgeCaches(c);
|
||||
|
||||
if (script->hasParallelIonScript())
|
||||
script->ion->purgeCaches(c);
|
||||
script->parallelIon->purgeCaches(c);
|
||||
}
|
||||
|
||||
size_t
|
||||
|
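
Taken together, the hunks above split the old CompileBackEnd() into OptimizeMIR() plus GenerateLIR() and add a worklist-driven parallel compilation driver. A self-contained condensation of the compileTransitively() loop, with simplified stand-in types rather than the real SpiderMonkey classes, may make the control flow easier to follow:

#include <vector>

// Simplified stand-ins for the real SpiderMonkey types.
enum MethodStatus { Method_Compiled, Method_Skipped };

struct Script {
    std::vector<Script *> invalidatedLastRun; // callees invalidated on the last bailout
    bool compiledForParallel = false;
};

// Hypothetical compile step; the real code calls Compile(..., *this).
static MethodStatus compileOne(Script *s)
{
    s->compiledForParallel = true;
    return Method_Compiled;
}

MethodStatus compileTransitively(std::vector<Script *> worklist)
{
    while (!worklist.empty()) {
        Script *s = worklist.back();
        worklist.pop_back();

        // Requeue scripts invalidated the last time `s` ran in parallel,
        // mirroring the getAndZeroParallelInvalidatedScript() loop above.
        for (Script *callee : s->invalidatedLastRun)
            worklist.push_back(callee);
        s->invalidatedLastRun.clear();

        if (compileOne(s) != Method_Compiled)
            return Method_Skipped;
    }
    return Method_Compiled;
}
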
@ -11,6 +11,7 @@
#include "jscntxt.h"
#include "jscompartment.h"
#include "IonCode.h"
#include "CompileInfo.h"
#include "jsinfer.h"
#include "jsinterp.h"

@ -18,6 +19,7 @@ namespace js {
namespace ion {

class TempAllocator;
class ParallelCompileContext; // in ParallelArrayAnalysis.h

// Possible register allocators which may be used.
enum IonRegisterAllocator {
@ -173,6 +175,11 @@ struct IonOptions
// Default: 5
uint32_t slowCallIncUseCount;

// How many uses of a parallel kernel before we attempt compilation.
//
// Default: 1
uint32_t usesBeforeCompileParallel;

void setEagerCompilation() {
eagerCompilation = true;
usesBeforeCompile = usesBeforeCompileNoJaeger = 0;
@ -209,7 +216,8 @@ struct IonOptions
inlineUseCountRatio(128),
eagerCompilation(false),
slowCallLimit(512),
slowCallIncUseCount(5)
slowCallIncUseCount(5),
usesBeforeCompileParallel(1)
{
}
};
@ -301,6 +309,7 @@ IonExecStatus FastInvoke(JSContext *cx, HandleFunction fun, CallArgsList &args);
void Invalidate(types::TypeCompartment &types, FreeOp *fop,
const Vector<types::RecompileInfo> &invalid, bool resetUses = true);
void Invalidate(JSContext *cx, const Vector<types::RecompileInfo> &invalid, bool resetUses = true);
bool Invalidate(JSContext *cx, UnrootedScript script, ExecutionMode mode, bool resetUses = true);
bool Invalidate(JSContext *cx, UnrootedScript script, bool resetUses = true);

void MarkValueFromIon(JSRuntime *rt, Value *vp);
@ -323,6 +332,7 @@ static inline bool IsEnabled(JSContext *cx)
}

void ForbidCompilation(JSContext *cx, UnrootedScript script);
void ForbidCompilation(JSContext *cx, UnrootedScript script, ExecutionMode mode);
uint32_t UsesBeforeIonRecompile(UnrootedScript script, jsbytecode *pc);

void PurgeCaches(UnrootedScript script, JSCompartment *c);
@ -4507,6 +4507,16 @@ IonBuilder::jsop_initprop(HandlePropertyName name)
needsBarrier = false;
}

// In parallel execution, we never require write barriers. See
// forkjoin.cpp for more information.
switch (info().executionMode()) {
case SequentialExecution:
break;
case ParallelExecution:
needsBarrier = false;
break;
}

if (templateObject->isFixedSlot(shape->slot())) {
MStoreFixedSlot *store = MStoreFixedSlot::New(obj, shape->slot(), value);
if (needsBarrier)
@ -5470,8 +5480,8 @@ IonBuilder::jsop_getelem_dense()
return pushTypeBarrier(load, types, barrier);
}

static MInstruction *
GetTypedArrayLength(MDefinition *obj)
MInstruction *
IonBuilder::getTypedArrayLength(MDefinition *obj)
{
if (obj->isConstant()) {
JSObject *array = &obj->toConstant()->value().toObject();
@ -5482,8 +5492,8 @@ GetTypedArrayLength(MDefinition *obj)
return MTypedArrayLength::New(obj);
}

static MInstruction *
GetTypedArrayElements(MDefinition *obj)
MInstruction *
IonBuilder::getTypedArrayElements(MDefinition *obj)
{
if (obj->isConstant()) {
JSObject *array = &obj->toConstant()->value().toObject();
@ -5546,14 +5556,14 @@ IonBuilder::jsop_getelem_typed(int arrayType)
}

// Get the length.
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);

// Bounds check.
id = addBoundsCheck(id, length);

// Get the elements vector.
MInstruction *elements = GetTypedArrayElements(obj);
MInstruction *elements = getTypedArrayElements(obj);
current->add(elements);

// Load the element.
@ -5723,14 +5733,14 @@ IonBuilder::jsop_setelem_typed(int arrayType)
id = idInt32;

// Get the length.
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);

// Bounds check.
id = addBoundsCheck(id, length);

// Get the elements vector.
MInstruction *elements = GetTypedArrayElements(obj);
MInstruction *elements = getTypedArrayElements(obj);
current->add(elements);

// Clamp value to [0, 255] for Uint8ClampedArray.
@ -5794,7 +5804,7 @@ IonBuilder::jsop_length_fastPath()

if (sig.inTypes->getTypedArrayType() != TypedArray::TYPE_MAX) {
MDefinition *obj = current->pop();
MInstruction *length = GetTypedArrayLength(obj);
MInstruction *length = getTypedArrayLength(obj);
current->add(length);
current->push(length);
return true;
|
||||
types::StackTypeSet *barrier, types::StackTypeSet *types,
|
||||
TypeOracle::Unary unary, TypeOracle::UnaryTypes unaryTypes);
|
||||
|
||||
// Typed array helpers.
|
||||
MInstruction *getTypedArrayLength(MDefinition *obj);
|
||||
MInstruction *getTypedArrayElements(MDefinition *obj);
|
||||
|
||||
bool jsop_add(MDefinition *left, MDefinition *right);
|
||||
bool jsop_bitnot();
|
||||
bool jsop_bitop(JSOp op);
|
||||
@ -415,6 +419,18 @@ class IonBuilder : public MIRGenerator
|
||||
// RegExp natives.
|
||||
InliningStatus inlineRegExpTest(CallInfo &callInfo);
|
||||
|
||||
// Parallel Array.
|
||||
InliningStatus inlineUnsafeSetElement(CallInfo &callInfo);
|
||||
bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
|
||||
bool inlineUnsafeSetTypedArrayElement(CallInfo &callInfo, uint32_t base, int arrayType);
|
||||
InliningStatus inlineForceSequentialOrInParallelSection(CallInfo &callInfo);
|
||||
InliningStatus inlineNewDenseArray(CallInfo &callInfo);
|
||||
InliningStatus inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo);
|
||||
InliningStatus inlineNewDenseArrayForParallelExecution(CallInfo &callInfo);
|
||||
|
||||
InliningStatus inlineThrowError(CallInfo &callInfo);
|
||||
InliningStatus inlineDump(CallInfo &callInfo);
|
||||
|
||||
InliningStatus inlineNativeCall(CallInfo &callInfo, JSNative native);
|
||||
|
||||
// Call functions
|
||||
|
@ -211,6 +211,18 @@ struct IonScript
uint32_t scriptList_;
uint32_t scriptEntries_;

// In parallel mode, list of scripts that we call that were invalidated
// last time this script bailed out. These will be recompiled (or tried to
// be) upon next parallel entry of this script.
//
// For non-parallel IonScripts, this is NULL.
//
// For parallel IonScripts, there are as many entries as there are slices,
// since for any single parallel execution, we can only get a single
// invalidation per slice.
uint32_t parallelInvalidatedScriptList_;
uint32_t parallelInvalidatedScriptEntries_;

// Number of references from invalidation records.
size_t refcount_;

@ -244,6 +256,10 @@ struct IonScript
JSScript **scriptList() const {
return (JSScript **)(reinterpret_cast<const uint8_t *>(this) + scriptList_);
}
JSScript **parallelInvalidatedScriptList() {
return (JSScript **)(reinterpret_cast<const uint8_t *>(this) +
parallelInvalidatedScriptList_);
}

private:
void trace(JSTracer *trc);
@ -255,7 +271,8 @@ struct IonScript
static IonScript *New(JSContext *cx, uint32_t frameLocals, uint32_t frameSize,
size_t snapshotsSize, size_t snapshotEntries,
size_t constants, size_t safepointIndexEntries, size_t osiIndexEntries,
size_t cacheEntries, size_t safepointsSize, size_t scriptEntries);
size_t cacheEntries, size_t safepointsSize, size_t scriptEntries,
size_t parallelInvalidatedScriptEntries);
static void Trace(JSTracer *trc, IonScript *script);
static void Destroy(FreeOp *fop, IonScript *script);

@ -339,6 +356,15 @@ struct IonScript
size_t scriptEntries() const {
return scriptEntries_;
}
size_t parallelInvalidatedScriptEntries() const {
return parallelInvalidatedScriptEntries_;
}
RawScript getAndZeroParallelInvalidatedScript(uint32_t i) {
JS_ASSERT(i < parallelInvalidatedScriptEntries_);
RawScript script = parallelInvalidatedScriptList()[i];
parallelInvalidatedScriptList()[i] = NULL;
return script;
}
size_t sizeOfIncludingThis(JSMallocSizeOfFun mallocSizeOf) const {
return mallocSizeOf(this);
}
@ -380,6 +406,7 @@ struct IonScript
void copyCacheEntries(const IonCache *caches, MacroAssembler &masm);
void copySafepoints(const SafepointWriter *writer);
void copyScriptEntries(JSScript **scripts);
void zeroParallelInvalidatedScripts();

bool invalidated() const {
return refcount_ != 0;
@ -332,6 +332,60 @@ MacroAssembler::newGCThing(const Register &result,
subPtr(Imm32(thingSize), result);
}

void
MacroAssembler::parNewGCThing(const Register &result,
const Register &threadContextReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObject,
Label *fail)
{
// Similar to ::newGCThing(), except that it allocates from a
// custom Allocator in the ForkJoinSlice*, rather than being
// hardcoded to the compartment allocator. This requires two
// temporary registers.
//
// Subtle: I wanted to reuse `result` for one of the temporaries,
// but the register allocator was assigning it to the same
// register as `threadContextReg`. Then we overwrite that
// register which messed up the OOL code.

gc::AllocKind allocKind = templateObject->getAllocKind();
uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);

// Load the allocator:
// tempReg1 = (Allocator*) forkJoinSlice->allocator
loadPtr(Address(threadContextReg, offsetof(js::ForkJoinSlice, allocator)),
tempReg1);

// Get a pointer to the relevant free list:
// tempReg1 = (FreeSpan*) &tempReg1->arenas.freeLists[(allocKind)]
uint32_t offset = (offsetof(Allocator, arenas) +
js::gc::ArenaLists::getFreeListOffset(allocKind));
addPtr(Imm32(offset), tempReg1);

// Load first item on the list
// tempReg2 = tempReg1->first
loadPtr(Address(tempReg1, offsetof(gc::FreeSpan, first)), tempReg2);

// Check whether list is empty
// if tempReg1->last <= tempReg2, fail
branchPtr(Assembler::BelowOrEqual,
Address(tempReg1, offsetof(gc::FreeSpan, last)),
tempReg2,
fail);

// If not, take first and advance pointer by thingSize bytes.
// result = tempReg2;
// tempReg2 += thingSize;
movePtr(tempReg2, result);
addPtr(Imm32(thingSize), tempReg2);

// Update `first`
// tempReg1->first = tempReg2;
storePtr(tempReg2, Address(tempReg1, offsetof(gc::FreeSpan, first)));
}

void
MacroAssembler::initGCThing(const Register &obj, JSObject *templateObject)
{
@ -378,6 +432,18 @@ MacroAssembler::initGCThing(const Register &obj, JSObject *templateObject)
}
}

void
MacroAssembler::parCheckInterruptFlags(const Register &tempReg,
Label *fail)
{
JSCompartment *compartment = GetIonContext()->compartment;

void *interrupt = (void*)&compartment->rt->interrupt;
movePtr(ImmWord(interrupt), tempReg);
load32(Address(tempReg, 0), tempReg);
branchTest32(Assembler::NonZero, tempReg, tempReg, fail);
}

void
MacroAssembler::maybeRemoveOsrFrame(Register scratch)
{
@ -18,6 +18,9 @@
#include "ion/IonCompartment.h"
#include "ion/IonInstrumentation.h"
#include "ion/TypeOracle.h"
#include "ion/ParallelFunctions.h"

#include "vm/ForkJoin.h"

#include "jstypedarray.h"
#include "jscompartment.h"
@ -489,8 +492,19 @@ class MacroAssembler : public MacroAssemblerSpecific

// Inline allocation.
void newGCThing(const Register &result, JSObject *templateObject, Label *fail);
void parNewGCThing(const Register &result,
const Register &threadContextReg,
const Register &tempReg1,
const Register &tempReg2,
JSObject *templateObject,
Label *fail);
void initGCThing(const Register &obj, JSObject *templateObject);

// Checks the flags that signal that parallel code may need to interrupt or
// abort. Branches to fail in that case.
void parCheckInterruptFlags(const Register &tempReg,
Label *fail);

// If the IonCode that created this assembler needs to transition into the VM,
// we want to store the IonCode on the stack in order to mark it during a GC.
// This is a reference to a patch location where the IonCode* will be written.
@ -236,6 +236,7 @@ ion::CheckLogging()
"  pools      Literal Pools (ARM only for now)\n"
"  cacheflush Instruction Cache flushes (ARM only for now)\n"
"  logs       C1 and JSON visualization logging\n"
"  trace      Generate calls to js::ion::Trace() for effectful instructions\n"
"  all        Everything\n"
"\n"
);
@ -278,6 +279,8 @@ ion::CheckLogging()
EnableChannel(IonSpew_CacheFlush);
if (ContainsFlag(env, "logs"))
EnableIonDebugLogging();
if (ContainsFlag(env, "trace"))
EnableChannel(IonSpew_Trace);
if (ContainsFlag(env, "all"))
LoggingBits = uint32_t(-1);
@ -52,6 +52,8 @@ namespace ion {
_(Safepoints) \
/* Debug info about Pools*/ \
_(Pools) \
/* Calls to js::ion::Trace() */ \
_(Trace) \
/* Debug info about the I$ */ \
_(CacheFlush)
@ -64,11 +64,12 @@ enum MIRType
MIRType_Object,
MIRType_Magic,
MIRType_Value,
MIRType_None,          // Invalid, used as a placeholder.
MIRType_Slots,         // A slots vector
MIRType_Elements,      // An elements vector
MIRType_StackFrame,    // StackFrame pointer for OSR.
MIRType_Shape          // A Shape pointer.
MIRType_None,          // Invalid, used as a placeholder.
MIRType_Slots,         // A slots vector
MIRType_Elements,      // An elements vector
MIRType_StackFrame,    // StackFrame pointer for OSR.
MIRType_Shape,         // A Shape pointer.
MIRType_ForkJoinSlice  // js::ForkJoinSlice*
};

#ifdef DEBUG
@ -249,6 +249,16 @@ class LNewSlots : public LCallInstructionHelper<1, 0, 3>
}
};

class LNewParallelArray : public LInstructionHelper<1, 0, 0>
{
public:
LIR_HEADER(NewParallelArray);

MNewParallelArray *mir() const {
return mir_->toNewParallelArray();
}
};

class LNewArray : public LInstructionHelper<1, 0, 0>
{
public:
@ -269,6 +279,79 @@ class LNewObject : public LInstructionHelper<1, 0, 0>
}
};

class LParNew : public LInstructionHelper<1, 1, 2>
{
public:
LIR_HEADER(ParNew);

LParNew(const LAllocation &parSlice,
const LDefinition &temp1,
const LDefinition &temp2)
{
setOperand(0, parSlice);
setTemp(0, temp1);
setTemp(1, temp2);
}

MParNew *mir() const {
return mir_->toParNew();
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LAllocation *getTemp0() {
return getTemp(0)->output();
}

const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};

class LParNewDenseArray : public LCallInstructionHelper<1, 2, 3>
{
public:
LIR_HEADER(ParNewDenseArray);

LParNewDenseArray(const LAllocation &parSlice,
const LAllocation &length,
const LDefinition &temp1,
const LDefinition &temp2,
const LDefinition &temp3) {
setOperand(0, parSlice);
setOperand(1, length);
setTemp(0, temp1);
setTemp(1, temp2);
setTemp(2, temp3);
}

MParNewDenseArray *mir() const {
return mir_->toParNewDenseArray();
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LAllocation *length() {
return getOperand(1);
}

const LAllocation *getTemp0() {
return getTemp(0)->output();
}

const LAllocation *getTemp1() {
return getTemp(1)->output();
}

const LAllocation *getTemp2() {
return getTemp(2)->output();
}
};

// Allocates a new DeclEnvObject.
//
// This instruction generates two possible instruction sets:
@ -311,6 +394,64 @@ class LNewCallObject : public LInstructionHelper<1, 1, 0>
}
};

class LParNewCallObject : public LInstructionHelper<1, 2, 2>
{
LParNewCallObject(const LAllocation &parSlice,
const LAllocation &slots,
const LDefinition &temp1,
const LDefinition &temp2) {
setOperand(0, parSlice);
setOperand(1, slots);
setTemp(0, temp1);
setTemp(1, temp2);
}

public:
LIR_HEADER(ParNewCallObject);

static LParNewCallObject *NewWithSlots(const LAllocation &parSlice,
const LAllocation &slots,
const LDefinition &temp1,
const LDefinition &temp2) {
return new LParNewCallObject(parSlice, slots, temp1, temp2);
}

static LParNewCallObject *NewSansSlots(const LAllocation &parSlice,
const LDefinition &temp1,
const LDefinition &temp2) {
LAllocation slots = LConstantIndex::Bogus();
return new LParNewCallObject(parSlice, slots, temp1, temp2);
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LAllocation *slots() {
return getOperand(1);
}

const bool hasDynamicSlots() {
// TO INVESTIGATE: Felix tried using isRegister() method here,
// but for useFixed(_, CallTempN), isRegister() is false (and
// isUse() is true). So for now ignore that and try to match
// the LConstantIndex::Bogus() generated above instead.
return slots() && ! slots()->isConstant();
}

const MParNewCallObject *mir() const {
return mir_->toParNewCallObject();
}

const LAllocation *getTemp0() {
return getTemp(0)->output();
}

const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};

class LNewStringObject : public LInstructionHelper<1, 1, 1>
{
public:
@ -332,6 +473,12 @@ class LNewStringObject : public LInstructionHelper<1, 1, 1>
}
};

class LParBailout : public LInstructionHelper<0, 0, 0>
{
public:
LIR_HEADER(ParBailout);
};

// Takes in an Object and a Value.
class LInitProp : public LCallInstructionHelper<0, 1 + BOX_PIECES, 0>
{
@ -371,6 +518,48 @@ class LCheckOverRecursed : public LInstructionHelper<0, 0, 1>
}
};

class LParCheckOverRecursed : public LInstructionHelper<0, 1, 1>
{
public:
LIR_HEADER(ParCheckOverRecursed);

LParCheckOverRecursed(const LAllocation &parSlice,
const LDefinition &tempReg)
{
setOperand(0, parSlice);
setTemp(0, tempReg);
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LDefinition *getTempReg() {
return getTemp(0);
}
};

class LParCheckInterrupt : public LInstructionHelper<0, 1, 1>
{
public:
LIR_HEADER(ParCheckInterrupt);

LParCheckInterrupt(const LAllocation &parSlice,
const LDefinition &tempReg)
{
setOperand(0, parSlice);
setTemp(0, tempReg);
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LDefinition *getTempReg() {
return getTemp(0);
}
};

class LDefVar : public LCallInstructionHelper<0, 1, 0>
{
public:
@ -1146,6 +1335,27 @@ class LCompareStrictS : public LInstructionHelper<1, BOX_PIECES + 1, 2>
}
};

class LParCompareS : public LCallInstructionHelper<1, 2, 0>
{
public:
LIR_HEADER(ParCompareS);

LParCompareS(const LAllocation &left, const LAllocation &right) {
setOperand(0, left);
setOperand(1, right);
}

const LAllocation *left() {
return getOperand(0);
}
const LAllocation *right() {
return getOperand(1);
}
MCompare *mir() {
return mir_->toCompare();
}
};

// Used for strict-equality comparisons where one side is a boolean
// and the other is a value. Note that CompareI is used to compare
// two booleans.
@ -2098,6 +2308,37 @@ class LLambda : public LInstructionHelper<1, 1, 0>
}
};

class LParLambda : public LInstructionHelper<1, 2, 2>
{
public:
LIR_HEADER(ParLambda);

LParLambda(const LAllocation &parSlice,
const LAllocation &scopeChain,
const LDefinition &temp1,
const LDefinition &temp2) {
setOperand(0, parSlice);
setOperand(1, scopeChain);
setTemp(0, temp1);
setTemp(1, temp2);
}
const LAllocation *parSlice() {
return getOperand(0);
}
const LAllocation *scopeChain() {
return getOperand(1);
}
const MParLambda *mir() const {
return mir_->toParLambda();
}
const LAllocation *getTemp0() {
return getTemp(0)->output();
}
const LAllocation *getTemp1() {
return getTemp(1)->output();
}
};

// Determines the implicit |this| value for function calls.
class LImplicitThis : public LInstructionHelper<BOX_PIECES, 1, 0>
{
@ -3076,6 +3317,20 @@ class LFunctionEnvironment : public LInstructionHelper<1, 1, 0>
}
};

class LParSlice : public LCallInstructionHelper<1, 0, 1>
{
public:
LIR_HEADER(ParSlice);

LParSlice(const LDefinition &temp1) {
setTemp(0, temp1);
}

const LAllocation *getTempReg() {
return getTemp(0)->output();
}
};

class LCallGetProperty : public LCallInstructionHelper<BOX_PIECES, BOX_PIECES, 0>
{
public:
@ -3325,6 +3580,48 @@ class LGetArgument : public LInstructionHelper<BOX_PIECES, 1, 0>
}
};

class LParWriteGuard : public LCallInstructionHelper<0, 2, 1>
{
public:
LIR_HEADER(ParWriteGuard);

LParWriteGuard(const LAllocation &parSlice,
const LAllocation &object,
const LDefinition &temp1) {
setOperand(0, parSlice);
setOperand(1, object);
setTemp(0, temp1);
}

bool isCall() const {
return true;
}

const LAllocation *parSlice() {
return getOperand(0);
}

const LAllocation *object() {
return getOperand(1);
}

const LAllocation *getTempReg() {
return getTemp(0)->output();
}
};

class LParDump : public LCallInstructionHelper<0, BOX_PIECES, 0>
{
public:
LIR_HEADER(ParDump);

static const size_t Value = 0;

const LAllocation *value() {
return getOperand(0);
}
};

// Guard that a value is in a TypeSet.
class LTypeBarrier : public LInstructionHelper<BOX_PIECES, BOX_PIECES, 1>
{
@ -546,6 +546,8 @@ class LDefinition
return LDefinition::GENERAL;
case MIRType_StackFrame:
return LDefinition::GENERAL;
case MIRType_ForkJoinSlice:
return LDefinition::GENERAL;
default:
JS_NOT_REACHED("unexpected type");
return LDefinition::GENERAL;
@ -22,14 +22,20 @@
_(TableSwitch) \
_(TableSwitchV) \
_(Goto) \
_(NewParallelArray) \
_(NewArray) \
_(NewObject) \
_(NewSlots) \
_(NewDeclEnvObject) \
_(NewCallObject) \
_(NewStringObject) \
_(ParNew) \
_(ParNewDenseArray) \
_(ParNewCallObject) \
_(ParBailout) \
_(InitProp) \
_(CheckOverRecursed) \
_(ParCheckOverRecursed) \
_(RecompileCheck) \
_(DefVar) \
_(DefFun) \
@ -63,6 +69,7 @@
_(CompareDAndBranch) \
_(CompareS) \
_(CompareStrictS) \
_(ParCompareS) \
_(CompareB) \
_(CompareBAndBranch) \
_(CompareV) \
@ -109,6 +116,7 @@
_(RegExpTest) \
_(Lambda) \
_(LambdaForSingleton) \
_(ParLambda) \
_(ImplicitThis) \
_(Slots) \
_(Elements) \
@ -119,6 +127,8 @@
_(StoreSlotT) \
_(GuardShape) \
_(GuardClass) \
_(ParWriteGuard) \
_(ParDump) \
_(TypeBarrier) \
_(MonitorTypes) \
_(InitializedLength) \
@ -149,6 +159,7 @@
_(StoreFixedSlotV) \
_(StoreFixedSlotT) \
_(FunctionEnvironment) \
_(ParSlice) \
_(GetPropertyCacheV) \
_(GetPropertyCacheT) \
_(GetElementCacheV) \
@ -184,6 +195,7 @@
_(InstanceOfV) \
_(CallInstanceOf) \
_(InterruptCheck) \
_(ParCheckInterrupt) \
_(FunctionBoundary) \
_(GetDOMProperty) \
_(SetDOMProperty) \
|
||||
if (fixed->numRanges() > 0) {
|
||||
CodePosition fixedPos = current->intersect(fixed);
|
||||
if (fixedPos != CodePosition::MIN) {
|
||||
JS_ASSERT(fixedPos > current->start());
|
||||
JS_ASSERT(fixedPos < current->end());
|
||||
if (!splitInterval(current, fixedPos))
|
||||
return false;
|
||||
|
@ -113,6 +113,19 @@ LIRGenerator::visitCheckOverRecursed(MCheckOverRecursed *ins)
return true;
}

bool
LIRGenerator::visitParCheckOverRecursed(MParCheckOverRecursed *ins)
{
LParCheckOverRecursed *lir = new LParCheckOverRecursed(
useRegister(ins->parSlice()),
temp());
if (!add(lir))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}

bool
LIRGenerator::visitDefVar(MDefVar *ins)
{
@ -143,6 +156,13 @@ LIRGenerator::visitNewSlots(MNewSlots *ins)
return defineReturn(lir, ins);
}

bool
LIRGenerator::visitNewParallelArray(MNewParallelArray *ins)
{
LNewParallelArray *lir = new LNewParallelArray();
return define(lir, ins) && assignSafepoint(lir, ins);
}

bool
LIRGenerator::visitNewArray(MNewArray *ins)
{
@ -183,6 +203,25 @@ LIRGenerator::visitNewCallObject(MNewCallObject *ins)
return true;
}

bool
LIRGenerator::visitParNewCallObject(MParNewCallObject *ins)
{
const LAllocation &parThreadContext = useRegister(ins->parSlice());
const LDefinition &temp1 = temp();
const LDefinition &temp2 = temp();

LParNewCallObject *lir;
if (ins->slots()->type() == MIRType_Slots) {
const LAllocation &slots = useRegister(ins->slots());
lir = LParNewCallObject::NewWithSlots(parThreadContext, slots,
temp1, temp2);
} else {
lir = LParNewCallObject::NewSansSlots(parThreadContext, temp1, temp2);
}

return define(lir, ins);
}

bool
LIRGenerator::visitNewStringObject(MNewStringObject *ins)
{
@ -192,6 +231,13 @@ LIRGenerator::visitNewStringObject(MNewStringObject *ins)
return define(lir, ins) && assignSafepoint(lir, ins);
}

bool
LIRGenerator::visitParBailout(MParBailout *ins)
{
LParBailout *lir = new LParBailout();
return add(lir, ins);
}

bool
LIRGenerator::visitInitProp(MInitProp *ins)
{
@ -580,10 +626,24 @@ LIRGenerator::visitCompare(MCompare *comp)
// LCompareSAndBranch. Doing this now wouldn't be wrong, but doesn't
// make sense and avoids confusion.
if (comp->compareType() == MCompare::Compare_String) {
LCompareS *lir = new LCompareS(useRegister(left), useRegister(right), temp());
if (!define(lir, comp))
return false;
return assignSafepoint(lir, comp);
switch (comp->block()->info().executionMode()) {
case SequentialExecution:
{
LCompareS *lir = new LCompareS(useRegister(left), useRegister(right), temp());
if (!define(lir, comp))
return false;
return assignSafepoint(lir, comp);
}

case ParallelExecution:
{
LParCompareS *lir = new LParCompareS(useFixed(left, CallTempReg0),
useFixed(right, CallTempReg1));
return defineReturn(lir, comp);
}
}

JS_NOT_REACHED("Unexpected execution mode");
}

// Strict compare between value and string
@ -1382,6 +1442,17 @@ LIRGenerator::visitLambda(MLambda *ins)
return define(lir, ins) && assignSafepoint(lir, ins);
}

bool
LIRGenerator::visitParLambda(MParLambda *ins)
{
JS_ASSERT(!ins->fun()->hasSingletonType());
JS_ASSERT(!types::UseNewTypeForClone(ins->fun()));
LParLambda *lir = new LParLambda(useRegister(ins->parSlice()),
useRegister(ins->scopeChain()),
temp(), temp());
return define(lir, ins);
}

bool
LIRGenerator::visitImplicitThis(MImplicitThis *ins)
{
@ -1439,6 +1510,62 @@ LIRGenerator::visitFunctionEnvironment(MFunctionEnvironment *ins)
return define(new LFunctionEnvironment(useRegisterAtStart(ins->function())), ins);
}

bool
LIRGenerator::visitParSlice(MParSlice *ins)
{
LParSlice *lir = new LParSlice(tempFixed(CallTempReg0));
return defineReturn(lir, ins);
}

bool
LIRGenerator::visitParWriteGuard(MParWriteGuard *ins)
{
return add(new LParWriteGuard(useFixed(ins->parSlice(), CallTempReg0),
useFixed(ins->object(), CallTempReg1),
tempFixed(CallTempReg2)));
}

bool
LIRGenerator::visitParCheckInterrupt(MParCheckInterrupt *ins)
{
LParCheckInterrupt *lir = new LParCheckInterrupt(
useRegister(ins->parSlice()),
temp());
if (!add(lir))
return false;
if (!assignSafepoint(lir, ins))
return false;
return true;
}

bool
LIRGenerator::visitParDump(MParDump *ins)
{
LParDump *lir = new LParDump();
useBoxFixed(lir, LParDump::Value, ins->value(), CallTempReg0, CallTempReg1);
return add(lir);
}

bool
LIRGenerator::visitParNew(MParNew *ins)
{
LParNew *lir = new LParNew(useRegister(ins->parSlice()),
temp(), temp());
return define(lir, ins);
}

bool
LIRGenerator::visitParNewDenseArray(MParNewDenseArray *ins)
{
LParNewDenseArray *lir = new LParNewDenseArray(
useFixed(ins->parSlice(), CallTempReg0),
useFixed(ins->length(), CallTempReg1),
tempFixed(CallTempReg2),
tempFixed(CallTempReg3),
tempFixed(CallTempReg4));
return defineReturn(lir, ins);
}

bool
LIRGenerator::visitStoreSlot(MStoreSlot *ins)
{
|
||||
bool visitGoto(MGoto *ins);
|
||||
bool visitTableSwitch(MTableSwitch *tableswitch);
|
||||
bool visitNewSlots(MNewSlots *ins);
|
||||
bool visitNewParallelArray(MNewParallelArray *ins);
|
||||
bool visitNewArray(MNewArray *ins);
|
||||
bool visitNewObject(MNewObject *ins);
|
||||
bool visitNewDeclEnvObject(MNewDeclEnvObject *ins);
|
||||
bool visitNewCallObject(MNewCallObject *ins);
|
||||
bool visitNewStringObject(MNewStringObject *ins);
|
||||
bool visitParNew(MParNew *ins);
|
||||
bool visitParNewCallObject(MParNewCallObject *ins);
|
||||
bool visitParNewDenseArray(MParNewDenseArray *ins);
|
||||
bool visitParBailout(MParBailout *ins);
|
||||
bool visitInitProp(MInitProp *ins);
|
||||
bool visitCheckOverRecursed(MCheckOverRecursed *ins);
|
||||
bool visitParCheckOverRecursed(MParCheckOverRecursed *ins);
|
||||
bool visitDefVar(MDefVar *ins);
|
||||
bool visitDefFun(MDefFun *ins);
|
||||
bool visitPrepareCall(MPrepareCall *ins);
|
||||
@ -136,6 +142,7 @@ class LIRGenerator : public LIRGeneratorSpecific
|
||||
bool visitRegExp(MRegExp *ins);
|
||||
bool visitRegExpTest(MRegExpTest *ins);
|
||||
bool visitLambda(MLambda *ins);
|
||||
bool visitParLambda(MParLambda *ins);
|
||||
bool visitImplicitThis(MImplicitThis *ins);
|
||||
bool visitSlots(MSlots *ins);
|
||||
bool visitElements(MElements *ins);
|
||||
@ -143,6 +150,10 @@ class LIRGenerator : public LIRGeneratorSpecific
|
||||
bool visitConvertElementsToDoubles(MConvertElementsToDoubles *ins);
|
||||
bool visitLoadSlot(MLoadSlot *ins);
|
||||
bool visitFunctionEnvironment(MFunctionEnvironment *ins);
|
||||
bool visitParSlice(MParSlice *ins);
|
||||
bool visitParWriteGuard(MParWriteGuard *ins);
|
||||
bool visitParCheckInterrupt(MParCheckInterrupt *ins);
|
||||
bool visitParDump(MParDump *ins);
|
||||
bool visitStoreSlot(MStoreSlot *ins);
|
||||
bool visitTypeBarrier(MTypeBarrier *ins);
|
||||
bool visitMonitorTypes(MMonitorTypes *ins);
|
||||
|
@ -7,6 +7,8 @@
|
||||
|
||||
#include "jslibmath.h"
|
||||
#include "jsmath.h"
|
||||
#include "builtin/ParallelArray.h"
|
||||
#include "builtin/TestingFunctions.h"
|
||||
|
||||
#include "MIR.h"
|
||||
#include "MIRGraph.h"
|
||||
@ -76,6 +78,22 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSNative native)
|
||||
if (native == regexp_test)
|
||||
return inlineRegExpTest(callInfo);
|
||||
|
||||
// Parallel Array
|
||||
if (native == intrinsic_UnsafeSetElement)
|
||||
return inlineUnsafeSetElement(callInfo);
|
||||
if (native == testingFunc_inParallelSection)
|
||||
return inlineForceSequentialOrInParallelSection(callInfo);
|
||||
if (native == intrinsic_NewDenseArray)
|
||||
return inlineNewDenseArray(callInfo);
|
||||
|
||||
// Self-hosting
|
||||
if (native == intrinsic_ThrowError)
|
||||
return inlineThrowError(callInfo);
|
||||
#ifdef DEBUG
|
||||
if (native == intrinsic_Dump)
|
||||
return inlineDump(callInfo);
|
||||
#endif
|
||||
|
||||
return InliningStatus_NotInlined;
|
||||
}
|
||||
|
||||
@ -846,5 +864,286 @@ IonBuilder::inlineRegExpTest(CallInfo &callInfo)
|
||||
return InliningStatus_Inlined;
|
||||
}
|
||||
|
||||
IonBuilder::InliningStatus
|
||||
IonBuilder::inlineUnsafeSetElement(CallInfo &callInfo)
|
||||
{
|
||||
uint32_t argc = callInfo.argc();
|
||||
if (argc < 3 || (argc % 3) != 0 || callInfo.constructing())
|
||||
return InliningStatus_NotInlined;
|
||||
|
||||
/* Important:
|
||||
*
|
||||
* Here we inline each of the stores resulting from a call to
|
||||
* %UnsafeSetElement(). It is essential that these stores occur
|
||||
* atomically and cannot be interrupted by a stack or recursion
|
||||
* check. If this is not true, race conditions can occur.
|
||||
*/
|
||||
|
||||
for (uint32_t base = 0; base < argc; base += 3) {
|
||||
uint32_t arri = base + 1;
|
||||
uint32_t idxi = base + 2;
|
||||
|
||||
types::StackTypeSet *obj = getInlineArgTypeSet(callInfo, arri);
|
||||
types::StackTypeSet *id = getInlineArgTypeSet(callInfo, idxi);
|
||||
|
||||
int arrayType;
|
||||
if (!oracle->elementAccessIsDenseNative(obj, id) &&
|
||||
!oracle->elementAccessIsTypedArray(obj, id, &arrayType))
|
||||
{
|
||||
return InliningStatus_NotInlined;
|
||||
}
|
||||
}
|
||||
|
||||
callInfo.unwrapArgs();
|
||||
|
||||
// Push the result first so that the stack depth matches up for
|
||||
// the potential bailouts that will occur in the stores below.
|
||||
MConstant *udef = MConstant::New(UndefinedValue());
|
||||
current->add(udef);
|
||||
current->push(udef);
|
||||
|
||||
for (uint32_t base = 0; base < argc; base += 3) {
|
||||
uint32_t arri = base + 1;
|
||||
uint32_t idxi = base + 2;
|
||||
|
||||
types::StackTypeSet *obj = getInlineArgTypeSet(callInfo, arri);
|
||||
types::StackTypeSet *id = getInlineArgTypeSet(callInfo, idxi);
|
||||
|
||||
if (oracle->elementAccessIsDenseNative(obj, id)) {
|
||||
if (!inlineUnsafeSetDenseArrayElement(callInfo, base))
|
||||
return InliningStatus_Error;
|
||||
continue;
|
||||
}
|
||||
|
||||
int arrayType;
|
||||
if (oracle->elementAccessIsTypedArray(obj, id, &arrayType)) {
|
||||
if (!inlineUnsafeSetTypedArrayElement(callInfo, base, arrayType))
|
||||
return InliningStatus_Error;
|
||||
continue;
|
||||
}
|
||||
|
||||
JS_NOT_REACHED("Element access not dense array nor typed array");
|
||||
}
|
||||
|
||||
return InliningStatus_Inlined;
|
||||
}
|
||||
|
||||
bool
|
||||
IonBuilder::inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base)
|
||||
{
|
||||
// Note: we do not check the conditions that are asserted as true
|
||||
// in intrinsic_UnsafeSetElement():
|
||||
// - arr is a dense array
|
||||
// - idx < initialized length
|
||||
// Furthermore, note that inference should be propagating
|
||||
// the type of the value to the JSID_VOID property of the array.
|
||||
|
||||
uint32_t arri = base + 1;
|
||||
uint32_t idxi = base + 2;
|
||||
uint32_t elemi = base + 3;
|
||||
|
||||
MElements *elements = MElements::New(callInfo.getArg(arri));
|
||||
current->add(elements);
|
||||
|
||||
MToInt32 *id = MToInt32::New(callInfo.getArg(idxi));
|
||||
current->add(id);
|
||||
|
||||
// We disable the hole check for this store. This implies that if
|
||||
// there were setters on the prototype, they would not be invoked.
|
||||
// But this is actually the desired behavior.
|
||||
|
||||
MStoreElement *store = MStoreElement::New(elements, id,
|
||||
callInfo.getArg(elemi),
|
||||
/* needsHoleCheck = */ false);
|
||||
store->setRacy();
|
||||
|
||||
current->add(store);
|
||||
|
||||
if (!resumeAfter(store))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool
|
||||
IonBuilder::inlineUnsafeSetTypedArrayElement(CallInfo &callInfo,
|
||||
uint32_t base,
|
||||
int arrayType)
|
||||
{
|
||||
// Note: we do not check the conditions that are asserted as true
|
||||
// in intrinsic_UnsafeSetElement():
|
||||
// - arr is a typed array
|
||||
// - idx < length
|
||||
|
||||
uint32_t arri = base + 1;
|
||||
uint32_t idxi = base + 2;
|
||||
uint32_t elemi = base + 3;
|
||||
|
||||
MInstruction *elements = getTypedArrayElements(callInfo.getArg(arri));
|
||||
current->add(elements);
|
||||
|
||||
MToInt32 *id = MToInt32::New(callInfo.getArg(idxi));
|
||||
current->add(id);
|
||||
|
||||
MDefinition *value = callInfo.getArg(elemi);
|
||||
if (arrayType == TypedArray::TYPE_UINT8_CLAMPED) {
|
||||
value = MClampToUint8::New(value);
|
||||
current->add(value->toInstruction());
|
||||
}
|
||||
|
||||
MStoreTypedArrayElement *store = MStoreTypedArrayElement::New(elements, id, value, arrayType);
|
||||
store->setRacy();
|
||||
|
||||
current->add(store);
|
||||
|
||||
if (!resumeAfter(store))
|
||||
return false;
|
||||
|
||||
    return true;
}

IonBuilder::InliningStatus
IonBuilder::inlineForceSequentialOrInParallelSection(CallInfo &callInfo)
{
    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        // In sequential mode, leave as is, because we'd have to
        // access the "in warmup" flag of the runtime.
        return InliningStatus_NotInlined;

      case ParallelExecution:
        // During Parallel Exec, we always force sequential, so
        // replace with true. This permits UCE to eliminate the
        // entire path as dead, which is important.
        callInfo.unwrapArgs();
        MConstant *ins = MConstant::New(BooleanValue(true));
        current->add(ins);
        current->push(ins);
        return InliningStatus_Inlined;
    }

    JS_NOT_REACHED("Invalid execution mode");
}

IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArray(CallInfo &callInfo)
{
    if (callInfo.constructing() || callInfo.argc() != 1)
        return InliningStatus_NotInlined;

    // For now, in seq. mode we just call the C function. In
    // par. mode we use inlined MIR.
    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return inlineNewDenseArrayForSequentialExecution(callInfo);
      case ParallelExecution:
        return inlineNewDenseArrayForParallelExecution(callInfo);
    }

    JS_NOT_REACHED("unknown ExecutionMode");
}

IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArrayForSequentialExecution(CallInfo &callInfo)
{
    // not yet implemented; in seq. mode the C function is not so bad
    return InliningStatus_NotInlined;
}

IonBuilder::InliningStatus
IonBuilder::inlineNewDenseArrayForParallelExecution(CallInfo &callInfo)
{
    // Create the new parallel array object. Parallel arrays have specially
    // constructed type objects, so we can only perform the inlining if we
    // already have one of these type objects.
    types::StackTypeSet *returnTypes = getInlineReturnTypeSet();
    if (returnTypes->getKnownTypeTag() != JSVAL_TYPE_OBJECT)
        return InliningStatus_NotInlined;
    if (returnTypes->getObjectCount() != 1)
        return InliningStatus_NotInlined;
    types::TypeObject *typeObject = returnTypes->getTypeObject(0);

    RootedObject templateObject(cx, NewDenseAllocatedArray(cx, 0));
    if (!templateObject)
        return InliningStatus_Error;
    templateObject->setType(typeObject);

    MParNewDenseArray *newObject = new MParNewDenseArray(graph().parSlice(),
                                                         callInfo.getArg(1),
                                                         templateObject);
    current->add(newObject);
    current->push(newObject);

    return InliningStatus_Inlined;
}

IonBuilder::InliningStatus
IonBuilder::inlineThrowError(CallInfo &callInfo)
{
    // In Parallel Execution, convert %ThrowError() into a bailout.

    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return InliningStatus_NotInlined;
      case ParallelExecution:
        break;
    }

    callInfo.unwrapArgs();

    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return InliningStatus_Error;
    current->end(bailout);

    current = newBlock(pc);
    if (!current)
        return InliningStatus_Error;

    MConstant *udef = MConstant::New(UndefinedValue());
    current->add(udef);
    current->push(udef);

    return InliningStatus_Inlined;
}

IonBuilder::InliningStatus
IonBuilder::inlineDump(CallInfo &callInfo)
{
    // In Parallel Execution, call ParDump. We just need a debugging
    // aid!

    if (callInfo.constructing())
        return InliningStatus_NotInlined;

    ExecutionMode executionMode = info().executionMode();
    switch (executionMode) {
      case SequentialExecution:
        return InliningStatus_NotInlined;
      case ParallelExecution:
        break;
    }

    callInfo.unwrapArgs();

    MParDump *dump = new MParDump(callInfo.getArg(1));
    current->add(dump);

    MConstant *udef = MConstant::New(UndefinedValue());
    current->add(udef);
    current->push(udef);

    return InliningStatus_Inlined;
}

} // namespace ion
} // namespace js
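The inliners above share one pattern: in the parallel case they pop the call's arguments with unwrapArgs(), push a replacement definition onto the current block, and return InliningStatus_Inlined so the builder never emits the real MCall. As a hedged sketch (the helper name inlineConstantBool is hypothetical, not part of this patch), the constant-folding case used for inParallelSection() boils down to:

    // Hedged sketch, not from the patch: replace an intrinsic call with a
    // boolean constant. UCE can then fold any branch guarded by the result.
    IonBuilder::InliningStatus
    IonBuilder::inlineConstantBool(CallInfo &callInfo, bool value)
    {
        callInfo.unwrapArgs();                  // drop the MPassArg wrappers
        MConstant *ins = MConstant::New(BooleanValue(value));
        current->add(ins);                      // define the constant
        current->push(ins);                     // it becomes the call's result
        return InliningStatus_Inlined;          // builder skips the real call
    }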
@ -1879,6 +1879,29 @@ MBeta::computeRange()
    }
}

bool
MNewObject::shouldUseVM() const
{
    return templateObject()->hasSingletonType() ||
           templateObject()->hasDynamicSlots();
}

bool
MNewArray::shouldUseVM() const
{
    JS_ASSERT(count() < JSObject::NELEMENTS_LIMIT);

    size_t maxArraySlots =
        gc::GetGCKindSlots(gc::FINALIZE_OBJECT_LAST) - ObjectElements::VALUES_PER_HEADER;

    // Allocate space using the VM call when MIR hints that the array needs
    // to be allocated immediately, but only when the data doesn't fit in the
    // available array slots.
    bool allocating = isAllocating() && count() > maxArraySlots;

    return templateObject()->hasSingletonType() || allocating;
}

bool
MLoadFixedSlot::mightAlias(MDefinition *store)
{
js/src/ion/MIR.h
@ -310,6 +310,7 @@ class MDefinition : public MNode
    { }

    virtual Opcode op() const = 0;
    virtual const char *opName() const = 0;
    void printName(FILE *fp);
    static void PrintOpcodeName(FILE *fp, Opcode op);
    virtual void printOpcode(FILE *fp);
@ -579,6 +580,9 @@ class MInstruction
    Opcode op() const {                                                     \
        return MDefinition::Op_##opcode;                                    \
    }                                                                       \
    const char *opName() const {                                            \
        return #opcode;                                                     \
    }                                                                       \
    bool accept(MInstructionVisitor *visitor) {                             \
        return visitor->visit##opcode(this);                                \
    }
@ -610,6 +614,15 @@ class MAryInstruction : public MInstruction
class MNullaryInstruction : public MAryInstruction<0>
{ };

class MUnaryInstruction : public MAryInstruction<1>
{
  protected:
    MUnaryInstruction(MDefinition *ins)
    {
        setOperand(0, ins);
    }
};

// Generates an LSnapshot without further effect.
class MStart : public MNullaryInstruction
{
@ -1068,6 +1081,28 @@ class MThrow
    }
};

class MNewParallelArray : public MNullaryInstruction
{
    CompilerRootObject templateObject_;

    MNewParallelArray(JSObject *templateObject)
      : templateObject_(templateObject)
    {
        setResultType(MIRType_Object);
    }

  public:
    INSTRUCTION_HEADER(NewParallelArray);

    static MNewParallelArray *New(JSObject *templateObject) {
        return new MNewParallelArray(templateObject);
    }

    JSObject *templateObject() const {
        return templateObject_;
    }
};

class MNewArray : public MNullaryInstruction
{
  public:
@ -1107,6 +1142,10 @@ class MNewArray : public MNullaryInstruction
        return allocating_ == NewArray_Allocating;
    }

    // Returns true if the code generator should call through to the
    // VM rather than the fast path.
    bool shouldUseVM() const;

    // NewArray is marked as non-effectful because all our allocations are
    // either lazy when we are using "new Array(length)" or bounded by the
    // script or the stack size when we are using "new Array(...)" or "[...]"
@ -1135,11 +1174,54 @@ class MNewObject : public MNullaryInstruction
        return new MNewObject(templateObject);
    }

    // Returns true if the code generator should call through to the
    // VM rather than the fast path.
    bool shouldUseVM() const;

    JSObject *templateObject() const {
        return templateObject_;
    }
};

// Could be allocating either a new array or a new object.
class MParNew : public MUnaryInstruction
{
    CompilerRootObject templateObject_;

  public:
    INSTRUCTION_HEADER(ParNew);

    MParNew(MDefinition *parSlice,
            JSObject *templateObject)
      : MUnaryInstruction(parSlice),
        templateObject_(templateObject)
    {
        setResultType(MIRType_Object);
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }

    JSObject *templateObject() const {
        return templateObject_;
    }
};

class MParBailout : public MAryControlInstruction<0, 0>
{
  public:
    INSTRUCTION_HEADER(ParBailout);

    MParBailout()
      : MAryControlInstruction()
    {
        setResultType(MIRType_Undefined);
        setGuard();
    }
};

// Slow path for adding a property to an object without a known base.
class MInitProp
  : public MAryInstruction<2>,
@ -1360,15 +1442,6 @@ class MApplyArgs
    }
};

class MUnaryInstruction : public MAryInstruction<1>
{
  protected:
    MUnaryInstruction(MDefinition *ins)
    {
        setOperand(0, ins);
    }
};

class MBinaryInstruction : public MAryInstruction<2>
{
  protected:
@ -3188,6 +3261,45 @@ class MCheckOverRecursed : public MNullaryInstruction
    INSTRUCTION_HEADER(CheckOverRecursed)
};

// Check the current frame for over-recursion past the global stack limit.
// Uses the per-thread recursion limit.
class MParCheckOverRecursed : public MUnaryInstruction
{
  public:
    INSTRUCTION_HEADER(ParCheckOverRecursed);

    MParCheckOverRecursed(MDefinition *parForkJoinSlice)
      : MUnaryInstruction(parForkJoinSlice)
    {
        setResultType(MIRType_None);
        setGuard();
        setMovable();
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }
};

// Check for an interrupt (or rendezvous) in parallel mode.
class MParCheckInterrupt : public MUnaryInstruction
{
  public:
    INSTRUCTION_HEADER(ParCheckInterrupt);

    MParCheckInterrupt(MDefinition *parForkJoinSlice)
      : MUnaryInstruction(parForkJoinSlice)
    {
        setResultType(MIRType_None);
        setGuard();
        setMovable();
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }
};

// Check the script's use count and trigger recompilation to inline
// calls when the script becomes hot.
class MRecompileCheck : public MNullaryInstruction
@ -3401,6 +3513,47 @@ class MLambda
    }
};

class MParLambda
  : public MBinaryInstruction,
    public SingleObjectPolicy
{
    CompilerRootFunction fun_;

    MParLambda(MDefinition *parSlice,
               MDefinition *scopeChain, JSFunction *fun)
      : MBinaryInstruction(parSlice, scopeChain), fun_(fun)
    {
        setResultType(MIRType_Object);
    }

  public:
    INSTRUCTION_HEADER(ParLambda);

    static MParLambda *New(MDefinition *parSlice,
                           MDefinition *scopeChain, JSFunction *fun) {
        return new MParLambda(parSlice, scopeChain, fun);
    }

    static MParLambda *New(MDefinition *parSlice,
                           MLambda *originalInstruction) {
        return New(parSlice,
                   originalInstruction->scopeChain(),
                   originalInstruction->fun());
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }

    MDefinition *scopeChain() const {
        return getOperand(1);
    }

    JSFunction *fun() const {
        return fun_;
    }
};

// Determines the implicit |this| value for function calls.
class MImplicitThis
  : public MUnaryInstruction,
@ -3960,11 +4113,13 @@ class MStoreElementCommon
{
    bool needsBarrier_;
    MIRType elementType_;
    bool racy_; // if true, exempted from normal data race req. during par. exec.

  protected:
    MStoreElementCommon()
      : needsBarrier_(false),
        elementType_(MIRType_Value)
        elementType_(MIRType_Value),
        racy_(false)
    { }

  public:
@ -3981,6 +4136,12 @@ class MStoreElementCommon
    void setNeedsBarrier() {
        needsBarrier_ = true;
    }
    bool racy() const {
        return racy_;
    }
    void setRacy() {
        racy_ = true;
    }
};

// Store a value to a dense array slots vector.
@ -4288,9 +4449,12 @@ class MStoreTypedArrayElement
{
    int arrayType_;

    // See note in MStoreElementCommon.
    bool racy_;

    MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
                            int arrayType)
      : MTernaryInstruction(elements, index, value), arrayType_(arrayType)
      : MTernaryInstruction(elements, index, value), arrayType_(arrayType), racy_(false)
    {
        setResultType(MIRType_Value);
        setMovable();
@ -4334,6 +4498,12 @@ class MStoreTypedArrayElement
    AliasSet getAliasSet() const {
        return AliasSet::Store(AliasSet::TypedArrayElement);
    }
    bool racy() const {
        return racy_;
    }
    void setRacy() {
        racy_ = true;
    }
};

// Clamp input to range [0, 255] for Uint8ClampedArray.
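The racy_ flag added above lets a MIR producer opt a particular store out of the per-object write guard: MAYBE_WRITE_GUARDED_OP in ParallelArrayAnalysis.cpp (later in this patch) returns early when racy() is true. A hedged sketch of how a builder might use it; the surrounding builder code is illustrative, not from this patch:

    // Hedged sketch: mark a dense-array store as racy when each worker is
    // known to write a disjoint index, so no MParWriteGuard is needed.
    MStoreElement *store = MStoreElement::New(elements, index, value);
    store->setRacy();      // skip the write guard during ParallelArrayAnalysis
    current->add(store);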
@ -5054,6 +5224,27 @@ class MFunctionEnvironment
    }
};

// Loads the current js::ForkJoinSlice*.
// Only applicable in ParallelExecution.
class MParSlice
  : public MNullaryInstruction
{
  public:
    MParSlice()
      : MNullaryInstruction()
    {
        setResultType(MIRType_ForkJoinSlice);
    }

    INSTRUCTION_HEADER(ParSlice);

    AliasSet getAliasSet() const {
        // Indicate that this instruction reads nothing, stores nothing.
        // (For all intents and purposes)
        return AliasSet::None();
    }
};

// Store to vp[slot] (slots that are not inline in an object).
class MStoreSlot
  : public MBinaryInstruction,
@ -5895,6 +6086,62 @@ class MGetArgument
    }
};

class MParWriteGuard
  : public MBinaryInstruction,
    public ObjectPolicy<1>
{
    MParWriteGuard(MDefinition *parThreadContext,
                   MDefinition *obj)
      : MBinaryInstruction(parThreadContext, obj)
    {
        setResultType(MIRType_None);
        setGuard();
        setMovable();
    }

  public:
    INSTRUCTION_HEADER(ParWriteGuard);

    static MParWriteGuard *New(MDefinition *parThreadContext, MDefinition *obj) {
        return new MParWriteGuard(parThreadContext, obj);
    }
    MDefinition *parSlice() const {
        return getOperand(0);
    }
    MDefinition *object() const {
        return getOperand(1);
    }
    BailoutKind bailoutKind() const {
        return Bailout_Normal;
    }
    AliasSet getAliasSet() const {
        return AliasSet::None();
    }
};

class MParDump
  : public MUnaryInstruction,
    public BoxPolicy<0>
{
  public:
    INSTRUCTION_HEADER(ParDump);

    MParDump(MDefinition *v)
      : MUnaryInstruction(v)
    {
        setResultType(MIRType_None);
    }

    MDefinition *value() const {
        return getOperand(0);
    }

    TypePolicy *typePolicy() {
        return this;
    }
};

// Given a value, guard that the value is in a particular TypeSet, then returns
// that value.
class MTypeBarrier : public MUnaryInstruction
@ -6045,7 +6292,7 @@ class MNewCallObject : public MUnaryInstruction
    MDefinition *slots() {
        return getOperand(0);
    }
    JSObject *templateObj() {
    JSObject *templateObject() {
        return templateObj_;
    }
    AliasSet getAliasSet() const {
@ -6053,6 +6300,51 @@ class MNewCallObject : public MUnaryInstruction
    }
};

class MParNewCallObject : public MBinaryInstruction
{
    CompilerRootObject templateObj_;

    MParNewCallObject(MDefinition *parSlice,
                      JSObject *templateObj, MDefinition *slots)
      : MBinaryInstruction(parSlice, slots),
        templateObj_(templateObj)
    {
        setResultType(MIRType_Object);
    }

  public:
    INSTRUCTION_HEADER(ParNewCallObject);

    static MParNewCallObject *New(MDefinition *parSlice,
                                  JSObject *templateObj,
                                  MDefinition *slots) {
        return new MParNewCallObject(parSlice, templateObj, slots);
    }

    static MParNewCallObject *New(MDefinition *parSlice,
                                  MNewCallObject *originalInstruction) {
        return New(parSlice,
                   originalInstruction->templateObject(),
                   originalInstruction->slots());
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }

    MDefinition *slots() const {
        return getOperand(1);
    }

    JSObject *templateObj() const {
        return templateObj_;
    }

    AliasSet getAliasSet() const {
        return AliasSet::None();
    }
};

class MNewStringObject :
  public MUnaryInstruction,
  public StringPolicy
@ -6158,6 +6450,38 @@ class MEnclosingScope : public MLoadFixedSlot
    }
};

// Creates a dense array of the given length.
//
// Note: the template object should be an *empty* dense array!
class MParNewDenseArray : public MBinaryInstruction
{
    CompilerRootObject templateObject_;

  public:
    INSTRUCTION_HEADER(ParNewDenseArray);

    MParNewDenseArray(MDefinition *parSlice,
                      MDefinition *length,
                      JSObject *templateObject)
      : MBinaryInstruction(parSlice, length),
        templateObject_(templateObject)
    {
        setResultType(MIRType_Object);
    }

    MDefinition *parSlice() const {
        return getOperand(0);
    }

    MDefinition *length() const {
        return getOperand(1);
    }

    JSObject *templateObject() const {
        return templateObject_;
    }
};

// A resume point contains the information needed to reconstruct the interpreter
// state from a position in the JIT. See the big comment near resumeAfter() in
// IonBuilder.cpp.
@ -67,6 +67,37 @@ MIRGraph::unmarkBlocks() {
        i->unmark();
}

MDefinition *
MIRGraph::parSlice() {
    // Search the entry block to find a par slice instruction. If we do not
    // find one, add one after the Start instruction.
    //
    // Note: the original design used a field in MIRGraph to cache the
    // parSlice rather than searching for it again. However, this
    // could become out of date due to DCE. Given that we do not
    // generally have to search very far to find the par slice
    // instruction if it exists, and that we don't look for it that
    // often, I opted to simply eliminate the cache and search anew
    // each time, so that it is that much easier to keep the IR
    // coherent. - nmatsakis

    MBasicBlock *entry = entryBlock();
    JS_ASSERT(entry->info().executionMode() == ParallelExecution);

    MInstruction *start = NULL;
    for (MInstructionIterator ins(entry->begin()); ins != entry->end(); ins++) {
        if (ins->isParSlice())
            return *ins;
        else if (ins->isStart())
            start = *ins;
    }
    JS_ASSERT(start);

    MParSlice *parSlice = new MParSlice();
    entry->insertAfter(start, parSlice);
    return parSlice;
}

MBasicBlock *
MBasicBlock::New(MIRGraph &graph, CompileInfo &info,
                 MBasicBlock *pred, jsbytecode *entryPc, Kind kind)
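Every Par* instruction in this patch takes the slice as operand 0, so a transformation pass only ever needs the one definition returned by MIRGraph::parSlice() above. A usage sketch, mirroring what ParallelArrayAnalysis.cpp (later in this patch) does when it swaps a sequential allocation for a parallel one:

    // Usage sketch (names from this patch; the surrounding pass code is
    // illustrative): thread the lazily created slice into a new Par* node.
    MDefinition *slice = graph.parSlice();         // finds or inserts MParSlice
    MParNew *parNew = new MParNew(slice, oldNew->templateObject());
    block->insertBefore(oldNew, parNew);
    oldNew->replaceAllUsesWith(parNew);            // rewire consumers
    block->discard(oldNew);                        // drop the sequential MNewObject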
@ -127,6 +158,22 @@ MBasicBlock::NewSplitEdge(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred)
    return MBasicBlock::New(graph, info, pred, pred->pc(), SPLIT_EDGE);
}

MBasicBlock *
MBasicBlock::NewParBailout(MIRGraph &graph, CompileInfo &info,
                           MBasicBlock *pred, jsbytecode *entryPc)
{
    MBasicBlock *block = MBasicBlock::New(graph, info, pred, entryPc, NORMAL);
    if (!block)
        return NULL;

    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return NULL;

    block->end(bailout);
    return block;
}

MBasicBlock::MBasicBlock(MIRGraph &graph, CompileInfo &info, jsbytecode *pc, Kind kind)
  : earlyAbort_(false),
    graph_(graph),
@ -730,14 +777,27 @@ MBasicBlock::getSuccessor(size_t index) const
    return lastIns()->getSuccessor(index);
}

size_t
MBasicBlock::getSuccessorIndex(MBasicBlock *block) const
{
    JS_ASSERT(lastIns());
    for (size_t i = 0; i < numSuccessors(); i++) {
        if (getSuccessor(i) == block)
            return i;
    }
    JS_NOT_REACHED("Invalid successor");
}

void
MBasicBlock::replaceSuccessor(size_t pos, MBasicBlock *split)
{
    JS_ASSERT(lastIns());
    lastIns()->replaceSuccessor(pos, split);

    // Note, successors-with-phis is not yet set.
    JS_ASSERT(!successorWithPhis_);
    // Note, during split-critical-edges, successors-with-phis is not yet set.
    // During PAA, this case is handled before we enter.
    JS_ASSERT_IF(successorWithPhis_, successorWithPhis_ != getSuccessor(pos));

    lastIns()->replaceSuccessor(pos, split);
}

void
@ -793,6 +853,7 @@ MBasicBlock::removePredecessor(MBasicBlock *pred)
        predecessors_.erase(ptr);
        return;
    }

    JS_NOT_REACHED("predecessor was not found");
}
@ -79,6 +79,8 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
    static MBasicBlock *NewPendingLoopHeader(MIRGraph &graph, CompileInfo &info,
                                             MBasicBlock *pred, jsbytecode *entryPc);
    static MBasicBlock *NewSplitEdge(MIRGraph &graph, CompileInfo &info, MBasicBlock *pred);
    static MBasicBlock *NewParBailout(MIRGraph &graph, CompileInfo &info,
                                      MBasicBlock *pred, jsbytecode *entryPc);

    bool dominates(MBasicBlock *other);

@ -165,8 +167,11 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
    void inheritSlots(MBasicBlock *parent);
    bool initEntrySlots();

    // Replaces an edge for a given block with a new block. This is used for
    // critical edge splitting.
    // Replaces an edge for a given block with a new block. This is
    // used for critical edge splitting and also for inserting
    // bailouts during ParallelArrayAnalysis.
    //
    // Note: If successorWithPhis is set, you must not be replacing it.
    void replacePredecessor(MBasicBlock *old, MBasicBlock *split);
    void replaceSuccessor(size_t pos, MBasicBlock *split);

@ -394,6 +399,7 @@ class MBasicBlock : public TempObject, public InlineListNode<MBasicBlock>
    }
    size_t numSuccessors() const;
    MBasicBlock *getSuccessor(size_t index) const;
    size_t getSuccessorIndex(MBasicBlock *) const;

    // Specifies the closest loop header dominating this block.
    void setLoopHeader(MBasicBlock *loop) {
@ -608,6 +614,14 @@ class MIRGraph
    JSScript **scripts() {
        return scripts_.begin();
    }

    // The ParSlice is an instance of ForkJoinSlice*; it carries
    // "per-helper-thread" information. So as not to modify the
    // calling convention for parallel code, we obtain the current
    // slice from thread-local storage. This helper method will
    // lazily insert an MParSlice instruction in the entry block and
    // return the definition.
    MDefinition *parSlice();
};

class MDefinitionIterator
@ -72,6 +72,7 @@ namespace ion {
    _(TruncateToInt32)                                                      \
    _(ToString)                                                             \
    _(NewSlots)                                                             \
    _(NewParallelArray)                                                     \
    _(NewArray)                                                             \
    _(NewObject)                                                            \
    _(NewDeclEnvObject)                                                     \
@ -145,14 +146,32 @@ namespace ion {
    _(InterruptCheck)                                                       \
    _(FunctionBoundary)                                                     \
    _(GetDOMProperty)                                                       \
    _(SetDOMProperty)
    _(SetDOMProperty)                                                       \
    _(ParCheckOverRecursed)                                                 \
    _(ParNewCallObject)                                                     \
    _(ParNew)                                                               \
    _(ParNewDenseArray)                                                     \
    _(ParBailout)                                                           \
    _(ParLambda)                                                            \
    _(ParSlice)                                                             \
    _(ParWriteGuard)                                                        \
    _(ParDump)                                                              \
    _(ParCheckInterrupt)

// Forward declarations of MIR types.
#define FORWARD_DECLARE(op) class M##op;
MIR_OPCODE_LIST(FORWARD_DECLARE)
#undef FORWARD_DECLARE

class MInstructionVisitor
class MInstructionVisitor // interface i.e. pure abstract class
{
  public:
#define VISIT_INS(op) virtual bool visit##op(M##op *) = 0;
    MIR_OPCODE_LIST(VISIT_INS)
#undef VISIT_INS
};

class MInstructionVisitorWithDefaults : public MInstructionVisitor
{
  public:
#define VISIT_INS(op) virtual bool visit##op(M##op *) { JS_NOT_REACHED("NYI: " #op); return false; }
js/src/ion/ParallelArrayAnalysis.cpp (new file)
@ -0,0 +1,848 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include <stdio.h>

#include "Ion.h"
#include "MIR.h"
#include "MIRGraph.h"
#include "ParallelArrayAnalysis.h"
#include "IonSpewer.h"
#include "UnreachableCodeElimination.h"
#include "IonAnalysis.h"

#include "vm/ParallelDo.h"

#include "vm/Stack.h"

namespace js {
namespace ion {

using parallel::Spew;
using parallel::SpewMIR;
using parallel::SpewCompile;

#define SAFE_OP(op)                                                         \
    virtual bool visit##op(M##op *prop) { return true; }

#define CUSTOM_OP(op)                                                       \
    virtual bool visit##op(M##op *prop);

#define DROP_OP(op)                                                         \
    virtual bool visit##op(M##op *ins) {                                    \
        MBasicBlock *block = ins->block();                                  \
        block->discard(ins);                                                \
        return true;                                                        \
    }

#define PERMIT(T) (1 << T)

#define PERMIT_NUMERIC (PERMIT(MIRType_Int32) | PERMIT(MIRType_Double))

#define SPECIALIZED_OP(op, flags)                                           \
    virtual bool visit##op(M##op *ins) {                                    \
        return visitSpecializedInstruction(ins, ins->specialization(), flags); \
    }

#define UNSAFE_OP(op)                                                       \
    virtual bool visit##op(M##op *ins) {                                    \
        SpewMIR(ins, "Unsafe");                                             \
        return markUnsafe();                                                \
    }

#define WRITE_GUARDED_OP(op, obj)                                           \
    virtual bool visit##op(M##op *prop) {                                   \
        return insertWriteGuard(prop, prop->obj());                         \
    }

#define MAYBE_WRITE_GUARDED_OP(op, obj)                                     \
    virtual bool visit##op(M##op *prop) {                                   \
        if (prop->racy())                                                   \
            return true;                                                    \
        return insertWriteGuard(prop, prop->obj());                         \
    }

class ParallelArrayVisitor : public MInstructionVisitor
{
    JSContext *cx_;
    ParallelCompileContext &compileContext_;
    MIRGraph &graph_;
    bool unsafe_;
    MDefinition *parSlice_;

    bool insertWriteGuard(MInstruction *writeInstruction,
                          MDefinition *valueBeingWritten);

    bool replaceWithParNew(MInstruction *newInstruction,
                           JSObject *templateObject);

    bool replace(MInstruction *oldInstruction,
                 MInstruction *replacementInstruction);

    bool visitSpecializedInstruction(MInstruction *ins, MIRType spec, uint32_t flags);

    // Intended for use in a visitXyz() instruction like "return
    // markUnsafe()". Sets the unsafe flag and returns true (since
    // this does not indicate an unrecoverable compilation failure).
    bool markUnsafe() {
        JS_ASSERT(!unsafe_);
        unsafe_ = true;
        return true;
    }

  public:
    AutoObjectVector callTargets;

    ParallelArrayVisitor(JSContext *cx, ParallelCompileContext &compileContext,
                         MIRGraph &graph)
      : cx_(cx),
        compileContext_(compileContext),
        graph_(graph),
        unsafe_(false),
        parSlice_(NULL),
        callTargets(cx)
    { }

    void clearUnsafe() { unsafe_ = false; }
    bool unsafe() { return unsafe_; }
    MDefinition *parSlice() {
        if (!parSlice_)
            parSlice_ = graph_.parSlice();
        return parSlice_;
    }

    bool convertToBailout(MBasicBlock *block, MInstruction *ins);

    // I am taking the policy of blacklisting everything that's not
    // obviously safe for now. We can loosen as we need.

    SAFE_OP(Constant)
    SAFE_OP(Parameter)
    SAFE_OP(Callee)
    SAFE_OP(TableSwitch)
    SAFE_OP(Goto)
    CUSTOM_OP(Test)
    CUSTOM_OP(Compare)
    SAFE_OP(Phi)
    SAFE_OP(Beta)
    UNSAFE_OP(OsrValue)
    UNSAFE_OP(OsrScopeChain)
    UNSAFE_OP(ReturnFromCtor)
    CUSTOM_OP(CheckOverRecursed)
    DROP_OP(RecompileCheck)
    UNSAFE_OP(DefVar)
    UNSAFE_OP(DefFun)
    UNSAFE_OP(CreateThis)
    UNSAFE_OP(CreateThisWithTemplate)
    UNSAFE_OP(CreateThisWithProto)
    SAFE_OP(PrepareCall)
    SAFE_OP(PassArg)
    CUSTOM_OP(Call)
    UNSAFE_OP(ApplyArgs)
    SAFE_OP(BitNot)
    UNSAFE_OP(TypeOf)
    SAFE_OP(ToId)
    SAFE_OP(BitAnd)
    SAFE_OP(BitOr)
    SAFE_OP(BitXor)
    SAFE_OP(Lsh)
    SAFE_OP(Rsh)
    SPECIALIZED_OP(Ursh, PERMIT_NUMERIC)
    SPECIALIZED_OP(MinMax, PERMIT_NUMERIC)
    SAFE_OP(Abs)
    SAFE_OP(Sqrt)
    SAFE_OP(MathFunction)
    SPECIALIZED_OP(Add, PERMIT_NUMERIC)
    SPECIALIZED_OP(Sub, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mul, PERMIT_NUMERIC)
    SPECIALIZED_OP(Div, PERMIT_NUMERIC)
    SPECIALIZED_OP(Mod, PERMIT_NUMERIC)
    UNSAFE_OP(Concat)
    UNSAFE_OP(CharCodeAt)
    UNSAFE_OP(FromCharCode)
    SAFE_OP(Return)
    CUSTOM_OP(Throw)
    SAFE_OP(Box)     // Boxing just creates a JSVal, doesn't alloc.
    SAFE_OP(Unbox)
    SAFE_OP(GuardObject)
    SAFE_OP(ToDouble)
    SAFE_OP(ToInt32)
    SAFE_OP(TruncateToInt32)
    UNSAFE_OP(ToString)
    SAFE_OP(NewSlots)
    CUSTOM_OP(NewArray)
    CUSTOM_OP(NewObject)
    CUSTOM_OP(NewCallObject)
    CUSTOM_OP(NewParallelArray)
    UNSAFE_OP(InitProp)
    SAFE_OP(Start)
    UNSAFE_OP(OsrEntry)
    SAFE_OP(Nop)
    UNSAFE_OP(RegExp)
    CUSTOM_OP(Lambda)
    UNSAFE_OP(ImplicitThis)
    SAFE_OP(Slots)
    SAFE_OP(Elements)
    SAFE_OP(ConstantElements)
    SAFE_OP(LoadSlot)
    WRITE_GUARDED_OP(StoreSlot, slots)
    SAFE_OP(FunctionEnvironment) // just a load of func env ptr
    SAFE_OP(TypeBarrier) // causes a bailout if the type is not found: a-ok with us
    SAFE_OP(MonitorTypes) // causes a bailout if the type is not found: a-ok with us
    UNSAFE_OP(GetPropertyCache)
    UNSAFE_OP(GetElementCache)
    UNSAFE_OP(BindNameCache)
    SAFE_OP(GuardShape)
    SAFE_OP(GuardClass)
    SAFE_OP(ArrayLength)
    SAFE_OP(TypedArrayLength)
    SAFE_OP(TypedArrayElements)
    SAFE_OP(InitializedLength)
    WRITE_GUARDED_OP(SetInitializedLength, elements)
    SAFE_OP(Not)
    SAFE_OP(BoundsCheck)
    SAFE_OP(BoundsCheckLower)
    SAFE_OP(LoadElement)
    SAFE_OP(LoadElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreElement, elements)
    WRITE_GUARDED_OP(StoreElementHole, elements)
    UNSAFE_OP(ArrayPopShift)
    UNSAFE_OP(ArrayPush)
    SAFE_OP(LoadTypedArrayElement)
    SAFE_OP(LoadTypedArrayElementHole)
    MAYBE_WRITE_GUARDED_OP(StoreTypedArrayElement, elements)
    UNSAFE_OP(ClampToUint8)
    SAFE_OP(LoadFixedSlot)
    WRITE_GUARDED_OP(StoreFixedSlot, object)
    UNSAFE_OP(CallGetProperty)
    UNSAFE_OP(GetNameCache)
    SAFE_OP(CallGetIntrinsicValue) // Bails in parallel mode
    UNSAFE_OP(CallsiteCloneCache)
    UNSAFE_OP(CallGetElement)
    UNSAFE_OP(CallSetElement)
    UNSAFE_OP(CallSetProperty)
    UNSAFE_OP(DeleteProperty)
    UNSAFE_OP(SetPropertyCache)
    UNSAFE_OP(IteratorStart)
    UNSAFE_OP(IteratorNext)
    UNSAFE_OP(IteratorMore)
    UNSAFE_OP(IteratorEnd)
    SAFE_OP(StringLength)
    UNSAFE_OP(ArgumentsLength)
    UNSAFE_OP(GetArgument)
    SAFE_OP(Floor)
    SAFE_OP(Round)
    UNSAFE_OP(InstanceOf)
    CUSTOM_OP(InterruptCheck)
    SAFE_OP(ParSlice)
    SAFE_OP(ParNew)
    SAFE_OP(ParNewDenseArray)
    SAFE_OP(ParNewCallObject)
    SAFE_OP(ParLambda)
    SAFE_OP(ParDump)
    SAFE_OP(ParBailout)
    UNSAFE_OP(ArrayConcat)
    UNSAFE_OP(GetDOMProperty)
    UNSAFE_OP(SetDOMProperty)
    UNSAFE_OP(NewStringObject)
    UNSAFE_OP(Random)
    UNSAFE_OP(Pow)
    UNSAFE_OP(PowHalf)
    UNSAFE_OP(RegExpTest)
    UNSAFE_OP(CallInstanceOf)
    UNSAFE_OP(FunctionBoundary)
    UNSAFE_OP(GuardString)
    UNSAFE_OP(NewDeclEnvObject)
    UNSAFE_OP(In)
    UNSAFE_OP(InArray)
    SAFE_OP(ParWriteGuard)
    SAFE_OP(ParCheckInterrupt)
    SAFE_OP(ParCheckOverRecursed)
    SAFE_OP(PolyInlineDispatch)

    // It looks like this could easily be made safe:
    UNSAFE_OP(ConvertElementsToDoubles)
};

bool
ParallelCompileContext::appendToWorklist(HandleFunction fun)
{
    JS_ASSERT(fun);

    if (!fun->isInterpreted())
        return true;

    RootedScript script(cx_, fun->nonLazyScript());

    // Skip if we're disabled.
    if (!script->canParallelIonCompile()) {
        Spew(SpewCompile, "Skipping %p:%s:%u, canParallelIonCompile() is false",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if we're compiling off thread.
    if (script->parallelIon == ION_COMPILING_SCRIPT) {
        Spew(SpewCompile, "Skipping %p:%s:%u, off-main-thread compilation in progress",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if the code is expected to result in a bailout.
    if (script->parallelIon && script->parallelIon->bailoutExpected()) {
        Spew(SpewCompile, "Skipping %p:%s:%u, bailout expected",
             fun.get(), script->filename, script->lineno);
        return true;
    }

    // Skip if we haven't warmed up to get some type info. We're betting
    // that the parallel kernel will be non-branchy for the most part, so
    // this threshold is usually very low (1).
    if (script->getUseCount() < js_IonOptions.usesBeforeCompileParallel) {
        Spew(SpewCompile, "Skipping %p:%s:%u, use count %u < %u",
             fun.get(), script->filename, script->lineno,
             script->getUseCount(), js_IonOptions.usesBeforeCompileParallel);
        return true;
    }

    for (uint32_t i = 0; i < worklist_.length(); i++) {
        if (worklist_[i]->toFunction() == fun)
            return true;
    }

    // Note that we add all possibly compilable functions to the worklist,
    // even if they're already compiled. This is so that we can return
    // Method_Compiled and not Method_Skipped if we have a worklist full of
    // already-compiled functions.
    return worklist_.append(fun);
}
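appendToWorklist() is only the filter; the driver that drains the worklist, compileTransitively(), is declared in ParallelArrayAnalysis.h (later in this patch) but defined in Ion.cpp, which is not part of this excerpt. A hedged sketch of the loop it presumably implements; this is one plausible shape, not the actual code:

    // Hedged sketch only (the real definition lives in Ion.cpp, not shown):
    // compile each function on the worklist; the analysis appends any newly
    // discovered call targets, so the list grows as we iterate.
    MethodStatus
    ParallelCompileContext::compileTransitively()
    {
        for (uint32_t i = 0; i < worklist_.length(); i++) {
            RootedFunction fun(cx_, worklist_[i]->toFunction());
            // ...build MIR for fun, run ParallelArrayAnalysis over it,
            // and abandon parallel compilation on hard failures...
        }
        return Method_Compiled;
    }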
bool
ParallelCompileContext::analyzeAndGrowWorklist(MIRGenerator *mir, MIRGraph &graph)
{
    // Walk the basic blocks in a DFS. When we encounter a block with an
    // unsafe instruction, we know that this block will bail out when
    // executed. Therefore, we replace the block.
    //
    // We don't need a worklist, though, because the graph is sorted
    // in RPO. Therefore, we just use the marked flags to tell us
    // when we visited some predecessor of the current block.
    ParallelArrayVisitor visitor(cx_, *this, graph);
    graph.entryBlock()->mark(); // Note: in par. exec., we never enter from OSR.
    uint32_t marked = 0;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (mir->shouldCancel("ParallelArrayAnalysis"))
            return false;

        if (block->isMarked()) {
            // Iterate through and transform the instructions. Stop
            // if we encounter an inherently unsafe operation, in
            // which case we will transform this block into a bailout
            // block.
            MInstruction *instr = NULL;
            for (MInstructionIterator ins(block->begin());
                 ins != block->end() && !visitor.unsafe();)
            {
                if (mir->shouldCancel("ParallelArrayAnalysis"))
                    return false;

                // We may be removing or replacing the current
                // instruction, so advance `ins` now. Remember the
                // last instr. we looked at for use later if it should
                // prove unsafe.
                instr = *ins++;

                if (!instr->accept(&visitor))
                    return false;
            }

            if (!visitor.unsafe()) {
                // Count the number of reachable blocks.
                marked++;

                // Block consists of only safe instructions. Visit its successors.
                for (uint32_t i = 0; i < block->numSuccessors(); i++)
                    block->getSuccessor(i)->mark();
            } else {
                // Block contains an unsafe instruction. That means that once
                // we enter this block, we are guaranteed to bail out.

                // If this is the entry block, then there is no point
                // in even trying to execute this function as it will
                // always bail out.
                if (*block == graph.entryBlock()) {
                    Spew(SpewCompile, "Entry block contains unsafe MIR");
                    return false;
                }

                // Otherwise, create a replacement that will.
                if (!visitor.convertToBailout(*block, instr))
                    return false;

                JS_ASSERT(!block->isMarked());
            }
        }
    }

    // Append newly discovered outgoing callgraph edges to the worklist.
    RootedFunction target(cx_);
    for (uint32_t i = 0; i < visitor.callTargets.length(); i++) {
        target = visitor.callTargets[i]->toFunction();
        appendToWorklist(target);
    }

    Spew(SpewCompile, "Safe");
    IonSpewPass("ParallelArrayAnalysis");

    UnreachableCodeElimination uce(mir, graph);
    if (!uce.removeUnmarkedBlocks(marked))
        return false;
    IonSpewPass("UCEAfterParallelArrayAnalysis");
    AssertExtendedGraphCoherency(graph);

    if (!removeResumePointOperands(mir, graph))
        return false;
    IonSpewPass("RemoveResumePointOperands");
    AssertExtendedGraphCoherency(graph);

    if (!EliminateDeadCode(mir, graph))
        return false;
    IonSpewPass("DCEAfterParallelArrayAnalysis");
    AssertExtendedGraphCoherency(graph);

    return true;
}

bool
ParallelCompileContext::removeResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    // In parallel exec mode, nothing is effectful; therefore we do
    // not need to reconstruct interpreter state and can simply
    // bail out by returning a special code. Ideally we'd either
    // remove the unused resume points or else never generate them in
    // the first place, but I encountered various assertions and
    // crashes attempting to do that, so for the time being I simply
    // replace their operands with undefined. This prevents them from
    // interfering with DCE and other optimizations. It is also *necessary*
    // to handle cases like this:
    //
    //     foo(a, b, c.bar())
    //
    // where `foo` was deemed to be an unsafe function to call. This
    // is because without neutering the ResumePoints, they would still
    // refer to the MPassArg nodes generated for the call to foo().
    // But the call to foo() is dead and has been removed, leading to
    // an inconsistent IR and assertions at codegen time.

    MConstant *udef = NULL;
    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        if (udef)
            replaceOperandsOnResumePoint(block->entryResumePoint(), udef);

        for (MInstructionIterator ins(block->begin()); ins != block->end(); ins++) {
            if (ins->isStart()) {
                JS_ASSERT(udef == NULL);
                udef = MConstant::New(UndefinedValue());
                block->insertAfter(*ins, udef);
            } else if (udef) {
                if (MResumePoint *resumePoint = ins->resumePoint())
                    replaceOperandsOnResumePoint(resumePoint, udef);
            }
        }
    }
    return true;
}

void
ParallelCompileContext::replaceOperandsOnResumePoint(MResumePoint *resumePoint,
                                                     MDefinition *withDef)
{
    for (size_t i = 0; i < resumePoint->numOperands(); i++)
        resumePoint->replaceOperand(i, withDef);
}

bool
ParallelArrayVisitor::visitTest(MTest *)
{
    return true;
}

bool
ParallelArrayVisitor::visitCompare(MCompare *compare)
{
    MCompare::CompareType type = compare->compareType();
    return type == MCompare::Compare_Int32 ||
           type == MCompare::Compare_Double ||
           type == MCompare::Compare_String;
}

bool
ParallelArrayVisitor::convertToBailout(MBasicBlock *block, MInstruction *ins)
{
    JS_ASSERT(unsafe());          // `block` must have contained unsafe items
    JS_ASSERT(block->isMarked()); // `block` must have been reachable to get here

    // Clear the unsafe flag for subsequent blocks.
    clearUnsafe();

    // This block is no longer reachable.
    block->unmark();

    // Determine the best PC to use for the bailouts we'll be creating.
    jsbytecode *pc = block->pc();
    if (!pc)
        pc = block->pc();

    // Create a bailout block for each predecessor. In principle, we
    // only need one bailout block--in fact, only one per graph! But I
    // found this approach easier to implement given the design of the
    // MIR Graph construction routines. Besides, most often `block`
    // has only one predecessor. Also, using multiple blocks helps to
    // keep the PC information more accurate (though replacing `block`
    // with exactly one bailout would be just as good).
    for (size_t i = 0; i < block->numPredecessors(); i++) {
        MBasicBlock *pred = block->getPredecessor(i);

        // We only care about incoming edges from reachable predecessors.
        if (!pred->isMarked())
            continue;

        // Create a bailout block to insert on this edge.
        MBasicBlock *bailBlock = MBasicBlock::NewParBailout(graph_, block->info(), pred, pc);
        if (!bailBlock)
            return false;

        // If `block` had phis, we are replacing it with `bailBlock`, which does not.
        if (pred->successorWithPhis() == block)
            pred->setSuccessorWithPhis(NULL, 0);

        // Redirect the predecessor to the bailout block.
        uint32_t succIdx = pred->getSuccessorIndex(block);
        pred->replaceSuccessor(succIdx, bailBlock);

        // Insert the bailout block after `block` in the execution
        // order. This should satisfy the RPO requirements and
        // moreover ensures that we will visit this block in our outer
        // walk, thus allowing us to keep the count of marked blocks
        // accurate.
        graph_.insertBlockAfter(block, bailBlock);
        bailBlock->mark();
    }

    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Memory allocation
//
// Simple memory allocation opcodes---those which ultimately compile
// down to a (possibly inlined) invocation of NewGCThing()---are
// replaced with MParNew, which is supplied with the thread context.
// These allocations will take place using per-helper-thread arenas.

bool
ParallelArrayVisitor::visitNewParallelArray(MNewParallelArray *ins)
{
    MParNew *parNew = new MParNew(parSlice(), ins->templateObject());
    replace(ins, parNew);
    return true;
}

bool
ParallelArrayVisitor::visitNewCallObject(MNewCallObject *ins)
{
    // Fast path: replace with a ParNewCallObject op.
    MParNewCallObject *parNewCallObjectInstruction =
        MParNewCallObject::New(parSlice(), ins);
    replace(ins, parNewCallObjectInstruction);
    return true;
}

bool
ParallelArrayVisitor::visitLambda(MLambda *ins)
{
    if (ins->fun()->hasSingletonType() ||
        types::UseNewTypeForClone(ins->fun()))
    {
        // Slow path: bail on parallel execution.
        return markUnsafe();
    }

    // Fast path: replace with a ParLambda op.
    MParLambda *parLambdaInstruction = MParLambda::New(parSlice(), ins);
    replace(ins, parLambdaInstruction);
    return true;
}

bool
ParallelArrayVisitor::visitNewObject(MNewObject *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithParNew(newInstruction,
                             newInstruction->templateObject());
}

bool
ParallelArrayVisitor::visitNewArray(MNewArray *newInstruction)
{
    if (newInstruction->shouldUseVM()) {
        SpewMIR(newInstruction, "should use VM");
        return markUnsafe();
    }

    return replaceWithParNew(newInstruction,
                             newInstruction->templateObject());
}

bool
ParallelArrayVisitor::replaceWithParNew(MInstruction *newInstruction,
                                        JSObject *templateObject)
{
    MParNew *parNewInstruction = new MParNew(parSlice(), templateObject);
    replace(newInstruction, parNewInstruction);
    return true;
}

bool
ParallelArrayVisitor::replace(MInstruction *oldInstruction,
                              MInstruction *replacementInstruction)
{
    MBasicBlock *block = oldInstruction->block();
    block->insertBefore(oldInstruction, replacementInstruction);
    oldInstruction->replaceAllUsesWith(replacementInstruction);
    block->discard(oldInstruction);
    return true;
}

/////////////////////////////////////////////////////////////////////////////
// Write Guards
//
// We only want to permit writes to locally guarded objects.
// Furthermore, we want to avoid PICs and other non-thread-safe things
// (though perhaps we should support PICs at some point). If we
// cannot determine the origin of an object, we can insert a write
// guard which will check whether the object was allocated from the
// per-thread-arena or not.

bool
ParallelArrayVisitor::insertWriteGuard(MInstruction *writeInstruction,
                                       MDefinition *valueBeingWritten)
{
    // Many of the write operations do not take the JS object
    // but rather something derived from it, such as the elements.
    // So we need to identify the JS object:
    MDefinition *object;
    switch (valueBeingWritten->type()) {
      case MIRType_Object:
        object = valueBeingWritten;
        break;

      case MIRType_Slots:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Slots:
            object = valueBeingWritten->toSlots()->object();
            break;

          case MDefinition::Op_NewSlots:
            // Values produced by new slots will ALWAYS be
            // thread-local.
            return true;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      case MIRType_Elements:
        switch (valueBeingWritten->op()) {
          case MDefinition::Op_Elements:
            object = valueBeingWritten->toElements()->object();
            break;

          case MDefinition::Op_TypedArrayElements:
            object = valueBeingWritten->toTypedArrayElements()->object();
            break;

          default:
            SpewMIR(writeInstruction, "cannot insert write guard for %s",
                    valueBeingWritten->opName());
            return markUnsafe();
        }
        break;

      default:
        SpewMIR(writeInstruction, "cannot insert write guard for MIR Type %d",
                valueBeingWritten->type());
        return markUnsafe();
    }

    if (object->isUnbox())
        object = object->toUnbox()->input();

    switch (object->op()) {
      case MDefinition::Op_ParNew:
        // MParNew will always be creating something thread-local; omit the guard.
        SpewMIR(writeInstruction, "write to ParNew prop does not require guard");
        return true;
      default:
        break;
    }

    MBasicBlock *block = writeInstruction->block();
    MParWriteGuard *writeGuard = MParWriteGuard::New(parSlice(), object);
    block->insertBefore(writeInstruction, writeGuard);
    writeGuard->adjustInputs(writeGuard);
    return true;
}
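The MParWriteGuard inserted above pairs with the runtime predicate ion::ParWriteGuard(), defined in ParallelFunctions.cpp later in this patch: the guard passes only if the object lives in the current worker's per-thread arenas, and otherwise triggers a parallel bailout. In roughly lowered form (a hedged sketch; the actual LIR/codegen glue is not shown in this diff):

    // Hedged sketch of the check the guard performs at runtime, based on
    // ion::ParWriteGuard() later in this patch:
    if (!slice->allocator->arenas.containsArena(slice->runtime(),
                                                object->arenaHeader()))
    {
        // The object was allocated by another thread (or before the
        // parallel section), so writing to it could race: bail out.
    }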
/////////////////////////////////////////////////////////////////////////////
// Calls
//
// We only support calls to interpreted functions that have already been
// Ion compiled. If a function has no IonScript, we bail out. The compilation
// is done during warmup of the parallel kernel; see js::RunScript.

static bool
GetPossibleCallees(JSContext *cx, HandleScript script, jsbytecode *pc,
                   types::StackTypeSet *calleeTypes, AutoObjectVector &targets)
{
    JS_ASSERT(calleeTypes);

    if (calleeTypes->baseFlags() != 0)
        return true;

    unsigned objCount = calleeTypes->getObjectCount();

    if (objCount == 0)
        return true;

    RootedFunction fun(cx);
    for (unsigned i = 0; i < objCount; i++) {
        RawObject obj = calleeTypes->getSingleObject(i);
        if (obj && obj->isFunction()) {
            fun = obj->toFunction();
        } else {
            types::TypeObject *typeObj = calleeTypes->getTypeObject(i);
            if (!typeObj)
                continue;
            fun = typeObj->interpretedFunction;
            if (!fun)
                continue;
        }

        if (fun->isCloneAtCallsite()) {
            fun = CloneFunctionAtCallsite(cx, fun, script, pc);
            if (!fun)
                return false;
        }

        if (!targets.append(fun))
            return false;
    }

    return true;
}

bool
ParallelArrayVisitor::visitCall(MCall *ins)
{
    JS_ASSERT(ins->getSingleTarget() || ins->calleeTypes());

    // DOM? Scary.
    if (ins->isDOMFunction()) {
        SpewMIR(ins, "call to dom function");
        return markUnsafe();
    }

    RootedFunction target(cx_, ins->getSingleTarget());
    if (target) {
        // Native? Scary.
        if (target->isNative()) {
            SpewMIR(ins, "call to native function");
            return markUnsafe();
        }
        return callTargets.append(target);
    }

    if (ins->isConstructing()) {
        SpewMIR(ins, "call to unknown constructor");
        return markUnsafe();
    }

    RootedScript script(cx_, ins->block()->info().script());
    return GetPossibleCallees(cx_, script, ins->resumePoint()->pc(),
                              ins->calleeTypes(), callTargets);
}

/////////////////////////////////////////////////////////////////////////////
// Stack limit, interrupts
//
// In sequential Ion code, the stack limit is stored in the JSRuntime.
// We store it in the thread context. We therefore need a separate
// instruction to access it, one parameterized by the thread context.
// Similar considerations apply to checking for interrupts.

bool
ParallelArrayVisitor::visitCheckOverRecursed(MCheckOverRecursed *ins)
{
    MParCheckOverRecursed *replacement = new MParCheckOverRecursed(parSlice());
    return replace(ins, replacement);
}

bool
ParallelArrayVisitor::visitInterruptCheck(MInterruptCheck *ins)
{
    MParCheckInterrupt *replacement = new MParCheckInterrupt(parSlice());
    return replace(ins, replacement);
}

/////////////////////////////////////////////////////////////////////////////
// Specialized ops
//
// Some ops, like +, can be specialized to ints/doubles. Anything
// else is terrifying.
//
// TODO---Eventually, we should probably permit arbitrary + but bail
// if the operands are not both integers/floats.

bool
ParallelArrayVisitor::visitSpecializedInstruction(MInstruction *ins, MIRType spec,
                                                  uint32_t flags)
{
    uint32_t flag = 1 << spec;
    if (flags & flag)
        return true;

    SpewMIR(ins, "specialized to unacceptable type %d", spec);
    return markUnsafe();
}
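A worked reading of the bit test above: SPECIALIZED_OP(Add, PERMIT_NUMERIC) routes MAdd here with flags = PERMIT(MIRType_Int32) | PERMIT(MIRType_Double), so an add specialized to int32 or double passes, while anything else (for example a string-specialized add) marks the block unsafe:

    // Worked example of the PERMIT flag check (assertions illustrative):
    uint32_t flags = PERMIT_NUMERIC;
    JS_ASSERT(flags & PERMIT(MIRType_Int32));     // int32 add: safe
    JS_ASSERT(flags & PERMIT(MIRType_Double));    // double add: safe
    JS_ASSERT(!(flags & PERMIT(MIRType_String))); // string add: markUnsafe()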
/////////////////////////////////////////////////////////////////////////////
// Throw

bool
ParallelArrayVisitor::visitThrow(MThrow *thr)
{
    MBasicBlock *block = thr->block();
    JS_ASSERT(block->lastIns() == thr);
    block->discardLastIns();
    MParBailout *bailout = new MParBailout();
    if (!bailout)
        return false;
    block->end(bailout);
    return true;
}

}
}
js/src/ion/ParallelArrayAnalysis.h (new file)
@ -0,0 +1,61 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jsion_parallel_array_analysis_h__
#define jsion_parallel_array_analysis_h__

#include "MIR.h"
#include "CompileInfo.h"

namespace js {

class StackFrame;

namespace ion {

class MIRGraph;
class AutoDestroyAllocator;

class ParallelCompileContext
{
  private:
    JSContext *cx_;

    // Compilation is transitive from some set of root(s).
    AutoObjectVector worklist_;

    // Is a function compilable for parallel execution?
    bool analyzeAndGrowWorklist(MIRGenerator *mir, MIRGraph &graph);

    bool removeResumePointOperands(MIRGenerator *mir, MIRGraph &graph);
    void replaceOperandsOnResumePoint(MResumePoint *resumePoint, MDefinition *withDef);

  public:
    ParallelCompileContext(JSContext *cx)
      : cx_(cx),
        worklist_(cx)
    { }

    // Should we append a function to the worklist?
    bool appendToWorklist(HandleFunction fun);

    ExecutionMode executionMode() {
        return ParallelExecution;
    }

    // Defined in Ion.cpp, so that they can make use of static fns defined there.
    MethodStatus checkScriptSize(JSContext *cx, UnrootedScript script);
    MethodStatus compileTransitively();
    AbortReason compile(IonBuilder *builder, MIRGraph *graph,
                        ScopedJSDeletePtr<LifoAlloc> &autoDelete);
};

} // namespace ion
} // namespace js

#endif // jsion_parallel_array_analysis_h__
js/src/ion/ParallelFunctions.cpp (new file)
@ -0,0 +1,225 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "jsinterp.h"
#include "ParallelFunctions.h"
#include "IonSpewer.h"

#include "jsinterpinlines.h"
#include "jscompartmentinlines.h"

#include "vm/ParallelDo.h"

using namespace js;
using namespace ion;

using parallel::Spew;
using parallel::SpewBailouts;
using parallel::SpewBailoutIR;

// Load the current thread context.
ForkJoinSlice *
ion::ParForkJoinSlice()
{
    return ForkJoinSlice::Current();
}

// ParNewGCThing() is called in place of NewGCThing() when executing
// parallel code.  It uses the ArenaLists for the current thread and
// allocates from there.
JSObject *
ion::ParNewGCThing(gc::AllocKind allocKind)
{
    ForkJoinSlice *slice = ForkJoinSlice::Current();
    uint32_t thingSize = (uint32_t)gc::Arena::thingSize(allocKind);
    void *t = slice->allocator->parallelNewGCThing(allocKind, thingSize);
    return static_cast<JSObject *>(t);
}

// Check that the object was created by the current thread
// (and hence is writable).
bool
ion::ParWriteGuard(ForkJoinSlice *slice, JSObject *object)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);
    return slice->allocator->arenas.containsArena(slice->runtime(),
                                                  object->arenaHeader());
}

#ifdef DEBUG
static void
printTrace(const char *prefix, struct IonLIRTraceData *cached)
{
    fprintf(stderr, "%s / Block %3u / LIR %3u / Mode %u / LIR %s\n",
            prefix,
            cached->bblock, cached->lir, cached->execModeInt, cached->lirOpName);
}

struct IonLIRTraceData seqTraceData;
#endif

void
ion::TraceLIR(uint32_t bblock, uint32_t lir, uint32_t execModeInt,
              const char *lirOpName, const char *mirOpName,
              JSScript *script, jsbytecode *pc)
{
#ifdef DEBUG
    static enum { NotSet, All, Bailouts } traceMode;

    // If you set IONFLAGS=trace, this function will be invoked before every LIR.
    //
    // You can either modify it to do whatever you like, or use gdb scripting.
    // For example:
    //
    //   break TraceLIR
    //   commands
    //   continue
    //   exit

    if (traceMode == NotSet) {
        // Racy, but that's ok.
        const char *env = getenv("IONFLAGS");
        if (strstr(env, "trace-all"))
            traceMode = All;
        else
            traceMode = Bailouts;
    }

    IonLIRTraceData *cached;
    if (execModeInt == 0)
        cached = &seqTraceData;
    else
        cached = &ForkJoinSlice::Current()->traceData;

    if (bblock == 0xDEADBEEF) {
        if (execModeInt == 0)
            printTrace("BAILOUT", cached);
        else
            SpewBailoutIR(cached->bblock, cached->lir,
                          cached->lirOpName, cached->mirOpName,
                          cached->script, cached->pc);
    }

    cached->bblock = bblock;
    cached->lir = lir;
    cached->execModeInt = execModeInt;
    cached->lirOpName = lirOpName;
    cached->mirOpName = mirOpName;
    cached->script = script;
    cached->pc = pc;

    if (traceMode == All)
        printTrace("Exec", cached);
#endif
}

bool
ion::ParCheckOverRecursed(ForkJoinSlice *slice)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);

    // When an interrupt is triggered, we currently overwrite the
    // stack limit with a sentinel value that brings us here.
    // Therefore, we must check whether this is really a stack overrun
    // and, if not, check whether an interrupt is needed.
    if (slice->isMainThread()) {
        int stackDummy_;
        if (!JS_CHECK_STACK_SIZE(js::GetNativeStackLimit(slice->runtime()), &stackDummy_))
            return false;
        return ParCheckInterrupt(slice);
    } else {
        // FIXME---we don't overwrite the stack limit for worker
        // threads, which means that technically they can recurse
        // forever---or at least a long time---without ever checking
        // the interrupt.  It also means that if we get here on a
        // worker thread, this is a real stack overrun!
        return false;
    }
}
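
// Illustrative note (assumed shape, not part of this patch): the
// sentinel trick described above means an interrupt can be delivered
// to the main thread by clobbering its stack limit, e.g.:
//
//   rt->mainThread.ionStackLimit = uintptr_t(-1);   // hypothetical field name
//
// so the next JS_CHECK_STACK_SIZE test fails and control reaches
// ParCheckOverRecursed, which then distinguishes a true overrun from
// a pending interrupt via ParCheckInterrupt.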

bool
ion::ParCheckInterrupt(ForkJoinSlice *slice)
{
    JS_ASSERT(ForkJoinSlice::Current() == slice);
    bool result = slice->check();
    if (!result)
        return false;
    return true;
}

void
ion::ParDumpValue(Value *v)
{
#ifdef DEBUG
    js_DumpValue(*v);
#endif
}

JSObject *
ion::ParPush(ParPushArgs *args)
{
    // It is awkward to have the MIR pass the current slice in, so
    // just fetch it from TLS.  Extending the array is kind of the
    // slow path anyhow as it reallocates the elements vector.
    ForkJoinSlice *slice = js::ForkJoinSlice::Current();
    JSObject::EnsureDenseResult res =
        args->object->parExtendDenseElements(slice->allocator,
                                             &args->value, 1);
    if (res != JSObject::ED_OK)
        return NULL;
    return args->object;
}

JSObject *
ion::ParExtendArray(ForkJoinSlice *slice, JSObject *array, uint32_t length)
{
    JSObject::EnsureDenseResult res =
        array->parExtendDenseElements(slice->allocator, NULL, length);
    if (res != JSObject::ED_OK)
        return NULL;
    return array;
}

ParCompareResult
ion::ParCompareStrings(JSString *str1, JSString *str2)
{
    // NYI---the rope case
    if (!str1->isLinear())
        return ParCompareUnknown;
    if (!str2->isLinear())
        return ParCompareUnknown;

    JSLinearString &linearStr1 = str1->asLinear();
    JSLinearString &linearStr2 = str2->asLinear();
    if (EqualStrings(&linearStr1, &linearStr2))
        return ParCompareEq;
    return ParCompareNe;
}
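
// A hypothetical caller, sketched to show how the three-valued result
// is meant to be consumed (the surrounding code below is illustrative,
// not part of this patch): ParCompareUnknown is not an answer but a
// request to bail out of the parallel section.
//
//   bool eq;
//   switch (ion::ParCompareStrings(str1, str2)) {
//     case ion::ParCompareEq:      eq = true;  break;
//     case ion::ParCompareNe:      eq = false; break;
//     case ion::ParCompareUnknown: return false;   // bail to sequential
//   }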

void
ion::ParallelAbort(JSScript *script)
{
    JS_ASSERT(ForkJoinSlice::InParallelSection());

    ForkJoinSlice *slice = ForkJoinSlice::Current();

    Spew(SpewBailouts, "Parallel abort in %p:%s:%d", script, script->filename, script->lineno);

    if (!slice->abortedScript)
        slice->abortedScript = script;
}

void
ion::ParCallToUncompiledScript(JSFunction *func)
{
    JS_ASSERT(ForkJoinSlice::InParallelSection());

#ifdef DEBUG
    RawScript script = func->nonLazyScript();
    Spew(SpewBailouts, "Call to uncompiled script: %p:%s:%d", script, script->filename, script->lineno);
#endif
}

63 js/src/ion/ParallelFunctions.h (new file)
@ -0,0 +1,63 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=4 sw=4 et tw=99:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#ifndef jsion_parallel_functions_h__
#define jsion_parallel_functions_h__

#include "vm/ThreadPool.h"
#include "vm/ForkJoin.h"
#include "gc/Heap.h"

namespace js {
namespace ion {

ForkJoinSlice *ParForkJoinSlice();
JSObject *ParNewGCThing(gc::AllocKind allocKind);
bool ParWriteGuard(ForkJoinSlice *context, JSObject *object);
void ParBailout(uint32_t id);
bool ParCheckOverRecursed(ForkJoinSlice *slice);
bool ParCheckInterrupt(ForkJoinSlice *context);

void ParDumpValue(Value *v);

// We pass the arguments to ParPush in a structure because, in code
// gen, it is convenient to store them on the stack to avoid
// constraining the reg alloc for the slow path.
struct ParPushArgs {
    JSObject *object;
    Value value;
};

// Extends the given object with the given value (like `Array.push`).
// Returns NULL on failure or else `args->object`, which is convenient
// during code generation.
JSObject *ParPush(ParPushArgs *args);

// Extends the given array with `length` new holes.  Returns NULL on
// failure or else `array`, which is convenient during code
// generation.
JSObject *ParExtendArray(ForkJoinSlice *slice, JSObject *array, uint32_t length);

enum ParCompareResult {
    ParCompareNe = false,
    ParCompareEq = true,
    ParCompareUnknown = 2
};
ParCompareResult ParCompareStrings(JSString *str1, JSString *str2);

void ParallelAbort(JSScript *script);

void TraceLIR(uint32_t bblock, uint32_t lir, uint32_t execModeInt,
              const char *lirOpName, const char *mirOpName,
              JSScript *script, jsbytecode *pc);

void ParCallToUncompiledScript(JSFunction *func);

} // namespace ion
} // namespace js

#endif // jsion_parallel_functions_h__
@ -290,32 +290,19 @@ TypeInferenceOracle::inArrayIsPacked(UnrootedScript script, jsbytecode *pc)
bool
TypeInferenceOracle::elementReadIsDenseNative(RawScript script, jsbytecode *pc)
{
    // Check whether the object is a dense array and index is int32 or double.
    StackTypeSet *obj = script->analysis()->poppedTypes(pc, 1);
    StackTypeSet *id = script->analysis()->poppedTypes(pc, 0);

    JSValueType idType = id->getKnownTypeTag();
    if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
        return false;

    Class *clasp = obj->getKnownClass();
    return clasp && clasp->isNative();
    return elementAccessIsDenseNative(script->analysis()->poppedTypes(pc, 1),
                                      script->analysis()->poppedTypes(pc, 0));
}

bool
TypeInferenceOracle::elementReadIsTypedArray(HandleScript script, jsbytecode *pc, int *arrayType)
{
    // Check whether the object is a typed array and index is int32 or double.
    StackTypeSet *obj = script->analysis()->poppedTypes(pc, 1);
    StackTypeSet *id = DropUnrooted(script)->analysis()->poppedTypes(pc, 0);

    JSValueType idType = id->getKnownTypeTag();
    if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
        return false;

    *arrayType = obj->getTypedArrayType();
    if (*arrayType == TypedArray::TYPE_MAX)
    if (!elementAccessIsTypedArray(script->analysis()->poppedTypes(pc, 1),
                                   script->analysis()->poppedTypes(pc, 0),
                                   arrayType))
    {
        return false;
    }

    JS_ASSERT(*arrayType >= 0 && *arrayType < TypedArray::TYPE_MAX);

@ -404,10 +391,13 @@ TypeInferenceOracle::elementReadGeneric(UnrootedScript script, jsbytecode *pc, b
bool
TypeInferenceOracle::elementWriteIsDenseNative(HandleScript script, jsbytecode *pc)
{
    // Check whether the object is a dense array and index is int32 or double.
    StackTypeSet *obj = script->analysis()->poppedTypes(pc, 2);
    StackTypeSet *id = script->analysis()->poppedTypes(pc, 1);
    return elementAccessIsDenseNative(script->analysis()->poppedTypes(pc, 2),
                                      script->analysis()->poppedTypes(pc, 1));
}

bool
TypeInferenceOracle::elementAccessIsDenseNative(StackTypeSet *obj, StackTypeSet *id)
{
    JSValueType idType = id->getKnownTypeTag();
    if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
        return false;

@ -422,9 +412,15 @@ TypeInferenceOracle::elementWriteIsDenseNative(HandleScript script, jsbytecode *
bool
TypeInferenceOracle::elementWriteIsTypedArray(RawScript script, jsbytecode *pc, int *arrayType)
{
    // Check whether the object is a dense array and index is int32 or double.
    StackTypeSet *obj = script->analysis()->poppedTypes(pc, 2);
    StackTypeSet *id = script->analysis()->poppedTypes(pc, 1);
    return elementAccessIsTypedArray(script->analysis()->poppedTypes(pc, 2),
                                     script->analysis()->poppedTypes(pc, 1),
                                     arrayType);
}

bool
TypeInferenceOracle::elementAccessIsTypedArray(StackTypeSet *obj, StackTypeSet *id, int *arrayType)
{
    // Check whether the object is a typed array and index is int32 or double.

    JSValueType idType = id->getKnownTypeTag();
    if (idType != JSVAL_TYPE_INT32 && idType != JSVAL_TYPE_DOUBLE)
@ -13,6 +13,7 @@

namespace js {
namespace ion {

enum LazyArgumentsType {
    MaybeArguments = 0,
    DefinitelyArguments,
@ -121,6 +122,12 @@ class TypeOracle
    virtual bool elementWriteIsPacked(UnrootedScript script, jsbytecode *pc) {
        return false;
    }
    virtual bool elementAccessIsDenseNative(types::StackTypeSet *obj, types::StackTypeSet *id) {
        return false;
    }
    virtual bool elementAccessIsTypedArray(types::StackTypeSet *obj, types::StackTypeSet *id, int *arrayType) {
        return false;
    }
    virtual bool arrayResultShouldHaveDoubleConversion(UnrootedScript script, jsbytecode *pc) {
        return false;
    }
@ -251,7 +258,9 @@ class TypeInferenceOracle : public TypeOracle
    bool elementReadIsPacked(UnrootedScript script, jsbytecode *pc);
    void elementReadGeneric(UnrootedScript script, jsbytecode *pc, bool *cacheable, bool *monitorResult);
    bool elementWriteIsDenseNative(HandleScript script, jsbytecode *pc);
    bool elementAccessIsDenseNative(types::StackTypeSet *obj, types::StackTypeSet *id);
    bool elementWriteIsTypedArray(RawScript script, jsbytecode *pc, int *arrayType);
    bool elementAccessIsTypedArray(types::StackTypeSet *obj, types::StackTypeSet *id, int *arrayType);
    bool elementWriteNeedsDoubleConversion(UnrootedScript script, jsbytecode *pc);
    bool elementWriteHasExtraIndexedProperty(UnrootedScript script, jsbytecode *pc);
    bool elementWriteIsPacked(UnrootedScript script, jsbytecode *pc);
@ -360,6 +369,8 @@ StringFromMIRType(MIRType type)
      return "Elements";
    case MIRType_StackFrame:
      return "StackFrame";
    case MIRType_ForkJoinSlice:
      return "ForkJoinSlice";
    default:
      JS_NOT_REACHED("Unknown MIRType.");
      return "";
@ -35,17 +35,36 @@ UnreachableCodeElimination::analyze()
    // Pass 1: Identify unreachable blocks (if any).
    if (!prunePointlessBranchesAndMarkReachableBlocks())
        return false;

    return removeUnmarkedBlocksAndCleanup();
}

bool
UnreachableCodeElimination::removeUnmarkedBlocks(size_t marked)
{
    marked_ = marked;
    return removeUnmarkedBlocksAndCleanup();
}

bool
UnreachableCodeElimination::removeUnmarkedBlocksAndCleanup()
{
    // Everything is reachable, no work required.
    JS_ASSERT(marked_ <= graph_.numBlocks());
    if (marked_ == graph_.numBlocks()) {
        // Everything is reachable.
        graph_.unmarkBlocks();
        return true;
    }

    // Pass 2: Remove unmarked blocks.
    // Pass 2: Remove unmarked blocks (see analyze() above).
    if (!removeUnmarkedBlocksAndClearDominators())
        return false;
    graph_.unmarkBlocks();

    AssertGraphCoherency(graph_);

    IonSpewPass("UCEMidPoint");

    // Pass 3: Recompute dominators and tweak phis.
    BuildDominatorTree(graph_);
    if (redundantPhis_ && !EliminatePhis(mir_, graph_, ConservativeObservability))

@ -26,6 +26,7 @@ class UnreachableCodeElimination
    bool prunePointlessBranchesAndMarkReachableBlocks();
    void removeUsesFromUnmarkedBlocks(MDefinition *instr);
    bool removeUnmarkedBlocksAndClearDominators();
    bool removeUnmarkedBlocksAndCleanup();

  public:
    UnreachableCodeElimination(MIRGenerator *mir, MIRGraph &graph)
@ -35,7 +36,13 @@ class UnreachableCodeElimination
        redundantPhis_(false)
    {}

    // Walks the graph and discovers what is reachable. Removes everything else.
    bool analyze();

    // Removes any blocks that are not marked.  Assumes that these blocks are
    // not reachable.  The parameter |marked| should be the number of blocks
    // that are marked.
    bool removeUnmarkedBlocks(size_t marked);
};
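
// A sketch of the intended second entry point (the caller below is
// assumed, not part of this diff): a pass that performs its own
// reachability marking can hand the marked count to UCE and reuse
// just the cleanup half.
//
//   UnreachableCodeElimination uce(mir, graph);
//   if (!uce.removeUnmarkedBlocks(numMarkedBlocks))
//       return false;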

} /* namespace ion */

@ -13,6 +13,8 @@

#include "vm/StringObject-inl.h"

#include "builtin/ParallelArray.h"

#include "jsboolinlines.h"
#include "jsinterpinlines.h"

@ -1486,6 +1486,12 @@ MacroAssemblerARMCompat::add32(Imm32 imm, Register dest)
    ma_add(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::xor32(Imm32 imm, Register dest)
{
    ma_eor(imm, dest, SetCond);
}

void
MacroAssemblerARMCompat::add32(Imm32 imm, const Address &dest)
{

@ -713,6 +713,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
        ma_ldr(Operand(address.base, address.offset), ScratchRegister);
        branchTest32(cond, ScratchRegister, imm, label);
    }
    void branchTestBool(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        branchTest32(cond, lhs, rhs, label);
    }
    void branchTestPtr(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        branchTest32(cond, lhs, rhs, label);
    }
@ -929,6 +932,7 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
    void add32(Imm32 imm, Register dest);
    void add32(Imm32 imm, const Address &dest);
    void sub32(Imm32 imm, Register dest);
    void xor32(Imm32 imm, Register dest);

    void and32(Imm32 imm, Register dest);
    void and32(Imm32 imm, const Address &dest);
@ -14,6 +14,8 @@
#include "CodeGenerator-shared-inl.h"
#include "ion/IonSpewer.h"
#include "ion/IonMacroAssembler.h"
#include "ion/ParallelFunctions.h"
#include "builtin/ParallelArray.h"

using namespace js;
using namespace js::ion;
@ -25,6 +27,7 @@ namespace ion {

CodeGeneratorShared::CodeGeneratorShared(MIRGenerator *gen, LIRGraph *graph)
  : oolIns(NULL),
    oolParallelAbort_(NULL),
    masm(&sps_),
    gen(gen),
    graph(*graph),
@ -501,5 +504,89 @@ CodeGeneratorShared::markArgumentSlots(LSafepoint *safepoint)
    return true;
}

bool
CodeGeneratorShared::ensureOutOfLineParallelAbort(Label **result)
{
    if (!oolParallelAbort_) {
        oolParallelAbort_ = new OutOfLineParallelAbort();
        if (!addOutOfLineCode(oolParallelAbort_))
            return false;
    }

    *result = oolParallelAbort_->entry();
    return true;
}

bool
OutOfLineParallelAbort::generate(CodeGeneratorShared *codegen)
{
    codegen->callTraceLIR(0xDEADBEEF, NULL, "ParallelBailout");
    return codegen->visitOutOfLineParallelAbort(this);
}

bool
CodeGeneratorShared::callTraceLIR(uint32_t blockIndex, LInstruction *lir,
                                  const char *bailoutName)
{
    JS_ASSERT_IF(!lir, bailoutName);

    uint32_t emi = (uint32_t) gen->info().executionMode();

    if (!IonSpewEnabled(IonSpew_Trace))
        return true;
    masm.PushRegsInMask(RegisterSet::All());

    RegisterSet regSet(RegisterSet::All());

    Register blockIndexReg = regSet.takeGeneral();
    Register lirIndexReg = regSet.takeGeneral();
    Register emiReg = regSet.takeGeneral();
    Register lirOpNameReg = regSet.takeGeneral();
    Register mirOpNameReg = regSet.takeGeneral();
    Register scriptReg = regSet.takeGeneral();
    Register pcReg = regSet.takeGeneral();

    // This first move is here so that when you scan the disassembly,
    // you can easily pick out where each instruction begins.  The
    // next few items indicate to you the Basic Block / LIR.
    masm.move32(Imm32(0xDEADBEEF), blockIndexReg);

    if (lir) {
        masm.move32(Imm32(blockIndex), blockIndexReg);
        masm.move32(Imm32(lir->id()), lirIndexReg);
        masm.move32(Imm32(emi), emiReg);
        masm.movePtr(ImmWord(lir->opName()), lirOpNameReg);
        if (MDefinition *mir = lir->mirRaw()) {
            masm.movePtr(ImmWord(mir->opName()), mirOpNameReg);
            masm.movePtr(ImmWord((void *)mir->block()->info().script()), scriptReg);
            masm.movePtr(ImmWord(mir->trackedPc()), pcReg);
        } else {
            masm.movePtr(ImmWord((void *)NULL), mirOpNameReg);
            masm.movePtr(ImmWord((void *)NULL), scriptReg);
            masm.movePtr(ImmWord((void *)NULL), pcReg);
        }
    } else {
        masm.move32(Imm32(0xDEADBEEF), blockIndexReg);
        masm.move32(Imm32(0xDEADBEEF), lirIndexReg);
        masm.move32(Imm32(emi), emiReg);
        masm.movePtr(ImmWord(bailoutName), lirOpNameReg);
        masm.movePtr(ImmWord(bailoutName), mirOpNameReg);
        masm.movePtr(ImmWord((void *)NULL), scriptReg);
        masm.movePtr(ImmWord((void *)NULL), pcReg);
    }

    masm.setupUnalignedABICall(7, CallTempReg4);
    masm.passABIArg(blockIndexReg);
    masm.passABIArg(lirIndexReg);
    masm.passABIArg(emiReg);
    masm.passABIArg(lirOpNameReg);
    masm.passABIArg(mirOpNameReg);
    masm.passABIArg(scriptReg);
    masm.passABIArg(pcReg);
    masm.callWithABI(JS_FUNC_TO_DATA_PTR(void *, TraceLIR));
    masm.PopRegsInMask(RegisterSet::All());
    return true;
}

} // namespace ion
} // namespace js

@ -25,6 +25,7 @@ namespace ion {
class OutOfLineCode;
class CodeGenerator;
class MacroAssembler;
class OutOfLineParallelAbort;

template <class ArgSeq, class StoreOutputTo>
class OutOfLineCallVM;
@ -34,6 +35,7 @@ class CodeGeneratorShared : public LInstructionVisitor
{
    js::Vector<OutOfLineCode *, 0, SystemAllocPolicy> outOfLineCode_;
    OutOfLineCode *oolIns;
    OutOfLineParallelAbort *oolParallelAbort_;

  public:
    MacroAssembler masm;
@ -293,6 +295,15 @@ class CodeGeneratorShared : public LInstructionVisitor
    bool visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo> *ool);

    bool visitOutOfLineTruncateSlow(OutOfLineTruncateSlow *ool);

  public:
    // When compiling parallel code, all bailouts funnel to this same
    // point and hence abort execution altogether:
    virtual bool visitOutOfLineParallelAbort(OutOfLineParallelAbort *ool) = 0;
    bool callTraceLIR(uint32_t blockIndex, LInstruction *lir, const char *bailoutName = NULL);

  protected:
    bool ensureOutOfLineParallelAbort(Label **result);
};

// Wrapper around Label, on the heap, to avoid a bogus assert with OOM.
@ -541,6 +552,17 @@ CodeGeneratorShared::visitOutOfLineCallVM(OutOfLineCallVM<ArgSeq, StoreOutputTo>
    return true;
}

// An out-of-line parallel abort thunk.
class OutOfLineParallelAbort : public OutOfLineCode
{
  public:
    OutOfLineParallelAbort()
    { }

    bool generate(CodeGeneratorShared *codegen);
};
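
// How a backend is expected to use this thunk (shape inferred from the
// x86 bailout() change later in this same patch; the exact masm
// sequence below is illustrative):
//
//   Label *ool;
//   if (!ensureOutOfLineParallelAbort(&ool))
//       return false;
//   masm.j(cond, ool);   // every parallel-mode bailout jumps here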

} // namespace ion
} // namespace js

@ -15,6 +15,7 @@
#include "ion/IonFrames.h"
#include "ion/MoveEmitter.h"
#include "ion/IonCompartment.h"
#include "ion/ParallelFunctions.h"

using namespace js;
using namespace js::ion;
@ -290,6 +291,20 @@ class BailoutLabel {
template <typename T> bool
CodeGeneratorX86Shared::bailout(const T &binder, LSnapshot *snapshot)
{
    CompileInfo &info = snapshot->mir()->block()->info();
    switch (info.executionMode()) {
      case ParallelExecution: {
        // in parallel mode, make no attempt to recover, just signal an error.
        Label *ool;
        if (!ensureOutOfLineParallelAbort(&ool))
            return false;
        binder(masm, ool);
        return true;
      }

      case SequentialExecution: break;
    }

    if (!encode(snapshot))
        return false;
@ -25,7 +25,7 @@ class MDefinition;
class MInstruction;
class LOsiPoint;

class LIRGeneratorShared : public MInstructionVisitor
class LIRGeneratorShared : public MInstructionVisitorWithDefaults
{
  protected:
    MIRGenerator *gen;

@ -111,6 +111,9 @@ class MacroAssemblerX86Shared : public Assembler
    void sub32(Imm32 imm, Register dest) {
        subl(imm, dest);
    }
    void xor32(Imm32 imm, Register dest) {
        xorl(imm, dest);
    }

    void branch32(Condition cond, const Address &lhs, const Register &rhs, Label *label) {
        cmpl(Operand(lhs), rhs);
@ -140,6 +143,10 @@ class MacroAssemblerX86Shared : public Assembler
        testl(Operand(address), imm);
        j(cond, label);
    }
    void branchTestBool(Condition cond, const Register &lhs, const Register &rhs, Label *label) {
        testb(lhs, rhs);
        j(cond, label);
    }

    // The following functions are exposed for use in platform-shared code.
    template <typename T>

@ -16,6 +16,7 @@
#include "vm/Shape.h"

#include "jsscriptinlines.h"
#include "ion/ExecutionModeInlines.h"

using namespace js;
using namespace js::ion;
@ -141,12 +142,14 @@ CodeGeneratorX86::visitUnbox(LUnbox *unbox)
void
CodeGeneratorX86::linkAbsoluteLabels()
{
    ExecutionMode executionMode = gen->info().executionMode();
    UnrootedScript script = gen->info().script();
    IonCode *method = script->ion->method();
    IonScript *ionScript = GetIonScript(script, executionMode);
    IonCode *method = ionScript->method();

    for (size_t i = 0; i < deferredDoubles_.length(); i++) {
        DeferredDouble *d = deferredDoubles_[i];
        const Value &v = script->ion->getConstant(d->index());
        const Value &v = ionScript->getConstant(d->index());
        MacroAssembler::Bind(method, d->label(), &v);
    }
}
@ -882,6 +882,7 @@ JSRuntime::JSRuntime(JSUseHelperThreads useHelperThreads)
    ionPcScriptCache(NULL),
    threadPool(this),
    ctypesActivityCallback(NULL),
    parallelWarmup(0),
    ionReturnOverride_(MagicValue(JS_ARG_POISON)),
    useHelperThreads_(useHelperThreads),
    requestedHelperThreadCount(-1),

@ -1155,6 +1155,10 @@ struct JSRuntime : js::RuntimeFriendFields,

    js::CTypesActivityCallback ctypesActivityCallback;

    // Non-zero if this is a parallel warmup execution.  See
    // js::parallel::Do() for more information.
    uint32_t parallelWarmup;

  private:
    // In certain cases, we want to optimize certain opcodes to typed instructions,
    // to avoid carrying an extra register to feed into an unbox. Unfortunately,
@ -2261,6 +2265,16 @@ class ContextAllocPolicy
    void reportAllocOverflow() const { js_ReportAllocationOverflow(cx); }
};

JSBool intrinsic_ThrowError(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_NewDenseArray(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_UnsafeSetElement(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_ForceSequential(JSContext *cx, unsigned argc, Value *vp);
JSBool intrinsic_NewParallelArray(JSContext *cx, unsigned argc, Value *vp);

#ifdef DEBUG
JSBool intrinsic_Dump(JSContext *cx, unsigned argc, Value *vp);
#endif

} /* namespace js */

#ifdef _MSC_VER

@ -80,6 +80,7 @@
#include "vm/Shape.h"
#include "vm/String.h"
#include "vm/ForkJoin.h"
#include "ion/IonCode.h"
#ifdef JS_ION
# include "ion/IonMacroAssembler.h"
@ -1196,10 +1197,10 @@ void *
ArenaLists::parallelAllocate(Zone *zone, AllocKind thingKind, size_t thingSize)
{
    /*
     * During parallel Rivertrail sections, no GC is permitted. If no
     * existing arena can satisfy the allocation, then a new one is
     * allocated. If that fails, then we return NULL which will cause
     * the parallel section to abort.
     * During parallel Rivertrail sections, if no existing arena can
     * satisfy the allocation, then a new one is allocated. If that
     * fails, then we return NULL which will cause the parallel
     * section to abort.
     */

    void *t = allocateFromFreeList(thingKind, thingSize);

@ -282,6 +282,11 @@ struct ArenaLists {
        }
    }

    static uintptr_t getFreeListOffset(AllocKind thingKind) {
        uintptr_t offset = offsetof(ArenaLists, freeLists);
        return offset + thingKind * sizeof(FreeSpan);
    }
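
    // Hypothetical JIT-side consumer of this offset (not part of this
    // patch; the exact load is assumed): knowing the byte offset lets
    // generated code read the per-thread free list directly instead of
    // calling into the VM.
    //
    //   size_t off = gc::ArenaLists::getFreeListOffset(gc::FINALIZE_OBJECT8);
    //   masm.loadPtr(Address(arenaListsReg, off), result);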

    const FreeSpan *getFreeList(AllocKind thingKind) const {
        return &freeLists[thingKind];
    }

@ -329,6 +329,14 @@ struct PerThreadDataFriendFields

    PerThreadDataFriendFields();

#if defined(JSGC_ROOT_ANALYSIS) || defined(JSGC_USE_EXACT_ROOTING)
    /*
     * Stack allocated GC roots for stack GC heap pointers, which may be
     * overwritten if moved during a GC.
     */
    Rooted<void*> *thingGCRooters[THING_ROOT_LIMIT];
#endif

#if defined(DEBUG) && defined(JS_GC_ZEAL) && defined(JSGC_ROOT_ANALYSIS) && !defined(JS_THREADSAFE)
    /*
     * Stack allocated list of stack locations which hold non-relocatable

@ -56,19 +56,15 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
    // Asynchronous Flags
    //
    // These can be read without the lock (hence the |volatile| declaration).
    // All fields should be *written with the lock*, however.

    // A thread has bailed and others should follow suit.  Set and read
    // asynchronously.  After setting abort, workers will acquire the lock,
    // decrement uncompleted, and then notify if uncompleted has reached
    // blocked.
    // Set to true when parallel execution should abort.
    volatile bool abort_;

    // Set to true when a worker bails for a fatal reason.
    volatile bool fatal_;

    // A thread has requested a rendezvous.  Only *written* with the lock (in
    // |initiateRendezvous()| and |endRendezvous()|) but may be *read* without
    // the lock.
    // The main thread has requested a rendezvous.
    volatile bool rendezvous_;

    // Invoked only from the main thread:
@ -121,15 +117,12 @@ class js::ForkJoinShared : public TaskExecutor, public Monitor
    // Invoked during processing by worker threads to "check in".
    bool check(ForkJoinSlice &threadCx);

    // See comment on |ForkJoinSlice::setFatal()| in forkjoin.h
    bool setFatal();

    // Requests a GC, either full or specific to a compartment.
    // Requests a GC, either full or specific to a zone.
    void requestGC(gcreason::Reason reason);
    void requestZoneGC(JS::Zone *zone, gcreason::Reason reason);

    // Requests that computation abort.
    void setAbortFlag();
    void setAbortFlag(bool fatal);

    JSRuntime *runtime() { return cx_->runtime; }
};
@ -310,7 +303,7 @@ ForkJoinShared::executeFromWorker(uint32_t workerId, uintptr_t stackLimit)
void
ForkJoinShared::executeFromMainThread()
{
    executePortion(&cx_->runtime->mainThread, numSlices_ - 1);
    executePortion(&cx_->mainThread(), numSlices_ - 1);
}

void
@ -322,22 +315,14 @@ ForkJoinShared::executePortion(PerThreadData *perThread,
    AutoSetForkJoinSlice autoContext(&slice);

    if (!op_.parallel(slice))
        setAbortFlag();
}

bool
ForkJoinShared::setFatal()
{
    // Might as well set the abort flag to true, as it will make propagation
    // faster.
    setAbortFlag();
    fatal_ = true;
    return false;
        setAbortFlag(false);
}

bool
ForkJoinShared::check(ForkJoinSlice &slice)
{
    JS_ASSERT(cx_->runtime->interrupt);

    if (abort_)
        return false;

@ -354,8 +339,8 @@ ForkJoinShared::check(ForkJoinSlice &slice)
            // service the interrupt, then let them start back up again.
            // AutoRendezvous autoRendezvous(slice);
            // if (!js_HandleExecutionInterrupt(cx_))
            //     return setFatal();
            setAbortFlag();
            //     return setAbortFlag(true);
            setAbortFlag(false);
            return false;
        }
    } else if (rendezvous_) {
@ -399,6 +384,7 @@ ForkJoinShared::initiateRendezvous(ForkJoinSlice &slice)

    JS_ASSERT(slice.isMainThread());
    JS_ASSERT(!rendezvous_ && blocked_ == 0);
    JS_ASSERT(cx_->runtime->interrupt);

    AutoLockMonitor lock(*this);

@ -440,16 +426,21 @@ ForkJoinShared::endRendezvous(ForkJoinSlice &slice)
    AutoLockMonitor lock(*this);
    rendezvous_ = false;
    blocked_ = 0;
    rendezvousIndex_ += 1;
    rendezvousIndex_++;

    // Signal other threads that rendezvous is over.
    PR_NotifyAllCondVar(rendezvousEnd_);
}

void
ForkJoinShared::setAbortFlag()
ForkJoinShared::setAbortFlag(bool fatal)
{
    AutoLockMonitor lock(*this);

    abort_ = true;
    fatal_ = fatal_ || fatal;

    cx_->runtime->triggerOperationCallback();
}

void
@ -522,17 +513,10 @@ bool
ForkJoinSlice::check()
{
#ifdef JS_THREADSAFE
    return shared->check(*this);
    if (runtime()->interrupt)
        return shared->check(*this);
    else
        return true;
#else
    return false;
#endif
}

bool
ForkJoinSlice::setFatal()
{
#ifdef JS_THREADSAFE
    return shared->setFatal();
#else
    return false;
#endif
@ -571,7 +555,7 @@ ForkJoinSlice::requestZoneGC(JS::Zone *zone, gcreason::Reason reason)
void
ForkJoinSlice::triggerAbort()
{
    shared->setAbortFlag();
    shared->setAbortFlag(false);

    // set iontracklimit to -1 so that on next entry to a function,
    // the thread will trigger the overrecursedcheck.  If the thread
@ -9,6 +9,7 @@
#define ForkJoin_h__

#include "vm/ThreadPool.h"
#include "jsgc.h"

// ForkJoin
//
@ -125,8 +126,7 @@ class AutoRendezvous;
class AutoSetForkJoinSlice;

#ifdef DEBUG
struct IonTraceData
{
struct IonLIRTraceData {
    uint32_t bblock;
    uint32_t lir;
    uint32_t execModeInt;
@ -157,9 +157,9 @@ struct ForkJoinSlice
    // If we took a parallel bailout, the script that bailed out is stored here.
    JSScript *abortedScript;

    // Records the last instruction to execute on this thread.
#ifdef DEBUG
    IonTraceData traceData;
    // Records the last instr. to execute on this thread.
    IonLIRTraceData traceData;
#endif

    ForkJoinSlice(PerThreadData *perThreadData, uint32_t sliceId, uint32_t numSlices,
@ -168,15 +168,6 @@ struct ForkJoinSlice
    // True if this is the main thread, false if it is one of the parallel workers.
    bool isMainThread();

    // Generally speaking, if a thread returns false, that is interpreted as a
    // "bailout"---meaning, a recoverable error.  If however you call this
    // function before returning false, then the error will be interpreted as
    // *fatal*.  This doesn't strike me as the most elegant solution here but
    // I don't know what'd be better.
    //
    // For convenience, *always* returns false.
    bool setFatal();

    // When the code would normally trigger a GC, we don't trigger it
    // immediately but instead record that request here.  This will
    // cause |ExecuteForkJoinOp()| to invoke |TriggerGC()| or
@ -188,11 +179,19 @@ struct ForkJoinSlice
    void requestGC(gcreason::Reason reason);
    void requestZoneGC(JS::Zone *zone, gcreason::Reason reason);

    // During the parallel phase, this method should be invoked periodically,
    // for example on every backedge, similar to the interrupt check.  If it
    // returns false, then the parallel phase has been aborted and so you
    // should bail out.  The function may also rendezvous to perform GC or do
    // other similar things.
    // During the parallel phase, this method should be invoked
    // periodically, for example on every backedge, similar to the
    // interrupt check.  If it returns false, then the parallel phase
    // has been aborted and so you should bail out.  The function may
    // also rendezvous to perform GC or do other similar things.
    //
    // This function is guaranteed to have no effect if
    // runtime()->interrupt is zero.  Ion-generated code takes
    // advantage of this by inlining the check on that flag before
    // actually calling this function.  If this function ends up
    // getting called a lot from outside ion code, we can refactor
    // it into an inlined version with this check that calls a slower
    // version.
    bool check();
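
    // A minimal usage sketch (assumed caller, not part of this patch):
    // a parallel kernel polls check() on every backedge and treats a
    // false return as "the parallel phase is over, unwind now".
    //
    //   for (uint32_t i = start; i < end; i++) {
    //       if (!slice.check())
    //           return false;   // aborted; bail out of the section
    //       // ... process element i ...
    //   }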

    // Be wary, the runtime is shared between all threads!

359 js/src/vm/ParallelDo.cpp (new file)
@ -0,0 +1,359 @@
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
 * vim: set ts=8 sw=4 et tw=99 ft=cpp:
 *
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "vm/ParallelDo.h"

#include "jsapi.h"
#include "jsobj.h"
#include "jsarray.h"

#include "vm/String.h"
#include "vm/GlobalObject.h"
#include "vm/ThreadPool.h"
#include "vm/ForkJoin.h"

#include "jsinterpinlines.h"
#include "jsobjinlines.h"

#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
#include "ion/Ion.h"
#include "ion/MIR.h"
#include "ion/MIRGraph.h"
#include "ion/IonCompartment.h"

#include "prprf.h"
#endif

using namespace js;
using namespace js::parallel;
using namespace js::ion;

//
// Debug spew
//

#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)

static const char *
ExecutionStatusToString(ExecutionStatus status)
{
    switch (status) {
      case ExecutionFatal:
        return "fatal";
      case ExecutionSequential:
        return "sequential";
      case ExecutionParallel:
        return "parallel";
    }
    return "(unknown status)";
}

static const char *
MethodStatusToString(MethodStatus status)
{
    switch (status) {
      case Method_Error:
        return "error";
      case Method_CantCompile:
        return "can't compile";
      case Method_Skipped:
        return "skipped";
      case Method_Compiled:
        return "compiled";
    }
    return "(unknown status)";
}

static const size_t BufferSize = 4096;

class ParallelSpewer
{
    uint32_t depth;
    bool colorable;
    bool active[NumSpewChannels];

    const char *color(const char *colorCode) {
        if (!colorable)
            return "";
        return colorCode;
    }

    const char *reset() { return color("\x1b[0m"); }
    const char *bold() { return color("\x1b[1m"); }
    const char *red() { return color("\x1b[31m"); }
    const char *green() { return color("\x1b[32m"); }
    const char *yellow() { return color("\x1b[33m"); }
    const char *cyan() { return color("\x1b[36m"); }
    const char *sliceColor(uint32_t id) {
        static const char *colors[] = {
            "\x1b[7m\x1b[31m", "\x1b[7m\x1b[32m", "\x1b[7m\x1b[33m",
            "\x1b[7m\x1b[34m", "\x1b[7m\x1b[35m", "\x1b[7m\x1b[36m",
            "\x1b[7m\x1b[37m",
            "\x1b[31m", "\x1b[32m", "\x1b[33m",
            "\x1b[34m", "\x1b[35m", "\x1b[36m",
            "\x1b[37m"
        };
        return color(colors[id % 14]);
    }

  public:
    ParallelSpewer()
      : depth(0), colorable(false)
    {
        const char *env;

        PodArrayZero(active);
        env = getenv("PAFLAGS");
        if (env) {
            if (strstr(env, "ops"))
                active[SpewOps] = true;
            if (strstr(env, "compile"))
                active[SpewCompile] = true;
            if (strstr(env, "bailouts"))
                active[SpewBailouts] = true;
            if (strstr(env, "full")) {
                for (uint32_t i = 0; i < NumSpewChannels; i++)
                    active[i] = true;
            }
        }

        env = getenv("TERM");
        if (env) {
            if (strcmp(env, "xterm-color") == 0 || strcmp(env, "xterm-256color") == 0)
                colorable = true;
        }
    }
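
    // Channel selection is substring-based (see the strstr calls
    // above), so for example PAFLAGS="ops,bailouts" enables the first
    // and third channels, and PAFLAGS=full enables everything.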

    bool isActive(SpewChannel channel) {
        return active[channel];
    }

    void spewVA(SpewChannel channel, const char *fmt, va_list ap) {
        if (!active[channel])
            return;

        // Print into a buffer first so we use one fprintf, which usually
        // doesn't get interrupted when running with multiple threads.
        char buf[BufferSize];

        if (ForkJoinSlice *slice = ForkJoinSlice::Current()) {
            PR_snprintf(buf, BufferSize, "[%sParallel:%u%s] ",
                        sliceColor(slice->sliceId), slice->sliceId, reset());
        } else {
            PR_snprintf(buf, BufferSize, "[Parallel:M] ");
        }

        for (uint32_t i = 0; i < depth; i++)
            PR_snprintf(buf + strlen(buf), BufferSize - strlen(buf), " ");

        PR_vsnprintf(buf + strlen(buf), BufferSize - strlen(buf), fmt, ap);
        PR_snprintf(buf + strlen(buf), BufferSize - strlen(buf), "\n");

        fprintf(stderr, "%s", buf);
    }

    void spew(SpewChannel channel, const char *fmt, ...) {
        va_list ap;
        va_start(ap, fmt);
        spewVA(channel, fmt, ap);
        va_end(ap);
    }

    void beginOp(JSContext *cx, const char *name) {
        if (!active[SpewOps])
            return;

        if (cx) {
            jsbytecode *pc;
            JSScript *script = cx->stack.currentScript(&pc);
            if (script && pc) {
                NonBuiltinScriptFrameIter iter(cx);
                if (iter.done()) {
                    spew(SpewOps, "%sBEGIN %s%s (%s:%u)", bold(), name, reset(),
                         script->filename, PCToLineNumber(script, pc));
                } else {
                    spew(SpewOps, "%sBEGIN %s%s (%s:%u -> %s:%u)", bold(), name, reset(),
                         iter.script()->filename, PCToLineNumber(iter.script(), iter.pc()),
                         script->filename, PCToLineNumber(script, pc));
                }
            } else {
                spew(SpewOps, "%sBEGIN %s%s", bold(), name, reset());
            }
        } else {
            spew(SpewOps, "%sBEGIN %s%s", bold(), name, reset());
        }

        depth++;
    }

    void endOp(ExecutionStatus status) {
        if (!active[SpewOps])
            return;

        JS_ASSERT(depth > 0);
        depth--;

        const char *statusColor;
        switch (status) {
          case ExecutionFatal:
            statusColor = red();
            break;
          case ExecutionSequential:
            statusColor = yellow();
            break;
          case ExecutionParallel:
            statusColor = green();
            break;
          default:
            statusColor = reset();
            break;
        }

        spew(SpewOps, "%sEND %s%s%s", bold(),
             statusColor, ExecutionStatusToString(status), reset());
    }

    void bailout(uint32_t count) {
        if (!active[SpewOps])
            return;

        spew(SpewOps, "%s%sBAILOUT %d%s", bold(), yellow(), count, reset());
    }

    void beginCompile(HandleFunction fun) {
        if (!active[SpewCompile])
            return;

        spew(SpewCompile, "COMPILE %p:%s:%u",
             fun.get(), fun->nonLazyScript()->filename, fun->nonLazyScript()->lineno);
        depth++;
    }

    void endCompile(MethodStatus status) {
        if (!active[SpewCompile])
            return;

        JS_ASSERT(depth > 0);
        depth--;

        const char *statusColor;
        switch (status) {
          case Method_Error:
          case Method_CantCompile:
            statusColor = red();
            break;
          case Method_Skipped:
            statusColor = yellow();
            break;
          case Method_Compiled:
            statusColor = green();
            break;
          default:
            statusColor = reset();
            break;
        }

        spew(SpewCompile, "END %s%s%s", statusColor, MethodStatusToString(status), reset());
    }

    void spewMIR(MDefinition *mir, const char *fmt, va_list ap) {
        if (!active[SpewCompile])
            return;

        char buf[BufferSize];
        PR_vsnprintf(buf, BufferSize, fmt, ap);

        JSScript *script = mir->block()->info().script();
        spew(SpewCompile, "%s%s%s: %s (%s:%u)", cyan(), mir->opName(), reset(), buf,
             script->filename, PCToLineNumber(script, mir->trackedPc()));
    }

    void spewBailoutIR(uint32_t bblockId, uint32_t lirId,
                       const char *lir, const char *mir, JSScript *script, jsbytecode *pc) {
        if (!active[SpewBailouts])
            return;

        // If we didn't bail from a LIR/MIR but from a propagated parallel
        // bailout, don't bother printing anything since we've printed it
        // elsewhere.
        if (mir && script) {
            spew(SpewBailouts, "%sBailout%s: %s / %s%s%s (block %d lir %d) (%s:%u)", yellow(), reset(),
                 lir, cyan(), mir, reset(),
                 bblockId, lirId,
                 script->filename, PCToLineNumber(script, pc));
        }
    }
};

// Singleton instance of the spewer.
static ParallelSpewer spewer;

bool
parallel::SpewEnabled(SpewChannel channel)
{
    return spewer.isActive(channel);
}

void
parallel::Spew(SpewChannel channel, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    spewer.spewVA(channel, fmt, ap);
    va_end(ap);
}

void
parallel::SpewBeginOp(JSContext *cx, const char *name)
{
    spewer.beginOp(cx, name);
}

ExecutionStatus
parallel::SpewEndOp(ExecutionStatus status)
{
    spewer.endOp(status);
    return status;
}

void
parallel::SpewBailout(uint32_t count)
{
    spewer.bailout(count);
}

void
parallel::SpewBeginCompile(HandleFunction fun)
{
    spewer.beginCompile(fun);
}

MethodStatus
parallel::SpewEndCompile(MethodStatus status)
{
    spewer.endCompile(status);
    return status;
}

void
parallel::SpewMIR(MDefinition *mir, const char *fmt, ...)
{
    va_list ap;
    va_start(ap, fmt);
    spewer.spewMIR(mir, fmt, ap);
    va_end(ap);
}

void
parallel::SpewBailoutIR(uint32_t bblockId, uint32_t lirId,
                        const char *lir, const char *mir,
                        JSScript *script, jsbytecode *pc)
{
    spewer.spewBailoutIR(bblockId, lirId, lir, mir, script, pc);
}

#endif // DEBUG && JS_THREADSAFE && JS_ION
74
js/src/vm/ParallelDo.h
Normal file
74
js/src/vm/ParallelDo.h
Normal file
@ -0,0 +1,74 @@
|
||||
/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*-
|
||||
* vim: set ts=8 sw=4 et tw=99 ft=cpp:
|
||||
*
|
||||
* This Source Code Form is subject to the terms of the Mozilla Public
|
||||
* License, v. 2.0. If a copy of the MPL was not distributed with this
|
||||
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
|
||||
|
||||
#ifndef ParallelDo_h__
|
||||
#define ParallelDo_h__
|
||||
|
||||
#include "jsapi.h"
|
||||
#include "jscntxt.h"
|
||||
#include "jsobj.h"
|
||||
#include "ion/Ion.h"
|
||||
|
||||
namespace js {
|
||||
namespace parallel {
|
||||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// Debug Spew
|
||||
|
||||
enum ExecutionStatus {
|
||||
// Parallel or seq execution terminated in a fatal way, operation failed
|
||||
ExecutionFatal,
|
||||
|
||||
// Parallel exec failed and so we fell back to sequential
|
||||
ExecutionSequential,
|
||||
|
||||
// Parallel exec was successful after some number of bailouts
|
||||
ExecutionParallel
|
||||
};
|
||||
|
||||
enum SpewChannel {
|
||||
SpewOps,
|
||||
SpewCompile,
|
||||
SpewBailouts,
|
||||
NumSpewChannels
|
||||
};
|
||||
|
||||
#if defined(DEBUG) && defined(JS_THREADSAFE) && defined(JS_ION)
|
||||
|
||||
bool SpewEnabled(SpewChannel channel);
|
||||
void Spew(SpewChannel channel, const char *fmt, ...);
|
||||
void SpewBeginOp(JSContext *cx, const char *name);
|
||||
void SpewBailout(uint32_t count);
|
||||
ExecutionStatus SpewEndOp(ExecutionStatus status);
|
||||
void SpewBeginCompile(HandleFunction fun);
|
||||
ion::MethodStatus SpewEndCompile(ion::MethodStatus status);
|
||||
void SpewMIR(ion::MDefinition *mir, const char *fmt, ...);
|
||||
void SpewBailoutIR(uint32_t bblockId, uint32_t lirId,
|
||||
const char *lir, const char *mir, JSScript *script, jsbytecode *pc);
|
||||
|
||||
#else
|
||||
|
||||
static inline bool SpewEnabled(SpewChannel channel) { return false; }
|
||||
static inline void Spew(SpewChannel channel, const char *fmt, ...) { }
|
||||
static inline void SpewBeginOp(JSContext *cx, const char *name) { }
|
||||
static inline void SpewBailout(uint32_t count) {}
|
||||
static inline ExecutionStatus SpewEndOp(ExecutionStatus status) { return status; }
|
||||
static inline void SpewBeginCompile(HandleFunction fun) { }
|
||||
#ifdef JS_ION
|
||||
static inline ion::MethodStatus SpewEndCompile(ion::MethodStatus status) { return status; }
|
||||
static inline void SpewMIR(ion::MDefinition *mir, const char *fmt, ...) { }
|
||||
#endif
|
||||
static inline void SpewBailoutIR(uint32_t bblockId, uint32_t lirId,
|
||||
const char *lir, const char *mir,
|
||||
JSScript *script, jsbytecode *pc) { }
|
||||
|
||||
#endif // DEBUG && JS_THREADSAFE && JS_ION
|
||||
|
||||
} // namespace parallel
|
||||
} // namespace js
|
||||
|
||||
#endif
|
@ -13,6 +13,11 @@

#include "gc/Marking.h"

#include "vm/ForkJoin.h"
#include "vm/ThreadPool.h"

#include "builtin/ParallelArray.h"

#include "jsfuninlines.h"
#include "jstypedarrayinlines.h"

@ -72,8 +77,8 @@ intrinsic_IsCallable(JSContext *cx, unsigned argc, Value *vp)
    return true;
}

static JSBool
intrinsic_ThrowError(JSContext *cx, unsigned argc, Value *vp)
JSBool
js::intrinsic_ThrowError(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    JS_ASSERT(args.length() >= 1);
@ -129,6 +134,17 @@ intrinsic_AssertionFailed(JSContext *cx, unsigned argc, Value *vp)
    return false;
}

static JSBool
intrinsic_MakeConstructible(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    JS_ASSERT(args.length() >= 1);
    JS_ASSERT(args[0].isObject());
    JS_ASSERT(args[0].toObject().isFunction());
    args[0].toObject().toFunction()->setIsSelfHostedConstructor();
    return true;
}

/*
 * Used to decompile values in the nearest non-builtin stack frame, falling
 * back to decompiling in the current frame. Helpful for printing higher-order
@ -154,14 +170,108 @@ intrinsic_DecompileArg(JSContext *cx, unsigned argc, Value *vp)
    return true;
}

static JSBool
intrinsic_MakeConstructible(JSContext *cx, unsigned argc, Value *vp)
#ifdef DEBUG
JSBool
js::intrinsic_Dump(JSContext *cx, unsigned argc, Value *vp)
{
    CallArgs args = CallArgsFromVp(argc, vp);
    JS_ASSERT(args.length() >= 1);
    JS_ASSERT(args[0].isObject());
    JS_ASSERT(args[0].toObject().isFunction());
    args[0].toObject().toFunction()->setIsSelfHostedConstructor();
    RootedValue val(cx, args[0]);
    js_DumpValue(val);
    fprintf(stderr, "\n");
    args.rval().setUndefined();
    return true;
}
#endif

JSBool
js::intrinsic_NewDenseArray(JSContext *cx, unsigned argc, Value *vp)
{
    // Usage: %NewDenseArray(length)
    CallArgs args = CallArgsFromVp(argc, vp);

    // Check that the length is an int32.
    if (!args[0].isInt32()) {
        JS_ReportError(cx, "Expected int32 as first argument");
        return false;
    }
    uint32_t length = args[0].toInt32();

    // Make a new buffer and initialize it up to length.
    RootedObject buffer(cx, NewDenseAllocatedArray(cx, length));
    if (!buffer)
        return false;

    types::TypeObject *newtype = types::GetTypeCallerInitObject(cx, JSProto_Array);
    if (!newtype)
        return false;
    buffer->setType(newtype);

    JSObject::EnsureDenseResult edr = buffer->ensureDenseElements(cx, length, 0);
    switch (edr) {
      case JSObject::ED_OK:
        args.rval().setObject(*buffer);
        return true;

      case JSObject::ED_SPARSE: // shouldn't happen!
        JS_ASSERT(!"%EnsureDenseArrayElements() would yield sparse array");
        JS_ReportError(cx, "%EnsureDenseArrayElements() would yield sparse array");
        break;

      case JSObject::ED_FAILED:
        break;
    }
    return false;
}
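
// From self-hosted JS these intrinsics are then callable by name; an
// illustrative (assumed) usage, mirroring the %-notation of the usage
// comments in this file:
//
//   var buffer = NewDenseArray(len);      // dense, initialized length len
//   UnsafeSetElement(buffer, 0, value);   // unchecked fast-path store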

JSBool
js::intrinsic_UnsafeSetElement(JSContext *cx, unsigned argc, Value *vp)
{
    // Usage: %UnsafeSetElement(arr0, idx0, elem0,
    //                          ...,
    //                          arrN, idxN, elemN)
    //
    // For each set of |(arr, idx, elem)| arguments that are passed,
    // performs the assignment |arr[idx] = elem|. |arr| must be either
    // a dense array or a typed array.
    //
    // If |arr| is a dense array, the index must be an int32 less than the
    // initialized length of |arr|. Use |%EnsureDenseArrayElements| to
    // ensure that the initialized length is long enough.
    //
    // If |arr| is a typed array, the index must be an int32 less than the
    // length of |arr|.
    CallArgs args = CallArgsFromVp(argc, vp);

    if ((args.length() % 3) != 0) {
        JS_ReportError(cx, "Incorrect number of arguments, not divisible by 3");
        return false;
    }

    for (uint32_t base = 0; base < args.length(); base += 3) {
        uint32_t arri = base;
        uint32_t idxi = base + 1;
        uint32_t elemi = base + 2;

        JS_ASSERT(args[arri].isObject());
        JS_ASSERT(args[arri].toObject().isNative() ||
                  args[arri].toObject().isTypedArray());
        JS_ASSERT(args[idxi].isInt32());

        RootedObject arrobj(cx, &args[arri].toObject());
        uint32_t idx = args[idxi].toInt32();

        if (arrobj->isNative()) {
            JS_ASSERT(idx < arrobj->getDenseInitializedLength());
            JSObject::setDenseElementWithType(cx, arrobj, idx, args[elemi]);
        } else {
            JS_ASSERT(idx < TypedArray::length(arrobj));
            RootedValue tmp(cx, args[elemi]);
            // XXX: Always non-strict.
            JSObject::setElement(cx, arrobj, arrobj, idx, &tmp, false);
        }
    }

    args.rval().setUndefined();
    return true;
}

@ -187,14 +297,22 @@ intrinsic_RuntimeDefaultLocale(JSContext *cx, unsigned argc, Value *vp)
}

JSFunctionSpec intrinsic_functions[] = {
    JS_FN("ToObject",             intrinsic_ToObject,             1,0),
    JS_FN("ToInteger",            intrinsic_ToInteger,            1,0),
    JS_FN("IsCallable",           intrinsic_IsCallable,           1,0),
    JS_FN("ThrowError",           intrinsic_ThrowError,           4,0),
    JS_FN("AssertionFailed",      intrinsic_AssertionFailed,      1,0),
    JS_FN("MakeConstructible",    intrinsic_MakeConstructible,    1,0),
    JS_FN("DecompileArg",         intrinsic_DecompileArg,         2,0),
    JS_FN("RuntimeDefaultLocale", intrinsic_RuntimeDefaultLocale, 0,0),

    JS_FN("NewDenseArray",        intrinsic_NewDenseArray,        1,0),
    JS_FN("UnsafeSetElement",     intrinsic_UnsafeSetElement,     3,0),

#ifdef DEBUG
    JS_FN("Dump",                 intrinsic_Dump,                 1,0),
#endif

    JS_FS_END
};