Bug 1890513: Directly invoke variadic native functions. r=jandem

CodeGenerator.cpp:
Split `emitCallNative` into two methods, so we can reuse it for `emitApplyNative`.
Also add comments describing how `NativeExitFrameLayout` is constructed on the
stack, and add an assertion that the native C++ function returned an object when
constructing. `emitCallNative` uses `setupAlignedABICall`, so we now need to
align the Value arguments; this happens through the new
`emitAlignStackForApplyNative` method.
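
For reference, the frame those comments describe looks roughly like this. This
is a sketch based on the field names used in the hunks below; the authoritative
definition is `NativeExitFrameLayout` in js/src/jit/JitFrames.h:

class NativeExitFrameLayout {
 protected:
  ExitFooterFrame footer_;  // written by enterFakeExitFrameForNative
  ExitFrameLayout exit_;    // written by buildFakeExitFrame
  uintptr_t argc_;          // pushed explicitly before the frame is built

  // vp[0]: the callee Value, which doubles as the result slot. It is split
  // into two 32-bit halves so the C++ compiler cannot pad between fields.
  uint32_t loCalleeResult_;
  uint32_t hiCalleeResult_;

 public:
  static size_t Size() { return sizeof(NativeExitFrameLayout); }
  size_t argc() const { return argc_; }
  JS::Value* vp() { return reinterpret_cast<JS::Value*>(&loCalleeResult_); }
};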

LIR-shared.h, Lowering.cpp:
The callee no longer needs to be stored, which frees up one register, so we can
allocate an additional temp register. This matches the approach taken for
`LCallNative`. The additional temp register is needed because `emitCallNative`
requires four registers.
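
For orientation, `emitApplyNative` (in the CodeGenerator.cpp hunk below)
distributes those four registers as follows; the code is from the patch, the
trailing comments are annotations:

// Registers used for callWithABI() argument-passing.
Register argContextReg = ToRegister(apply->getTempObject());      // JSContext*
Register argUintNReg   = ToRegister(apply->getArgc());            // uint32_t argc
Register argVpReg      = ToRegister(apply->getTempForArgCopy());  // Value* vp
Register tempReg       = ToRegister(apply->getTempExtra());       // extra temp freed up by not storing the callee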

VMFunctions:
- Remove the now-unused `InvokeNativeFunction`.

Differential Revision: https://phabricator.services.mozilla.com/D207005
Author: André Bargull
Date:   2024-04-15 14:39:04 +00:00
Commit: 1928539e57 (parent: b50231ef5c)
7 changed files with 235 additions and 161 deletions

File: js/src/jit/CodeGenerator.cpp

@@ -5495,21 +5495,10 @@ void CodeGenerator::visitAssertCanElidePostWriteBarrier(
}
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
MCallBase* mir = call->mir();
uint32_t unusedStack = UnusedStackBytesForCall(mir->paddedNumStackArgs());
// Registers used for callWithABI() argument-passing.
const Register argContextReg = ToRegister(call->getArgContextReg());
const Register argUintNReg = ToRegister(call->getArgUintNReg());
const Register argVpReg = ToRegister(call->getArgVpReg());
// Misc. temporary registers.
const Register tempReg = ToRegister(call->getTempReg());
DebugOnly<uint32_t> initialStack = masm.framePushed();
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native,
Register argContextReg, Register argUintNReg,
Register argVpReg, Register tempReg,
uint32_t unusedStack) {
masm.checkStackAlignment();
// Native functions have the signature:
@@ -5524,17 +5513,21 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Push a Value containing the callee object: natives are allowed to access
// their callee before setting the return value. The StackPointer is moved
// to &vp[0].
//
// Also reserves the space for |NativeExitFrameLayout::{lo,hi}CalleeResult_|.
if constexpr (std::is_same_v<LCallIns, LCallClassHook>) {
Register calleeReg = ToRegister(call->getCallee());
masm.Push(TypedOrValueRegister(MIRType::Object, AnyRegister(calleeReg)));
// Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToObjectRealm(calleeReg, tempReg);
}
} else {
WrappedFunction* target = call->getSingleTarget();
WrappedFunction* target = call->mir()->getSingleTarget();
masm.Push(ObjectValue(*target->rawNativeJSFunction()));
// Enter the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.movePtr(ImmGCPtr(target->rawNativeJSFunction()), tempReg);
masm.switchToObjectRealm(tempReg, tempReg);
@@ -5543,12 +5536,17 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Preload arguments into registers.
masm.loadJSContext(argContextReg);
masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
masm.moveStackPtrTo(argVpReg);
// Initialize |NativeExitFrameLayout::argc_|.
masm.Push(argUintNReg);
// Construct native exit frame.
//
// |buildFakeExitFrame| initializes |NativeExitFrameLayout::exit_| and
// |enterFakeExitFrameForNative| initializes |NativeExitFrameLayout::footer_|.
//
// The NativeExitFrameLayout is now fully initialized.
uint32_t safepointOffset = masm.buildFakeExitFrame(tempReg);
masm.enterFakeExitFrameForNative(argContextReg, tempReg,
call->mir()->isConstructing());
@@ -5581,6 +5579,7 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Test for failure.
masm.branchIfFalseBool(ReturnReg, masm.failureLabel());
// Exit the callee realm.
if (call->mir()->maybeCrossRealm()) {
masm.switchToRealm(gen->realm->realmPtr(), ReturnReg);
}
@@ -5593,10 +5592,44 @@ void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
// Until C++ code is instrumented against Spectre, prevent speculative
// execution from returning any private data.
if (JitOptions.spectreJitToCxxCalls && !call->mir()->ignoresReturnValue() &&
mir->hasLiveDefUses()) {
call->mir()->hasLiveDefUses()) {
masm.speculationBarrier();
}
#ifdef DEBUG
// Native constructors are guaranteed to return an Object value.
if (call->mir()->isConstructing()) {
Label notPrimitive;
masm.branchTestPrimitive(Assembler::NotEqual, JSReturnOperand,
&notPrimitive);
masm.assumeUnreachable("native constructors don't return primitives");
masm.bind(&notPrimitive);
}
#endif
}
template <typename LCallIns>
void CodeGenerator::emitCallNative(LCallIns* call, JSNative native) {
uint32_t unusedStack =
UnusedStackBytesForCall(call->mir()->paddedNumStackArgs());
// Registers used for callWithABI() argument-passing.
const Register argContextReg = ToRegister(call->getArgContextReg());
const Register argUintNReg = ToRegister(call->getArgUintNReg());
const Register argVpReg = ToRegister(call->getArgVpReg());
// Misc. temporary registers.
const Register tempReg = ToRegister(call->getTempReg());
DebugOnly<uint32_t> initialStack = masm.framePushed();
// Initialize the argc register.
masm.move32(Imm32(call->mir()->numActualArgs()), argUintNReg);
// Create the exit frame and call the native.
emitCallNative(call, native, argContextReg, argUintNReg, argVpReg, tempReg,
unusedStack);
// The next instruction is removing the footer of the exit frame, so there
// is no need for leaveFakeExitFrame.
@@ -6841,15 +6874,35 @@ void CodeGenerator::emitApplyGeneric(T* apply) {
}
template <typename T>
void CodeGenerator::emitCallInvokeNativeFunction(T* apply) {
pushArg(masm.getStackPointer()); // argv.
pushArg(ToRegister(apply->getArgc())); // argc.
pushArg(Imm32(apply->mir()->ignoresReturnValue())); // ignoresReturnValue.
pushArg(Imm32(apply->mir()->isConstructing())); // isConstructing.
void CodeGenerator::emitAlignStackForApplyNative(T* apply, Register argc) {
static_assert(JitStackAlignment % ABIStackAlignment == 0,
"aligning on JIT stack subsumes ABI alignment");
using Fn =
bool (*)(JSContext*, bool, bool, uint32_t, Value*, MutableHandleValue);
callVM<Fn, jit::InvokeNativeFunction>(apply);
// Align the arguments on the JitStackAlignment.
if constexpr (JitStackValueAlignment > 1) {
static_assert(JitStackValueAlignment == 2,
"Stack padding adds exactly one Value");
MOZ_ASSERT(frameSize() % JitStackValueAlignment == 0,
"Stack padding assumes that the frameSize is correct");
Assembler::Condition cond;
if constexpr (T::isConstructing()) {
// If the number of arguments is even, then we do not need any padding.
//
// Also see emitAllocateSpaceForConstructAndPushNewTarget().
cond = Assembler::Zero;
} else {
// If the number of arguments is odd, then we do not need any padding.
//
// Also see emitAllocateSpaceForApply().
cond = Assembler::NonZero;
}
Label noPaddingNeeded;
masm.branchTestPtr(cond, argc, Imm32(1), &noPaddingNeeded);
masm.pushValue(MagicValue(JS_ARG_POISON));
masm.bind(&noPaddingNeeded);
}
}
template <typename T>
@@ -6859,11 +6912,19 @@ void CodeGenerator::emitPushNativeArguments(T* apply) {
Register scratch = ToRegister(apply->getTempForArgCopy());
uint32_t extraFormals = apply->numExtraFormals();
// Align stack.
emitAlignStackForApplyNative(apply, argc);
// Push newTarget.
if constexpr (T::isConstructing()) {
masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
}
// Push arguments.
Label noCopy;
masm.branchTestPtr(Assembler::Zero, argc, argc, &noCopy);
{
// Use scratch register to calculate stack space (no padding needed).
// Use scratch register to calculate stack space.
masm.movePtr(argc, scratch);
// Reserve space for copying the arguments.
@@ -6885,6 +6946,13 @@ void CodeGenerator::emitPushNativeArguments(T* apply) {
argvDstOffset);
}
masm.bind(&noCopy);
// Push |this|.
if constexpr (T::isConstructing()) {
masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
} else {
masm.pushValue(ToValue(apply, T::ThisIndex));
}
}
template <typename T>
@@ -6904,6 +6972,14 @@ void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
// The array length is our argc.
masm.load32(Address(elements, ObjectElements::offsetOfLength()), tmpArgc);
// Align stack.
emitAlignStackForApplyNative(apply, tmpArgc);
// Push newTarget.
if constexpr (T::isConstructing()) {
masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
}
// Skip the copy of arguments if there are none.
Label noCopy;
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
@@ -6919,8 +6995,15 @@ void CodeGenerator::emitPushArrayAsNativeArguments(T* apply) {
}
masm.bind(&noCopy);
// Set argc in preparation for emitCallInvokeNativeFunction.
// Set argc in preparation for calling the native function.
masm.load32(Address(elements, ObjectElements::offsetOfLength()), argc);
// Push |this|.
if constexpr (T::isConstructing()) {
masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
} else {
masm.pushValue(ToValue(apply, T::ThisIndex));
}
}
void CodeGenerator::emitPushArguments(LApplyArgsNative* apply) {
@@ -6944,6 +7027,7 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
Register argsObj = ToRegister(apply->getArgsObj());
Register tmpArgc = ToRegister(apply->getTempObject());
Register scratch = ToRegister(apply->getTempForArgCopy());
Register scratch2 = ToRegister(apply->getTempExtra());
// NB: argc and argsObj are mapped to the same register.
MOZ_ASSERT(argc == argsObj);
@@ -6951,11 +7035,14 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
// Load argc into tmpArgc.
masm.loadArgumentsObjectLength(argsObj, tmpArgc);
// Align stack.
emitAlignStackForApplyNative(apply, tmpArgc);
// Push arguments.
Label noCopy, epilogue;
masm.branchTestPtr(Assembler::Zero, tmpArgc, tmpArgc, &noCopy);
{
// Use scratch register to calculate stack space (no padding needed).
// Use scratch register to calculate stack space.
masm.movePtr(tmpArgc, scratch);
// Reserve space for copying the arguments.
@@ -6970,56 +7057,65 @@ void CodeGenerator::emitPushArguments(LApplyArgsObjNative* apply) {
size_t argvSrcOffset = ArgumentsData::offsetOfArgs();
size_t argvDstOffset = 0;
// Stash away |tmpArgc| and adjust argvDstOffset accordingly.
masm.push(tmpArgc);
argvDstOffset += sizeof(void*);
Register argvIndex = scratch2;
masm.move32(tmpArgc, argvIndex);
// Copy the values.
emitCopyValuesForApply(argvSrcBase, tmpArgc, scratch, argvSrcOffset,
emitCopyValuesForApply(argvSrcBase, argvIndex, scratch, argvSrcOffset,
argvDstOffset);
// Set argc in preparation for emitCallInvokeNativeFunction.
masm.pop(argc);
masm.jump(&epilogue);
}
masm.bind(&noCopy);
{
// Set argc in preparation for emitCallInvokeNativeFunction.
masm.movePtr(ImmWord(0), argc);
}
masm.bind(&epilogue);
// Set argc in preparation for calling the native function.
masm.movePtr(tmpArgc, argc);
// Push |this|.
masm.pushValue(ToValue(apply, LApplyArgsObjNative::ThisIndex));
}
template <typename T>
void CodeGenerator::emitApplyNative(T* apply) {
MOZ_ASSERT(apply->mir()->getSingleTarget()->isNativeWithoutJitEntry());
constexpr bool isConstructing = T::isConstructing();
MOZ_ASSERT(isConstructing == apply->mir()->isConstructing(),
MOZ_ASSERT(T::isConstructing() == apply->mir()->isConstructing(),
"isConstructing condition must be consistent");
// Push newTarget.
if constexpr (isConstructing) {
masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getNewTarget()));
WrappedFunction* target = apply->mir()->getSingleTarget();
MOZ_ASSERT(target->isNativeWithoutJitEntry());
JSNative native = target->native();
if (apply->mir()->ignoresReturnValue() && target->hasJitInfo()) {
const JSJitInfo* jitInfo = target->jitInfo();
if (jitInfo->type() == JSJitInfo::IgnoresReturnValueNative) {
native = jitInfo->ignoresReturnValueMethod;
}
}
// Push arguments.
// Push arguments, including newTarget and |this|.
emitPushArguments(apply);
// Push |this|.
if constexpr (isConstructing) {
masm.pushValue(MagicValue(JS_IS_CONSTRUCTING));
} else {
masm.pushValue(ToValue(apply, T::ThisIndex));
}
// Registers used for callWithABI() argument-passing.
Register argContextReg = ToRegister(apply->getTempObject());
Register argUintNReg = ToRegister(apply->getArgc());
Register argVpReg = ToRegister(apply->getTempForArgCopy());
Register tempReg = ToRegister(apply->getTempExtra());
// Push callee.
masm.pushValue(JSVAL_TYPE_OBJECT, ToRegister(apply->getFunction()));
// No unused stack for variadic calls.
uint32_t unusedStack = 0;
// Call the native function.
emitCallInvokeNativeFunction(apply);
// Pushed arguments don't change the pushed frames amount.
MOZ_ASSERT(masm.framePushed() == frameSize());
// Create the exit frame and call the native.
emitCallNative(apply, native, argContextReg, argUintNReg, argVpReg, tempReg,
unusedStack);
// The exit frame is still on the stack.
MOZ_ASSERT(masm.framePushed() == frameSize() + NativeExitFrameLayout::Size());
// The next instruction is removing the exit frame, so there is no need for
// leaveFakeExitFrame.
// Pop arguments and continue.
masm.setFramePushed(frameSize());
emitRestoreStackPointerFromFP();
}
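
Putting the hunks above together, the stack at the point of the ABI call looks
like this (a sketch assembled from the pushes above; the stack grows toward
lower addresses, so vp[0] is the last Value pushed):

//  (higher addresses)
//  [padding Value]           optional; emitAlignStackForApplyNative
//  newTarget                 constructing calls only
//  arg[argc-1] ... arg[0]    copied by emitPushNativeArguments et al.
//  |this|                    JS_IS_CONSTRUCTING magic when constructing
//  callee                    vp[0]; argVpReg points here, doubles as result slot
//  argc_                     NativeExitFrameLayout::argc_
//  exit_                     descriptor + return address; buildFakeExitFrame
//  footer_                   enterFakeExitFrameForNative
//  (lower addresses, stack pointer)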

File: js/src/jit/CodeGenerator.h

@@ -108,6 +108,11 @@ class CodeGenerator final : public CodeGeneratorSpecific {
inline OutOfLineCode* oolCallVM(LInstruction* ins, const ArgSeq& args,
const StoreOutputTo& out);
template <typename LCallIns>
void emitCallNative(LCallIns* call, JSNative native, Register argContextReg,
Register argUintNReg, Register argVpReg, Register tempReg,
uint32_t unusedStack);
template <typename LCallIns>
void emitCallNative(LCallIns* call, JSNative native);
@@ -248,7 +253,7 @@ class CodeGenerator final : public CodeGeneratorSpecific {
template <typename T>
void emitApplyNative(T* apply);
template <typename T>
void emitCallInvokeNativeFunction(T* apply);
void emitAlignStackForApplyNative(T* apply, Register argc);
template <typename T>
void emitPushNativeArguments(T* apply);
template <typename T>

File: js/src/jit/Lowering.cpp

@@ -654,7 +654,6 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto argc = useFixedAtStart(apply->getArgc(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -665,9 +664,13 @@ void LIRGenerator::visitApplyArgs(MApplyArgs* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
auto temp = tempFixed(CallTempReg3);
lir = new (alloc())
LApplyArgsNative(function, argc, thisValue, tempObj, tempCopy);
LApplyArgsNative(argc, thisValue, tempObj, tempCopy, temp);
} else {
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
lir = new (alloc())
LApplyArgsGeneric(function, argc, thisValue, tempObj, tempCopy);
}
@@ -686,7 +689,6 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto argsObj = useFixedAtStart(apply->getArgsObj(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -697,9 +699,13 @@ void LIRGenerator::visitApplyArgsObj(MApplyArgsObj* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
auto temp = tempFixed(CallTempReg3);
lir = new (alloc())
LApplyArgsObjNative(function, argsObj, thisValue, tempObj, tempCopy);
LApplyArgsObjNative(argsObj, thisValue, tempObj, tempCopy, temp);
} else {
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
lir = new (alloc())
LApplyArgsObj(function, argsObj, thisValue, tempObj, tempCopy);
}
@@ -718,7 +724,6 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
auto elements = useFixedAtStart(apply->getElements(), CallTempReg0);
auto thisValue =
useBoxFixedAtStart(apply->getThis(), CallTempReg4, CallTempReg5);
@@ -729,9 +734,13 @@ void LIRGenerator::visitApplyArray(MApplyArray* apply) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
auto temp = tempFixed(CallTempReg3);
lir = new (alloc())
LApplyArrayNative(function, elements, thisValue, tempObj, tempCopy);
LApplyArrayNative(elements, thisValue, tempObj, tempCopy, temp);
} else {
auto function = useFixedAtStart(apply->getFunction(), CallTempReg3);
lir = new (alloc())
LApplyArrayGeneric(function, elements, thisValue, tempObj, tempCopy);
}
@@ -754,7 +763,6 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto argc = useFixedAtStart(mir->getArgc(), CallTempReg0);
auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
auto temp = tempFixed(CallTempReg2);
@@ -763,11 +771,13 @@ void LIRGenerator::visitConstructArgs(MConstructArgs* mir) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
auto temp2 = tempFixed(CallTempReg4);
auto temp2 = tempFixed(CallTempReg3);
auto temp3 = tempFixed(CallTempReg4);
lir = new (alloc())
LConstructArgsNative(function, argc, newTarget, temp, temp2);
lir =
new (alloc()) LConstructArgsNative(argc, newTarget, temp, temp2, temp3);
} else {
auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto thisValue =
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);
@@ -792,7 +802,6 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
static_assert(CallTempReg2 != JSReturnReg_Type);
static_assert(CallTempReg2 != JSReturnReg_Data);
auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto elements = useFixedAtStart(mir->getElements(), CallTempReg0);
auto newTarget = useFixedAtStart(mir->getNewTarget(), CallTempReg1);
auto temp = tempFixed(CallTempReg2);
@@ -801,11 +810,13 @@ void LIRGenerator::visitConstructArray(MConstructArray* mir) {
LInstruction* lir;
if (target && target->isNativeWithoutJitEntry()) {
auto temp2 = tempFixed(CallTempReg4);
auto temp2 = tempFixed(CallTempReg3);
auto temp3 = tempFixed(CallTempReg4);
lir = new (alloc())
LConstructArrayNative(function, elements, newTarget, temp, temp2);
LConstructArrayNative(elements, newTarget, temp, temp2, temp3);
} else {
auto function = useFixedAtStart(mir->getFunction(), CallTempReg3);
auto thisValue =
useBoxFixedAtStart(mir->getThis(), CallTempReg4, CallTempReg5);

File: js/src/jit/VMFunctionList-inl.h

@@ -211,7 +211,6 @@ namespace jit {
_(InterpretResume, js::jit::InterpretResume) \
_(InterruptCheck, js::jit::InterruptCheck) \
_(InvokeFunction, js::jit::InvokeFunction) \
_(InvokeNativeFunction, js::jit::InvokeNativeFunction) \
_(IonBinaryArithICUpdate, js::jit::IonBinaryArithIC::update) \
_(IonBindNameICUpdate, js::jit::IonBindNameIC::update) \
_(IonCheckPrivateFieldICUpdate, js::jit::IonCheckPrivateFieldIC::update) \

File: js/src/jit/VMFunctions.cpp

@@ -545,39 +545,6 @@ bool InvokeFunction(JSContext* cx, HandleObject obj, bool constructing,
return Call(cx, fval, thisv, args, rval);
}
bool InvokeNativeFunction(JSContext* cx, bool constructing,
bool ignoresReturnValue, uint32_t argc, Value* argv,
MutableHandleValue rval) {
// Ensure argv array is rooted - we may GC in here.
size_t numValues = argc + 2 + constructing;
RootedExternalValueArray argvRoot(cx, numValues, argv);
// Data in the argument vector is arranged for a JIT -> C++ call.
CallArgs callArgs = CallArgsFromSp(argc + constructing, argv + numValues,
constructing, ignoresReturnValue);
// This function is only called when the callee is a native function.
MOZ_ASSERT(callArgs.callee().as<JSFunction>().isNativeWithoutJitEntry());
if (constructing) {
MOZ_ASSERT(callArgs.thisv().isMagic(JS_IS_CONSTRUCTING));
if (!ConstructFromStack(cx, callArgs)) {
return false;
}
MOZ_ASSERT(callArgs.rval().isObject(),
"native constructors don't return primitives");
} else {
if (!CallFromStack(cx, callArgs)) {
return false;
}
}
rval.set(callArgs.rval());
return true;
}
void* GetContextSensitiveInterpreterStub() {
return TlsContext.get()->runtime()->jitRuntime()->interpreterStub().value;
}

File: js/src/jit/VMFunctions.h

@@ -354,10 +354,6 @@ struct LastArg<HeadType, TailTypes...> {
uint32_t argc, Value* argv,
MutableHandleValue rval);
[[nodiscard]] bool InvokeNativeFunction(JSContext* cx, bool constructing,
bool ignoresReturnValue, uint32_t argc,
Value* argv, MutableHandleValue rval);
bool InvokeFromInterpreterStub(JSContext* cx,
InterpreterStubExitFrameLayout* frame);
void* GetContextSensitiveInterpreterStub();

File: js/src/jit/LIR-shared.h

@@ -817,19 +817,19 @@ class LConstructArrayGeneric
};
class LApplyArgsNative
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArgsNative)
LApplyArgsNative(const LAllocation& func, const LAllocation& argc,
const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
const LDefinition& tmpCopy)
LApplyArgsNative(const LAllocation& argc, const LBoxAllocation& thisv,
const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setOperand(0, argc);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
@@ -838,94 +838,94 @@ class LApplyArgsNative
uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
const LAllocation* getFunction() { return getOperand(0); }
const LAllocation* getArgc() { return getOperand(1); }
const LAllocation* getArgc() { return getOperand(0); }
static const size_t ThisIndex = 2;
static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
const LDefinition* getTempExtra() { return getTemp(2); }
};
class LApplyArgsObjNative
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArgsObjNative)
LApplyArgsObjNative(const LAllocation& func, const LAllocation& argsObj,
const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
const LDefinition& tmpCopy)
LApplyArgsObjNative(const LAllocation& argsObj, const LBoxAllocation& thisv,
const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argsObj);
setOperand(0, argsObj);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
MApplyArgsObj* mir() const { return mir_->toApplyArgsObj(); }
const LAllocation* getFunction() { return getOperand(0); }
const LAllocation* getArgsObj() { return getOperand(1); }
const LAllocation* getArgsObj() { return getOperand(0); }
static const size_t ThisIndex = 2;
static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as argsObj: argc becomes live as
// argsObj is dying, all registers are calltemps.
const LAllocation* getArgc() { return getOperand(1); }
const LAllocation* getArgc() { return getOperand(0); }
};
class LApplyArrayNative
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 2, 2> {
: public LCallInstructionHelper<BOX_PIECES, BOX_PIECES + 1, 3> {
public:
LIR_HEADER(ApplyArrayNative)
LApplyArrayNative(const LAllocation& func, const LAllocation& elements,
const LBoxAllocation& thisv, const LDefinition& tmpObjReg,
const LDefinition& tmpCopy)
LApplyArrayNative(const LAllocation& elements, const LBoxAllocation& thisv,
const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setOperand(0, elements);
setBoxOperand(ThisIndex, thisv);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return false; }
MApplyArray* mir() const { return mir_->toApplyArray(); }
const LAllocation* getFunction() { return getOperand(0); }
const LAllocation* getElements() { return getOperand(1); }
const LAllocation* getElements() { return getOperand(0); }
static const size_t ThisIndex = 2;
static const size_t ThisIndex = 1;
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as elements: argc becomes live as
// elements is dying, all registers are calltemps.
const LAllocation* getArgc() { return getOperand(1); }
const LAllocation* getArgc() { return getOperand(0); }
};
class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 2, 3> {
public:
LIR_HEADER(ConstructArgsNative)
LConstructArgsNative(const LAllocation& func, const LAllocation& argc,
const LAllocation& newTarget,
const LDefinition& tmpObjReg, const LDefinition& tmpCopy)
LConstructArgsNative(const LAllocation& argc, const LAllocation& newTarget,
const LDefinition& tmpObjReg, const LDefinition& tmpCopy,
const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, argc);
setOperand(2, newTarget);
setOperand(0, argc);
setOperand(1, newTarget);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return true; }
@@ -934,44 +934,44 @@ class LConstructArgsNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
uint32_t numExtraFormals() const { return mir()->numExtraFormals(); }
const LAllocation* getFunction() { return getOperand(0); }
const LAllocation* getArgc() { return getOperand(1); }
const LAllocation* getNewTarget() { return getOperand(2); }
const LAllocation* getArgc() { return getOperand(0); }
const LAllocation* getNewTarget() { return getOperand(1); }
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
const LDefinition* getTempExtra() { return getTemp(2); }
};
class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 3, 2> {
class LConstructArrayNative : public LCallInstructionHelper<BOX_PIECES, 2, 3> {
public:
LIR_HEADER(ConstructArrayNative)
LConstructArrayNative(const LAllocation& func, const LAllocation& elements,
LConstructArrayNative(const LAllocation& elements,
const LAllocation& newTarget,
const LDefinition& tmpObjReg,
const LDefinition& tmpCopy)
const LDefinition& tmpCopy, const LDefinition& tmpExtra)
: LCallInstructionHelper(classOpcode) {
setOperand(0, func);
setOperand(1, elements);
setOperand(2, newTarget);
setOperand(0, elements);
setOperand(1, newTarget);
setTemp(0, tmpObjReg);
setTemp(1, tmpCopy);
setTemp(2, tmpExtra);
}
static constexpr bool isConstructing() { return true; }
MConstructArray* mir() const { return mir_->toConstructArray(); }
const LAllocation* getFunction() { return getOperand(0); }
const LAllocation* getElements() { return getOperand(1); }
const LAllocation* getNewTarget() { return getOperand(2); }
const LAllocation* getElements() { return getOperand(0); }
const LAllocation* getNewTarget() { return getOperand(1); }
const LDefinition* getTempObject() { return getTemp(0); }
const LDefinition* getTempForArgCopy() { return getTemp(1); }
const LDefinition* getTempExtra() { return getTemp(2); }
// argc is mapped to the same register as elements: argc becomes live as
// elements is dying, all registers are calltemps.
const LAllocation* getArgc() { return getOperand(1); }
const LAllocation* getArgc() { return getOperand(0); }
};
// Takes in either an integer or boolean input and tests it for truthiness.