Bug 1478632 - wasm simd, part 1: feature gating and related prep. r=rhunt

We add a configuration option for SIMD and apply ENABLE_WASM_SIMD
throughout the engine as appropriate, mostly to insert #error or
MOZ_CRASH where things need to be done in later patches or for
architectures that we won't currently consider.

We add a command line switch for the shell and an option for
about:config and plumb the value of this through the engine.

Differential Revision: https://phabricator.services.mozilla.com/D57940
This commit is contained in:
Lars T Hansen 2020-05-05 08:17:47 +00:00
parent c7c91266a0
commit 414fee387b
49 changed files with 335 additions and 163 deletions

View File

@ -298,6 +298,9 @@ void LoadContextOptions(const char* aPrefName, void* /* aClosure */) {
.setWasmMultiValue(
GetWorkerPref<bool>(NS_LITERAL_CSTRING("wasm_multi_value")))
#endif
#ifdef ENABLE_WASM_SIMD
.setWasmSimd(GetWorkerPref<bool>(NS_LITERAL_CSTRING("wasm_simd")))
#endif
#ifdef ENABLE_WASM_REFTYPES
.setWasmGc(GetWorkerPref<bool>(NS_LITERAL_CSTRING("wasm_gc")))
#endif

View File

@ -642,3 +642,24 @@ def enable_new_regexp(value):
set_config('ENABLE_NEW_REGEXP', enable_new_regexp)
set_define('ENABLE_NEW_REGEXP', enable_new_regexp)
# Support for WebAssembly SIMD
# =====================================================
@depends('--enable-jit', '--enable-simulator', target, milestone)
def default_wasm_simd(jit_enabled, simulator, target, milestone):
if not jit_enabled or simulator:
return
# Note, not `target.cpu in ('x86_64')`, as the parens are stripped
# from the 1-element tuple and the operation does string matching.
# Good grief.
if milestone.is_nightly and target.cpu == 'x86_64':
return True
js_option('--enable-wasm-simd',
default=default_wasm_simd,
help='{Enable|Disable} WebAssembly SIMD')
set_config('ENABLE_WASM_SIMD', depends_if('--enable-wasm-simd')(lambda x: True))
set_define('ENABLE_WASM_SIMD', depends_if('--enable-wasm-simd')(lambda x: True))

View File

@ -28,6 +28,7 @@ class JS_PUBLIC_API ContextOptions {
wasmReftypes_(true),
wasmGc_(false),
wasmMultiValue_(false),
wasmSimd_(false),
testWasmAwaitTier2_(false),
#ifdef ENABLE_WASM_BIGINT
enableWasmBigInt_(true),
@ -122,6 +123,10 @@ class JS_PUBLIC_API ContextOptions {
// Defined out-of-line because it depends on a compile-time option
ContextOptions& setWasmMultiValue(bool flag);
bool wasmSimd() const { return wasmSimd_; }
// Defined out-of-line because it depends on a compile-time option
ContextOptions& setWasmSimd(bool flag);
bool throwOnAsmJSValidationFailure() const {
return throwOnAsmJSValidationFailure_;
}
@ -210,6 +215,7 @@ class JS_PUBLIC_API ContextOptions {
setWasmIon(false);
setWasmGc(false);
setWasmMultiValue(false);
setWasmSimd(false);
}
private:
@ -223,6 +229,7 @@ class JS_PUBLIC_API ContextOptions {
bool wasmReftypes_ : 1;
bool wasmGc_ : 1;
bool wasmMultiValue_ : 1;
bool wasmSimd_ : 1;
bool testWasmAwaitTier2_ : 1;
#ifdef ENABLE_WASM_BIGINT
bool enableWasmBigInt_ : 1;

View File

@ -796,6 +796,12 @@ static bool WasmBigIntEnabled(JSContext* cx, unsigned argc, Value* vp) {
return true;
}
// Shell testing function wasmSimdSupported(): reports whether wasm SIMD is
// available in this build/runtime, as determined by wasm::SimdAvailable.
static bool WasmSimdSupported(JSContext* cx, unsigned argc, Value* vp) {
  CallArgs args = CallArgsFromVp(argc, vp);
  bool simdAvailable = wasm::SimdAvailable(cx);
  args.rval().setBoolean(simdAvailable);
  return true;
}
static bool WasmCompilersPresent(JSContext* cx, unsigned argc, Value* vp) {
CallArgs args = CallArgsFromVp(argc, vp);
@ -6606,6 +6612,11 @@ gc::ZealModeHelpText),
" Returns a boolean indicating whether the WebAssembly bulk memory proposal is\n"
" supported on the current device."),
JS_FN_HELP("wasmSimdSupported", WasmSimdSupported, 0, 0,
"wasmSimdSupported()",
" Returns a boolean indicating whether WebAssembly SIMD is supported by the\n"
" compilers and runtime."),
JS_FN_HELP("wasmCompilersPresent", WasmCompilersPresent, 0, 0,
"wasmCompilersPresent()",
" Returns a string indicating the present wasm compilers: a comma-separated list\n"

View File

@ -139,10 +139,8 @@ class StoreOp {
masm.storeDouble(reg, dump);
} else if (reg.isSingle()) {
masm.storeFloat32(reg, dump);
# if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
} else if (reg.isSimd128()) {
masm.storeUnalignedSimd128Float(reg, dump);
# endif
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
@ -169,9 +167,11 @@ class VerifyOp {
ScratchFloat32Scope scratch(masm);
masm.loadFloat32(dump, scratch);
masm.branchFloat(Assembler::DoubleNotEqual, scratch, reg, failure_);
} else if (reg.isSimd128()) {
MOZ_CRASH("Unexpected case for SIMD");
} else {
MOZ_CRASH("Unexpected register type.");
}
// :TODO: (Bug 1133745) Add support to verify SIMD registers.
}
};

View File

@ -155,7 +155,13 @@ bool jit::InitializeJit() {
return true;
}
bool jit::JitSupportsSimd() { return js::jit::MacroAssembler::SupportsSimd(); }
// Report whether the hardware supports the instructions required for wasm
// SIMD.  Only meaningful in builds configured with ENABLE_WASM_SIMD; in
// other builds calling this is a programming error and crashes.
bool jit::JitSupportsWasmSimd() {
#ifndef ENABLE_WASM_SIMD
  MOZ_CRASH("Do not call");
#else
  return js::jit::MacroAssembler::SupportsWasmSimd();
#endif
}
bool jit::JitSupportsAtomics() {
#if defined(JS_CODEGEN_ARM)

View File

@ -153,7 +153,7 @@ static inline bool IsErrorStatus(JitExecStatus status) {
return status == JitExec_Error || status == JitExec_Aborted;
}
bool JitSupportsSimd();
bool JitSupportsWasmSimd();
bool JitSupportsAtomics();
} // namespace jit

View File

@ -2174,6 +2174,9 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs,
for (unsigned i = 0; i < FloatRegisters::TotalSingle; i++) {
machine.setRegisterLocation(FloatRegister(i, FloatRegister::Single),
(double*)&fbase[i]);
# ifdef ENABLE_WASM_SIMD
# error "More care needed here"
# endif
}
#elif defined(JS_CODEGEN_MIPS32)
for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
@ -2181,6 +2184,9 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs,
FloatRegister::FromIndex(i, FloatRegister::Double), &fpregs[i]);
machine.setRegisterLocation(
FloatRegister::FromIndex(i, FloatRegister::Single), &fpregs[i]);
# ifdef ENABLE_WASM_SIMD
# error "More care needed here"
# endif
}
#elif defined(JS_CODEGEN_MIPS64)
for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
@ -2188,6 +2194,9 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs,
&fpregs[i]);
machine.setRegisterLocation(FloatRegister(i, FloatRegisters::Single),
&fpregs[i]);
# ifdef ENABLE_WASM_SIMD
# error "More care needed here"
# endif
}
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
@ -2206,6 +2215,9 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs,
machine.setRegisterLocation(
FloatRegister(FloatRegisters::Encoding(i), FloatRegisters::Double),
&fpregs[i]);
# ifdef ENABLE_WASM_SIMD
# error "More care needed here"
# endif
}
#elif defined(JS_CODEGEN_NONE)

View File

@ -500,7 +500,9 @@ class LDefinition {
static_assert(MAX_VIRTUAL_REGISTERS <= VREG_MASK);
bits_ =
(index << VREG_SHIFT) | (policy << POLICY_SHIFT) | (type << TYPE_SHIFT);
MOZ_ASSERT_IF(!SupportsSimd, !isSimdType());
#ifndef ENABLE_WASM_SIMD
MOZ_ASSERT(!isSimdType());
#endif
}
public:

View File

@ -29,7 +29,6 @@ class StackSlotAllocator {
}
uint32_t allocateQuadSlot() {
MOZ_ASSERT(SupportsSimd);
// This relies on the fact that any architecture specific
// alignment of the stack pointer is done a priori.
if (height_ % 8 != 0) {

View File

@ -398,6 +398,10 @@ FloatRegisters::Code FloatRegisters::FromName(const char* name) {
}
FloatRegisterSet VFPRegister::ReduceSetForPush(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
LiveFloatRegisterSet mod;
for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
if ((*iter).isSingle()) {
@ -416,6 +420,10 @@ FloatRegisterSet VFPRegister::ReduceSetForPush(const FloatRegisterSet& s) {
}
uint32_t VFPRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
FloatRegisterSet ss = s.reduceSetForPush();
uint64_t bits = ss.bits();
uint32_t ret = mozilla::CountPopulation32(bits & 0xffffffff) * sizeof(float);
@ -423,6 +431,10 @@ uint32_t VFPRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
return ret;
}
uint32_t VFPRegister::getRegisterDumpOffsetInBytes() {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
if (isSingle()) {
return id() * sizeof(float);
}

View File

@ -277,11 +277,6 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 &&
JitStackValueAlignment >= 1,
"Stack alignment should be a non-zero multiple of sizeof(Value)");
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 8;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,
@ -300,15 +295,6 @@ static_assert(JitStackAlignment % SimdMemoryAlignment == 0,
static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
static const uint32_t WasmTrapInstructionLength = 4;
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
static const Scale ScalePointer = TimesFour;
class Instruction;
@ -1676,7 +1662,6 @@ class Assembler : public AssemblerShared {
static bool SupportsFloatingPoint() { return HasVFP(); }
static bool SupportsUnalignedAccesses() { return HasARMv7(); }
static bool SupportsFastUnalignedAccesses() { return false; }
static bool SupportsSimd() { return js::jit::SupportsSimd; }
static bool HasRoundInstruction(RoundingMode mode) { return false; }

View File

@ -4053,6 +4053,13 @@ void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
}
MOZ_ASSERT(diffG == 0);
// It's possible that the logic is just fine as it is if the reduced set
// maps SIMD pairs to plain doubles and transferMultipleByRuns() stores
// and loads doubles.
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
adjustFrame(diffF);
diffF += transferMultipleByRuns(set.fpus(), IsStore, StackPointer, DB);
MOZ_ASSERT(diffF == 0);
@ -4086,6 +4093,11 @@ void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
}
MOZ_ASSERT(diffG == 0);
// See above.
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
if (diffF > 0) {
computeEffectiveAddress(dest, scratch);
diffF += transferMultipleByRuns(set.fpus(), IsStore, scratch, DB);
@ -4101,6 +4113,11 @@ void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
const int32_t reservedG = diffG;
const int32_t reservedF = diffF;
// See above.
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
// ARM can load multiple registers at once, but only if we want back all
// the registers we previously saved to the stack.
if (ignore.emptyFloat()) {

View File

@ -555,6 +555,10 @@ void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm) {
static void PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass,
Register spArg) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
// the stack should look like:
// [IonFrame]
// bailoutFrame.registersnapshot

View File

@ -47,6 +47,10 @@ FloatRegisters::Code FloatRegisters::FromName(const char* name) {
}
FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
LiveFloatRegisterSet ret;
for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
ret.addUnchecked(FromCode((*iter).encoding()));
@ -55,10 +59,18 @@ FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
}
uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
return s.size() * sizeof(double);
}
uint32_t FloatRegister::getRegisterDumpOffsetInBytes() {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
// Although registers are 128-bits wide, only the first 64 need saving per
// ABI.
return encoding() * sizeof(double);

View File

@ -60,6 +60,9 @@ ABIArg ABIArgGenerator::next(MIRType type) {
break;
default:
// Note that in Assembler-x64.cpp there's a special case for Win64 which
// does not allow passing SIMD by value. Since there's Win64 on ARM64 we
// may need to duplicate that logic here.
MOZ_CRASH("Unexpected argument type");
}
return current_;

View File

@ -163,11 +163,6 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 &&
JitStackValueAlignment >= 1,
"Stack alignment should be a non-zero multiple of sizeof(Value)");
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,
@ -180,15 +175,6 @@ static_assert(CodeAlignment % SimdMemoryAlignment == 0,
static const uint32_t WasmStackAlignment = SimdMemoryAlignment;
static const uint32_t WasmTrapInstructionLength = 4;
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
class Assembler : public vixl::Assembler {
public:
Assembler() : vixl::Assembler() {}
@ -289,7 +275,6 @@ class Assembler : public vixl::Assembler {
static bool SupportsFloatingPoint() { return true; }
static bool SupportsUnalignedAccesses() { return true; }
static bool SupportsFastUnalignedAccesses() { return true; }
static bool SupportsSimd() { return js::jit::SupportsSimd; }
static bool HasRoundInstruction(RoundingMode mode) { return false; }

View File

@ -446,6 +446,10 @@ void MacroAssembler::flush() { Assembler::flush(); }
// Stack manipulation functions.
void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
for (GeneralRegisterBackwardIterator iter(set.gprs()); iter.more();) {
vixl::CPURegister src[4] = {vixl::NoCPUReg, vixl::NoCPUReg, vixl::NoCPUReg,
vixl::NoCPUReg};
@ -474,6 +478,10 @@ void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
Register scratch) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
FloatRegisterSet fpuSet(set.fpus().reduceSetForPush());
unsigned numFpu = fpuSet.size();
int32_t diffF = fpuSet.getPushSizeInBytes();
@ -510,6 +518,10 @@ void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
LiveRegisterSet ignore) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
// The offset of the data from the stack pointer.
uint32_t offset = 0;

View File

@ -467,6 +467,10 @@ void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm) {
}
static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
// The stack saved in spArg must be (higher entries have higher memory
// addresses):
// - snapshotOffset_

View File

@ -120,12 +120,6 @@ static constexpr Register RegExpTesterLastIndexReg = CallTempReg2;
static constexpr uint32_t CodeAlignment = 8;
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
/* clang-format off */
// MIPS instruction types
// +---------------------------------------------------------------+
@ -1237,7 +1231,6 @@ class AssemblerMIPSShared : public AssemblerShared {
}
static bool SupportsUnalignedAccesses() { return true; }
static bool SupportsFastUnalignedAccesses() { return false; }
static bool SupportsSimd() { return js::jit::SupportsSimd; }
static bool HasRoundInstruction(RoundingMode mode) { return false; }

View File

@ -54,6 +54,10 @@ FloatRegister FloatRegister::singleOverlay() const {
}
FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
LiveFloatRegisterSet mod;
for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
// Even for single size registers save complete double register.
@ -63,6 +67,10 @@ FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
}
uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
FloatRegisterSet ss = s.reduceSetForPush();
uint64_t bits = ss.bits();
// We are only pushing double registers.

View File

@ -137,6 +137,7 @@ class FloatRegister : public FloatRegisterMIPSShared {
bool isSingle() const { return kind_ == Single; }
bool isDouble() const { return kind_ == Double; }
bool isInvalid() const { return code_ == FloatRegisters::invalid_freg; }
bool isSimd128() const { return false; }
FloatRegister doubleOverlay() const;
FloatRegister singleOverlay() const;

View File

@ -154,15 +154,6 @@ static constexpr uint32_t SimdMemoryAlignment = 8;
static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
static const uint32_t WasmTrapInstructionLength = 4;
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
static constexpr Scale ScalePointer = TimesFour;
class Assembler : public AssemblerMIPSShared {

View File

@ -1929,6 +1929,10 @@ void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
}
MOZ_ASSERT(diffG == 0);
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
if (diffF > 0) {
// Double values have to be aligned. We reserve extra space so that we can
// start writing from the first aligned location.
@ -1955,6 +1959,10 @@ void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
const int32_t reservedG = diffG;
const int32_t reservedF = diffF;
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
if (reservedF > 0) {
// Read the buffer from the first aligned location.
ma_addu(SecondScratchReg, sp, Imm32(reservedF));
@ -1999,6 +2007,10 @@ void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
}
MOZ_ASSERT(diffG == 0);
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
if (diffF > 0) {
computeEffectiveAddress(dest, scratch);
ma_and(scratch, scratch, Imm32(~(ABIStackAlignment - 1)));

View File

@ -581,6 +581,11 @@ static void PushBailoutFrame(MacroAssembler& masm, uint32_t frameClass,
masm.storePtr(Register::FromCode(i), Address(StackPointer, off));
}
#ifdef ENABLE_WASM_SIMD
// What to do for SIMD?
# error "Needs more careful logic if SIMD is enabled"
#endif
// Save floating point registers
// We can use as_sdc1 because the stack is aligned.
for (uint32_t i = 0; i < FloatRegisters::TotalDouble; i++) {

View File

@ -52,6 +52,10 @@ FloatRegister FloatRegister::doubleOverlay() const {
}
FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
LiveFloatRegisterSet mod;
for (FloatRegisterIterator iter(s); iter.more(); ++iter) {
if ((*iter).isSingle()) {
@ -65,6 +69,10 @@ FloatRegisterSet FloatRegister::ReduceSetForPush(const FloatRegisterSet& s) {
}
uint32_t FloatRegister::GetPushSizeInBytes(const FloatRegisterSet& s) {
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
FloatRegisterSet ss = s.reduceSetForPush();
uint64_t bits = ss.bits();
// We are only pushing double registers.

View File

@ -131,6 +131,7 @@ class FloatRegister : public FloatRegisterMIPSShared {
bool isSingle() const { return kind_ == Codes::Single; }
bool isDouble() const { return kind_ == Codes::Double; }
bool isSimd128() const { return false; }
FloatRegister singleOverlay() const;
FloatRegister doubleOverlay() const;

View File

@ -188,15 +188,6 @@ static constexpr uint32_t SimdMemoryAlignment = 16;
static constexpr uint32_t WasmStackAlignment = SimdMemoryAlignment;
static const uint32_t WasmTrapInstructionLength = 4;
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
static constexpr Scale ScalePointer = TimesEight;
class Assembler : public AssemblerMIPSShared {

View File

@ -1781,6 +1781,11 @@ void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
diff -= sizeof(intptr_t);
storePtr(*iter, Address(StackPointer, diff));
}
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
iter.more(); ++iter) {
diff -= sizeof(double);
@ -1801,6 +1806,11 @@ void MacroAssembler::PopRegsInMaskIgnore(LiveRegisterSet set,
loadPtr(Address(StackPointer, diff), *iter);
}
}
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
for (FloatRegisterBackwardIterator iter(set.fpus().reduceSetForPush());
iter.more(); ++iter) {
diff -= sizeof(double);
@ -1828,6 +1838,10 @@ void MacroAssembler::storeRegsInMask(LiveRegisterSet set, Address dest,
}
MOZ_ASSERT(diffG == 0);
#ifdef ENABLE_WASM_SIMD
# error "Needs more careful logic if SIMD is enabled"
#endif
for (FloatRegisterBackwardIterator iter(fpuSet); iter.more(); ++iter) {
FloatRegister reg = *iter;
diffF -= reg.size();

View File

@ -16,21 +16,11 @@
namespace js {
namespace jit {
static const bool SupportsSimd = false;
static const uint32_t SimdMemoryAlignment =
4; // Make it 4 to avoid a bunch of div-by-zero warnings
static const uint32_t WasmStackAlignment = 8;
static const uint32_t WasmTrapInstructionLength = 0;
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
class Registers {
public:
enum RegisterID {

View File

@ -231,7 +231,6 @@ class MacroAssemblerNone : public Assembler {
}
static bool SupportsFloatingPoint() { return false; }
static bool SupportsSimd() { return false; }
static bool SupportsUnalignedAccesses() { return false; }
static bool SupportsFastUnalignedAccesses() { return false; }

View File

@ -83,9 +83,14 @@ CodeGeneratorShared::CodeGeneratorShared(MIRGenerator* gen, LIRGraph* graph,
MOZ_ASSERT(graph->argumentSlotCount() == 0);
frameDepth_ += gen->wasmMaxStackArgBytes();
static_assert(!SupportsSimd,
"we need padding so that local slots are SIMD-aligned and "
"the stack must be kept SIMD-aligned too.");
#ifdef ENABLE_WASM_SIMD
# ifdef JS_CODEGEN_X64
MOZ_CRASH("FIXME for SIMD");
# else
# error \
"we may need padding so that local slots are SIMD-aligned and the stack must be kept SIMD-aligned too."
# endif
#endif
if (gen->needsStaticStackAlignment()) {
// An MWasmCall does not align the stack pointer at calls sites but

View File

@ -249,11 +249,6 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 &&
JitStackValueAlignment >= 1,
"Stack alignment should be a non-zero multiple of sizeof(Value)");
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,

View File

@ -332,27 +332,27 @@ void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
// Push AllRegs in a way that is compatible with RegisterDump, regardless of
// what PushRegsInMask might do to reduce the set size.
static void DumpAllRegs(MacroAssembler& masm) {
if (JitSupportsSimd()) {
masm.PushRegsInMask(AllRegs);
} else {
// When SIMD isn't supported, PushRegsInMask reduces the set of float
// registers to be double-sized, while the RegisterDump expects each of
// the float registers to have the maximal possible size
// (Simd128DataSize). To work around this, we just spill the double
// registers by hand here, using the register dump offset directly.
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
++iter) {
masm.Push(*iter);
}
masm.reserveStack(sizeof(RegisterDump::FPUArray));
for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
++iter) {
FloatRegister reg = *iter;
Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
masm.storeDouble(reg, spillAddress);
}
#ifdef ENABLE_WASM_SIMD
masm.PushRegsInMask(AllRegs);
#else
// When SIMD isn't supported, PushRegsInMask reduces the set of float
// registers to be double-sized, while the RegisterDump expects each of
// the float registers to have the maximal possible size
// (Simd128DataSize). To work around this, we just spill the double
// registers by hand here, using the register dump offset directly.
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
++iter) {
masm.Push(*iter);
}
masm.reserveStack(sizeof(RegisterDump::FPUArray));
for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
++iter) {
FloatRegister reg = *iter;
Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
masm.storeDouble(reg, spillAddress);
}
#endif
}
void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {
@ -545,6 +545,7 @@ void JitRuntime::generateArgumentsRectifier(MacroAssembler& masm) {
static void PushBailoutFrame(MacroAssembler& masm, Register spArg) {
// Push registers such that we can access them from [base + code].
DumpAllRegs(masm);
// Get the stack pointer into a register, pre-alignment.
masm.movq(rsp, spArg);
}

View File

@ -42,9 +42,9 @@ js::jit::FloatRegisterSet js::jit::FloatRegister::ReduceSetForPush(
SetType bits = s.bits();
// Ignore all SIMD register, if not supported.
if (!JitSupportsSimd()) {
bits &= Codes::AllPhysMask * Codes::SpreadScalar;
}
#ifndef ENABLE_WASM_SIMD
bits &= Codes::AllPhysMask * Codes::SpreadScalar;
#endif
// Exclude registers which are already pushed with a larger type. High bits
// are associated with larger register types. Thus we keep the set of

View File

@ -22,15 +22,6 @@
namespace js {
namespace jit {
// Does this architecture support SIMD conversions between Uint32x4 and
// Float32x4?
static constexpr bool SupportsUint32x4FloatConversions = false;
// Does this architecture support comparisons of unsigned integer vectors?
static constexpr bool SupportsUint8x16Compares = false;
static constexpr bool SupportsUint16x8Compares = false;
static constexpr bool SupportsUint32x4Compares = false;
#if defined(JS_CODEGEN_X86)
// In bytes: slots needed for potential memory->memory move spills.
// +8 for cycles
@ -186,10 +177,17 @@ class FloatRegisters {
public:
using Encoding = X86Encoding::XMMRegisterID;
// Observe that there is a Simd128 type on both x86 and x64 whether SIMD is
// implemented/enabled or not, and that the RegisterContent union is large
// enough for a V128 datum always. Producers and consumers of a register dump
// must be aware of this even if they don't need to save/restore values in the
// high lanes of the SIMD registers. See the DumpAllRegs() implementations,
// for example.
enum ContentType {
Single, // 32-bit float.
Double, // 64-bit double.
Simd128, // 128-bit SIMD type (int32x4, bool16x8, etc).
Simd128, // 128-bit Wasm SIMD type.
NumTypes
};
@ -197,8 +195,7 @@ class FloatRegisters {
union RegisterContent {
float s;
double d;
int32_t i4[4];
float s4[4];
uint8_t v128[16];
};
static const char* GetName(Encoding code) {

View File

@ -1082,9 +1082,7 @@ class AssemblerX86Shared : public AssemblerShared {
static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
static bool SupportsUnalignedAccesses() { return true; }
static bool SupportsFastUnalignedAccesses() { return true; }
static bool SupportsSimd() {
return js::jit::SupportsSimd && CPUInfo::IsSSE2Present();
}
static bool SupportsWasmSimd() { return CPUInfo::IsSSE41Present(); }
static bool HasAVX() { return CPUInfo::IsAVXPresent(); }
static bool HasRoundInstruction(RoundingMode mode) {

View File

@ -386,7 +386,7 @@ void MacroAssembler::PushRegsInMask(LiveRegisterSet set) {
}
MOZ_ASSERT(numFpu == 0);
// x64 padding to keep the stack aligned on uintptr_t. Keep in sync with
// GetPushBytesInSize.
// GetPushSizeInBytes.
diffF -= diffF % sizeof(uintptr_t);
MOZ_ASSERT(diffF == 0);
}

View File

@ -161,11 +161,6 @@ static_assert(JitStackAlignment % sizeof(Value) == 0 &&
JitStackValueAlignment >= 1,
"Stack alignment should be a non-zero multiple of sizeof(Value)");
// This boolean indicates whether we support SIMD instructions flavoured for
// this architecture or not. Rather than a method in the LIRGenerator, it is
// here such that it is accessible from the entire codebase. Once full support
// for SIMD is reached on all tier-1 platforms, this constant can be deleted.
static constexpr bool SupportsSimd = false;
static constexpr uint32_t SimdMemoryAlignment = 16;
static_assert(CodeAlignment % SimdMemoryAlignment == 0,

View File

@ -314,27 +314,27 @@ void JitRuntime::generateEnterJIT(JSContext* cx, MacroAssembler& masm) {
// Push AllRegs in a way that is compatible with RegisterDump, regardless of
// what PushRegsInMask might do to reduce the set size.
static void DumpAllRegs(MacroAssembler& masm) {
if (JitSupportsSimd()) {
masm.PushRegsInMask(AllRegs);
} else {
// When SIMD isn't supported, PushRegsInMask reduces the set of float
// registers to be double-sized, while the RegisterDump expects each of
// the float registers to have the maximal possible size
// (Simd128DataSize). To work around this, we just spill the double
// registers by hand here, using the register dump offset directly.
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
++iter) {
masm.Push(*iter);
}
masm.reserveStack(sizeof(RegisterDump::FPUArray));
for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
++iter) {
FloatRegister reg = *iter;
Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
masm.storeDouble(reg, spillAddress);
}
#ifdef ENABLE_WASM_SIMD
masm.PushRegsInMask(AllRegs);
#else
// When SIMD isn't supported, PushRegsInMask reduces the set of float
// registers to be double-sized, while the RegisterDump expects each of
// the float registers to have the maximal possible size
// (Simd128DataSize). To work around this, we just spill the double
// registers by hand here, using the register dump offset directly.
for (GeneralRegisterBackwardIterator iter(AllRegs.gprs()); iter.more();
++iter) {
masm.Push(*iter);
}
masm.reserveStack(sizeof(RegisterDump::FPUArray));
for (FloatRegisterBackwardIterator iter(AllRegs.fpus()); iter.more();
++iter) {
FloatRegister reg = *iter;
Address spillAddress(StackPointer, reg.getRegisterDumpOffsetInBytes());
masm.storeDouble(reg, spillAddress);
}
#endif
}
void JitRuntime::generateInvalidator(MacroAssembler& masm, Label* bailoutTail) {

View File

@ -437,6 +437,13 @@ JS::ContextOptions& JS::ContextOptions::setWasmMultiValue(bool flag) {
return *this;
}
JS::ContextOptions& JS::ContextOptions::setWasmSimd(bool flag) {
  // Record the caller's request to enable/disable wasm SIMD. In builds
  // without compiled-in SIMD support the request is ignored, matching the
  // pattern of the other conditionally-compiled option setters.
#ifndef ENABLE_WASM_SIMD
  (void)flag;  // avoid an unused-parameter warning in non-SIMD builds
#else
  wasmSimd_ = flag;
#endif
  return *this;
}
JS::ContextOptions& JS::ContextOptions::setFuzzing(bool flag) {
#ifdef FUZZING
fuzzing_ = flag;

View File

@ -509,6 +509,9 @@ bool shell::enableWasmGc = false;
#ifdef ENABLE_WASM_MULTI_VALUE
bool shell::enableWasmMultiValue = true;
#endif
#ifdef ENABLE_WASM_SIMD
bool shell::enableWasmSimd = false;
#endif
bool shell::enableWasmVerbose = false;
bool shell::enableTestWasmAwaitTier2 = false;
#ifdef ENABLE_WASM_BIGINT
@ -10504,6 +10507,9 @@ static bool SetContextOptions(JSContext* cx, const OptionParser& op) {
#endif
#ifdef ENABLE_WASM_MULTI_VALUE
enableWasmMultiValue = !op.getBoolOption("no-wasm-multi-value");
#endif
#ifdef ENABLE_WASM_SIMD
enableWasmSimd = op.getBoolOption("wasm-simd");
#endif
enableWasmVerbose = op.getBoolOption("wasm-verbose");
enableTestWasmAwaitTier2 = op.getBoolOption("test-wasm-await-tier2");
@ -10536,6 +10542,9 @@ static bool SetContextOptions(JSContext* cx, const OptionParser& op) {
#endif
#ifdef ENABLE_WASM_MULTI_VALUE
.setWasmMultiValue(enableWasmMultiValue)
#endif
#ifdef ENABLE_WASM_SIMD
.setWasmSimd(enableWasmSimd)
#endif
.setWasmVerbose(enableWasmVerbose)
.setTestWasmAwaitTier2(enableTestWasmAwaitTier2)
@ -10910,6 +10919,9 @@ static void SetWorkerContextOptions(JSContext* cx) {
#ifdef ENABLE_WASM_MULTI_VALUE
.setWasmMultiValue(enableWasmMultiValue)
#endif
#ifdef ENABLE_WASM_SIMD
.setWasmSimd(enableWasmSimd)
#endif
#ifdef ENABLE_WASM_BIGINT
.setWasmBigIntEnabled(enableWasmBigInt)
#endif
@ -11352,7 +11364,12 @@ int main(int argc, char** argv, char** envp) {
#else
!op.addBoolOption('\0', "no-wasm-multi-value", "No-op") ||
#endif
#ifdef ENABLE_WASM_SIMD
!op.addBoolOption('\0', "wasm-simd",
"Enable experimental wasm SIMD features") ||
#else
!op.addBoolOption('\0', "wasm-simd", "No-op") ||
#endif
!op.addBoolOption('\0', "no-native-regexp",
"Disable native regexp compilation") ||
#ifdef ENABLE_NEW_REGEXP

View File

@ -114,6 +114,9 @@ extern bool enableWasmGc;
#ifdef ENABLE_WASM_MULTI_VALUE
extern bool enableWasmMultiValue;
#endif
#ifdef ENABLE_WASM_SIMD
extern bool enableWasmSimd;
#endif
extern bool enableWasmVerbose;
extern bool enableTestWasmAwaitTier2;
#ifdef ENABLE_WASM_BIGINT

View File

@ -12951,7 +12951,6 @@ bool js::wasm::BaselinePlatformSupport() {
return false;
}
#endif
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)

View File

@ -32,6 +32,11 @@
#include "jit/Simulator.h"
#include "js/Printf.h"
#include "js/PropertySpec.h" // JS_{PS,FN}{,_END}
#if defined(JS_CODEGEN_X64) // Assembler::HasSSE41
# include "jit/x64/Assembler-x64.h"
# include "jit/x86-shared/Architecture-x86-shared.h"
# include "jit/x86-shared/Assembler-x86-shared.h"
#endif
#include "util/StringBuffer.h"
#include "util/Text.h"
#include "vm/ErrorObject.h"
@ -76,6 +81,14 @@ static inline bool WasmMultiValueFlag(JSContext* cx) {
#endif
}
// True iff wasm SIMD should be considered enabled on this context: the
// context option must be set AND the JIT must support wasm SIMD on this
// hardware. Compiles to a constant false when SIMD support is not built in.
static inline bool WasmSimdFlag(JSContext* cx) {
#ifndef ENABLE_WASM_SIMD
  return false;
#else
  return cx->options().wasmSimd() && js::jit::JitSupportsWasmSimd();
#endif
}
// Compiler availability predicates. These must be kept in sync with the
// feature predicates in the next section below.
//
@ -119,9 +132,10 @@ static inline bool Append(JSStringBuilder* reason, const char (&s)[ArrayLength],
bool wasm::IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
JSStringBuilder* reason) {
// Ion has no debugging support, no gc support.
// Ion has no debugging support, no gc support, no simd support.
bool debug = cx->realm() && cx->realm()->debuggerObservesAsmJS();
bool gc = cx->options().wasmGc();
bool simd = WasmSimdFlag(cx);
if (reason) {
char sep = 0;
if (debug && !Append(reason, "debug", &sep)) {
@ -130,8 +144,11 @@ bool wasm::IonDisabledByFeatures(JSContext* cx, bool* isDisabled,
if (gc && !Append(reason, "gc", &sep)) {
return false;
}
if (simd && !Append(reason, "simd", &sep)) {
return false;
}
}
*isDisabled = debug || gc;
*isDisabled = debug || gc || simd;
return true;
}
@ -147,7 +164,7 @@ bool wasm::CraneliftAvailable(JSContext* cx) {
bool wasm::CraneliftDisabledByFeatures(JSContext* cx, bool* isDisabled,
JSStringBuilder* reason) {
// Cranelift has no debugging support, no gc support, no multi-value support,
// no threads, and on ARM64, no reference types.
// no threads, no simd, and on ARM64, no reference types.
bool debug = cx->realm() && cx->realm()->debuggerObservesAsmJS();
bool gc = cx->options().wasmGc();
bool multiValue = WasmMultiValueFlag(cx);
@ -160,6 +177,7 @@ bool wasm::CraneliftDisabledByFeatures(JSContext* cx, bool* isDisabled,
// On other platforms, assume reftypes has been implemented.
bool reftypesOnArm64 = false;
#endif
bool simd = WasmSimdFlag(cx);
if (reason) {
char sep = 0;
if (debug && !Append(reason, "debug", &sep)) {
@ -177,8 +195,11 @@ bool wasm::CraneliftDisabledByFeatures(JSContext* cx, bool* isDisabled,
if (reftypesOnArm64 && !Append(reason, "reftypes", &sep)) {
return false;
}
if (simd && !Append(reason, "simd", &sep)) {
return false;
}
}
*isDisabled = debug || gc || multiValue || threads || reftypesOnArm64;
*isDisabled = debug || gc || multiValue || threads || reftypesOnArm64 || simd;
return true;
}
@ -219,6 +240,11 @@ bool wasm::I64BigIntConversionAvailable(JSContext* cx) {
#endif
}
bool wasm::SimdAvailable(JSContext* cx) {
  // Only the baseline compiler implements SIMD; Cranelift and Ion do not.
  // So SIMD is available exactly when the feature flag is on and baseline
  // compilation is itself available.
  if (!WasmSimdFlag(cx)) {
    return false;
  }
  return BaselineAvailable(cx);
}
bool wasm::ThreadsAvailable(JSContext* cx) {
// Cranelift does not support atomics.
return cx->realm() &&

View File

@ -112,6 +112,9 @@ bool I64BigIntConversionAvailable(JSContext* cx);
// Shared memory and atomics.
bool ThreadsAvailable(JSContext* cx);
// SIMD data and operations.
bool SimdAvailable(JSContext* cx);
// Compiles the given binary wasm module given the ArrayBufferObject
// and links the module's imports with the given import object.

View File

@ -2491,8 +2491,9 @@ static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask & ~((uint32_t(1) << Registers::sp) |
(uint32_t(1) << Registers::pc))),
FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
"high lanes of SIMD registers need to be saved too.");
# ifdef ENABLE_WASM_SIMD
# error "high lanes of SIMD registers need to be saved too."
# endif
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask &
@ -2501,8 +2502,9 @@ static const LiveRegisterSet RegsToPreserve(
(uint32_t(1) << Registers::sp) |
(uint32_t(1) << Registers::zero))),
FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
"high lanes of SIMD registers need to be saved too.");
# ifdef ENABLE_WASM_SIMD
# error "high lanes of SIMD registers need to be saved too."
# endif
#elif defined(JS_CODEGEN_ARM64)
// We assume that traps do not happen while lr is live. This both ensures that
// the size of RegsToPreserve is a multiple of 2 (preserving WasmStackAlignment)
@ -2512,15 +2514,22 @@ static const LiveRegisterSet RegsToPreserve(
~((uint32_t(1) << Registers::StackPointer) |
(uint32_t(1) << Registers::lr))),
FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
"high lanes of SIMD registers need to be saved too");
#else
# ifdef ENABLE_WASM_SIMD
# error "high lanes of SIMD registers need to be saved too."
# endif
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's fine to use AllVector128Mask even when SIMD is not enabled:
// PushRegsInMask strips out the high lanes of the XMM registers in this case.
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask &
~(uint32_t(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllDoubleMask));
static_assert(!SupportsSimd,
"high lanes of SIMD registers need to be saved too");
FloatRegisterSet(FloatRegisters::AllVector128Mask));
#else
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
# error "no SIMD support"
# endif
#endif
// Generate a MachineState which describes the locations of the GPRs as saved

View File

@ -916,6 +916,9 @@ static void ReloadPrefsCallback(const char* pref, void* aXpccx) {
#ifdef ENABLE_WASM_MULTI_VALUE
bool useWasmMultiValue =
Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_multi_value");
#endif
#ifdef ENABLE_WASM_SIMD
bool useWasmSimd = Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_simd");
#endif
bool useWasmVerbose = Preferences::GetBool(JS_OPTIONS_DOT_STR "wasm_verbose");
bool throwOnAsmJSValidationFailure = Preferences::GetBool(
@ -976,6 +979,9 @@ static void ReloadPrefsCallback(const char* pref, void* aXpccx) {
#endif
#ifdef ENABLE_WASM_MULTI_VALUE
.setWasmMultiValue(useWasmMultiValue)
#endif
#ifdef ENABLE_WASM_SIMD
.setWasmSimd(useWasmSimd)
#endif
.setWasmVerbose(useWasmVerbose)
.setThrowOnAsmJSValidationFailure(throwOnAsmJSValidationFailure)

View File

@ -1123,6 +1123,9 @@ pref("javascript.options.wasm_reftypes", true);
#ifdef ENABLE_WASM_MULTI_VALUE
pref("javascript.options.wasm_multi_value", true);
#endif
#ifdef ENABLE_WASM_SIMD
pref("javascript.options.wasm_simd", false);
#endif
pref("javascript.options.native_regexp", true);
pref("javascript.options.parallel_parsing", true);
// Async stacks instrumentation adds overhead that is only