Bug 1287967 - Baldr: Add current_memory and grow_memory (r=luke,sunfish)

MozReview-Commit-ID: 8whwVTSYV9a
This commit is contained in:
Dimo 2016-08-29 21:30:04 -05:00
parent d2db92ebce
commit 71e7008950
70 changed files with 1515 additions and 674 deletions

View File

@ -22,4 +22,4 @@
# changes to stick? As of bug 928195, this shouldn't be necessary! Please
# don't change CLOBBER for WebIDL changes any more.
Bug 1294660 - CSS properties regeneration needs a clobber
Bug 1287967 - Changing js/src/old-configure.in seems to require clobber

View File

@ -11,6 +11,7 @@
<body>
<script>
const wasmTextToBinary = SpecialPowers.unwrap(SpecialPowers.Cu.getJSTestingFunctions().wasmTextToBinary);
const wasmIsSupported = SpecialPowers.Cu.getJSTestingFunctions().wasmIsSupported
const fooModuleCode = wasmTextToBinary(`(module
(func $foo (result i32) (i32.const 42))
(export "foo" $foo)
@ -27,7 +28,10 @@ function propertiesExist() {
ok(WebAssembly, "WebAssembly object should exist");
ok(WebAssembly.compile, "WebAssembly.compile function should exist");
runTest();
if (!wasmIsSupported())
SimpleTest.finish();
else
runTest();
}
function compileFail() {

View File

@ -1772,7 +1772,7 @@ class MOZ_STACK_CLASS ModuleValidator
if (!args.initFromContext(cx_, Move(scriptedCaller)))
return false;
auto genData = MakeUnique<ModuleGeneratorData>(args.assumptions.usesSignal, ModuleKind::AsmJS);
auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
if (!genData ||
!genData->sigs.resize(MaxSigs) ||
!genData->funcSigs.resize(MaxFuncs) ||
@ -7806,8 +7806,7 @@ CheckBuffer(JSContext* cx, const AsmJSMetadata& metadata, HandleValue bufferVal,
if (buffer->is<ArrayBufferObject>()) {
Rooted<ArrayBufferObject*> abheap(cx, &buffer->as<ArrayBufferObject>());
bool useSignalHandlers = metadata.assumptions.usesSignal.forOOB;
if (!ArrayBufferObject::prepareForAsmJS(cx, abheap, useSignalHandlers))
if (!ArrayBufferObject::prepareForAsmJS(cx, abheap))
return LinkFail(cx, "Unable to prepare ArrayBuffer for asm.js use");
}
@ -8817,28 +8816,17 @@ js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
return out.finishString();
}
/*****************************************************************************/
// asm.js heap
// The asm.js valid heap lengths are precisely the WASM valid heap lengths for ARM
// greater or equal to MinHeapLength
static const size_t MinHeapLength = PageSize;
// From the asm.js spec Linking section:
// the heap object's byteLength must be either
// 2^n for n in [12, 24)
// or
// 2^24 * n for n >= 1.
bool
js::IsValidAsmJSHeapLength(uint32_t length)
{
bool valid = length >= MinHeapLength &&
(IsPowerOfTwo(length) ||
(length & 0x00ffffff) == 0);
if (length < MinHeapLength)
return false;
MOZ_ASSERT_IF(valid, length % PageSize == 0);
MOZ_ASSERT_IF(valid, length == RoundUpToNextValidAsmJSHeapLength(length));
return valid;
return wasm::IsValidARMLengthImmediate(length);
}
uint32_t
@ -8847,9 +8835,5 @@ js::RoundUpToNextValidAsmJSHeapLength(uint32_t length)
if (length <= MinHeapLength)
return MinHeapLength;
if (length <= 16 * 1024 * 1024)
return mozilla::RoundUpPow2(length);
MOZ_ASSERT(length <= 0xff000000);
return (length + 0x00ffffff) & ~0x00ffffff;
return wasm::RoundUpToNextValidARMLengthImmediate(length);
}

View File

@ -199,13 +199,13 @@ enum class AstExprKind
GetLocal,
If,
Load,
Nop,
Return,
SetGlobal,
SetLocal,
Store,
TernaryOperator,
UnaryOperator,
NullaryOperator,
Unreachable
};
@ -228,14 +228,6 @@ class AstExpr : public AstNode
}
};
struct AstNop : AstExpr
{
static const AstExprKind Kind = AstExprKind::Nop;
AstNop()
: AstExpr(AstExprKind::Nop)
{}
};
struct AstUnreachable : AstExpr
{
static const AstExprKind Kind = AstExprKind::Unreachable;
@ -833,6 +825,20 @@ class AstModule : public AstNode
}
};
// An AST expression that takes no operands and is identified solely by its
// opcode (e.g. nop, current_memory).
class AstNullaryOperator final : public AstExpr
{
    Expr expr_;

  public:
    static const AstExprKind Kind = AstExprKind::NullaryOperator;
    explicit AstNullaryOperator(Expr expr)
      : AstExpr(Kind),
        expr_(expr)
    {}

    // The wasm opcode this operator represents.
    Expr expr() const { return expr_; }
};
class AstUnaryOperator final : public AstExpr
{
Expr expr_;

View File

@ -96,6 +96,7 @@
#include "asmjs/WasmBaselineCompile.h"
#include "asmjs/WasmBinaryIterator.h"
#include "asmjs/WasmGenerator.h"
#include "asmjs/WasmSignalHandlers.h"
#include "jit/AtomicOp.h"
#include "jit/IonTypes.h"
#include "jit/JitAllocPolicy.h"
@ -450,6 +451,8 @@ class BaseCompiler
ValTypeVector SigDD_;
ValTypeVector SigD_;
ValTypeVector SigF_;
ValTypeVector SigI_;
ValTypeVector Sig_;
Label returnLabel_;
Label outOfLinePrologue_;
Label bodyLabel_;
@ -2011,6 +2014,10 @@ class BaseCompiler
}
}
// Reserve the next pointer-sized ABI argument slot of |call| without binding
// a value to it.  Call sites use this to reserve the slot for the implicit
// Instance* passed to builtin instance-method calls.
const ABIArg reserveArgument(FunctionCall& call) {
    return call.abi_.next(MIRType::Pointer);
}
// TODO / OPTIMIZE: Note passArg is used only in one place. I'm
// not saying we should manually inline it, but we could hoist the
// dispatch into the caller and have type-specific implementations
@ -2180,17 +2187,21 @@ class BaseCompiler
callSymbolic(builtin, call);
}
// Emit a call to a builtin that is implemented as an Instance method.
// |instanceArg| is the ABI slot previously reserved via reserveArgument(),
// through which the callee receives the Instance*.
void builtinInstanceMethodCall(SymbolicAddress builtin, const ABIArg& instanceArg,
                               const FunctionCall& call)
{
    // NOTE(review): |desc| is constructed but never used -- presumably it
    // should be recorded as a call site or passed to the masm helper;
    // confirm intent or remove.
    CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
    masm.wasmCallBuiltinInstanceMethod(instanceArg, builtin);
}
//////////////////////////////////////////////////////////////////////
//
// Sundry low-level code generators.
void addInterruptCheck()
{
if (mg_.usesSignal.forInterrupt)
return;
// FIXME - implement this.
MOZ_CRASH("Only interrupting signal handlers supported");
// Always use signals for interrupts with Asm.JS/Wasm
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
}
void jumpTable(LabelVector& labels) {
@ -2902,16 +2913,15 @@ class BaseCompiler
// space for a guard region. Also, on x64 the atomic loads and stores
// can't (yet) use the signal handlers.
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
if (mg_.usesSignal.forOOB && !access.isAtomicAccess())
return false;
#endif
#ifdef WASM_HUGE_MEMORY
return false;
#else
return access.needsBoundsCheck();
#endif
}
bool throwOnOutOfBounds(const MWasmMemoryAccess& access) {
return access.isAtomicAccess() || !isCompilingAsmJS();
return !isCompilingAsmJS();
}
// For asm.js code only: If we have a non-zero offset, it's possible that
@ -2925,19 +2935,6 @@ class BaseCompiler
}
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
# if defined(JS_CODEGEN_X64)
// TODO / CLEANUP - copied from CodeGenerator-x64.cpp, should share.
MemoryAccess
WasmMemoryAccess(uint32_t before)
{
if (isCompilingAsmJS())
return MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset);
return MemoryAccess(before, MemoryAccess::Throw, MemoryAccess::DontWrapOffset);
}
# endif
class OffsetBoundsCheck : public OutOfLineCode
{
Label* maybeOutOfBounds;
@ -3146,7 +3143,8 @@ class BaseCompiler
}
}
masm.append(WasmMemoryAccess(before));
if (isCompilingAsmJS())
masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
// TODO: call verifyHeapAccessDisassembly somehow
# elif defined(JS_CODEGEN_X86)
Operand srcAddr(ptr.reg, access.offset());
@ -3226,7 +3224,8 @@ class BaseCompiler
MOZ_CRASH("Compiler bug: Unexpected array type");
}
masm.append(WasmMemoryAccess(before));
if (isCompilingAsmJS())
masm.append(MemoryAccess(before, MemoryAccess::CarryOn, MemoryAccess::WrapOffset));
// TODO: call verifyHeapAccessDisassembly somehow
# elif defined(JS_CODEGEN_X86)
Operand dstAddr(ptr.reg, access.offset());
@ -3497,6 +3496,8 @@ class BaseCompiler
void emitConvertU64ToF64();
void emitReinterpretI32AsF32();
void emitReinterpretI64AsF64();
MOZ_MUST_USE bool emitGrowMemory(uint32_t callOffset);
MOZ_MUST_USE bool emitCurrentMemory(uint32_t callOffset);
};
void
@ -6085,6 +6086,72 @@ BaseCompiler::emitStoreWithCoercion(ValType resultType, Scalar::Type viewType)
return true;
}
// Compile Expr::GrowMemory as a call to the GrowMemory builtin: the implicit
// Instance* is reserved as the first ABI argument, followed by the i32 page
// delta; the builtin's i32 result (previous size in pages, per readCallReturn)
// is pushed on the value stack.
bool
BaseCompiler::emitGrowMemory(uint32_t callOffset)
{
    if (deadCode_)
        return skipCall(SigI_, ExprType::I32);

    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);

    // Flush the value stack before emitting the call.
    sync();

    uint32_t numArgs = 1;
    size_t stackSpace = stackConsumed(numArgs);

    FunctionCall baselineCall(lineOrBytecode);
    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(true));

    // Reserve the first argument slot for the Instance*.
    ABIArg instanceArg = reserveArgument(baselineCall);

    if (!emitCallArgs(SigI_, baselineCall))
        return false;

    if (!iter_.readCallReturn(ExprType::I32))
        return false;

    builtinInstanceMethodCall(SymbolicAddress::GrowMemory, instanceArg, baselineCall);
    endCall(baselineCall);

    // Pop the consumed argument and release its stack space.
    popValueStackBy(numArgs);
    masm.freeStack(stackSpace);

    pushReturned(baselineCall, ExprType::I32);
    return true;
}
// Compile Expr::CurrentMemory as a call to the CurrentMemory builtin.
// current_memory takes no operands; only the implicit Instance* is passed,
// and the builtin's i32 result (memory size in pages) is pushed.
bool
BaseCompiler::emitCurrentMemory(uint32_t callOffset)
{
    if (deadCode_)
        return skipCall(Sig_, ExprType::I32);

    uint32_t lineOrBytecode = readCallSiteLineOrBytecode(callOffset);

    // Flush the value stack before emitting the call.
    sync();

    FunctionCall baselineCall(lineOrBytecode);
    beginCall(baselineCall, EscapesSandbox(true), IsBuiltinCall(true));

    // Reserve the first argument slot for the Instance*.
    ABIArg instanceArg = reserveArgument(baselineCall);

    if (!emitCallArgs(Sig_, baselineCall))
        return false;

    if (!iter_.readCallReturn(ExprType::I32))
        return false;

    builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, instanceArg, baselineCall);
    endCall(baselineCall);

    pushReturned(baselineCall, ExprType::I32);
    return true;
}
bool
BaseCompiler::emitBody()
{
@ -6144,7 +6211,7 @@ BaseCompiler::emitBody()
switch (expr) {
// Control opcodes
case Expr::Nop:
CHECK(iter_.readNullary());
CHECK(iter_.readNullary(ExprType::Void));
if (!deadCode_)
pushVoid();
NEXT();
@ -6634,11 +6701,11 @@ BaseCompiler::emitBody()
case Expr::I32AtomicsExchange:
MOZ_CRASH("Unimplemented Atomics");
// Future opcodes
case Expr::CurrentMemory:
MOZ_CRASH("Unimplemented CurrentMemory");
// Memory Related
case Expr::GrowMemory:
MOZ_CRASH("Unimplemented GrowMemory");
CHECK_NEXT(emitGrowMemory(exprOffset));
case Expr::CurrentMemory:
CHECK_NEXT(emitCurrentMemory(exprOffset));
case Expr::Limit:;
}
@ -6784,6 +6851,8 @@ BaseCompiler::init()
return false;
if (!SigF_.append(ValType::F32))
return false;
if (!SigI_.append(ValType::I32))
return false;
const ValTypeVector& args = func_.sig().args();
@ -6886,10 +6955,11 @@ LiveRegisterSet BaseCompiler::VolatileReturnGPR = volatileReturnGPR();
bool
js::wasm::BaselineCanCompile(const FunctionGenerator* fg)
{
#if defined(JS_CODEGEN_X64)
if (!fg->usesSignalsForInterrupts())
return false;
// On all platforms we require signals for AsmJS/Wasm.
// If we made it this far we must have signals.
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
#if defined(JS_CODEGEN_X64)
if (fg->usesAtomics())
return false;

View File

@ -108,6 +108,7 @@ wasm::Classify(Expr expr)
case Expr::B8x16not:
case Expr::B16x8not:
case Expr::B32x4not:
case Expr::GrowMemory:
return ExprKind::Unary;
case Expr::I32Add:
case Expr::I32Sub:
@ -471,8 +472,7 @@ wasm::Classify(Expr expr)
case Expr::F32x4lessThanOrEqual:
return ExprKind::SimdComparison;
case Expr::CurrentMemory:
case Expr::GrowMemory:
break;
return ExprKind::Nullary;
}
MOZ_MAKE_COMPILER_ASSUME_IS_UNREACHABLE("unimplemented opcode");
}

View File

@ -454,7 +454,7 @@ class MOZ_STACK_CLASS ExprIter : private Policy
LinearMemoryAddress<Value>* addr);
MOZ_MUST_USE bool readStore(ValType resultType, uint32_t byteSize,
LinearMemoryAddress<Value>* addr, Value* value);
MOZ_MUST_USE bool readNullary();
MOZ_MUST_USE bool readNullary(ExprType retType);
MOZ_MUST_USE bool readSelect(ExprType* type,
Value* trueValue, Value* falseValue, Value* condition);
MOZ_MUST_USE bool readGetLocal(const ValTypeVector& locals, uint32_t* id);
@ -1118,11 +1118,11 @@ ExprIter<Policy>::readStore(ValType resultType, uint32_t byteSize,
template <typename Policy>
inline bool
ExprIter<Policy>::readNullary()
ExprIter<Policy>::readNullary(ExprType retType)
{
MOZ_ASSERT(Classify(expr_) == ExprKind::Nullary);
return push(ExprType::Void);
return push(retType);
}
template <typename Policy>

View File

@ -513,6 +513,20 @@ AstDecodeUnary(AstDecodeContext& c, ValType type, Expr expr)
return true;
}
// Decode a nullary operator: validate it against the iterator (pushing its
// result |type|) and record an AstNullaryOperator node for |expr|.
static bool
AstDecodeNullary(AstDecodeContext& c, ExprType type, Expr expr)
{
    if (!c.iter().readNullary(type))
        return false;

    auto* node = new(c.lifo) AstNullaryOperator(expr);
    if (!node)
        return false;

    c.iter().setResult(AstDecodeStackItem(node, 0));
    return true;
}
static bool
AstDecodeBinary(AstDecodeContext& c, ValType type, Expr expr)
{
@ -713,12 +727,8 @@ AstDecodeExpr(AstDecodeContext& c)
AstExpr* tmp;
switch (expr) {
case Expr::Nop:
if (!c.iter().readNullary())
if (!AstDecodeNullary(c, ExprType::Void, expr))
return false;
tmp = new(c.lifo) AstNop();
if (!tmp)
return false;
c.iter().setResult(AstDecodeStackItem(tmp));
break;
case Expr::Call:
if (!AstDecodeCall(c))
@ -800,6 +810,7 @@ AstDecodeExpr(AstDecodeContext& c)
case Expr::I32Clz:
case Expr::I32Ctz:
case Expr::I32Popcnt:
case Expr::GrowMemory:
if (!AstDecodeUnary(c, ValType::I32, expr))
return false;
break;
@ -1084,6 +1095,10 @@ AstDecodeExpr(AstDecodeContext& c)
if (!AstDecodeReturn(c))
return false;
break;
case Expr::CurrentMemory:
if (!AstDecodeNullary(c, ExprType::I32, expr))
return false;
break;
case Expr::Unreachable:
if (!c.iter().readUnreachable())
return false;

View File

@ -156,7 +156,8 @@ IsDropValueExpr(AstExpr& expr)
return !expr.as<AstBranchTable>().maybeValue();
case AstExprKind::If:
return !expr.as<AstIf>().hasElse();
case AstExprKind::Nop:
case AstExprKind::NullaryOperator:
return expr.as<AstNullaryOperator>().expr() == Expr::Nop;
case AstExprKind::Unreachable:
case AstExprKind::Return:
return true;
@ -357,9 +358,17 @@ PrintBlockLevelExpr(WasmPrintContext& c, AstExpr& expr, bool isLast)
// binary format parsing and rendering
static bool
PrintNop(WasmPrintContext& c, AstNop& nop)
PrintNullaryOperator(WasmPrintContext& c, AstNullaryOperator& op)
{
return c.buffer.append("nop");
const char* opStr;
switch (op.expr()) {
case Expr::Nop: opStr = "nop"; break;
case Expr::CurrentMemory: opStr = "curent_memory"; break;
default: return false;
}
return c.buffer.append(opStr, strlen(opStr));
}
static bool
@ -641,6 +650,7 @@ PrintUnaryOperator(WasmPrintContext& c, AstUnaryOperator& op)
case Expr::F64Ceil: opStr = "f64.ceil"; break;
case Expr::F64Floor: opStr = "f64.floor"; break;
case Expr::F64Sqrt: opStr = "f64.sqrt"; break;
case Expr::GrowMemory: opStr = "grow_memory"; break;
default: return false;
}
@ -1337,8 +1347,8 @@ PrintExpr(WasmPrintContext& c, AstExpr& expr)
}
switch (expr.kind()) {
case AstExprKind::Nop:
return PrintNop(c, expr.as<AstNop>());
case AstExprKind::NullaryOperator:
return PrintNullaryOperator(c, expr.as<AstNullaryOperator>());
case AstExprKind::Unreachable:
return PrintUnreachable(c, expr.as<AstUnreachable>());
case AstExprKind::Call:

View File

@ -221,12 +221,6 @@ RenderFullLine(WasmRenderContext& c, AstExpr& expr)
/*****************************************************************************/
// binary format parsing and rendering
static bool
RenderNop(WasmRenderContext& c, AstNop& nop)
{
return c.buffer.append("(nop)");
}
static bool
RenderUnreachable(WasmRenderContext& c, AstUnreachable& unreachable)
{
@ -410,6 +404,25 @@ RenderBlock(WasmRenderContext& c, AstBlock& block)
return c.buffer.append(")");
}
// Render a nullary operator in s-expression form: "(<mnemonic>)".
// Returns false on unknown opcode or buffer-append failure.
static bool
RenderNullaryOperator(WasmRenderContext& c, AstNullaryOperator& op)
{
    if (!c.buffer.append("("))
        return false;

    const char* mnemonic;
    switch (op.expr()) {
      case Expr::Nop:           mnemonic = "nop"; break;
      case Expr::CurrentMemory: mnemonic = "current_memory"; break;
      default:                  return false;
    }

    return c.buffer.append(mnemonic, strlen(mnemonic)) &&
           c.buffer.append(")");
}
static bool
RenderUnaryOperator(WasmRenderContext& c, AstUnaryOperator& op)
{
@ -437,6 +450,7 @@ RenderUnaryOperator(WasmRenderContext& c, AstUnaryOperator& op)
case Expr::F64Ceil: opStr = "f64.ceil"; break;
case Expr::F64Floor: opStr = "f64.floor"; break;
case Expr::F64Sqrt: opStr = "f64.sqrt"; break;
case Expr::GrowMemory: opStr = "grow_memory"; break;
default: return false;
}
if (!c.buffer.append(opStr, strlen(opStr)))
@ -955,8 +969,8 @@ static bool
RenderExpr(WasmRenderContext& c, AstExpr& expr)
{
switch (expr.kind()) {
case AstExprKind::Nop:
return RenderNop(c, expr.as<AstNop>());
case AstExprKind::NullaryOperator:
return RenderNullaryOperator(c, expr.as<AstNullaryOperator>());
case AstExprKind::Unreachable:
return RenderUnreachable(c, expr.as<AstUnreachable>());
case AstExprKind::Call:

View File

@ -108,8 +108,14 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
static void
SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory)
{
for (const BoundsCheck& check : metadata.boundsChecks)
Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), memory->buffer().byteLength());
if (!metadata.boundsChecks.empty()) {
uint32_t length = memory->buffer().wasmBoundsCheckLimit();
MOZ_RELEASE_ASSERT(length == LegalizeMapLength(length));
MOZ_RELEASE_ASSERT(length >= memory->buffer().wasmActualByteLength());
for (const BoundsCheck& check : metadata.boundsChecks)
Assembler::UpdateBoundsCheck(check.patchAt(cs.base()), length);
}
#if defined(JS_CODEGEN_X86)
uint8_t* base = memory->buffer().dataPointerEither().unwrap();
@ -601,7 +607,7 @@ Code::lookupRange(void* pc) const
return &metadata_->codeRanges[match];
}
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
#ifdef WASM_HUGE_MEMORY
struct MemoryAccessOffset
{
const MemoryAccessVector& accesses;
@ -626,7 +632,7 @@ Code::lookupMemoryAccess(void* pc) const
return &metadata_->memoryAccesses[match];
}
#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
#endif
bool
Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const

View File

@ -417,7 +417,7 @@ class MetadataCacheablePod
ModuleKind kind;
MemoryUsage memoryUsage;
uint32_t minMemoryLength;
uint32_t maxMemoryLength;
Maybe<uint32_t> maxMemoryLength;
explicit MetadataCacheablePod(ModuleKind kind) {
mozilla::PodZero(this);
@ -522,7 +522,7 @@ class Code
const CallSite* lookupCallSite(void* returnAddress) const;
const CodeRange* lookupRange(void* pc) const;
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS
#ifdef WASM_HUGE_MEMORY
const MemoryAccess* lookupMemoryAccess(void* pc) const;
#endif

View File

@ -24,6 +24,7 @@
#include "asmjs/WasmBinaryIterator.h"
#include "asmjs/WasmGenerator.h"
#include "asmjs/WasmSignalHandlers.h"
using namespace js;
using namespace js::jit;
@ -197,7 +198,7 @@ DecodeExpr(FunctionDecoder& f)
switch (expr) {
case Expr::Nop:
return f.iter().readNullary();
return f.iter().readNullary(ExprType::Void);
case Expr::Call:
return DecodeCall(f);
case Expr::CallIndirect:
@ -439,6 +440,12 @@ DecodeExpr(FunctionDecoder& f)
case Expr::F64Store:
return f.checkHasMemory() &&
f.iter().readStore(ValType::F64, 8, nullptr, nullptr);
case Expr::GrowMemory:
return f.checkHasMemory() &&
f.iter().readUnary(ValType::I32, nullptr);
case Expr::CurrentMemory:
return f.checkHasMemory() &&
f.iter().readNullary(ExprType::I32);
case Expr::Br:
return f.iter().readBr(nullptr, nullptr, nullptr);
case Expr::BrIf:
@ -669,9 +676,7 @@ DecodeResizableMemory(Decoder& d, ModuleGeneratorData* init)
if (!maximumBytes.isValid())
return Fail(d, "maximum memory size too big");
init->maxMemoryLength = maximumBytes.value();
} else {
init->maxMemoryLength = UINT32_MAX;
init->maxMemoryLength = Some(maximumBytes.value());
}
return true;
@ -938,7 +943,7 @@ DecodeMemorySection(Decoder& d, bool newFormat, ModuleGeneratorData* init, bool*
MOZ_ASSERT(init->memoryUsage == MemoryUsage::None);
init->memoryUsage = MemoryUsage::Unshared;
init->minMemoryLength = initialSize.value();
init->maxMemoryLength = maxSize.value();
init->maxMemoryLength = Some(maxSize.value());
}
if (!d.finishSection(sectionStart, sectionSize))
@ -1534,9 +1539,11 @@ CompileArgs::initFromContext(ExclusiveContext* cx, ScriptedCaller&& scriptedCall
SharedModule
wasm::Compile(const ShareableBytes& bytecode, const CompileArgs& args, UniqueChars* error)
{
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
bool newFormat = args.assumptions.newFormat;
auto init = js::MakeUnique<ModuleGeneratorData>(args.assumptions.usesSignal);
auto init = js::MakeUnique<ModuleGeneratorData>();
if (!init)
return nullptr;

View File

@ -50,10 +50,9 @@ typedef Vector<FuncImportGenDesc, 0, SystemAllocPolicy> FuncImportGenDescVector;
struct ModuleGeneratorData
{
ModuleKind kind;
SignalUsage usesSignal;
MemoryUsage memoryUsage;
mozilla::Atomic<uint32_t> minMemoryLength;
uint32_t maxMemoryLength;
Maybe<uint32_t> maxMemoryLength;
SigWithIdVector sigs;
SigWithIdPtrVector funcSigs;
@ -62,12 +61,10 @@ struct ModuleGeneratorData
TableDescVector tables;
Uint32Vector asmJSSigToTableIndex;
explicit ModuleGeneratorData(SignalUsage usesSignal, ModuleKind kind = ModuleKind::Wasm)
explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
: kind(kind),
usesSignal(usesSignal),
memoryUsage(MemoryUsage::None),
minMemoryLength(0),
maxMemoryLength(UINT32_MAX)
minMemoryLength(0)
{}
bool isAsmJS() const {
@ -145,7 +142,6 @@ class MOZ_STACK_CLASS ModuleGenerator
Metadata* maybeAsmJSMetadata = nullptr);
bool isAsmJS() const { return metadata_->kind == ModuleKind::AsmJS; }
SignalUsage usesSignal() const { return metadata_->assumptions.usesSignal; }
jit::MacroAssembler& masm() { return masm_; }
// Memory:
@ -250,10 +246,6 @@ class MOZ_STACK_CLASS FunctionGenerator
usesAtomics_ = true;
}
bool usesSignalsForInterrupts() const {
return m_->usesSignal().forInterrupt;
}
Bytes& bytes() {
return bytes_;
}

View File

@ -280,6 +280,54 @@ Instance::callImport_f64(Instance* instance, int32_t funcImportIndex, int32_t ar
return ToNumber(cx, rval, (double*)argv);
}
// Static trampoline forwarding to the member implementation; presumably
// invoked from jit-generated code through SymbolicAddress::GrowMemory --
// confirm against the compilers' builtin call sites.
/* static */ uint32_t
Instance::growMemory_i32(Instance* instance, uint32_t delta)
{
    return instance->growMemory(delta);
}
// Static trampoline forwarding to the member implementation; presumably
// invoked from jit-generated code through SymbolicAddress::CurrentMemory --
// confirm against the compilers' builtin call sites.
/* static */ uint32_t
Instance::currentMemory_i32(Instance* instance)
{
    return instance->currentMemory();
}
// Grow this instance's memory by |delta| wasm pages.  Returns the previous
// size in pages on success, or uint32_t(-1) on failure.
uint32_t
Instance::growMemory(uint32_t delta)
{
    MOZ_RELEASE_ASSERT(memory_);

    // Use uint64_t so the page arithmetic below cannot overflow uint32_t.
    uint64_t curNumPages = currentMemory();
    uint64_t newNumPages = curNumPages + (uint64_t) delta;

    if (metadata().maxMemoryLength) {
        ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>();
        // Guaranteed by instantiateMemory: the buffer's max size exists and
        // does not exceed the module's declared maximum.
        MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength);

        // Refuse growth past the buffer's maximum size.
        if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value())
            return (uint32_t) -1;

        // Try to grow the memory
        if (!buf.growForWasm(delta))
            return (uint32_t) -1;
    } else {
        return -1; // TODO: implement grow_memory w/o max when we add realloc
    }

    // On success, grow_memory returns the previous size in pages.
    return curNumPages;
}
// Report the memory's current accessible size, in units of wasm pages.
uint32_t
Instance::currentMemory()
{
    MOZ_RELEASE_ASSERT(memory_);

    uint32_t byteLength = memory_->buffer().wasmActualByteLength();
    MOZ_ASSERT(byteLength % wasm::PageSize == 0);
    return byteLength / wasm::PageSize;
}
Instance::Instance(JSContext* cx,
Handle<WasmInstanceObject*> object,
UniqueCode code,
@ -403,6 +451,28 @@ Instance::~Instance()
}
}
// Total mapped size of the memory's buffer in bytes.  Per
// memoryAccessInGuardRegion, this extends beyond memoryLength() to cover
// the guard region.
size_t
Instance::memoryMappedSize() const
{
    return memory_->buffer().wasmMappedSize();
}
// Return true if the last byte of the access [addr, addr + numBytes) falls
// in this memory's guard region: at or past the accessible length but still
// within the mapped reservation.
bool
Instance::memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const
{
    MOZ_ASSERT(numBytes > 0);

    if (!metadata().usesMemory())
        return false;

    uint8_t* base = memoryBase().unwrap(/* comparison */);
    if (addr < base)
        return false;

    // Only the access's last byte is tested: if it lies between the
    // accessible length and the mapped size, the fault hit the guard pages.
    size_t lastByteOffset = addr - base + (numBytes - 1);
    return lastByteOffset >= memoryLength() && lastByteOffset < memoryMappedSize();
}
void
Instance::tracePrivate(JSTracer* trc)
{
@ -444,7 +514,7 @@ Instance::memoryBase() const
size_t
Instance::memoryLength() const
{
return memory_->buffer().byteLength();
return memory_->buffer().wasmActualByteLength();
}
template<typename T>

View File

@ -56,9 +56,13 @@ class Instance
static int32_t callImport_i32(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_i64(Instance*, int32_t, int32_t, uint64_t*);
static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
static uint32_t currentMemory_i32(Instance* instance);
bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
MutableHandleValue rval);
uint32_t growMemory(uint32_t delta);
uint32_t currentMemory();
// Only WasmInstanceObject can call the private trace function.
friend class js::WasmInstanceObject;
@ -87,6 +91,8 @@ class Instance
const SharedTableVector& tables() const { return tables_; }
SharedMem<uint8_t*> memoryBase() const;
size_t memoryLength() const;
size_t memoryMappedSize() const;
bool memoryAccessInGuardRegion(uint8_t* addr, unsigned numBytes) const;
TlsData& tlsData() { return tlsData_; }
// This method returns a pointer to the GC object that owns this Instance.
@ -107,6 +113,7 @@ class Instance
// be notified so it can go back to the generic callImport.
void deoptimizeImportExit(uint32_t funcImportIndex);
bool memoryAccessWouldFault(uint8_t* addr, unsigned numBytes);
// See Code::ensureProfilingState comment.

View File

@ -21,6 +21,7 @@
#include "asmjs/WasmBaselineCompile.h"
#include "asmjs/WasmBinaryIterator.h"
#include "asmjs/WasmGenerator.h"
#include "asmjs/WasmSignalHandlers.h"
#include "jit/CodeGenerator.h"
@ -84,6 +85,9 @@ class CallCompileState
// Accumulates the register arguments while compiling arguments.
MWasmCall::Args regArgs_;
// Reserved argument for passing Instance* to builtin instance method calls.
ABIArg instanceArg_;
// Accumulates the stack arguments while compiling arguments. This is only
// necessary to track when childClobbers_ is true so that the stack offsets
// can be updated.
@ -706,8 +710,10 @@ class FunctionCompiler
curBlock_ = nullptr;
return false;
}
if (!mg().usesSignal.forOOB)
curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
#ifndef WASM_HUGE_MEMORY
curBlock_->add(MWasmBoundsCheck::New(alloc(), base, access));
#endif
return true;
}
@ -831,21 +837,8 @@ class FunctionCompiler
void addInterruptCheck()
{
if (mg_.usesSignal.forInterrupt)
return;
if (inDeadCode())
return;
// WasmHandleExecutionInterrupt takes 0 arguments and the stack is
// always ABIStackAlignment-aligned, but don't forget to account for
// ShadowStackSpace and any other ABI warts.
ABIArgGenerator abi;
propagateMaxStackArgBytes(abi.stackBytesConsumedSoFar());
CallSiteDesc callDesc(0, CallSiteDesc::Relative);
curBlock_->add(MAsmJSInterruptCheck::New(alloc()));
// We rely on signal handlers for interrupts on Asm.JS/Wasm
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
}
MDefinition* extractSimdElement(unsigned lane, MDefinition* base, MIRType type, SimdSign sign)
@ -899,6 +892,17 @@ class FunctionCompiler
return callStack_.append(call);
}
// Reserve the next pointer-sized ABI argument slot for the implicit
// Instance* of a builtin instance-method call.  May be called at most once
// per call; call sites invoke it before passing explicit arguments.
bool passInstance(CallCompileState* args)
{
    if (inDeadCode())
        return true;

    // Should only pass an instance once.
    MOZ_ASSERT(args->instanceArg_ == ABIArg());
    args->instanceArg_ = args->abi_.next(MIRType::Pointer);
    return true;
}
bool passArg(MDefinition* argDef, ValType type, CallCompileState* call)
{
if (inDeadCode())
@ -973,6 +977,13 @@ class FunctionCompiler
call->spIncrement_ = AlignBytes(call->maxChildStackBytes_, AsmJSStackAlignment);
for (MAsmJSPassStackArg* stackArg : call->stackArgs_)
stackArg->incrementOffset(call->spIncrement_);
// If instanceArg_ is not initialized then instanceArg_.kind() != ABIArg::Stack
if (call->instanceArg_.kind() == ABIArg::Stack) {
call->instanceArg_ = ABIArg(call->instanceArg_.offsetFromArgBase() +
call->spIncrement_);
}
stackBytes += call->spIncrement_;
} else {
call->spIncrement_ = 0;
@ -1091,6 +1102,26 @@ class FunctionCompiler
return true;
}
// Emit a call to a builtin implemented as an Instance method.  The
// Instance* is supplied through the ABI slot reserved by passInstance().
// On success *def holds the call's result definition (null in dead code).
bool builtinInstanceMethodCall(SymbolicAddress builtin, const CallCompileState& call,
                               ValType ret, MDefinition** def)
{
    if (inDeadCode()) {
        *def = nullptr;
        return true;
    }

    CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
    auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
                                                        call.instanceArg_, call.regArgs_,
                                                        ToMIRType(ret), call.spIncrement_);
    if (!ins)
        return false;

    curBlock_->add(ins);
    *def = ins;
    return true;
}
/*********************************************** Control flow generation */
inline bool inDeadCode() const {
@ -2962,6 +2993,61 @@ EmitSimdOp(FunctionCompiler& f, ValType type, SimdOperation op, SimdSign sign)
MOZ_CRASH("unexpected opcode");
}
// Translate Expr::GrowMemory into an Ion call to the GrowMemory builtin,
// passing the implicit Instance* followed by the i32 page delta.  The
// builtin's i32 result becomes the expression's result.
static bool
EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset)
{
    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);

    CallCompileState args(f, lineOrBytecode);
    if (!f.startCall(&args))
        return false;

    if (!f.passInstance(&args))
        return false;

    MDefinition* delta;
    if (!f.iter().readUnary(ValType::I32, &delta))
        return false;

    if (!f.passArg(delta, ValType::I32, &args))
        return false;

    // NOTE(review): finishCall's return value is ignored here (also in
    // EmitCurrentMemory) -- confirm it cannot fail on this path.
    f.finishCall(&args, PassTls::False, InterModule::False);

    MDefinition* ret;
    if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))
        return false;

    f.iter().setResult(ret);
    return true;
}
// Translate Expr::CurrentMemory into an Ion call to the CurrentMemory
// builtin.  current_memory takes no operands; only the implicit Instance*
// is passed, and the builtin's i32 result becomes the expression's result.
static bool
EmitCurrentMemory(FunctionCompiler& f, uint32_t callOffset)
{
    uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode(callOffset);

    CallCompileState args(f, lineOrBytecode);

    if (!f.iter().readNullary(ExprType::I32))
        return false;

    if (!f.startCall(&args))
        return false;

    if (!f.passInstance(&args))
        return false;

    // NOTE(review): finishCall's return value is ignored here (also in
    // EmitGrowMemory) -- confirm it cannot fail on this path.
    f.finishCall(&args, PassTls::False, InterModule::False);

    MDefinition* ret;
    if (!f.builtinInstanceMethodCall(SymbolicAddress::CurrentMemory, args, ValType::I32, &ret))
        return false;

    f.iter().setResult(ret);
    return true;
}
static bool
EmitExpr(FunctionCompiler& f)
{
@ -2977,7 +3063,7 @@ EmitExpr(FunctionCompiler& f)
switch (expr) {
// Control opcodes
case Expr::Nop:
return f.iter().readNullary();
return f.iter().readNullary(ExprType::Void);
case Expr::Block:
return EmitBlock(f);
case Expr::Loop:
@ -3532,11 +3618,11 @@ EmitExpr(FunctionCompiler& f)
return EmitAtomicsCompareExchange(f);
case Expr::I32AtomicsExchange:
return EmitAtomicsExchange(f);
// Future opcodes
case Expr::CurrentMemory:
// Memory Operators
case Expr::GrowMemory:
MOZ_CRASH("NYI");
return EmitGrowMemory(f, exprOffset);
case Expr::CurrentMemory:
return EmitCurrentMemory(f, exprOffset);
case Expr::Limit:;
}
@ -3569,7 +3655,6 @@ wasm::IonCompileFunction(IonCompileTask* task)
CompileInfo compileInfo(locals.length());
MIRGenerator mir(nullptr, options, &results.alloc(), &graph, &compileInfo,
IonOptimizations.get(OptimizationLevel::AsmJS));
mir.initUsesSignalHandlersForAsmJSOOB(task->mg().usesSignal.forOOB);
mir.initMinAsmJSHeapLength(task->mg().minMemoryLength);
// Build MIR graph

View File

@ -18,9 +18,12 @@
#include "asmjs/WasmJS.h"
#include "mozilla/Maybe.h"
#include "asmjs/WasmCompile.h"
#include "asmjs/WasmInstance.h"
#include "asmjs/WasmModule.h"
#include "asmjs/WasmSignalHandlers.h"
#include "builtin/Promise.h"
#include "jit/JitOptions.h"
#include "vm/Interpreter.h"
@ -32,6 +35,7 @@
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::Nothing;
bool
wasm::HasCompilerSupport(ExclusiveContext* cx)
@ -42,6 +46,9 @@ wasm::HasCompilerSupport(ExclusiveContext* cx)
if (!cx->jitSupportsUnalignedAccesses())
return false;
if (!wasm::HaveSignalHandlers())
return false;
#if defined(JS_CODEGEN_NONE) || defined(JS_CODEGEN_ARM64)
return false;
#else
@ -746,11 +753,16 @@ WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp)
JSAtom* initialAtom = Atomize(cx, "initial", strlen("initial"));
if (!initialAtom)
return false;
RootedId initialId(cx, AtomToId(initialAtom));
JSAtom* maximumAtom = Atomize(cx, "maximum", strlen("maximum"));
if (!maximumAtom)
return false;
RootedId maximumId(cx, AtomToId(maximumAtom));
RootedObject obj(cx, &args[0].toObject());
RootedId id(cx, AtomToId(initialAtom));
RootedValue initialVal(cx);
if (!GetProperty(cx, obj, obj, id, &initialVal))
if (!GetProperty(cx, obj, obj, initialId, &initialVal))
return false;
double initialDbl;
@ -762,9 +774,29 @@ WasmMemoryObject::construct(JSContext* cx, unsigned argc, Value* vp)
return false;
}
uint32_t bytes = uint32_t(initialDbl) * PageSize;
bool signalsForOOB = SignalUsage().forOOB;
RootedArrayBufferObject buffer(cx, ArrayBufferObject::createForWasm(cx, bytes, signalsForOOB));
Maybe<uint32_t> maxSize;
bool found;
if (HasProperty(cx, obj, maximumId, &found) && found) {
RootedValue maxVal(cx);
if (!GetProperty(cx, obj, obj, maximumId, &maxVal))
return false;
double maxDbl;
if (!ToInteger(cx, maxVal, &maxDbl))
return false;
if (maxDbl < initialDbl || maxDbl > UINT32_MAX / PageSize) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_SIZE, "Memory",
"maximum");
return false;
}
maxSize = Some<uint32_t>(uint32_t(maxDbl) * PageSize);
}
uint32_t initialSize = uint32_t(initialDbl) * PageSize;
RootedArrayBufferObject buffer(cx, ArrayBufferObject::createForWasm(cx, initialSize, maxSize));
if (!buffer)
return false;

View File

@ -517,23 +517,42 @@ Module::instantiateMemory(JSContext* cx, MutableHandleWasmMemoryObject memory) c
RootedArrayBufferObjectMaybeShared buffer(cx);
if (memory) {
buffer = &memory->buffer();
uint32_t length = buffer->byteLength();
if (length < metadata_->minMemoryLength || length > metadata_->maxMemoryLength) {
uint32_t length = buffer->wasmActualByteLength();
uint32_t declaredMaxLength = metadata_->maxMemoryLength.valueOr(UINT32_MAX);
// It's not an error to import a memory whose mapped size is less than
// the maxMemoryLength required for the module. This is the same as trying to
// map up to maxMemoryLength but actually getting less.
if (length < metadata_->minMemoryLength || length > declaredMaxLength) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE, "Memory");
return false;
}
// This can't happen except via the shell toggling signals.enabled.
if (metadata_->assumptions.usesSignal.forOOB &&
!buffer->is<SharedArrayBufferObject>() &&
!buffer->as<ArrayBufferObject>().isWasmMapped())
{
JS_ReportError(cx, "can't access same buffer with and without signals enabled");
return false;
// For asm.js maxMemoryLength doesn't play a role since we can't grow memory.
// For wasm we require that either both memory and module don't specify a max size
// OR that the memory's max size is less than the modules.
if (!metadata_->isAsmJS()) {
Maybe<uint32_t> memMaxSize =
buffer->as<ArrayBufferObject>().wasmMaxSize();
if (metadata_->maxMemoryLength.isSome() != memMaxSize.isSome() ||
metadata_->maxMemoryLength < memMaxSize) {
JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMP_SIZE,
"Memory");
return false;
}
}
MOZ_RELEASE_ASSERT(buffer->is<SharedArrayBufferObject>() ||
buffer->as<ArrayBufferObject>().isWasm());
// We currently assume SharedArrayBuffer => asm.js. Can remove this
// once wasmMaxSize/mappedSize/growForWasm have been implemented in SAB
MOZ_ASSERT_IF(buffer->is<SharedArrayBufferObject>(), metadata_->isAsmJS());
} else {
buffer = ArrayBufferObject::createForWasm(cx, metadata_->minMemoryLength,
metadata_->assumptions.usesSignal.forOOB);
metadata_->maxMemoryLength);
if (!buffer)
return false;

View File

@ -319,7 +319,7 @@ enum { REG_EIP = 14 };
// the same as CONTEXT, but on Mac we use a different structure since we call
// into the emulator code from a Mach exception handler rather than a
// sigaction-style signal handler.
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
#if defined(XP_DARWIN)
# if defined(JS_CODEGEN_X64)
struct macos_x64_context {
x86_thread_state64_t thread;
@ -367,8 +367,6 @@ ContextToPC(CONTEXT* context)
#endif
}
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
#if defined(JS_CODEGEN_X64)
MOZ_COLD static void
SetFPRegToNaN(size_t size, void* fp_reg)
@ -538,7 +536,6 @@ AddressOfGPRegisterSlot(EMULATOR_CONTEXT* context, Registers::Code code)
MOZ_CRASH();
}
# endif // !XP_DARWIN
#endif // JS_CODEGEN_X64
MOZ_COLD static void
SetRegisterToCoercedUndefined(EMULATOR_CONTEXT* context, size_t size,
@ -609,7 +606,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
const MemoryAccess* memoryAccess, const Instance& instance)
{
MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(pc));
MOZ_RELEASE_ASSERT(instance.metadata().assumptions.usesSignal.forOOB);
MOZ_RELEASE_ASSERT(memoryAccess->insnOffset() == (pc - instance.codeBase()));
// Disassemble the instruction which caused the trap so that we can extract
@ -620,7 +616,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
MOZ_RELEASE_ASSERT(end > pc);
MOZ_RELEASE_ASSERT(instance.codeSegment().containsFunctionPC(end));
#if defined(JS_CODEGEN_X64)
// Check x64 asm.js heap access invariants.
MOZ_RELEASE_ASSERT(address.disp() >= 0);
MOZ_RELEASE_ASSERT(address.base() == HeapReg.code());
@ -638,7 +633,6 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
AddressOfGPRegisterSlot(context, address.index()));
MOZ_RELEASE_ASSERT(uint32_t(index) == index);
}
#endif
// Determine the actual effective address of the faulting access. We can't
// rely on the faultingAddress given to us by the OS, because we need the
@ -650,9 +644,11 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
"faulting address range");
MOZ_RELEASE_ASSERT(accessAddress >= instance.memoryBase(),
"Access begins outside the asm.js heap");
MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() + MappedSize,
MOZ_RELEASE_ASSERT(accessAddress + access.size() <= instance.memoryBase() +
instance.memoryMappedSize(),
"Access extends beyond the asm.js heap guard region");
MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() + instance.memoryLength(),
MOZ_RELEASE_ASSERT(accessAddress + access.size() > instance.memoryBase() +
instance.memoryLength(),
"Computed access address is not actually out of bounds");
// Wasm loads/stores don't wrap offsets at all, so hitting the guard page
@ -742,32 +738,13 @@ EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddre
return end;
}
#elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
MOZ_COLD static uint8_t*
EmulateHeapAccess(EMULATOR_CONTEXT* context, uint8_t* pc, uint8_t* faultingAddress,
const MemoryAccess* memoryAccess, const Instance& instance)
{
// We forbid ARM instruction sets below ARMv7, so that solves unaligned
// integer memory accesses. So the only way to land here is because of a
// non-default configured kernel or an unaligned floating-point access.
// TODO Handle FPU unaligned accesses on ARM (bug 1283121).
return instance.codeSegment().unalignedAccessCode();
}
#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
#endif // JS_CODEGEN_X64
MOZ_COLD static bool
IsHeapAccessAddress(const Instance &instance, uint8_t* faultingAddress)
{
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
size_t accessLimit = MappedSize;
#elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
size_t accessLimit = instance.memoryLength();
#endif
size_t accessLimit = instance.memoryMappedSize();
return instance.metadata().usesMemory() &&
faultingAddress >= instance.memoryBase() &&
faultingAddress < instance.memoryBase() + accessLimit;
@ -822,15 +799,18 @@ HandleFault(PEXCEPTION_POINTERS exception)
// case and silence the exception ourselves (the exception will
// retrigger after the interrupt jumps back to resumePC).
return pc == instance->codeSegment().interruptCode() &&
instance->codeSegment().containsFunctionPC(activation->resumePC()) &&
instance->code().lookupMemoryAccess(activation->resumePC());
instance->codeSegment().containsFunctionPC(activation->resumePC());
}
#ifdef WASM_HUGE_MEMORY
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess)
return false;
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
#else
*ppc = instance->codeSegment().outOfBoundsCode();
#endif
return true;
}
@ -953,11 +933,15 @@ HandleMachException(JSRuntime* rt, const ExceptionRequest& request)
if (!IsHeapAccessAddress(*instance, faultingAddress))
return false;
#ifdef WASM_HUGE_MEMORY
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (!memoryAccess)
return false;
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(&context, pc, faultingAddress, memoryAccess, *instance);
#else
*ppc = instance->codeSegment().outOfBoundsCode();
#endif
// Update the thread state with the new pc and register values.
kret = thread_set_state(rtThread, float_state, (thread_state_t)&context.float_, float_state_count);
@ -1168,11 +1152,23 @@ HandleFault(int signum, siginfo_t* info, void* ctx)
if (!IsHeapAccessAddress(*instance, faultingAddress))
return false;
#ifdef WASM_HUGE_MEMORY
MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
const MemoryAccess* memoryAccess = instance->code().lookupMemoryAccess(pc);
if (signal == Signal::SegFault && !memoryAccess)
return false;
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
if (!memoryAccess)
*ppc = instance->codeSegment().outOfBoundsCode();
else
*ppc = EmulateHeapAccess(context, pc, faultingAddress, memoryAccess, *instance);
#elif defined(JS_CODEGEN_ARM)
MOZ_RELEASE_ASSERT(signal == Signal::BusError || signal == Signal::SegFault);
if (signal == Signal::BusError)
*ppc = instance->codeSegment().unalignedAccessCode();
else
*ppc = instance->codeSegment().outOfBoundsCode();
#else
MOZ_RELEASE_ASSERT(signal == Signal::SegFault);
*ppc = instance->codeSegment().outOfBoundsCode();
#endif
return true;
}
@ -1211,7 +1207,6 @@ AsmJSFaultHandler(int signum, siginfo_t* info, void* context)
previousSignal->sa_handler(signum);
}
# endif // XP_WIN || XP_DARWIN || assume unix
#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
static void
RedirectIonBackedgesToInterruptCheck(JSRuntime* rt)
@ -1276,15 +1271,16 @@ JitInterruptHandler(int signum, siginfo_t* info, void* context)
}
#endif
static bool sTriedInstallSignalHandlers = false;
static bool sHaveSignalHandlers = false;
static bool
ProcessHasSignalHandlers()
{
// We assume that there are no races creating the first JSRuntime of the process.
static bool sTried = false;
static bool sResult = false;
if (sTried)
return sResult;
sTried = true;
if (sTriedInstallSignalHandlers)
return sHaveSignalHandlers;
sTriedInstallSignalHandlers = true;
// Developers might want to forcibly disable signals to avoid seeing
// spurious SIGSEGVs in the debugger.
@ -1333,7 +1329,6 @@ ProcessHasSignalHandlers()
}
#endif // defined(XP_WIN)
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
// Install a SIGSEGV handler to handle safely-out-of-bounds asm.js heap
// access and/or unaligned accesses.
# if defined(XP_WIN)
@ -1347,14 +1342,16 @@ ProcessHasSignalHandlers()
// handling the signal, and fall through to the Breakpad handler by testing
// handlingSegFault.
# if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
// Allow handling OOB with signals on all architectures
struct sigaction faultHandler;
faultHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
faultHandler.sa_sigaction = &AsmJSFaultHandler<Signal::SegFault>;
sigemptyset(&faultHandler.sa_mask);
if (sigaction(SIGSEGV, &faultHandler, &sPrevSEGVHandler))
MOZ_CRASH("unable to install segv handler");
# elif defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
# if defined(JS_CODEGEN_ARM)
// On Arm Handle Unaligned Accesses
struct sigaction busHandler;
busHandler.sa_flags = SA_SIGINFO | SA_NODEFER;
busHandler.sa_sigaction = &AsmJSFaultHandler<Signal::BusError>;
@ -1363,9 +1360,8 @@ ProcessHasSignalHandlers()
MOZ_CRASH("unable to install sigbus handler");
# endif
# endif
#endif // defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
sResult = true;
sHaveSignalHandlers = true;
return true;
}
@ -1376,7 +1372,7 @@ wasm::EnsureSignalHandlers(JSRuntime* rt)
if (!ProcessHasSignalHandlers())
return true;
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
#if defined(XP_DARWIN)
// On OSX, each JSRuntime gets its own handler thread.
if (!rt->wasmMachExceptionHandler.installed() && !rt->wasmMachExceptionHandler.install(rt))
return false;
@ -1385,21 +1381,11 @@ wasm::EnsureSignalHandlers(JSRuntime* rt)
return true;
}
static bool sHandlersSuppressedForTesting = false;
bool
wasm::HaveSignalHandlers()
{
if (!ProcessHasSignalHandlers())
return false;
return !sHandlersSuppressedForTesting;
}
void
wasm::SuppressSignalHandlersForTesting(bool suppress)
{
sHandlersSuppressedForTesting = suppress;
MOZ_ASSERT(sTriedInstallSignalHandlers);
return sHaveSignalHandlers;
}
// JSRuntime::requestInterrupt sets interrupt_ (which is checked frequently by

View File

@ -21,7 +21,7 @@
#include "mozilla/Attributes.h"
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
#if defined(XP_DARWIN)
# include <mach/mach.h>
#endif
#include "threading/Thread.h"
@ -42,17 +42,12 @@ namespace wasm {
MOZ_MUST_USE bool
EnsureSignalHandlers(JSRuntime* rt);
// Return whether signals can be used in this process for interrupts or, ifdef
// ASMJS_MAY_USE_SIGNAL_HANDLERS, asm.js/wasm out-of-bounds. This value can
// change over time solely due to DisableSignalHandlersForTesting.
// Return whether signals can be used in this process for interrupts or
// asm.js/wasm out-of-bounds.
bool
HaveSignalHandlers();
// Artificially suppress signal handler support, for testing purposes.
void
SuppressSignalHandlersForTesting(bool suppress);
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
#if defined(XP_DARWIN)
// On OSX we are forced to use the lower-level Mach exception mechanism instead
// of Unix signals. Mach exceptions are not handled on the victim's stack but
// rather require an extra thread. For simplicity, we create one such thread

View File

@ -100,7 +100,6 @@ class WasmToken
Loop,
Module,
Name,
Nop,
Offset,
OpenParen,
Param,
@ -118,6 +117,7 @@ class WasmToken
Text,
Then,
Type,
NullaryOpcode,
UnaryOpcode,
Unreachable,
UnsignedInteger,
@ -195,7 +195,7 @@ class WasmToken
MOZ_ASSERT(begin != end);
MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
kind_ == Load || kind_ == Store);
kind_ == Load || kind_ == Store || kind_ == NullaryOpcode);
u.expr_ = expr;
}
explicit WasmToken(const char16_t* begin)
@ -245,7 +245,7 @@ class WasmToken
Expr expr() const {
MOZ_ASSERT(kind_ == UnaryOpcode || kind_ == BinaryOpcode || kind_ == TernaryOpcode ||
kind_ == ComparisonOpcode || kind_ == ConversionOpcode ||
kind_ == Load || kind_ == Store);
kind_ == Load || kind_ == Store || kind_ == NullaryOpcode);
return u.expr_;
}
};
@ -757,6 +757,8 @@ WasmTokenStream::next()
return WasmToken(WasmToken::CallImport, begin, cur_);
return WasmToken(WasmToken::Call, begin, cur_);
}
if (consume(u"current_memory"))
return WasmToken(WasmToken::NullaryOpcode, Expr::CurrentMemory, begin, cur_);
break;
case 'd':
@ -988,6 +990,8 @@ WasmTokenStream::next()
return WasmToken(WasmToken::GetLocal, begin, cur_);
if (consume(u"global"))
return WasmToken(WasmToken::Global, begin, cur_);
if (consume(u"grow_memory"))
return WasmToken(WasmToken::UnaryOpcode, Expr::GrowMemory, begin, cur_);
break;
case 'i':
@ -1302,7 +1306,7 @@ WasmTokenStream::next()
if (consume(u"nan"))
return nan(begin);
if (consume(u"nop"))
return WasmToken(WasmToken::Nop, begin, cur_);
return WasmToken(WasmToken::NullaryOpcode, Expr::Nop, begin, cur_);
break;
case 'o':
@ -1914,6 +1918,12 @@ ParseUnaryOperator(WasmParseContext& c, Expr expr)
return new(c.lifo) AstUnaryOperator(expr, op);
}
// Build the AST node for a nullary (argument-less) operator such as
// nop or current_memory. Allocation comes from the parse context's
// LifoAlloc arena; returns null on OOM.
static AstNullaryOperator*
ParseNullaryOperator(WasmParseContext& c, Expr expr)
{
    AstNullaryOperator* node = new(c.lifo) AstNullaryOperator(expr);
    return node;
}
static AstBinaryOperator*
ParseBinaryOperator(WasmParseContext& c, Expr expr)
{
@ -2183,8 +2193,6 @@ ParseExprInsideParens(WasmParseContext& c)
WasmToken token = c.ts.get();
switch (token.kind()) {
case WasmToken::Nop:
return new(c.lifo) AstNop;
case WasmToken::Unreachable:
return new(c.lifo) AstUnreachable;
case WasmToken::BinaryOpcode:
@ -2231,6 +2239,8 @@ ParseExprInsideParens(WasmParseContext& c)
return ParseTernaryOperator(c, token.expr());
case WasmToken::UnaryOpcode:
return ParseUnaryOperator(c, token.expr());
case WasmToken::NullaryOpcode:
return ParseNullaryOperator(c, token.expr());
default:
c.ts.generateError(token, c.error);
return nullptr;
@ -3117,7 +3127,7 @@ static bool
ResolveExpr(Resolver& r, AstExpr& expr)
{
switch (expr.kind()) {
case AstExprKind::Nop:
case AstExprKind::NullaryOperator:
case AstExprKind::Unreachable:
return true;
case AstExprKind::BinaryOperator:
@ -3443,6 +3453,12 @@ EncodeUnaryOperator(Encoder& e, AstUnaryOperator& b)
e.writeExpr(b.expr());
}
// Serialize a nullary operator: the expression opcode is the entire
// encoding, so just write it out.
static bool
EncodeNullaryOperator(Encoder& e, AstNullaryOperator& b)
{
    const auto opcode = b.expr();
    return e.writeExpr(opcode);
}
static bool
EncodeBinaryOperator(Encoder& e, AstBinaryOperator& b)
{
@ -3580,8 +3596,6 @@ static bool
EncodeExpr(Encoder& e, AstExpr& expr)
{
switch (expr.kind()) {
case AstExprKind::Nop:
return e.writeExpr(Expr::Nop);
case AstExprKind::Unreachable:
return e.writeExpr(Expr::Unreachable);
case AstExprKind::BinaryOperator:
@ -3622,6 +3636,8 @@ EncodeExpr(Encoder& e, AstExpr& expr)
return EncodeTernaryOperator(e, expr.as<AstTernaryOperator>());
case AstExprKind::UnaryOperator:
return EncodeUnaryOperator(e, expr.as<AstUnaryOperator>());
case AstExprKind::NullaryOperator:
return EncodeNullaryOperator(e, expr.as<AstNullaryOperator>());
}
MOZ_CRASH("Bad expr kind");
}

View File

@ -351,6 +351,10 @@ wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
return FuncCast(ecmaPow, Args_Double_DoubleDouble);
case SymbolicAddress::ATan2D:
return FuncCast(ecmaAtan2, Args_Double_DoubleDouble);
case SymbolicAddress::GrowMemory:
return FuncCast<uint32_t (Instance*, uint32_t)>(Instance::growMemory_i32, Args_General2);
case SymbolicAddress::CurrentMemory:
return FuncCast<uint32_t (Instance*)>(Instance::currentMemory_i32, Args_General1);
case SymbolicAddress::Limit:
break;
}
@ -358,27 +362,6 @@ wasm::AddressOf(SymbolicAddress imm, ExclusiveContext* cx)
MOZ_CRASH("Bad SymbolicAddress");
}
SignalUsage::SignalUsage()
:
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
// Signal-handling is only used to eliminate bounds checks when the OS page
// size is an even divisor of the WebAssembly page size.
forOOB(HaveSignalHandlers() &&
gc::SystemPageSize() <= PageSize &&
PageSize % gc::SystemPageSize() == 0 &&
!JitOptions.wasmExplicitBoundsChecks),
#else
forOOB(false),
#endif
forInterrupt(HaveSignalHandlers())
{}
bool
SignalUsage::operator==(SignalUsage rhs) const
{
return forOOB == rhs.forOOB && forInterrupt == rhs.forInterrupt;
}
static uint32_t
GetCPUID()
{
@ -565,15 +548,13 @@ SigWithId::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
}
Assumptions::Assumptions(JS::BuildIdCharVector&& buildId)
: usesSignal(),
cpuId(GetCPUID()),
: cpuId(GetCPUID()),
buildId(Move(buildId)),
newFormat(false)
{}
Assumptions::Assumptions()
: usesSignal(),
cpuId(GetCPUID()),
: cpuId(GetCPUID()),
buildId(),
newFormat(false)
{}
@ -591,7 +572,6 @@ Assumptions::initBuildIdFromContext(ExclusiveContext* cx)
bool
Assumptions::clone(const Assumptions& other)
{
usesSignal = other.usesSignal;
cpuId = other.cpuId;
newFormat = other.newFormat;
return buildId.appendAll(other.buildId);
@ -600,8 +580,7 @@ Assumptions::clone(const Assumptions& other)
bool
Assumptions::operator==(const Assumptions& rhs) const
{
return usesSignal == rhs.usesSignal &&
cpuId == rhs.cpuId &&
return cpuId == rhs.cpuId &&
buildId.length() == rhs.buildId.length() &&
PodEqual(buildId.begin(), rhs.buildId.begin(), buildId.length()) &&
newFormat == rhs.newFormat;
@ -610,8 +589,7 @@ Assumptions::operator==(const Assumptions& rhs) const
size_t
Assumptions::serializedSize() const
{
return sizeof(usesSignal) +
sizeof(uint32_t) +
return sizeof(uint32_t) +
SerializedPodVectorSize(buildId) +
sizeof(bool);
}
@ -619,7 +597,6 @@ Assumptions::serializedSize() const
uint8_t*
Assumptions::serialize(uint8_t* cursor) const
{
cursor = WriteBytes(cursor, &usesSignal, sizeof(usesSignal));
cursor = WriteScalar<uint32_t>(cursor, cpuId);
cursor = SerializePodVector(cursor, buildId);
cursor = WriteScalar<bool>(cursor, newFormat);
@ -629,7 +606,6 @@ Assumptions::serialize(uint8_t* cursor) const
const uint8_t*
Assumptions::deserialize(const uint8_t* cursor)
{
(cursor = ReadBytes(cursor, &usesSignal, sizeof(usesSignal))) &&
(cursor = ReadScalar<uint32_t>(cursor, &cpuId)) &&
(cursor = DeserializePodVector(cursor, &buildId)) &&
(cursor = ReadScalar<bool>(cursor, &newFormat));
@ -641,3 +617,57 @@ Assumptions::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
return buildId.sizeOfExcludingThis(mallocSizeOf);
}
// Heap length on ARM should fit in an ARM immediate. We approximate the set
// of valid ARM immediates with the predicate:
// 2^n for n in [16, 24)
// or
// 2^24 * n for n >= 1.
bool
wasm::IsValidARMLengthImmediate(uint32_t length)
{
bool valid = (IsPowerOfTwo(length) ||
(length & 0x00ffffff) == 0);
MOZ_ASSERT_IF(valid, length % PageSize == 0);
return valid;
}
// Round |length| up to the smallest valid ARM length immediate that is
// >= |length|. Caller must ensure length <= 0xff000000 so the rounded
// result cannot overflow.
uint32_t
wasm::RoundUpToNextValidARMLengthImmediate(uint32_t length)
{
    MOZ_ASSERT(length <= 0xff000000);

    uint32_t rounded;
    if (length <= 16 * 1024 * 1024) {
        // At or below 2^24, valid immediates are powers of two (zero
        // stays zero).
        rounded = length ? mozilla::RoundUpPow2(length) : 0;
    } else {
        // Above 2^24, valid immediates are multiples of 2^24.
        rounded = (length + 0x00ffffff) & ~0x00ffffff;
    }

    MOZ_ASSERT(IsValidARMLengthImmediate(rounded));
    return rounded;
}
// Compute the size of the memory mapping to reserve for a wasm heap of
// |requestedSize| bytes, adjusted for platform constraints.
size_t
wasm::LegalizeMapLength(size_t requestedSize)
{
#ifdef WASM_HUGE_MEMORY
    // On 64-bit platforms just give us a 4G guard region
    return wasm::MappedSize;
#else
    // On 32-bit platforms clamp down to 1GB
    const uint32_t MaxMappedSize = (1 << 30);
    uint32_t legalized = Min(uint32_t(requestedSize), MaxMappedSize);

# ifdef JS_CODEGEN_ARM
    // On Arm round so that it fits in a single instruction
    legalized = RoundUpToNextValidARMLengthImmediate(legalized);
    MOZ_RELEASE_ASSERT(legalized <= MaxMappedSize);
# endif

    return legalized;
#endif
}

View File

@ -810,9 +810,9 @@ class BoundsCheck
// Summarizes a heap access made by wasm code that needs to be patched later
// and/or looked up by the wasm signal handlers. Different architectures need
// to know different things (x64: offset and length, ARM: where to patch in
// heap length, x86: where to patch in heap length and base).
// to know different things (x64: instruction offset, wrapping and failure
// behavior, ARM: nothing, x86: offset of end of instruction (heap length to
// patch is last 4 bytes of instruction)).
#if defined(JS_CODEGEN_X86)
class MemoryAccess
{
@ -868,7 +868,7 @@ class MemoryAccess
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
defined(JS_CODEGEN_NONE)
// Nothing! We just want bounds checks on these platforms.
// Nothing! We don't patch or emulate memory accesses on these platforms.
class MemoryAccess {
public:
void offsetBy(uint32_t) { MOZ_CRASH(); }
@ -938,6 +938,8 @@ enum class SymbolicAddress
TruncateDoubleToUint64,
Uint64ToFloatingPoint,
Int64ToFloatingPoint,
GrowMemory,
CurrentMemory,
Limit
};
@ -995,28 +997,12 @@ enum class JumpTarget
typedef EnumeratedArray<JumpTarget, JumpTarget::Limit, Uint32Vector> JumpSiteArray;
// The SignalUsage struct captures global parameters that affect all wasm code
// generation. It also currently is the single source of truth for whether or
// not to use signal handlers for different purposes.
struct SignalUsage
{
// NB: these fields are serialized as a POD in Assumptions.
bool forOOB;
bool forInterrupt;
SignalUsage();
bool operator==(SignalUsage rhs) const;
bool operator!=(SignalUsage rhs) const { return !(*this == rhs); }
};
// Assumptions captures ambient state that must be the same when compiling and
// deserializing a module for the compiled code to be valid. If it's not, then
// the module must be recompiled from scratch.
struct Assumptions
{
SignalUsage usesSignal;
uint32_t cpuId;
JS::BuildIdCharVector buildId;
bool newFormat;
@ -1073,7 +1059,9 @@ WASM_DECLARE_POD_VECTOR(TableDesc, TableDescVector)
class CalleeDesc
{
public:
enum Which { Internal, Import, WasmTable, AsmJSTable, Builtin };
// Unlike Builtin, BuiltinInstanceMethod expects an implicit Instance*
// as its first argument. (e.g. see Instance::growMemory)
enum Which { Internal, Import, WasmTable, AsmJSTable, Builtin, BuiltinInstanceMethod };
private:
Which which_;
@ -1123,6 +1111,12 @@ class CalleeDesc
c.u.builtin_ = callee;
return c;
}
static CalleeDesc builtinInstanceMethod(SymbolicAddress callee) {
CalleeDesc c;
c.which_ = BuiltinInstanceMethod;
c.u.builtin_ = callee;
return c;
}
Which which() const {
return which_;
}
@ -1154,7 +1148,7 @@ class CalleeDesc
return u.table.sigId_;
}
SymbolicAddress builtin() const {
MOZ_ASSERT(which_ == Builtin);
MOZ_ASSERT(which_ == Builtin || which_ == BuiltinInstanceMethod);
return u.builtin_;
}
};
@ -1252,11 +1246,16 @@ struct ExternalTableElem
// requires linear memory to always be a multiple of 64KiB.
static const unsigned PageSize = 64 * 1024;
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
#ifdef JS_CODEGEN_X64
#define WASM_HUGE_MEMORY
static const uint64_t Uint32Range = uint64_t(UINT32_MAX) + 1;
static const uint64_t MappedSize = 2 * Uint32Range + PageSize;
#endif
bool IsValidARMLengthImmediate(uint32_t length);
uint32_t RoundUpToNextValidARMLengthImmediate(uint32_t length);
size_t LegalizeMapLength(size_t requestedSize);
static const unsigned NaN64GlobalDataOffset = 0;
static const unsigned NaN32GlobalDataOffset = NaN64GlobalDataOffset + sizeof(double);
static const unsigned InitialGlobalDataBytes = NaN32GlobalDataOffset + sizeof(float);

View File

@ -513,28 +513,6 @@ WasmIsSupported(JSContext* cx, unsigned argc, Value* vp)
return true;
}
static bool
WasmUsesSignalForOOB(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
args.rval().setBoolean(wasm::SignalUsage().forOOB);
return true;
}
static bool
SuppressSignalHandlers(JSContext* cx, unsigned argc, Value* vp)
{
CallArgs args = CallArgsFromVp(argc, vp);
if (!args.requireAtLeast(cx, "suppressSignalHandlers", 1))
return false;
wasm::SuppressSignalHandlersForTesting(ToBoolean(args[0]));
args.rval().setUndefined();
return true;
}
static bool
WasmTextToBinary(JSContext* cx, unsigned argc, Value* vp)
{
@ -3835,15 +3813,6 @@ gc::ZealModeHelpText),
"wasmIsSupported()",
" Returns a boolean indicating whether WebAssembly is supported on the current device."),
JS_FN_HELP("wasmUsesSignalForOOB", WasmUsesSignalForOOB, 0, 0,
"wasmUsesSignalForOOB()",
" Return whether wasm and asm.js use a signal handler for detecting out-of-bounds."),
JS_FN_HELP("suppressSignalHandlers", SuppressSignalHandlers, 1, 0,
"suppressSignalHandlers(suppress)",
" This function allows artificially suppressing signal handler support, even if the underlying "
" platform supports it."),
JS_FN_HELP("wasmTextToBinary", WasmTextToBinary, 1, 0,
"wasmTextToBinary(str)",
" Translates the given text wasm module into its binary encoding."),

View File

@ -25,24 +25,17 @@ assertEq(f(0x7f),0x7f);
assertEq(f(0xff),-1);
assertEq(f(0x100),0);
// Test signal handlers deactivation
if (wasmUsesSignalForOOB()) {
suppressSignalHandlers(true);
assertEq(wasmUsesSignalForOOB(), false);
{
var buf = new ArrayBuffer(BUF_MIN);
var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + '/* not a clone */ function f(i) {i=i|0; i32[0] = i; return i8[0]|0}; return f');
var f = asmLink(code, this, null, buf);
assertEq(f(0),0);
assertEq(f(0x7f),0x7f);
assertEq(f(0xff),-1);
assertEq(f(0x100),0);
var buf = new ArrayBuffer(BUF_MIN);
var code = asmCompile('glob', 'imp', 'b', USE_ASM + HEAP_IMPORTS + '/* not a clone */ function f(i) {i=i|0; i32[0] = i; return i8[0]|0}; return f');
var f = asmLink(code, this, null, buf);
assertEq(f(0),0);
assertEq(f(0x7f),0x7f);
assertEq(f(0xff),-1);
assertEq(f(0x100),0);
// Bug 1088655
assertEq(asmLink(asmCompile('stdlib', 'foreign', 'heap', USE_ASM + 'var i32=new stdlib.Int32Array(heap); function f(i) {i=i|0;var j=0x10000;return (i32[j>>2] = i)|0 } return f'), this, null, buf)(1), 1);
suppressSignalHandlers(false);
assertEq(wasmUsesSignalForOOB(), true);
// Bug 1088655
assertEq(asmLink(asmCompile('stdlib', 'foreign', 'heap', USE_ASM + 'var i32=new stdlib.Int32Array(heap); function f(i) {i=i|0;var j=0x10000;return (i32[j>>2] = i)|0 } return f'), this, null, buf)(1), 1);
}
setCachingEnabled(false);

View File

@ -7,45 +7,40 @@ for (var i = 0; i < 100; i++)
fatFunc += "function f100() { return 42 }\n";
fatFunc += "return f0";
for (var signals = 0; signals <= 1; signals++) {
suppressSignalHandlers(Boolean(signals));
for (let threshold of [0, 50, 100, 5000, -1]) {
setJitCompilerOption("jump-threshold", threshold);
for (let threshold of [0, 50, 100, 5000, -1]) {
setJitCompilerOption("jump-threshold", threshold);
assertEq(asmCompile(
USE_ASM + `
function h() { return ((g()|0)+2)|0 }
function g() { return ((f()|0)+1)|0 }
function f() { return 42 }
return h
`)()(), 45);
assertEq(asmCompile(
if (isSimdAvailable() && this.SIMD) {
var buf = new ArrayBuffer(BUF_MIN);
new Int32Array(buf)[0] = 10;
new Float32Array(buf)[1] = 42;
assertEq(asmCompile('stdlib', 'ffis', 'buf',
USE_ASM + `
function h() { return ((g()|0)+2)|0 }
function g() { return ((f()|0)+1)|0 }
function f() { return 42 }
var H = new stdlib.Uint8Array(buf);
var i4 = stdlib.SIMD.Int32x4;
var f4 = stdlib.SIMD.Float32x4;
var i4load = i4.load;
var f4load = f4.load;
var toi4 = i4.fromFloat32x4;
var i4ext = i4.extractLane;
function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
function h(i) { i=i|0; return g(i)|0 }
return h
`)()(), 45);
if (isSimdAvailable() && this.SIMD) {
var buf = new ArrayBuffer(BUF_MIN);
new Int32Array(buf)[0] = 10;
new Float32Array(buf)[1] = 42;
assertEq(asmCompile('stdlib', 'ffis', 'buf',
USE_ASM + `
var H = new stdlib.Uint8Array(buf);
var i4 = stdlib.SIMD.Int32x4;
var f4 = stdlib.SIMD.Float32x4;
var i4load = i4.load;
var f4load = f4.load;
var toi4 = i4.fromFloat32x4;
var i4ext = i4.extractLane;
function f(i) { i=i|0; return i4ext(i4load(H, i), 0)|0 }
function g(i) { i=i|0; return (i4ext(toi4(f4load(H, i)),1) + (f(i)|0))|0 }
function h(i) { i=i|0; return g(i)|0 }
return h
`)(this, null, buf)(0), 52);
}
enableSPSProfiling();
asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
disableSPSProfiling();
assertEq(asmCompile(fatFunc)()(), 142);
`)(this, null, buf)(0), 52);
}
}
enableSPSProfiling();
asmLink(asmCompile(USE_ASM + 'function f() {} function g() { f() } function h() { g() } return h'))();
disableSPSProfiling();
assertEq(asmCompile(fatFunc)()(), 142);
}

View File

@ -1,30 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
setCachingEnabled(true);
var jco = getJitCompilerOptions();
if (!isCachingEnabled() || !isAsmJSCompilationAvailable())
quit(6);
// Modules compiled without signal handlers should still work even if signal
// handlers have been reactivated.
suppressSignalHandlers(true);
var code = USE_ASM + "function f() {} function g() { while(1) { f() } } return g";
var m = asmCompile(code);
assertEq(isAsmJSModule(m), true);
assertEq(isAsmJSModuleLoadedFromCache(m), false);
suppressSignalHandlers(false);
var m = asmCompile(code);
assertEq(isAsmJSModule(m), true);
assertEq(isAsmJSModuleLoadedFromCache(m), false);
var g = asmLink(m);
timeout(1);
g();
assertEq(true, false);

View File

@ -1,9 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
suppressSignalHandlers(true);
var g = asmLink(asmCompile(USE_ASM + "function f() {} function g() { while(1) { f() } } return g"));
timeout(1);
g();
assertEq(true, false);

View File

@ -1,9 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
suppressSignalHandlers(true);
var g = asmLink(asmCompile(USE_ASM + "function g() { while(1) {} } return g"));
timeout(1);
g();
assertEq(true, false);

View File

@ -1,9 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
suppressSignalHandlers(true);
var f = asmLink(asmCompile(USE_ASM + "function f(i) { i=i|0; if (!i) return; f((i-1)|0); f((i-1)|0); f((i-1)|0); f((i-1)|0); f((i-1)|0); } return f"));
timeout(1);
f(100);
assertEq(true, false);

View File

@ -1,9 +0,0 @@
// |jit-test| exitstatus: 6;
load(libdir + "asm.js");
suppressSignalHandlers(true);
var g = asmLink(asmCompile(USE_ASM + "function f(d) { d=+d; d=d*.1; d=d/.4; return +d } function g() { while(1) { +f(1.1) } } return g"));
timeout(1);
g();
assertEq(true, false);

View File

@ -1,5 +1,5 @@
// |jit-test| exitstatus: 6;
suppressSignalHandlers(true);
setJitCompilerOption('ion.interrupt-without-signals', 1);
timeout(1);
for(;;);

View File

@ -0,0 +1,52 @@
// |jit-test| test-also-wasm-baseline
load(libdir + "wasm.js");
// Build and instantiate a wasm module whose body runs a scripted sequence of
// linear-memory operations and finally returns current_memory (in wasm pages).
//
//   min, max — memory section limits, in wasm pages.
//   ops — array of op tuples:
//     ["CM", expected]           — OOB-trap unless current_memory == expected
//     ["GM", delta, expected]    — OOB-trap unless grow_memory(delta) == expected
//     ["L", type, ext, off, loc] — <type>.load<ext> offset=<off> at address <loc>
//     ["S", type, ext, off, loc] — <type>.store<ext> offset=<off> of 42 at <loc>
//
// Returns the instantiated module's (single, anonymous) exported function.
function linearModule(min, max, ops) {
    var opsText = ops.map(function (op) {
        // Declared locally: the original leaked `res` (and `text` below) as
        // implicit globals.
        var res;
        if (op[0] == "CM") {
            res = `(if (i32.ne (current_memory) (i32.const ${op[1]}))
(i32.load offset=10 (i32.const 4294967295))
(i32.const 0))`
        } else if (op[0] == "GM") {
            res = `(if (i32.ne (grow_memory (i32.const ${op[1]})) (i32.const ${op[2]}))
(i32.load offset=10 (i32.const 4294967295))
(i32.const 0))`
        } else if (op[0] == "L") {
            var type = op[1];
            var ext = op[2];
            var off = op[3];
            var loc = op[4];
            res = `(${type}.load${ext} offset=${off} (i32.const ${loc}))`;
        } else if (op[0] == "S") {
            var type = op[1];
            var ext = op[2];
            var off = op[3];
            var loc = op[4];
            res = `(${type}.store${ext} offset=${off} (i32.const ${loc}) (i32.const 42))`;
        }
        return res;
    }).join("\n");

    // Only emit data segments when the memory has a non-zero initial size.
    var text =
        `(module
(memory ${min} ${max}` +
        (min != 0 ? `(segment 0 "\\00\\01\\02\\03\\04\\05\\06\\07\\08\\09\\0a\\0b\\0c\\0d\\0e\\0f")
(segment 16 "\\f0\\f1\\f2\\f3\\f4\\f5\\f6\\f7\\f8\\f9\\fa\\fb\\fc\\fd\\fe\\ff")`
        : "") +
        `)
(func (result i32)
` + opsText + `
(current_memory)
) (export "" 0))`;
    return wasmEvalText(text);
}
// Assert that invoking `thunk` throws the wasm invalid/out-of-range index error.
function assertOOB(thunk) {
    assertErrorMessage(thunk, Error, /invalid or out-of-range index/);
}
// Smoke test: a [3,5]-page memory whose body only verifies current_memory == 3
// (no grow); the exported function then returns current_memory, expected 3.
assertEq(linearModule(3,5, [["CM", 3]])(), 3);

View File

@ -148,9 +148,14 @@ for (let [type, ext] of [
assertErrorMessage(() => badStoreModule(type, ext), TypeError, /can't touch memory/);
}
for (var ind = 0; ind < 2; ind++) {
for (var ind = 0; ind < 1; ind++) {
/*
* TODO: wasm.explicit-bounds-check option is being deprecated. We will be adding a
* new option that treats all offset as "non-foldable". When that is added trigger
* it here when ind == 1.
if (ind == 1)
setJitCompilerOption('wasm.explicit-bounds-checks', 1);
*/
testLoad('i32', '', 0, 0, 0, 0x03020100);
testLoad('i32', '', 1, 0, 1, 0x04030201);

View File

@ -8,9 +8,15 @@ const Memory = WebAssembly.Memory;
const Table = WebAssembly.Table;
const mem1Page = new Memory({initial:1});
const mem1PageMax1 = new Memory({initial:1, maximum: 1});
const mem2Page = new Memory({initial:2});
const mem2PageMax2 = new Memory({initial:2, maximum: 2});
const mem2PageMax3 = new Memory({initial:2, maximum: 3});
const mem2PageMax4 = new Memory({initial:2, maximum: 4});
const mem3Page = new Memory({initial:3});
const mem3PageMax3 = new Memory({initial:3, maximum: 3});
const mem4Page = new Memory({initial:4});
const mem4PageMax4 = new Memory({initial:4, maximum: 4});
const tab1Elem = new Table({initial:1, element:"anyfunc"});
const tab2Elem = new Table({initial:2, element:"anyfunc"});
const tab3Elem = new Table({initial:3, element:"anyfunc"});
@ -20,6 +26,8 @@ const tab4Elem = new Table({initial:4, element:"anyfunc"});
// is used by default everywhere.
const textToBinary = str => wasmTextToBinary(str, 'new-format');
assertErrorMessage(() => new Memory({initial:2, maximum:1}), TypeError, /bad Memory maximum size/);
const m1 = new Module(textToBinary('(module (import "foo" "bar") (import "baz" "quux"))'));
assertErrorMessage(() => new Instance(m1), TypeError, /no import object given/);
assertErrorMessage(() => new Instance(m1, {foo:null}), TypeError, /import object field is not an Object/);
@ -33,9 +41,15 @@ assertErrorMessage(() => new Instance(m2), TypeError, /no import object given/);
assertErrorMessage(() => new Instance(m2, {x:null}), TypeError, /import object field is not an Object/);
assertErrorMessage(() => new Instance(m2, {x:{y:{}}}), TypeError, /import object field is not a Memory/);
assertErrorMessage(() => new Instance(m2, {x:{y:mem1Page}}), TypeError, /imported Memory with incompatible size/);
assertErrorMessage(() => new Instance(m2, {x:{y:mem1PageMax1}}), TypeError, /imported Memory with incompatible size/);
assertErrorMessage(() => new Instance(m2, {x:{y:mem4Page}}), TypeError, /imported Memory with incompatible size/);
assertEq(new Instance(m2, {x:{y:mem2Page}}) instanceof Instance, true);
assertEq(new Instance(m2, {x:{y:mem3Page}}) instanceof Instance, true);
assertErrorMessage(() => new Instance(m2, {x:{y:mem4PageMax4}}), TypeError, /imported Memory with incompatible size/);
assertErrorMessage(() => new Instance(m2, {x:{y:mem2Page}}), TypeError, /imported Memory with incompatible size/);
assertEq(new Instance(m2, {x:{y:mem2PageMax2}}) instanceof Instance, true);
assertErrorMessage(() => new Instance(m2, {x:{y:mem3Page}}), TypeError, /imported Memory with incompatible size/);
assertEq(new Instance(m2, {x:{y:mem3PageMax3}}) instanceof Instance, true);
assertEq(new Instance(m2, {x:{y:mem2PageMax3}}) instanceof Instance, true);
assertErrorMessage(() => new Instance(m2, {x:{y:mem2PageMax4}}), TypeError, /imported Memory with incompatible size/);
const m3 = new Module(textToBinary('(module (import "foo" "bar" (memory 1 1)) (import "baz" "quux"))'));
assertErrorMessage(() => new Instance(m3), TypeError, /no import object given/);
@ -43,7 +57,8 @@ assertErrorMessage(() => new Instance(m3, {foo:null}), TypeError, /import object
assertErrorMessage(() => new Instance(m3, {foo:{bar:{}}}), TypeError, /import object field is not a Memory/);
assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:null}), TypeError, /import object field is not an Object/);
assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:mem1Page}}), TypeError, /import object field is not a Function/);
assertEq(new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:()=>{}}}) instanceof Instance, true);
assertErrorMessage(() => new Instance(m3, {foo:{bar:mem1Page}, baz:{quux:()=>{}}}), TypeError, /imported Memory with incompatible size/);
assertEq(new Instance(m3, {foo:{bar:mem1PageMax1}, baz:{quux:()=>{}}}) instanceof Instance, true);
const m4 = new Module(textToBinary('(module (import "baz" "quux") (import "foo" "bar" (memory 1 1)))'));
assertErrorMessage(() => new Instance(m4), TypeError, /no import object given/);
@ -51,7 +66,8 @@ assertErrorMessage(() => new Instance(m4, {baz:null}), TypeError, /import object
assertErrorMessage(() => new Instance(m4, {baz:{quux:{}}}), TypeError, /import object field is not a Function/);
assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:null}), TypeError, /import object field is not an Object/);
assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:{bar:()=>{}}}), TypeError, /import object field is not a Memory/);
assertEq(new Instance(m3, {baz:{quux:()=>{}}, foo:{bar:mem1Page}}) instanceof Instance, true);
assertErrorMessage(() => new Instance(m4, {baz:{quux:()=>{}}, foo:{bar:mem1Page}}), TypeError, /imported Memory with incompatible size/);
assertEq(new Instance(m3, {baz:{quux:()=>{}}, foo:{bar:mem1PageMax1}}) instanceof Instance, true);
const m5 = new Module(textToBinary('(module (import "a" "b" (memory 2)))'));
assertErrorMessage(() => new Instance(m5, {a:{b:mem1Page}}), TypeError, /imported Memory with incompatible size/);
@ -125,7 +141,7 @@ var arr = [];
var importObj = {
get foo() {
arr.push("foo");
return { get bar() { arr.push("bar"); return new WebAssembly.Memory({initial:1}) } }
return { get bar() { arr.push("bar"); return new WebAssembly.Memory({initial:1, maximum:1}) } }
},
get baz() {
arr.push("baz");
@ -264,7 +280,7 @@ assertEq(e.tbl1.get(0), e.tbl1.get(3));
// Re-exports and Identity:
var code = textToBinary('(module (import "a" "b" (memory 1 1)) (export "foo" memory) (export "bar" memory))');
var mem = new Memory({initial:1});
var mem = new Memory({initial:1, maximum:1});
var e = new Instance(new Module(code), {a:{b:mem}}).exports;
assertEq(mem, e.foo);
assertEq(mem, e.bar);
@ -320,7 +336,7 @@ var m = new Module(textToBinary(`
(i32.load8_u (get_local $p)))
(export "get" $get))
`));
var mem = new Memory({initial:1});
var mem = new Memory({initial:1, maximum:1});
var {get} = new Instance(m, {a:{b:mem}}).exports;
assertEq(get(0), 0xa);
assertEq(get(1), 0xb);

View File

@ -123,6 +123,9 @@ assertErrorMessage(() => new Memory(1), TypeError, "first argument must be a mem
assertErrorMessage(() => new Memory({initial:{valueOf() { throw new Error("here")}}}), Error, "here");
assertErrorMessage(() => new Memory({initial:-1}), TypeError, /bad Memory initial size/);
assertErrorMessage(() => new Memory({initial:Math.pow(2,32)}), TypeError, /bad Memory initial size/);
assertErrorMessage(() => new Memory({initial:1, maximum: Math.pow(2,32)/Math.pow(2,14) }), TypeError, /bad Memory maximum size/);
assertErrorMessage(() => new Memory({initial:2, maximum: 1 }), TypeError, /bad Memory maximum size/);
assertErrorMessage(() => new Memory({maximum: -1 }), TypeError, /bad Memory maximum size/);
assertEq(new Memory({initial:1}) instanceof Memory, true);
assertEq(new Memory({initial:1.5}).buffer.byteLength, 64*1024);

View File

@ -1,29 +0,0 @@
load(libdir + 'wasm.js');
load(libdir + 'asserts.js');
// Explicitly opt into the new binary format for imports and exports until it
// is used by default everywhere.
const textToBinary = str => wasmTextToBinary(str, 'new-format');
if (!wasmUsesSignalForOOB())
quit();
const Module = WebAssembly.Module;
const Instance = WebAssembly.Instance;
const Memory = WebAssembly.Memory;
const code = textToBinary('(module (import "x" "y" (memory 1 1)))');
suppressSignalHandlers(true);
var mem = new Memory({initial:1});
suppressSignalHandlers(false);
var m = new Module(code);
suppressSignalHandlers(true);
assertErrorMessage(() => new Instance(m, {x:{y:mem}}), Error, /signals/);
var m = new Module(code);
suppressSignalHandlers(false);
assertEq(new Instance(m, {x:{y:mem}}) instanceof Instance, true);
var mem = new Memory({initial:1});
suppressSignalHandlers(true);
var m = new Module(code);
suppressSignalHandlers(false);
assertEq(new Instance(m, {x:{y:mem}}) instanceof Instance, true);

View File

@ -0,0 +1,127 @@
;; Without a memory section, current_memory and grow_memory must not validate.
(assert_invalid
(module
(func $cm (result i32)
(current_memory)
)
)
"memory operators require a memory section"
)
(assert_invalid
(module
(func $gm (param i32) (result i32)
(grow_memory (get_local 0))
)
)
"memory operators require a memory section"
)
;; Test current_memory/grow_memory on a memory with limits [0, 10] pages.
;; The $ldst<N> helpers store then reload an N-bit value at a given address,
;; so they double as bounds probes for the mapped region.
(module (memory 0 10)
(func $gm (param i32) (result i32)
(grow_memory (get_local 0))
)
(func $cm (result i32)
(current_memory)
)
(func $ldst8 (param i32) (param i32) (result i32)
(block
(i32.store8 (get_local 0) (get_local 1))
(i32.load8_u (get_local 0))
)
)
(func $ldst16 (param i32) (param i32) (result i32)
(block
(i32.store16 (get_local 0) (get_local 1))
(i32.load16_u (get_local 0))
)
)
(func $ldst32 (param i32) (param i32) (result i32)
(block
(i32.store (get_local 0) (get_local 1))
(i32.load (get_local 0))
)
)
(func $ldst64 (param i32) (param i64) (result i64)
(block
(i64.store (get_local 0) (get_local 1))
(i64.load (get_local 0))
)
)
(export "cm" $cm)
(export "gm" $gm)
(export "ldst8" $ldst8)
(export "ldst16" $ldst16)
(export "ldst32" $ldst32)
(export "ldst64" $ldst64)
)
;; Call current_memory on 0-sized memory
(assert_return (invoke "cm") (i32.const 0))
;; Growing by 0 is ok and doesn't map any new pages
(assert_return (invoke "gm" (i32.const 0)) (i32.const 0))
(assert_return (invoke "cm") (i32.const 0))
(assert_trap (invoke "ldst8" (i32.const 0) (i32.const 42)) "out of bounds memory access")
;; Can't grow by more than what's allowed by the declared maximum
(assert_return (invoke "gm" (i32.const 11)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 0))
(assert_trap (invoke "ldst8" (i32.const 0) (i32.const 42)) "out of bounds memory access")
;; Growing by X enables exactly X pages
(assert_return (invoke "gm" (i32.const 1)) (i32.const 0))
(assert_return (invoke "cm") (i32.const 1))
(assert_return (invoke "ldst8" (i32.const 0) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst8" (i32.const 65535) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst16" (i32.const 65534) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst32" (i32.const 65532) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst64" (i32.const 65528) (i64.const 42)) (i64.const 42))
(assert_trap (invoke "ldst8" (i32.const 65536) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst16" (i32.const 65535) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst32" (i32.const 65533) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst64" (i32.const 65529) (i64.const 42)) "out of bounds memory access")
;; grow_memory returns the previous size in pages, and again we've added only as many pages as requested.
(assert_return (invoke "gm" (i32.const 2)) (i32.const 1))
(assert_return (invoke "cm") (i32.const 3))
;; and again we have only allocated 2 additional pages.
(assert_return (invoke "ldst8" (i32.const 0) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst8" (i32.const 196607) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst16" (i32.const 196606) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst32" (i32.const 196604) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst64" (i32.const 196600) (i64.const 42)) (i64.const 42))
(assert_trap (invoke "ldst8" (i32.const 196608) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst16" (i32.const 196607) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst32" (i32.const 196605) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst64" (i32.const 196601) (i64.const 42)) "out of bounds memory access")
;; One more time: can't grow by more than what's allowed, and a failed grow doesn't add new pages
(assert_return (invoke "gm" (i32.const 8)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 3))
(assert_return (invoke "ldst8" (i32.const 196607) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst16" (i32.const 196606) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst32" (i32.const 196604) (i32.const 42)) (i32.const 42))
(assert_return (invoke "ldst64" (i32.const 196600) (i64.const 42)) (i64.const 42))
(assert_trap (invoke "ldst8" (i32.const 196608) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst16" (i32.const 196607) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst32" (i32.const 196605) (i32.const 42)) "out of bounds memory access")
(assert_trap (invoke "ldst64" (i32.const 196601) (i64.const 42)) "out of bounds memory access")
;; Can't grow by a number of pages that would overflow UINT32 when scaled by the wasm page size
(assert_return (invoke "gm" (i32.const 65534)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 3))
(assert_return (invoke "gm" (i32.const 65535)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 3))
(assert_return (invoke "gm" (i32.const 65536)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 3))
(assert_return (invoke "gm" (i32.const 65537)) (i32.const -1))
(assert_return (invoke "cm") (i32.const 3))

View File

@ -0,0 +1,2 @@
// |jit-test| test-also-wasm-baseline
// Run the grow-memory wast file through the shared spec-test harness.
var importedArgs = ['grow-memory.wast'];
load(scriptdir + '../spec.js');

View File

@ -224,9 +224,9 @@ DefaultJitOptions::DefaultJitOptions()
// Test whether wasm int64 / double NaN bits testing is enabled.
SET_DEFAULT(wasmTestMode, false);
// Determines whether explicit bounds check will be used for OOB
// instead of signals (even when signals are available).
SET_DEFAULT(wasmExplicitBoundsChecks, false);
// Determines whether we suppress using signal handlers
// for interrupting jit-ed code. This is used only for testing.
SET_DEFAULT(ionInterruptWithoutSignals, false);
}
bool

View File

@ -70,7 +70,7 @@ struct DefaultJitOptions
bool limitScriptSize;
bool osr;
bool wasmTestMode;
bool wasmExplicitBoundsChecks;
bool ionInterruptWithoutSignals;
uint32_t baselineWarmUpThreshold;
uint32_t exceptionBailoutThreshold;
uint32_t frequentBailoutThreshold;

View File

@ -94,7 +94,7 @@ static void
TryToUseImplicitInterruptCheck(MIRGraph& graph, MBasicBlock* backedge)
{
// Implicit interrupt checks require asm.js signal handlers to be installed.
if (!wasm::HaveSignalHandlers())
if (!wasm::HaveSignalHandlers() || JitOptions.ionInterruptWithoutSignals)
return;
// To avoid triggering expensive interrupts (backedge patching) in

View File

@ -5417,6 +5417,24 @@ MWasmCall::New(TempAllocator& alloc, const wasm::CallSiteDesc& desc, const wasm:
return call;
}
// Create a call to a builtin "instance method": like a plain builtin call,
// except the wasm::Instance* is implicitly passed as the first argument. The
// instance pointer itself is materialized at the call site from TLS (see
// MacroAssembler::wasmCallBuiltinInstanceMethod); here we only record which
// ABI slot (`instanceArg`) it must occupy.
MWasmCall*
MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
                                        const wasm::CallSiteDesc& desc,
                                        const wasm::SymbolicAddress builtin,
                                        const ABIArg& instanceArg,
                                        const Args& args,
                                        MIRType resultType,
                                        uint32_t spIncrement)
{
    auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
    MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
                                     MWasmCall::DontSaveTls, nullptr);
    // MWasmCall::New can fail (OOM); don't dereference a null result.
    if (!call)
        return nullptr;

    MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized.
    call->instanceArg_ = instanceArg;
    return call;
}
void
MSqrt::trySpecializeFloat32(TempAllocator& alloc) {
if (!input()->canProduceFloat32() || !CheckUsesAreFloat32Consumers(this)) {

View File

@ -13610,6 +13610,7 @@ class MWasmCall final
FixedList<AnyRegister> argRegs_;
uint32_t spIncrement_;
uint32_t tlsStackOffset_;
ABIArg instanceArg_;
MWasmCall(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee, uint32_t spIncrement,
uint32_t tlsStackOffset)
@ -13636,6 +13637,14 @@ class MWasmCall final
uint32_t spIncrement, uint32_t tlsStackOffset,
MDefinition* tableIndex = nullptr);
static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
const wasm::CallSiteDesc& desc,
const wasm::SymbolicAddress builtin,
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
uint32_t spIncrement);
size_t numArgs() const {
return argRegs_.length();
}
@ -13663,6 +13672,10 @@ class MWasmCall final
bool possiblyCalls() const override {
return true;
}
const ABIArg& instanceArg() const {
return instanceArg_;
}
};
class MAsmSelect

View File

@ -39,11 +39,6 @@ class MIRGenerator
TempAllocator* alloc, MIRGraph* graph,
const CompileInfo* info, const OptimizationInfo* optimizationInfo);
void initUsesSignalHandlersForAsmJSOOB(bool init) {
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
usesSignalHandlersForAsmJSOOB_ = init;
#endif
}
void initMinAsmJSHeapLength(uint32_t init) {
minAsmJSHeapLength_ = init;
}
@ -200,9 +195,6 @@ class MIRGenerator
void addAbortedPreliminaryGroup(ObjectGroup* group);
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
bool usesSignalHandlersForAsmJSOOB_;
#endif
uint32_t minAsmJSHeapLength_;
void setForceAbort() {

View File

@ -39,9 +39,6 @@ MIRGenerator::MIRGenerator(CompileCompartment* compartment, const JitCompileOpti
instrumentedProfiling_(false),
instrumentedProfilingIsCached_(false),
safeForMinorGC_(true),
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
usesSignalHandlersForAsmJSOOB_(false),
#endif
minAsmJSHeapLength_(0),
options(options),
gs_(alloc)
@ -116,11 +113,11 @@ MIRGenerator::needsBoundsCheckBranch(const MWasmMemoryAccess* access) const
// We use signal-handlers on x64, but on x86 there isn't enough address
// space for a guard region. Also, on x64 the atomic loads and stores
// can't (yet) use the signal handlers.
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
return false;
#endif
#ifdef WASM_HUGE_MEMORY
return false;
#else
return access->needsBoundsCheck();
#endif
}
size_t
@ -133,19 +130,14 @@ MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
"WasmImmediateRange should be the size of an unconstrained "
"address immediate");
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
#ifdef WASM_HUGE_MEMORY
static_assert(wasm::Uint32Range + WasmImmediateRange + sizeof(wasm::Val) < wasm::MappedSize,
"When using signal handlers for bounds checking, a uint32 is added to the base "
"address followed by an immediate in the range [0, WasmImmediateRange). An "
"unaligned access (whose size is conservatively approximated by wasm::Val) may "
"spill over, so ensure a space at the end.");
// Signal-handling can be dynamically disabled by OS bugs or flags.
// Bug 1254935: Atomic accesses can't be handled with signal handlers yet.
if (usesSignalHandlersForAsmJSOOB_ && !access->isAtomicAccess())
return WasmImmediateRange;
#endif
return WasmImmediateRange;
#else
// On 32-bit platforms, if we've proven the access is in bounds after
// 32-bit wrapping, we can fold full offsets because they're added with
// 32-bit arithmetic.
@ -156,6 +148,7 @@ MIRGenerator::foldableOffsetRange(const MWasmMemoryAccess* access) const
// minimum heap length, and allows explicit bounds checks to fold in the
// offset without overflow.
return WasmCheckedImmediateRange;
#endif
}
void

View File

@ -2748,6 +2748,26 @@ MacroAssembler::wasmCallImport(const wasm::CallSiteDesc& desc, const wasm::Calle
call(desc, ABINonArgReg0);
}
// Emit a call to a builtin instance method: load the current wasm::Instance*
// out of TLS into the ABI location reserved for the builtin's implicit first
// argument (register or outgoing stack slot), then call the builtin.
void
MacroAssembler::wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
wasm::SymbolicAddress builtin)
{
MOZ_ASSERT(instanceArg != ABIArg()); // caller must have computed the ABI slot
if (instanceArg.kind() == ABIArg::GPR) {
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), instanceArg.gpr());
} else if (instanceArg.kind() == ABIArg::Stack) {
// Safe to use ABINonArgReg0 since it's the last thing before the call.
Register scratch = ABINonArgReg0;
loadPtr(Address(WasmTlsReg, offsetof(wasm::TlsData, instance)), scratch);
storePtr(scratch, Address(getStackPointer(), instanceArg.offsetFromArgBase()));
} else {
MOZ_CRASH("Unknown abi passing style for pointer");
}
call(builtin);
}
void
MacroAssembler::wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee)
{

View File

@ -1333,6 +1333,12 @@ class MacroAssembler : public MacroAssemblerSpecific
// WasmTableCallIndexReg must contain the index of the indirect call.
void wasmCallIndirect(const wasm::CallSiteDesc& desc, const wasm::CalleeDesc& callee);
// This function takes care of loading the pointer to the current instance
// as the implicit first argument. It preserves TLS and pinned registers.
// (TLS & pinned regs are non-volatile registers in the system ABI).
void wasmCallBuiltinInstanceMethod(const ABIArg& instanceArg,
wasm::SymbolicAddress builtin);
//}}} check_macroassembler_style
public:

View File

@ -1271,6 +1271,26 @@ class ABIArg
bool argInRegister() const { return kind() != Stack; }
AnyRegister reg() const { return kind_ == GPR ? AnyRegister(gpr()) : AnyRegister(fpu()); }
// Structural equality: the kinds must match and, for matching kinds, the
// kind-specific payload (register, FP register, or stack offset) must match.
bool operator==(const ABIArg& rhs) const {
if (kind_ != rhs.kind_)
return false;
// Cast to int8_t so the -1 sentinel used by a default-constructed
// (uninitialized) ABIArg is a valid case label alongside the enumerators.
switch((int8_t)kind_) {
case GPR: return u.gpr_ == rhs.u.gpr_;
#if defined(JS_CODEGEN_REGISTER_PAIR)
case GPR_PAIR: return u.gpr_ == rhs.u.gpr_;
#endif
case FPU: return u.fpu_ == rhs.u.fpu_;
case Stack: return u.offset_ == rhs.u.offset_;
case -1: return true; // both uninitialized: no payload to compare
default: MOZ_CRASH("Invalid value for ABIArg kind");
}
}
// Inequality, defined as the complement of operator==.
bool operator!=(const ABIArg& rhs) const {
return !operator==(rhs);
}
};
// Get the set of registers which should be saved by a block of code which

View File

@ -35,6 +35,7 @@
#include "mozilla/MathAlgorithms.h"
#include "mozilla/SizePrintfMacros.h"
#include "asmjs/WasmInstance.h"
#include "asmjs/WasmSignalHandlers.h"
#include "jit/arm/Assembler-arm.h"
#include "jit/arm/disasm/Constants-arm.h"
@ -390,9 +391,9 @@ bool Simulator::ICacheCheckingEnabled = false;
int64_t Simulator::StopSimAt = -1L;
Simulator*
Simulator::Create()
Simulator::Create(JSContext* cx)
{
Simulator* sim = js_new<Simulator>();
Simulator* sim = js_new<Simulator>(cx);
if (!sim)
return nullptr;
@ -1075,7 +1076,8 @@ Simulator::FlushICache(void* start_addr, size_t size)
}
}
Simulator::Simulator()
Simulator::Simulator(JSContext* cx)
: cx_(cx)
{
// Set up simulator support first. Some of this information is needed to
// setup the architecture state.
@ -1496,9 +1498,34 @@ Simulator::exclusiveMonitorClear()
exclusiveMonitorHeld_ = false;
}
// WebAssembly memories contain an extra region of guard pages (see
// WasmArrayRawBuffer comment). The guard pages catch out-of-bounds accesses
// using a signal handler that redirects PC to a stub that safely reports an
// error. However, if the handler is hit by the simulator, the PC is in C++ code
// and cannot be redirected. Therefore, we must avoid hitting the handler by
// redirecting in the simulator before the real handler would have been hit.
//
// `addr` is the simulated address about to be accessed and `numBytes` its
// access width. Returns true — after redirecting the simulated PC to the
// out-of-bounds stub — when the access lands in a wasm guard region; returns
// false when the access should proceed normally.
bool
Simulator::handleWasmFault(int32_t addr, unsigned numBytes)
{
// Only relevant while simulated code is running inside a wasm activation.
WasmActivation* act = cx_->wasmActivationStack();
if (!act)
return false;
void* pc = reinterpret_cast<void*>(get_pc());
wasm::Instance* instance = act->compartment()->wasm.lookupInstanceDeprecated(pc);
if (!instance || !instance->memoryAccessInGuardRegion((uint8_t*)addr, numBytes))
return false;
// Redirect the simulated PC to the instance's out-of-bounds trap stub.
set_pc(int32_t(instance->codeSegment().outOfBoundsCode()));
return true;
}
int
Simulator::readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
{
if (handleWasmFault(addr, 4))
return -1;
if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
return *ptr;
@ -1522,6 +1549,9 @@ Simulator::readW(int32_t addr, SimInstruction* instr, UnalignedPolicy f)
void
Simulator::writeW(int32_t addr, int value, SimInstruction* instr, UnalignedPolicy f)
{
if (handleWasmFault(addr, 4))
return;
if ((addr & 3) == 0 || (f == AllowUnaligned && !HasAlignmentFault())) {
intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
*ptr = value;
@ -1594,6 +1624,9 @@ Simulator::writeExW(int32_t addr, int value, SimInstruction* instr)
uint16_t
Simulator::readHU(int32_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return UINT16_MAX;
// The regexp engine emits unaligned loads, so we don't check for them here
// like most of the other methods do.
if ((addr & 1) == 0 || !HasAlignmentFault()) {
@ -1617,6 +1650,9 @@ Simulator::readHU(int32_t addr, SimInstruction* instr)
int16_t
Simulator::readH(int32_t addr, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return -1;
if ((addr & 1) == 0 || !HasAlignmentFault()) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
@ -1638,6 +1674,9 @@ Simulator::readH(int32_t addr, SimInstruction* instr)
void
Simulator::writeH(int32_t addr, uint16_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return;
if ((addr & 1) == 0 || !HasAlignmentFault()) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
@ -1658,6 +1697,9 @@ Simulator::writeH(int32_t addr, uint16_t value, SimInstruction* instr)
void
Simulator::writeH(int32_t addr, int16_t value, SimInstruction* instr)
{
if (handleWasmFault(addr, 2))
return;
if ((addr & 1) == 0 || !HasAlignmentFault()) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
@ -1711,6 +1753,9 @@ Simulator::writeExH(int32_t addr, uint16_t value, SimInstruction* instr)
uint8_t
Simulator::readBU(int32_t addr)
{
if (handleWasmFault(addr, 1))
return UINT8_MAX;
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
return *ptr;
}
@ -1739,6 +1784,9 @@ Simulator::writeExB(int32_t addr, uint8_t value)
int8_t
Simulator::readB(int32_t addr)
{
if (handleWasmFault(addr, 1))
return -1;
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
return *ptr;
}
@ -1746,6 +1794,9 @@ Simulator::readB(int32_t addr)
void
Simulator::writeB(int32_t addr, uint8_t value)
{
if (handleWasmFault(addr, 1))
return;
uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
*ptr = value;
}
@ -1753,6 +1804,9 @@ Simulator::writeB(int32_t addr, uint8_t value)
void
Simulator::writeB(int32_t addr, int8_t value)
{
if (handleWasmFault(addr, 1))
return;
int8_t* ptr = reinterpret_cast<int8_t*>(addr);
*ptr = value;
}

View File

@ -100,12 +100,12 @@ class Simulator
};
// Returns nullptr on OOM.
static Simulator* Create();
static Simulator* Create(JSContext* cx);
static void Destroy(Simulator* simulator);
// Constructor/destructor are for internal use only; use the static methods above.
Simulator();
explicit Simulator(JSContext* cx);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
@ -259,6 +259,9 @@ class Simulator
inline void increaseStopCounter(uint32_t bkpt_code);
void printStopInfo(uint32_t code);
// Handle any wasm faults, returning true if the fault was handled.
inline bool handleWasmFault(int32_t addr, unsigned numBytes);
// Read and write memory.
inline uint8_t readBU(int32_t addr);
inline int8_t readB(int32_t addr);
@ -349,6 +352,8 @@ class Simulator
void callInternal(uint8_t* entry);
JSContext* const cx_;
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q

View File

@ -153,7 +153,7 @@ Simulator* Simulator::Current() {
}
Simulator* Simulator::Create() {
Simulator* Simulator::Create(JSContext* cx) {
Decoder *decoder = js_new<vixl::Decoder>();
if (!decoder)
return nullptr;

View File

@ -713,7 +713,7 @@ class Simulator : public DecoderVisitor {
// Moz changes.
void init(Decoder* decoder, FILE* stream);
static Simulator* Current();
static Simulator* Create();
static Simulator* Create(JSContext* cx);
static void Destroy(Simulator* sim);
uintptr_t stackLimit() const;
uintptr_t* addressOfStackLimit();

View File

@ -518,7 +518,7 @@ bool Simulator::ICacheCheckingEnabled = false;
int Simulator::StopSimAt = -1;
Simulator*
Simulator::Create()
Simulator::Create(JSContext* cx)
{
Simulator* sim = js_new<Simulator>();
if (!sim)

View File

@ -142,7 +142,7 @@ class Simulator {
};
// Returns nullptr on OOM.
static Simulator* Create();
static Simulator* Create(JSContext* cx);
static void Destroy(Simulator* simulator);

View File

@ -556,7 +556,7 @@ bool Simulator::ICacheCheckingEnabled = false;
int64_t Simulator::StopSimAt = -1;
Simulator *
Simulator::Create()
Simulator::Create(JSContext* cx)
{
Simulator* sim = js_new<Simulator>();
if (!sim)

View File

@ -147,7 +147,7 @@ class Simulator {
};
// Returns nullptr on OOM.
static Simulator* Create();
static Simulator* Create(JSContext* cx);
static void Destroy(Simulator* simulator);

View File

@ -1519,6 +1519,9 @@ CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
case wasm::CalleeDesc::Builtin:
masm.call(callee.builtin());
break;
case wasm::CalleeDesc::BuiltinInstanceMethod:
masm.wasmCallBuiltinInstanceMethod(mir->instanceArg(), callee.builtin());
break;
}
// After return, restore the caller's TLS and pinned registers.

View File

@ -470,14 +470,6 @@ AsmJSMemoryAccess(uint32_t before, wasm::MemoryAccess::OutOfBoundsBehavior throw
offsetWithinWholeSimdVector);
}
static wasm::MemoryAccess
WasmMemoryAccess(uint32_t before)
{
return wasm::MemoryAccess(before,
wasm::MemoryAccess::Throw,
wasm::MemoryAccess::DontWrapOffset);
}
void
CodeGeneratorX64::load(Scalar::Type type, const Operand& srcAddr, AnyRegister out)
{
@ -554,8 +546,6 @@ CodeGeneratorX64::emitWasmLoad(T* ins)
verifyLoadDisassembly(before, after, isInt64, accessType, /* numElems */ 0, srcAddr,
*ins->output()->output());
masm.append(WasmMemoryAccess(before));
}
void
@ -593,8 +583,6 @@ CodeGeneratorX64::emitWasmStore(T* ins)
verifyStoreDisassembly(before, after, mir->value()->type() == MIRType::Int64,
accessType, /* numElems */ 0, dstAddr, *value);
masm.append(WasmMemoryAccess(before));
}
void
@ -699,7 +687,10 @@ CodeGeneratorX64::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
// We cannot emulate atomic accesses currently.
masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
wasm::MemoryAccess::Throw :
wasm::MemoryAccess::CarryOn)));
}
void
@ -902,7 +893,10 @@ CodeGeneratorX64::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
memoryBarrier(mir->barrierAfter());
masm.append(AsmJSMemoryAccess(before, wasm::MemoryAccess::CarryOn));
// See comment in visitAsmJSLoadHeap
masm.append(AsmJSMemoryAccess(before, (mir->isAtomicAccess() ?
wasm::MemoryAccess::Throw :
wasm::MemoryAccess::CarryOn)));
}
void

View File

@ -541,7 +541,7 @@ void
CodeGeneratorX86Shared::maybeEmitWasmBoundsCheckBranch(const MWasmMemoryAccess* mir, Register ptr,
bool redundant)
{
if (!mir->needsBoundsCheck())
if (!gen->needsBoundsCheckBranch(mir))
return;
MOZ_ASSERT(mir->endOffset() >= 1,

View File

@ -6171,8 +6171,8 @@ JS_SetGlobalJitCompilerOption(JSContext* cx, JSJitCompilerOption opt, uint32_t v
case JSJITCOMPILER_WASM_TEST_MODE:
jit::JitOptions.wasmTestMode = !!value;
break;
case JSJITCOMPILER_WASM_EXPLICIT_BOUNDS_CHECKS:
jit::JitOptions.wasmExplicitBoundsChecks = !!value;
case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
jit::JitOptions.ionInterruptWithoutSignals = !!value;
break;
default:
break;
@ -6201,8 +6201,8 @@ JS_GetGlobalJitCompilerOption(JSContext* cx, JSJitCompilerOption opt)
return rt->canUseOffthreadIonCompilation();
case JSJITCOMPILER_WASM_TEST_MODE:
return jit::JitOptions.wasmTestMode ? 1 : 0;
case JSJITCOMPILER_WASM_EXPLICIT_BOUNDS_CHECKS:
return jit::JitOptions.wasmExplicitBoundsChecks ? 1 : 0;
case JSJITCOMPILER_ION_INTERRUPT_WITHOUT_SIGNAL:
return jit::JitOptions.ionInterruptWithoutSignals ? 1 : 0;
default:
break;
}

View File

@ -5619,11 +5619,11 @@ JS_SetOffthreadIonCompilationEnabled(JSContext* cx, bool enabled);
Register(ION_GVN_ENABLE, "ion.gvn.enable") \
Register(ION_FORCE_IC, "ion.forceinlineCaches") \
Register(ION_ENABLE, "ion.enable") \
Register(ION_INTERRUPT_WITHOUT_SIGNAL, "ion.interrupt-without-signals") \
Register(BASELINE_ENABLE, "baseline.enable") \
Register(OFFTHREAD_COMPILATION_ENABLE, "offthread-compilation.enable") \
Register(JUMP_THRESHOLD, "jump-threshold") \
Register(WASM_TEST_MODE, "wasm.test-mode") \
Register(WASM_EXPLICIT_BOUNDS_CHECKS, "wasm.explicit-bounds-checks")
Register(WASM_TEST_MODE, "wasm.test-mode")
typedef enum JSJitCompilerOption {
#define JIT_COMPILER_DECLARE(key, str) \

View File

@ -1768,13 +1768,6 @@ elif test "$CPU_ARCH" = "x86"; then
elif test "$CPU_ARCH" = "x86_64"; then
AC_DEFINE(JS_CODEGEN_X64)
JS_CODEGEN_X64=1
dnl Signal-handler OOM checking requires large mprotected guard regions, so
dnl currently it is only implemented on x64.
AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS)
AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
ASMJS_MAY_USE_SIGNAL_HANDLERS=1
ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB=1
elif test "$CPU_ARCH" = "arm"; then
AC_DEFINE(JS_CODEGEN_ARM)
JS_CODEGEN_ARM=1
@ -1785,10 +1778,6 @@ elif test "$CPU_ARCH" = "arm"; then
dnl ARM platforms may trap on unaligned accesses; catch the signal and
dnl recover.
AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS)
AC_DEFINE(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
ASMJS_MAY_USE_SIGNAL_HANDLERS=1
ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED=1
elif test "$CPU_ARCH" = "mips32"; then
AC_DEFINE(JS_CODEGEN_MIPS32)
JS_CODEGEN_MIPS32=1
@ -1810,9 +1799,6 @@ AC_SUBST(JS_CODEGEN_X86)
AC_SUBST(JS_CODEGEN_X64)
AC_SUBST(JS_CODEGEN_NONE)
AC_SUBST(JS_DISASM_ARM)
AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS)
AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
AC_SUBST(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_UNALIGNED)
dnl ========================================================
dnl instruments

View File

@ -43,6 +43,26 @@ AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf)
return buf->as<SharedArrayBufferObject>().byteLength();
}
inline size_t
WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf)
{
if (buf->is<ArrayBufferObject>())
return buf->as<ArrayBufferObject>().wasmMappedSize();
#ifdef WASM_HUGE_MEMORY
return wasm::MappedSize;
#else
return buf->as<SharedArrayBufferObject>().byteLength();
#endif
}
inline uint32_t
WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf)
{
if (buf->is<ArrayBufferObject>())
return buf->as<ArrayBufferObject>().wasmActualByteLength();
return buf->as<SharedArrayBufferObject>().byteLength();
}
inline ArrayBufferObjectMaybeShared&
AsAnyArrayBuffer(HandleValue val)
{

View File

@ -4,10 +4,13 @@
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "vm/ArrayBufferObject-inl.h"
#include "vm/ArrayBufferObject.h"
#include "mozilla/Alignment.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/FloatingPoint.h"
#include "mozilla/Maybe.h"
#include "mozilla/PodOperations.h"
#include "mozilla/TaggedAnonymousMemory.h"
@ -34,6 +37,7 @@
#endif
#include "jswrapper.h"
#include "asmjs/WasmSignalHandlers.h"
#include "asmjs/WasmTypes.h"
#include "gc/Barrier.h"
#include "gc/Marking.h"
@ -53,6 +57,9 @@
using JS::ToInt32;
using mozilla::DebugOnly;
using mozilla::CheckedInt;
using mozilla::Some;
using mozilla::Maybe;
using namespace js;
using namespace js::gc;
@ -372,150 +379,359 @@ ArrayBufferObject::changeContents(JSContext* cx, BufferContents newContents)
changeViewContents(cx, firstView(), oldDataPointer, newContents);
}
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
# ifdef XP_WIN
static void*
AllocateWasmMappedMemory(uint32_t numBytes)
{
MOZ_ASSERT(numBytes % wasm::PageSize == 0);
/*
* Wasm Raw Buf Linear Memory Structure
*
* The linear heap in Wasm is an mmaped array buffer. Several
* constants manage its lifetime:
*
* - length - the wasm-visible current length of the buffer. Acesses in the
* range [0, length] succeed. May only increase
*
* - boundsCheckLimit - size against which we perform bounds checks. It is
* always a constant offset smaller than mapped_size. Currently that constant
* offset is 0.
*
* - max - the optional declared limit on how much length can grow.
*
* - mapped_size - the actual mmaped size. Access in the range
* [0, mapped_size] will either succeed, or be handled by the wasm signal
* handlers.
*
* The below diagram shows the layout of the wams heap. The wasm-visible
* portion of the heap starts at 0. There is one extra page prior to the
* start of the wasm heap which contains the WasmArrayRawBuffer struct at
* its end. (i.e. right before the start of the WASM heap).
*
* WasmArrayRawBuffer
* \ ArrayBufferObject::dataPointer()
* \ /
* \ |
* ______|_|____________________________________________________________
* |______|_|______________|___________________|____________|____________|
* 0 length maxSize boundsCheckLimit mappedSize
*
* \_______________________/
* COMMITED
* \____________________________________________/
* SLOP
* \_____________________________________________________________________/
* MAPPED
*
* Invariants:
* - length only increases
* - 0 <= length <= maxSize (if present) <= boundsCheckLimit <= mappedSize
* - on ARM boundsCheckLimit must be a valid ARM immediate.
* - if maxSize is not specified, boundsCheckLimit/mappedSize may grow. They are
* otherwise constant.
*
* NOTE: For asm.js on non-x64 we guarantee that
*
* length == maxSize == boundsCheckLimit == mappedSize
*
* That is, signal handlers will not be invoked, since they cannot emulate
* asm.js accesses on non-x64 architectures.
*
* The region between length and mappedSize is the SLOP - an area where we use
* signal handlers to catch things that slip by bounds checks. Logically it has
* two parts:
*
* - from length to boundsCheckLimit - this part of the SLOP serves to catch
* accesses to memory we have reserved but not yet grown into. This allows us
* to grow memory up to max (when present) without having to patch/update the
* bounds checks.
*
* - from boundsCheckLimit to mappedSize - (Note: In current patch 0) - this
* part of the SLOP allows us to bounds check against base pointers and fold
* some constant offsets inside loads. This enables better Bounds
* Check Elimination.
*
*/
void* data = VirtualAlloc(nullptr, wasm::MappedSize, MEM_RESERVE, PAGE_NOACCESS);
class js::WasmArrayRawBuffer
{
uint32_t length_;
Maybe<uint32_t> maxSize_;
size_t mappedSize_;
protected:
WasmArrayRawBuffer(uint8_t* buffer, uint32_t length, Maybe<uint32_t> maxSize, size_t mappedSize)
: length_(length), maxSize_(maxSize), mappedSize_(mappedSize)
{
MOZ_ASSERT(buffer == dataPointer());
}
public:
static WasmArrayRawBuffer* Allocate(uint32_t numBytes, Maybe<uint32_t> maxSize);
static void Release(void* mem);
uint8_t* dataPointer() {
uint8_t* ptr = reinterpret_cast<uint8_t*>(this);
return ptr + sizeof(WasmArrayRawBuffer);
}
uint8_t* basePointer() {
return dataPointer() - gc::SystemPageSize();
}
// TODO: actualByteLength in WasmArrayRawBuffer is a temporary hack to allow
// keeping track of the size of dynamically growing WASM memory. We can't
// keep it in the containg ArrayBufferObject's byte length field since those
// are immutable. This will be removed in a followup resizing patch.
uint32_t actualByteLength() const {
return length_;
}
size_t mappedSize() const {
return mappedSize_;
}
Maybe<uint32_t> maxSize() const {
return maxSize_;
}
size_t allocatedBytes() const {
return mappedSize_ + gc::SystemPageSize();
}
uint32_t boundsCheckLimit() const {
#ifdef WASM_HUGE_MEMORY
MOZ_CRASH();
return 0;
#else
return (uint32_t) mappedSize_;
#endif
}
MOZ_MUST_USE bool growLength(uint32_t deltaLength)
{
// This should be guaranteed by Instance::growMemory
MOZ_ASSERT(maxSize_);
MOZ_ASSERT(deltaLength % wasm::PageSize == 0);
CheckedInt<uint32_t> curLength = actualByteLength();
CheckedInt<uint32_t> newLength = curLength + deltaLength;
MOZ_RELEASE_ASSERT(newLength.isValid());
MOZ_ASSERT(newLength.value() <= maxSize_.value());
uint8_t* dataEnd = dataPointer() + curLength.value();
MOZ_ASSERT(((intptr_t)dataEnd) % gc::SystemPageSize() == 0);
# ifdef XP_WIN
if (deltaLength && !VirtualAlloc(dataEnd, deltaLength, MEM_COMMIT, PAGE_READWRITE))
return false;
# else // XP_WIN
if (deltaLength && mprotect(dataEnd, deltaLength, PROT_READ | PROT_WRITE))
return false;
# endif // !XP_WIN
# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, deltaLength);
# endif
MemProfiler::SampleNative(dataEnd, deltaLength);
length_ = newLength.value();
return true;
}
// Try and grow the mapped region of memory. Does not changes current or
// max size. Does not move memory if no space to grow.
void tryGrowMaxSize(uint32_t deltaMaxSize)
{
MOZ_ASSERT(maxSize_);
MOZ_RELEASE_ASSERT(deltaMaxSize % wasm::PageSize == 0);
CheckedInt<uint32_t> curMax = maxSize_.value();
CheckedInt<uint32_t> newMax = curMax + deltaMaxSize;
MOZ_RELEASE_ASSERT(newMax.isValid());
MOZ_RELEASE_ASSERT(newMax.value() % wasm::PageSize == 0);
size_t newMapped = wasm::LegalizeMapLength(newMax.value());
# ifdef XP_WIN
if (!VirtualAlloc(dataPointer(), newMapped, MEM_RESERVE, PAGE_NOACCESS))
return;
# elif defined(XP_DARWIN)
// No mechanism for remapping on MaxOS. Luckily shouldn't need it here
// as most MacOS configs are 64 bit
return;
#else // Unix
// Note this will not move memory (no MREMAP_MAYMOVE specified)
if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMapped, 0))
return;
# endif // !XP_WIN
mappedSize_ = newMapped;
maxSize_ = Some(newMax.value());
return;
}
};
/* static */ WasmArrayRawBuffer*
WasmArrayRawBuffer::Allocate(uint32_t numBytes, Maybe<uint32_t> maxSize)
{
size_t mappedSize = wasm::LegalizeMapLength(maxSize.valueOr(numBytes));
MOZ_RELEASE_ASSERT(mappedSize <= SIZE_MAX - gc::SystemPageSize());
MOZ_RELEASE_ASSERT(numBytes <= maxSize.valueOr(UINT32_MAX));
MOZ_ASSERT(numBytes % gc::SystemPageSize() == 0);
MOZ_ASSERT(mappedSize % gc::SystemPageSize() == 0);
uint64_t mappedSizeWithHeader = mappedSize + gc::SystemPageSize();
uint64_t numBytesWithHeader = numBytes + gc::SystemPageSize();
# ifdef XP_WIN
void* data = VirtualAlloc(nullptr, (size_t) mappedSizeWithHeader, MEM_RESERVE, PAGE_NOACCESS);
if (!data)
return nullptr;
if (numBytes && !VirtualAlloc(data, numBytes, MEM_COMMIT, PAGE_READWRITE)) {
if (!VirtualAlloc(data, numBytesWithHeader, MEM_COMMIT, PAGE_READWRITE)) {
VirtualFree(data, 0, MEM_RELEASE);
return nullptr;
}
MemProfiler::SampleNative(data, numBytes);
return data;
}
static void
ReleaseWasmMappedMemory(void* base)
{
VirtualFree(base, 0, MEM_RELEASE);
MemProfiler::RemoveNative(base);
}
# else // XP_WIN
static void*
AllocateWasmMappedMemory(uint32_t numBytes)
{
void* data = MozTaggedAnonymousMmap(nullptr, wasm::MappedSize, PROT_NONE,
void* data = MozTaggedAnonymousMmap(nullptr, (size_t) mappedSizeWithHeader, PROT_NONE,
MAP_PRIVATE | MAP_ANON, -1, 0, "wasm-reserved");
if (data == MAP_FAILED)
return nullptr;
if (numBytes && mprotect(data, numBytes, PROT_READ | PROT_WRITE)) {
munmap(data, wasm::MappedSize);
// Note we will waste a page on zero-sized memories here
if (mprotect(data, numBytesWithHeader, PROT_READ | PROT_WRITE)) {
munmap(data, mappedSizeWithHeader);
return nullptr;
}
MemProfiler::SampleNative(data, numBytes);
# endif // !XP_WIN
MemProfiler::SampleNative(data, numBytesWithHeader);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + numBytes,
wasm::MappedSize - numBytes);
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)data + numBytesWithHeader,
mappedSizeWithHeader - numBytesWithHeader);
# endif
return data;
uint8_t* base = reinterpret_cast<uint8_t*>(data) + gc::SystemPageSize();
uint8_t* header = base - sizeof(WasmArrayRawBuffer);
auto rawBuf = new (header) WasmArrayRawBuffer(base, numBytes, maxSize, mappedSize);
return rawBuf;
}
static void
ReleaseWasmMappedMemory(void* base)
/* static */ void
WasmArrayRawBuffer::Release(void* mem)
{
munmap(base, wasm::MappedSize);
WasmArrayRawBuffer* header = (WasmArrayRawBuffer*)((uint8_t*)mem - sizeof(WasmArrayRawBuffer));
uint8_t* base = header->basePointer();
MOZ_RELEASE_ASSERT(header->mappedSize() <= SIZE_MAX - gc::SystemPageSize());
size_t mappedSizeWithHeader = header->mappedSize() + gc::SystemPageSize();
# ifdef XP_WIN
VirtualFree(base, 0, MEM_RELEASE);
# else // XP_WIN
munmap(base, mappedSizeWithHeader);
# endif // !XP_WIN
MemProfiler::RemoveNative(base);
# if defined(MOZ_VALGRIND) && defined(VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, wasm::MappedSize);
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(base, mappedSizeWithHeader);
# endif
}
# endif // !XP_WIN
#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
WasmArrayRawBuffer*
ArrayBufferObject::BufferContents::wasmBuffer() const
{
MOZ_RELEASE_ASSERT(kind_ == WASM_MAPPED);
return (WasmArrayRawBuffer*)(data_ - sizeof(WasmArrayRawBuffer));
}
#define ROUND_UP(v, a) ((v) % (a) == 0 ? (v) : v + a - ((v) % (a)))
/* static */ ArrayBufferObject*
ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, bool signalsForOOB)
ArrayBufferObject::createForWasm(JSContext* cx, uint32_t numBytes, Maybe<uint32_t> maxSize)
{
MOZ_ASSERT(numBytes % wasm::PageSize == 0);
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
if (signalsForOOB) {
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
void* data = AllocateWasmMappedMemory(numBytes);
if (!data) {
// First try to map the maximum requested memory
WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, maxSize);
if (!wasmBuf) {
#ifdef WASM_HUGE_MEMORY
ReportOutOfMemory(cx);
return nullptr;
#else
// If we fail, and have a maxSize, try to reserve the biggest chunk in
// the range [numBytes, maxSize) using log backoff.
if (!maxSize) {
ReportOutOfMemory(cx);
return nullptr;
}
BufferContents contents = BufferContents::create<WASM_MAPPED>(data);
ArrayBufferObject* buffer = ArrayBufferObject::create(cx, numBytes, contents);
if (!buffer) {
ReleaseWasmMappedMemory(data);
uint32_t cur = maxSize.value() / 2;
for (; cur > numBytes; cur = cur / 2) {
wasmBuf = WasmArrayRawBuffer::Allocate(numBytes, Some(ROUND_UP(cur, wasm::PageSize)));
if (wasmBuf)
break;
}
if (!wasmBuf) {
ReportOutOfMemory(cx);
return nullptr;
}
return buffer;
#else
MOZ_CRASH("shouldn't be using signal handlers for out-of-bounds");
// Try to grow our chunk as much as possible.
for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2)
wasmBuf->tryGrowMaxSize(ROUND_UP(d, wasm::PageSize));
#endif
}
auto* buffer = ArrayBufferObject::create(cx, numBytes);
if (!buffer)
void *data = wasmBuf->dataPointer();
BufferContents contents = BufferContents::create<WASM_MAPPED>(data);
ArrayBufferObject* buffer = ArrayBufferObject::create(cx, numBytes, contents);
if (!buffer) {
ReportOutOfMemory(cx);
WasmArrayRawBuffer::Release(data);
return nullptr;
}
buffer->setIsWasmMalloced();
return buffer;
}
/* static */ bool
ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool signalsForOOB)
ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer)
{
MOZ_ASSERT(buffer->byteLength() % wasm::PageSize == 0);
if (signalsForOOB) {
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
if (buffer->isWasmMapped())
return true;
// This can't happen except via the shell toggling signals.enabled.
if (buffer->isWasmMalloced()) {
JS_ReportError(cx, "can't access same buffer with and without signals enabled");
return false;
}
if (buffer->forInlineTypedObject()) {
JS_ReportError(cx, "ArrayBuffer can't be used by asm.js");
return false;
}
void* data = AllocateWasmMappedMemory(buffer->byteLength());
if (!data) {
ReportOutOfMemory(cx);
return false;
}
// Copy over the current contents of the typed array.
memcpy(data, buffer->dataPointer(), buffer->byteLength());
// Swap the new elements into the ArrayBufferObject. Mark the
// ArrayBufferObject so we don't do this again.
BufferContents newContents = BufferContents::create<WASM_MAPPED>(data);
buffer->changeContents(cx, newContents);
MOZ_ASSERT(data == buffer->dataPointer());
return true;
#else
MOZ_CRASH("shouldn't be using signal handlers for out-of-bounds");
#endif // ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
}
MOZ_RELEASE_ASSERT(wasm::HaveSignalHandlers());
if (buffer->forInlineTypedObject()) {
JS_ReportError(cx, "ArrayBuffer can't be used by asm.js");
return false;
}
#ifdef WASM_HUGE_MEMORY
if (buffer->isWasmMapped())
return true;
uint32_t length = buffer->byteLength();
// Since asm.js doesn't grow, assume max is same as length.
WasmArrayRawBuffer* wasmBuf = WasmArrayRawBuffer::Allocate(length, Some(length));
void* data = wasmBuf->dataPointer();
if (!data) {
// Note - we don't need the same backoff search as in WASM, since we don't over-map to
// allow growth in asm.js
ReportOutOfMemory(cx);
return false;
}
// Copy over the current contents of the typed array.
memcpy(data, buffer->dataPointer(), length);
// Swap the new elements into the ArrayBufferObject. Mark the
// ArrayBufferObject so we don't do this again.
BufferContents newContents = BufferContents::create<WASM_MAPPED>(data);
buffer->changeContents(cx, newContents);
MOZ_ASSERT(data == buffer->dataPointer());
return true;
#else
if (!buffer->ownsData()) {
BufferContents contents = AllocateArrayBufferContents(cx, buffer->byteLength());
if (!contents)
@ -524,7 +740,12 @@ ArrayBufferObject::prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buf
buffer->changeContents(cx, contents);
}
buffer->setIsWasmMalloced();
buffer->setIsAsmJSMalloced();
// On non-x64 architectures we can't yet emulate asm.js heap access.
MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmMappedSize());
MOZ_RELEASE_ASSERT(buffer->wasmActualByteLength() == buffer->wasmBoundsCheckLimit());
#endif
return true;
}
@ -561,7 +782,7 @@ ArrayBufferObject::releaseData(FreeOp* fop)
switch (bufferKind()) {
case PLAIN:
case WASM_MALLOCED:
case ASMJS_MALLOCED:
fop->free_(dataPointer());
break;
case MAPPED:
@ -569,11 +790,7 @@ ArrayBufferObject::releaseData(FreeOp* fop)
DeallocateMappedContent(dataPointer(), byteLength());
break;
case WASM_MAPPED:
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
ReleaseWasmMappedMemory(dataPointer());
#else
MOZ_CRASH("shouldn't have wasm mapped ArrayBuffer");
#endif
WasmArrayRawBuffer::Release(dataPointer());
break;
}
}
@ -599,6 +816,72 @@ ArrayBufferObject::setByteLength(uint32_t length)
setSlot(BYTE_LENGTH_SLOT, Int32Value(length));
}
size_t
ArrayBufferObject::wasmMappedSize() const
{
if (isWasmMapped()) {
return contents().wasmBuffer()->mappedSize();
} else {
// Can use byteLength() instead of actualByteLength since if !wasmMapped()
// then this is an asm.js buffer, and thus cannot grow.
return byteLength();
}
}
Maybe<uint32_t>
ArrayBufferObject::wasmMaxSize() const
{
if (isWasmMapped())
return contents().wasmBuffer()->maxSize();
else
return Some<uint32_t>(byteLength());
}
uint32_t
ArrayBufferObject::wasmBoundsCheckLimit() const
{
if (isWasmMapped())
return contents().wasmBuffer()->boundsCheckLimit();
else
return byteLength();
}
uint32_t
ArrayBufferObject::wasmActualByteLength() const
{
if (isWasmMapped())
return contents().wasmBuffer()->actualByteLength();
else
return byteLength();
}
uint32_t
ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
{
if (this->is<ArrayBufferObject>())
return this->as<ArrayBufferObject>().wasmBoundsCheckLimit();
// TODO: When SharedArrayBuffer can be used from wasm, this should be
// replaced by SharedArrayBufferObject::wasmBoundsCheckLimit().
return wasmMappedSize();
}
bool
ArrayBufferObject::growForWasm(uint32_t delta)
{
MOZ_ASSERT(isWasmMapped());
if (delta == 0)
return true;
// Should be guaranteed by Instance::growMemory
CheckedInt<uint32_t> curSize = wasmActualByteLength();
CheckedInt<uint32_t> newSize = curSize + CheckedInt<uint32_t>(delta) * wasm ::PageSize;
MOZ_RELEASE_ASSERT(newSize.isValid());
return contents().wasmBuffer()->growLength(delta * wasm::PageSize);
}
uint32_t
ArrayBufferObject::flags() const
{
@ -633,10 +916,8 @@ ArrayBufferObject::create(JSContext* cx, uint32_t nbytes, BufferContents content
size_t nAllocated = nbytes;
if (contents.kind() == MAPPED)
nAllocated = JS_ROUNDUP(nbytes, js::gc::SystemPageSize());
#ifdef ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB
else if (contents.kind() == WASM_MAPPED)
nAllocated = wasm::MappedSize;
#endif
nAllocated = contents.wasmBuffer()->allocatedBytes();
cx->zone()->updateMallocCounter(nAllocated);
}
} else {
@ -772,7 +1053,7 @@ ArrayBufferObject::addSizeOfExcludingThis(JSObject* obj, mozilla::MallocSizeOf m
case MAPPED:
info->objectsNonHeapElementsNormal += buffer.byteLength();
break;
case WASM_MALLOCED:
case ASMJS_MALLOCED:
info->objectsMallocHeapElementsAsmJS += mallocSizeOf(buffer.dataPointer());
break;
case WASM_MAPPED:

View File

@ -7,6 +7,8 @@
#ifndef vm_ArrayBufferObject_h
#define vm_ArrayBufferObject_h
#include "mozilla/Maybe.h"
#include "jsobj.h"
#include "builtin/TypedObjectConstants.h"
@ -19,6 +21,7 @@ typedef struct JSProperty JSProperty;
namespace js {
class ArrayBufferViewObject;
class WasmArrayRawBuffer;
// The inheritance hierarchy for the various classes relating to typed arrays
// is as follows.
@ -71,6 +74,9 @@ class ArrayBufferViewObject;
class ArrayBufferObjectMaybeShared;
uint32_t AnyArrayBufferByteLength(const ArrayBufferObjectMaybeShared* buf);
uint32_t WasmArrayBufferActualByteLength(const ArrayBufferObjectMaybeShared* buf);
size_t WasmArrayBufferMappedSize(const ArrayBufferObjectMaybeShared* buf);
bool WasmArrayBufferGrowForWasm(ArrayBufferObjectMaybeShared* buf, uint32_t delta);
ArrayBufferObjectMaybeShared& AsAnyArrayBuffer(HandleValue val);
class ArrayBufferObjectMaybeShared : public NativeObject
@ -80,6 +86,15 @@ class ArrayBufferObjectMaybeShared : public NativeObject
return AnyArrayBufferByteLength(this);
}
size_t wasmMappedSize() const {
return WasmArrayBufferMappedSize(this);
}
uint32_t wasmBoundsCheckLimit() const;
uint32_t wasmActualByteLength() const {
return WasmArrayBufferActualByteLength(this);
}
inline bool isDetached() const;
inline SharedMem<uint8_t*> dataPointerEither();
@ -131,7 +146,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
enum BufferKind {
PLAIN = 0, // malloced or inline data
WASM_MALLOCED = 1,
ASMJS_MALLOCED = 1,
WASM_MAPPED = 2,
MAPPED = 3,
@ -199,6 +214,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
BufferKind kind() const { return kind_; }
explicit operator bool() const { return data_ != nullptr; }
WasmArrayRawBuffer* wasmBuffer() const;
};
static const Class class_;
@ -255,7 +271,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
// Return whether the buffer is allocated by js_malloc and should be freed
// with js_free.
bool hasMallocedContents() const {
return (ownsData() && isPlain()) || isWasmMalloced();
return (ownsData() && isPlain()) || isAsmJSMalloced();
}
static void addSizeOfExcludingThis(JSObject* obj, mozilla::MallocSizeOf mallocSizeOf,
@ -289,6 +305,11 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
uint8_t* dataPointer() const;
SharedMem<uint8_t*> dataPointerShared() const;
uint32_t byteLength() const;
uint32_t wasmActualByteLength() const;
size_t wasmMappedSize() const;
uint32_t wasmBoundsCheckLimit() const;
mozilla::Maybe<uint32_t> wasmMaxSize() const;
MOZ_MUST_USE bool growForWasm(uint32_t delta);
BufferContents contents() const {
return BufferContents(dataPointer(), bufferKind());
}
@ -309,13 +330,14 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
BufferKind bufferKind() const { return BufferKind(flags() & BUFFER_KIND_MASK); }
bool isPlain() const { return bufferKind() == PLAIN; }
bool isWasmMapped() const { return bufferKind() == WASM_MAPPED; }
bool isWasmMalloced() const { return bufferKind() == WASM_MALLOCED; }
bool isWasm() const { return isWasmMapped() || isWasmMalloced(); }
bool isAsmJSMalloced() const { return bufferKind() == ASMJS_MALLOCED; }
bool isWasm() const { return isWasmMapped() || isAsmJSMalloced(); }
bool isMapped() const { return bufferKind() == MAPPED; }
bool isDetached() const { return flags() & DETACHED; }
static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t numBytes, bool signalsForOOB);
static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer, bool signalsForOOB);
static ArrayBufferObject* createForWasm(JSContext* cx, uint32_t numBytes,
mozilla::Maybe<uint32_t> maxSize);
static bool prepareForAsmJS(JSContext* cx, Handle<ArrayBufferObject*> buffer);
static void finalize(FreeOp* fop, JSObject* obj);
@ -351,7 +373,7 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
bool hasTypedObjectViews() const { return flags() & TYPED_OBJECT_VIEWS; }
void setIsWasmMalloced() { setFlags((flags() & ~KIND_MASK) | WASM_MALLOCED); }
void setIsAsmJSMalloced() { setFlags((flags() & ~KIND_MASK) | ASMJS_MALLOCED); }
void setIsDetached() { setFlags(flags() | DETACHED); }
void initialize(size_t byteLength, BufferContents contents, OwnsState ownsState) {

View File

@ -330,7 +330,7 @@ JSRuntime::init(uint32_t maxbytes, uint32_t maxNurseryBytes)
JS::ResetTimeZone();
#ifdef JS_SIMULATOR
simulator_ = js::jit::Simulator::Create();
simulator_ = js::jit::Simulator::Create(contextFromMainThread());
if (!simulator_)
return false;
#endif

View File

@ -920,7 +920,7 @@ struct JSRuntime : public JS::shadow::Runtime,
*/
JSCList onNewGlobalObjectWatchers;
#if defined(XP_DARWIN) && defined(ASMJS_MAY_USE_SIGNAL_HANDLERS)
#if defined(XP_DARWIN)
js::wasm::MachExceptionHandler wasmMachExceptionHandler;
#endif

View File

@ -73,7 +73,7 @@ MarkValidRegion(void* addr, size_t len)
#endif
}
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
#if defined(WASM_HUGE_MEMORY)
// Since this SharedArrayBuffer will likely be used for asm.js code, prepare it
// for asm.js by mapping the 4gb protected zone described in WasmTypes.h.
// Since we want to put the SharedArrayBuffer header immediately before the
@ -113,13 +113,13 @@ SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
uint32_t allocSize = SharedArrayAllocSize(length);
if (allocSize <= length)
return nullptr;
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
void* p = nullptr;
if (!IsValidAsmJSHeapLength(length)) {
p = MapMemory(allocSize, true);
if (!p)
return nullptr;
} else {
#ifdef WASM_HUGE_MEMORY
// Test >= to guard against the case where multiple extant runtimes
// race to allocate.
if (++numLive >= maxLive) {
@ -148,12 +148,12 @@ SharedArrayRawBuffer::New(JSContext* cx, uint32_t length)
VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)p + allocSize,
SharedArrayMappedSize() - allocSize);
# endif
}
#else
void* p = MapMemory(allocSize, true);
if (!p)
return nullptr;
p = MapMemory(allocSize, true);
if (!p)
return nullptr;
#endif
}
uint8_t* buffer = reinterpret_cast<uint8_t*>(p) + gc::SystemPageSize();
uint8_t* base = buffer - sizeof(SharedArrayRawBuffer);
SharedArrayRawBuffer* rawbuf = new (base) SharedArrayRawBuffer(buffer, length);
@ -182,10 +182,10 @@ SharedArrayRawBuffer::dropReference()
uint8_t* address = p.unwrap(/*safe - only reference*/);
uint32_t allocSize = SharedArrayAllocSize(this->length);
#if defined(ASMJS_MAY_USE_SIGNAL_HANDLERS_FOR_OOB)
if (!IsValidAsmJSHeapLength(this->length)) {
UnmapMemory(address, allocSize);
} else {
#if defined(WASM_HUGE_MEMORY)
numLive--;
UnmapMemory(address, SharedArrayMappedSize());
# if defined(MOZ_VALGRIND) \
@ -195,10 +195,10 @@ SharedArrayRawBuffer::dropReference()
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(address,
SharedArrayMappedSize());
# endif
}
#else
UnmapMemory(address, allocSize);
UnmapMemory(address, allocSize);
#endif
}
}
}