Bug 1298202 - Baldr: also support growing memory when no max is specified (r=sunfish)

MozReview-Commit-ID: 27W5phJe8Q6
This commit is contained in:
Luke Wagner 2016-09-08 00:53:06 -05:00
parent c40c6322bf
commit 67399f9a58
15 changed files with 389 additions and 131 deletions

View File

@ -106,13 +106,14 @@ StaticallyLink(CodeSegment& cs, const LinkData& linkData, ExclusiveContext* cx)
}
static void
SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryObject memory)
SpecializeToMemory(uint8_t* prevMemoryBase, CodeSegment& cs, const Metadata& metadata,
ArrayBufferObjectMaybeShared& buffer)
{
#ifdef WASM_HUGE_MEMORY
MOZ_RELEASE_ASSERT(metadata.boundsChecks.empty());
MOZ_RELEASE_ASSERT(metadata.isAsmJS() || metadata.memoryAccesses.empty());
#else
uint32_t limit = memory->buffer().wasmBoundsCheckLimit();
uint32_t limit = buffer.wasmBoundsCheckLimit();
MOZ_RELEASE_ASSERT(IsValidBoundsCheckImmediate(limit));
for (const BoundsCheck& check : metadata.boundsChecks)
@ -120,13 +121,19 @@ SpecializeToMemory(CodeSegment& cs, const Metadata& metadata, HandleWasmMemoryOb
#endif
#if defined(JS_CODEGEN_X86)
uint8_t* base = memory->buffer().dataPointerEither().unwrap();
for (const MemoryAccess& access : metadata.memoryAccesses) {
// Patch memory pointer immediate.
void* addr = access.patchMemoryPtrImmAt(cs.base());
uint32_t disp = reinterpret_cast<uint32_t>(X86Encoding::GetPointer(addr));
MOZ_ASSERT(disp <= INT32_MAX);
X86Encoding::SetPointer(addr, (void*)(base + disp));
uint8_t* memoryBase = buffer.dataPointerEither().unwrap(/* code patching */);
if (prevMemoryBase != memoryBase) {
for (const MemoryAccess& access : metadata.memoryAccesses) {
void* patchAt = access.patchMemoryPtrImmAt(cs.base());
uint8_t* prevImm = (uint8_t*)X86Encoding::GetPointer(patchAt);
MOZ_ASSERT(prevImm >= prevMemoryBase);
uint32_t offset = prevImm - prevMemoryBase;
MOZ_ASSERT(offset <= INT32_MAX);
X86Encoding::SetPointer(patchAt, memoryBase + offset);
}
}
#endif
}
@ -234,7 +241,7 @@ CodeSegment::create(JSContext* cx,
memcpy(codeBase, bytecode.begin(), bytecode.length());
StaticallyLink(*cs, linkData, cx);
if (memory)
SpecializeToMemory(*cs, metadata, memory);
SpecializeToMemory(nullptr, *cs, metadata, memory->buffer());
}
if (!ExecutableAllocator::makeExecutable(codeBase, cs->codeLength())) {
@ -260,6 +267,16 @@ CodeSegment::~CodeSegment()
DeallocateExecutableMemory(bytes_, totalLength(), gc::SystemPageSize());
}
// Notification that the wasm memory this code was specialized to performed a
// moving grow: re-run SpecializeToMemory so that any memory-base immediates
// patched into the code are rebased from prevMemoryBase onto the buffer's
// new data pointer.
void
CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer)
{
// Temporarily make the executable pages writable for patching, and flush
// the instruction cache over the whole segment once patching is done.
AutoWritableJitCode awjc(base(), codeLength());
AutoFlushICache afc("CodeSegment::onMovingGrow");
AutoFlushICache::setRange(uintptr_t(base()), codeLength());
SpecializeToMemory(prevMemoryBase, *this, metadata, buffer);
}
size_t
FuncDefExport::serializedSize() const
{

View File

@ -100,6 +100,11 @@ class CodeSegment
bool containsCodePC(const void* pc) const {
return pc >= base() && pc < (base() + codeLength_);
}
// onMovingGrow must be called if the memory passed to 'create' performs a
// moving grow operation.
void onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, ArrayBufferObject& buffer);
};
// ShareableBytes is a ref-counted vector of bytes which are incrementally built
@ -517,6 +522,7 @@ class Code
const Metadata& metadata,
const ShareableBytes* maybeBytecode);
CodeSegment& segment() { return *segment_; }
const CodeSegment& segment() const { return *segment_; }
const Metadata& metadata() const { return *metadata_; }

View File

@ -292,42 +292,6 @@ Instance::currentMemory_i32(Instance* instance)
return instance->currentMemory();
}
uint32_t
Instance::growMemory(uint32_t delta)
{
MOZ_RELEASE_ASSERT(memory_);
// Using uint64_t to avoid worrying about overflows in safety comp.
uint64_t curNumPages = currentMemory();
uint64_t newNumPages = curNumPages + (uint64_t) delta;
if (metadata().maxMemoryLength) {
ArrayBufferObject &buf = memory_->buffer().as<ArrayBufferObject>();
// Guaranteed by instantiateMemory
MOZ_RELEASE_ASSERT(buf.wasmMaxSize() && buf.wasmMaxSize() <= metadata().maxMemoryLength);
if (newNumPages * wasm::PageSize > buf.wasmMaxSize().value())
return (uint32_t) -1;
// Try to grow the memory
if (!buf.growForWasm(delta))
return (uint32_t) -1;
} else {
return -1; // TODO: implement grow_memory w/o max when we add realloc
}
return curNumPages;
}
uint32_t
Instance::currentMemory()
{
MOZ_RELEASE_ASSERT(memory_);
uint32_t curMemByteLen = memory_->buffer().wasmActualByteLength();
MOZ_ASSERT(curMemByteLen % wasm::PageSize == 0);
return curMemByteLen / wasm::PageSize;
}
Instance::Instance(JSContext* cx,
Handle<WasmInstanceObject*> object,
UniqueCode code,
@ -411,6 +375,9 @@ Instance::Instance(JSContext* cx,
bool
Instance::init(JSContext* cx)
{
if (memory_ && memory_->movingGrowable() && !memory_->addMovingGrowObserver(cx, object_))
return false;
if (!metadata().sigIds.empty()) {
ExclusiveData<SigIdSet>::Guard lockedSigIdSet = sigIdSet.lock();
@ -590,6 +557,9 @@ Instance::object() const
bool
Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
{
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());
if (!cx->compartment()->wasm.ensureProfilingState(cx))
return false;
@ -806,6 +776,33 @@ Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
return true;
}
// Returns the current size of this instance's memory, in units of wasm pages
// (see currentMemory_i32, which routes the current_memory operator here).
uint32_t
Instance::currentMemory()
{
MOZ_RELEASE_ASSERT(memory_);
// The committed byte length is always a whole number of wasm pages.
uint32_t byteLength = memory_->buffer().wasmActualByteLength();
MOZ_ASSERT(byteLength % wasm::PageSize == 0);
return byteLength / wasm::PageSize;
}
// Implements grow_memory by delegating to WasmMemoryObject::grow, which
// returns the previous size in pages or uint32_t(-1) on failure.
uint32_t
Instance::growMemory(uint32_t delta)
{
MOZ_ASSERT(!isAsmJS());
uint32_t ret = memory_->grow(delta);
// If the grow moved the buffer, this instance must already have been
// notified via onMovingGrow, which updates the cached TLS memory base.
MOZ_RELEASE_ASSERT(tlsData_.memoryBase == memory_->buffer().dataPointerEither());
return ret;
}
// Observer callback invoked by WasmMemoryObject::grow when the underlying
// buffer had to be reallocated (a moving grow): refresh the memory base
// cached in the TLS data and re-patch memory-base immediates in the code.
void
Instance::onMovingGrow(uint8_t* prevMemoryBase)
{
MOZ_ASSERT(!isAsmJS());
ArrayBufferObject& buffer = memory_->buffer().as<ArrayBufferObject>();
tlsData_.memoryBase = buffer.dataPointer();
code_->segment().onMovingGrow(prevMemoryBase, metadata(), buffer);
}
void
Instance::deoptimizeImportExit(uint32_t funcImportIndex)
{

View File

@ -58,16 +58,17 @@ class Instance
static int32_t callImport_f64(Instance*, int32_t, int32_t, uint64_t*);
static uint32_t growMemory_i32(Instance* instance, uint32_t delta);
static uint32_t currentMemory_i32(Instance* instance);
bool callImport(JSContext* cx, uint32_t funcImportIndex, unsigned argc, const uint64_t* argv,
MutableHandleValue rval);
uint32_t growMemory(uint32_t delta);
uint32_t currentMemory();
// Only WasmInstanceObject can call the private trace function.
friend class js::WasmInstanceObject;
void tracePrivate(JSTracer* trc);
// Only WasmMemoryObject can call the private onMovingGrow notification.
friend class js::WasmMemoryObject;
void onMovingGrow(uint8_t* prevMemoryBase);
public:
Instance(JSContext* cx,
HandleWasmInstanceObject object,
@ -106,6 +107,12 @@ class Instance
MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);
// These methods implement their respective wasm operator but may also be
// called via the Memory JS API.
uint32_t currentMemory();
uint32_t growMemory(uint32_t delta);
// Initially, calls to imports in wasm code call out through the generic
// callImport method. If the imported callee gets JIT compiled and the types
// match up, callImport will patch the code to instead call through a thunk

View File

@ -1094,7 +1094,8 @@ class FunctionCompiler
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Register);
auto* ins = MWasmCall::NewBuiltinInstanceMethodCall(alloc(), desc, builtin,
call.instanceArg_, call.regArgs_,
ToMIRType(ret), call.spIncrement_);
ToMIRType(ret), call.spIncrement_,
call.tlsStackOffset_);
if (!ins)
return false;
@ -3006,7 +3007,10 @@ EmitGrowMemory(FunctionCompiler& f, uint32_t callOffset)
if (!f.passArg(delta, ValType::I32, &args))
return false;
f.finishCall(&args, PassTls::False, InterModule::False);
// As a short-cut, pretend this is an inter-module call so that any pinned
// heap pointer will be reloaded after the call. This hack will go away once
// we can stop pinning registers.
f.finishCall(&args, PassTls::True, InterModule::True);
MDefinition* ret;
if (!f.builtinInstanceMethodCall(SymbolicAddress::GrowMemory, args, ValType::I32, &ret))

View File

@ -18,6 +18,7 @@
#include "asmjs/WasmJS.h"
#include "mozilla/CheckedInt.h"
#include "mozilla/Maybe.h"
#include "asmjs/WasmCompile.h"
@ -35,6 +36,7 @@
using namespace js;
using namespace js::jit;
using namespace js::wasm;
using mozilla::CheckedInt;
using mozilla::Nothing;
bool
@ -710,13 +712,35 @@ wasm::ExportedFunctionToDefinitionIndex(JSFunction* fun)
// ============================================================================
// WebAssembly.Memory class and methods
// WasmMemoryObject now needs a finalizer so the lazily-allocated observers
// set can be freed (see WasmMemoryObject::finalize); all other hooks are
// unused.
const ClassOps WasmMemoryObject::classOps_ =
{
nullptr, /* addProperty */
nullptr, /* delProperty */
nullptr, /* getProperty */
nullptr, /* setProperty */
nullptr, /* enumerate */
nullptr, /* resolve */
nullptr, /* mayResolve */
WasmMemoryObject::finalize
};
const Class WasmMemoryObject::class_ =
{
"WebAssembly.Memory",
JSCLASS_DELAY_METADATA_BUILDER |
JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS)
JSCLASS_HAS_RESERVED_SLOTS(WasmMemoryObject::RESERVED_SLOTS) |
JSCLASS_FOREGROUND_FINALIZE,
&WasmMemoryObject::classOps_
};
// Finalizer: the observers set is heap-allocated on demand (see
// getOrCreateObservers) and stored as a private value in a reserved slot,
// so it must be freed here if it was ever created.
/* static */ void
WasmMemoryObject::finalize(FreeOp* fop, JSObject* obj)
{
WasmMemoryObject& memory = obj->as<WasmMemoryObject>();
if (memory.hasObservers())
fop->delete_(&memory.observers());
}
/* static */ WasmMemoryObject*
WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShared buffer,
HandleObject proto)
@ -727,6 +751,7 @@ WasmMemoryObject::create(ExclusiveContext* cx, HandleArrayBufferObjectMaybeShare
return nullptr;
obj->initReservedSlot(BUFFER_SLOT, ObjectValue(*buffer));
MOZ_ASSERT(!obj->hasObservers());
return obj;
}
@ -840,6 +865,104 @@ WasmMemoryObject::buffer() const
return getReservedSlot(BUFFER_SLOT).toObject().as<ArrayBufferObjectMaybeShared>();
}
// Whether the lazily-created set of moving-grow observers exists yet; the
// reserved slot is undefined until getOrCreateObservers allocates it.
bool
WasmMemoryObject::hasObservers() const
{
return !getReservedSlot(OBSERVERS_SLOT).isUndefined();
}
// Returns the observers set; only valid to call when hasObservers() is true.
WasmMemoryObject::WeakInstanceSet&
WasmMemoryObject::observers() const
{
MOZ_ASSERT(hasObservers());
return *reinterpret_cast<WeakInstanceSet*>(getReservedSlot(OBSERVERS_SLOT).toPrivate());
}
// Returns the observers set, allocating and initializing it on first use.
// Returns nullptr (after reporting OOM) on allocation failure.
WasmMemoryObject::WeakInstanceSet*
WasmMemoryObject::getOrCreateObservers(JSContext* cx)
{
if (!hasObservers()) {
auto observers = MakeUnique<WeakInstanceSet>(cx->zone(), InstanceSet());
if (!observers || !observers->init()) {
ReportOutOfMemory(cx);
return nullptr;
}
// Ownership transfers to the reserved slot; freed in finalize().
setReservedSlot(OBSERVERS_SLOT, PrivateValue(observers.release()));
}
return &observers();
}
// A memory may grow by moving (reallocating its buffer) only when it has no
// declared maximum size; under WASM_HUGE_MEMORY growth never moves.
bool
WasmMemoryObject::movingGrowable() const
{
#ifdef WASM_HUGE_MEMORY
return false;
#else
return !buffer().wasmMaxSize();
#endif
}
// Registers an instance to be notified (via Instance::onMovingGrow) whenever
// this memory performs a moving grow. Only valid for movingGrowable()
// memories. Returns false (after reporting OOM) on allocation failure.
bool
WasmMemoryObject::addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance)
{
MOZ_ASSERT(movingGrowable());
WeakInstanceSet* observers = getOrCreateObservers(cx);
if (!observers)
return false;
if (!observers->putNew(instance)) {
ReportOutOfMemory(cx);
return false;
}
return true;
}
// Grows this memory by 'delta' wasm pages. Returns the previous size in
// pages, or uint32_t(-1) on failure (size overflow, exceeding the declared
// maximum, or allocation failure).
uint32_t
WasmMemoryObject::grow(uint32_t delta)
{
ArrayBufferObject &buf = buffer().as<ArrayBufferObject>();
MOZ_ASSERT(buf.wasmActualByteLength() % PageSize == 0);
uint32_t oldNumPages = buf.wasmActualByteLength() / PageSize;
// Compute the new byte size with overflow checking.
CheckedInt<uint32_t> newSize = oldNumPages;
newSize += delta;
newSize *= PageSize;
if (!newSize.isValid())
return -1;
if (Maybe<uint32_t> maxSize = buf.wasmMaxSize()) {
// With a declared maximum the grow must both stay under the maximum and
// succeed in place (the buffer never moves in this case).
if (newSize.value() > maxSize.value())
return -1;
if (!buf.wasmGrowToSizeInPlace(newSize.value()))
return -1;
} else {
#ifdef WASM_HUGE_MEMORY
if (!buf.wasmGrowToSizeInPlace(newSize.value()))
return -1;
#else
// No maximum: the buffer may have to move. Record the old base and, if
// it moved, notify every observing instance so it can re-patch its code
// and cached TLS memory base.
MOZ_ASSERT(movingGrowable());
uint8_t* prevMemoryBase = buf.dataPointer();
if (!buf.wasmMovingGrowToSize(newSize.value()))
return -1;
if (hasObservers()) {
for (InstanceSet::Range r = observers().all(); !r.empty(); r.popFront())
r.front()->instance().onMovingGrow(prevMemoryBase);
}
#endif
}
return oldNumPages;
}
// ============================================================================
// WebAssembly.Table class and methods

View File

@ -160,9 +160,20 @@ class WasmInstanceObject : public NativeObject
class WasmMemoryObject : public NativeObject
{
static const unsigned BUFFER_SLOT = 0;
static const unsigned OBSERVERS_SLOT = 1;
static const ClassOps classOps_;
static void finalize(FreeOp* fop, JSObject* obj);
using InstanceSet = GCHashSet<ReadBarrieredWasmInstanceObject,
MovableCellHasher<ReadBarrieredWasmInstanceObject>,
SystemAllocPolicy>;
using WeakInstanceSet = JS::WeakCache<InstanceSet>;
bool hasObservers() const;
WeakInstanceSet& observers() const;
WeakInstanceSet* getOrCreateObservers(JSContext* cx);
public:
static const unsigned RESERVED_SLOTS = 1;
static const unsigned RESERVED_SLOTS = 2;
static const Class class_;
static const JSPropertySpec properties[];
static const JSFunctionSpec methods[];
@ -172,6 +183,10 @@ class WasmMemoryObject : public NativeObject
Handle<ArrayBufferObjectMaybeShared*> buffer,
HandleObject proto);
ArrayBufferObjectMaybeShared& buffer() const;
bool movingGrowable() const;
bool addMovingGrowObserver(JSContext* cx, WasmInstanceObject* instance);
uint32_t grow(uint32_t delta);
};
// The class of WebAssembly.Table. A WasmTableObject holds a refcount on a

View File

@ -0,0 +1,77 @@
// |jit-test| test-also-wasm-baseline
load(libdir + "wasm.js");
const Module = WebAssembly.Module;
const Instance = WebAssembly.Instance;
const Table = WebAssembly.Table;
const Memory = WebAssembly.Memory;
// Test for stale heap pointers after resize
//
// Magic offsets: 65532 = 64KiB - 4, the last aligned word of the initial
// 1-page memory; after grow_memory by 99 pages the memory is 100 pages
// (6553600 bytes) and 6553596 is its last aligned word. Each test expects
// 1 + 10 + 100 = 111, which requires stores/loads on both sides of the grow
// to hit the (possibly moved) memory correctly.
// Grow directly from builtin call:
assertEq(evalText(`(module
(memory 1)
(func $test (result i32)
(i32.store (i32.const 0) (i32.const 1))
(i32.store (i32.const 65532) (i32.const 10))
(grow_memory (i32.const 99))
(i32.store (i32.const 6553596) (i32.const 100))
(i32.add
(i32.load (i32.const 0))
(i32.add
(i32.load (i32.const 65532))
(i32.load (i32.const 6553596)))))
(export "test" $test)
)`).exports.test(), 111);
// Grow during call_import: the import re-enters wasm and grows the memory,
// so the caller's heap pointer must be reloaded after the call.
var exports = evalText(`(module
(import $imp "a" "imp")
(memory 1)
(func $grow (grow_memory (i32.const 99)))
(export "grow" $grow)
(func $test (result i32)
(i32.store (i32.const 0) (i32.const 1))
(i32.store (i32.const 65532) (i32.const 10))
(call $imp)
(i32.store (i32.const 6553596) (i32.const 100))
(i32.add
(i32.load (i32.const 0))
(i32.add
(i32.load (i32.const 65532))
(i32.load (i32.const 6553596)))))
(export "test" $test)
)`, {a:{imp() { exports.grow() }}}).exports;
// Run enough iterations to tier through baseline and ion warmup thresholds.
setJitCompilerOption("baseline.warmup.trigger", 2);
setJitCompilerOption("ion.warmup.trigger", 4);
for (var i = 0; i < 10; i++)
assertEq(exports.test(), 111);
// Grow during call_indirect: two modules share one imported memory; the
// callee (from another module) grows it, and the caller must not use a stale
// base afterwards.
var mem = new Memory({initial:1});
var tbl = new Table({initial:1, element:"anyfunc"});
var exports1 = evalText(`(module
(import "a" "mem" (memory 1))
(func $grow
(i32.store (i32.const 65532) (i32.const 10))
(grow_memory (i32.const 99))
(i32.store (i32.const 6553596) (i32.const 100)))
(export "grow" $grow)
)`, {a:{mem}}).exports;
var exports2 = evalText(`(module
(import "a" "tbl" (table 1))
(import "a" "mem" (memory 1))
(type $v2v (func))
(func $test (result i32)
(i32.store (i32.const 0) (i32.const 1))
(call_indirect $v2v (i32.const 0))
(i32.add
(i32.load (i32.const 0))
(i32.add
(i32.load (i32.const 65532))
(i32.load (i32.const 6553596)))))
(export "test" $test)
)`, {a:{tbl, mem}}).exports;
tbl.set(0, exports1.grow);
assertEq(exports2.test(), 111);

View File

@ -16,7 +16,7 @@
)
(export "grow_memory" $grow_memory)
(func $grow_memory (param i32)
(func $grow_memory (param i32) (result i32)
(grow_memory (get_local 0))
)
)
@ -33,4 +33,4 @@
(assert_trap (invoke "load" (i32.const 0)) "out of bounds memory access")
(assert_trap (invoke "store" (i32.const 0x80000000) (i32.const 13)) "out of bounds memory access")
(assert_trap (invoke "load" (i32.const 0x80000000)) "out of bounds memory access")
(assert_trap (invoke "grow_memory" (i32.const 0x80000000)) "memory size exceeds implementation limit")
(assert_return (invoke "grow_memory" (i32.const 0x80000000)) (i32.const -1))

View File

@ -1,4 +1,2 @@
// |jit-test| test-also-wasm-baseline
// TODO current_memory opcode + traps on OOB
quit();
var importedArgs = ['memory_trap.wast']; load(scriptdir + '../spec.js');

View File

@ -1,4 +1,2 @@
// |jit-test| test-also-wasm-baseline
// TODO memory resizing (you don't say)
quit();
var importedArgs = ['resizing.wast']; load(scriptdir + '../spec.js');

View File

@ -5445,15 +5445,16 @@ MWasmCall::NewBuiltinInstanceMethodCall(TempAllocator& alloc,
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
uint32_t spIncrement)
uint32_t spIncrement,
uint32_t tlsStackOffset)
{
auto callee = wasm::CalleeDesc::builtinInstanceMethod(builtin);
MWasmCall* call = MWasmCall::New(alloc, desc, callee, args, resultType, spIncrement,
MWasmCall::DontSaveTls, nullptr);
tlsStackOffset, nullptr);
if (!call)
return nullptr;
MOZ_ASSERT(instanceArg != ABIArg()); // instanceArg must be initialized.
MOZ_ASSERT(instanceArg != ABIArg());
call->instanceArg_ = instanceArg;
return call;
}

View File

@ -13688,9 +13688,13 @@ class MWasmCall final
static const uint32_t DontSaveTls = UINT32_MAX;
static MWasmCall* New(TempAllocator& alloc, const wasm::CallSiteDesc& desc,
const wasm::CalleeDesc& callee, const Args& args, MIRType resultType,
uint32_t spIncrement, uint32_t tlsStackOffset,
static MWasmCall* New(TempAllocator& alloc,
const wasm::CallSiteDesc& desc,
const wasm::CalleeDesc& callee,
const Args& args,
MIRType resultType,
uint32_t spIncrement,
uint32_t tlsStackOffset,
MDefinition* tableIndex = nullptr);
static MWasmCall* NewBuiltinInstanceMethodCall(TempAllocator& alloc,
@ -13699,7 +13703,8 @@ class MWasmCall final
const ABIArg& instanceArg,
const Args& args,
MIRType resultType,
uint32_t spIncrement);
uint32_t spIncrement,
uint32_t tlsStackOffset);
size_t numArgs() const {
return argRegs_.length();

View File

@ -60,6 +60,7 @@ using mozilla::DebugOnly;
using mozilla::CheckedInt;
using mozilla::Some;
using mozilla::Maybe;
using mozilla::Nothing;
using namespace js;
using namespace js::gc;
@ -503,72 +504,72 @@ class js::WasmArrayRawBuffer
}
#endif
MOZ_MUST_USE bool growLength(uint32_t deltaLength)
{
// This should be guaranteed by Instance::growMemory
MOZ_ASSERT(maxSize_);
MOZ_ASSERT(deltaLength % wasm::PageSize == 0);
MOZ_MUST_USE bool growToSizeInPlace(uint32_t newSize) {
MOZ_ASSERT(newSize >= actualByteLength());
MOZ_ASSERT_IF(maxSize(), newSize <= maxSize().value());
MOZ_ASSERT(newSize <= mappedSize());
CheckedInt<uint32_t> curLength = actualByteLength();
CheckedInt<uint32_t> newLength = curLength + deltaLength;
MOZ_RELEASE_ASSERT(newLength.isValid());
MOZ_ASSERT(newLength.value() <= maxSize_.value());
uint32_t delta = newSize - actualByteLength();
MOZ_ASSERT(delta % wasm::PageSize == 0);
uint8_t* dataEnd = dataPointer() + curLength.value();
MOZ_ASSERT(((intptr_t)dataEnd) % gc::SystemPageSize() == 0);
uint8_t* dataEnd = dataPointer() + actualByteLength();
MOZ_ASSERT(uintptr_t(dataEnd) % gc::SystemPageSize() == 0);
# ifdef XP_WIN
if (deltaLength && !VirtualAlloc(dataEnd, deltaLength, MEM_COMMIT, PAGE_READWRITE))
if (delta && !VirtualAlloc(dataEnd, delta, MEM_COMMIT, PAGE_READWRITE))
return false;
# else // XP_WIN
if (deltaLength && mprotect(dataEnd, deltaLength, PROT_READ | PROT_WRITE))
if (delta && mprotect(dataEnd, delta, PROT_READ | PROT_WRITE))
return false;
# endif // !XP_WIN
# if defined(MOZ_VALGRIND) && defined(VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE)
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, deltaLength);
VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE((unsigned char*)dataEnd, delta);
# endif
MemProfiler::SampleNative(dataEnd, deltaLength);
MemProfiler::SampleNative(dataEnd, delta);
length_ = newLength.value();
length_ = newSize;
return true;
}
#ifndef WASM_HUGE_MEMORY
// Try and grow the mapped region of memory. Does not changes current or
// max size. Does not move memory if no space to grow.
void tryGrowMaxSize(uint32_t deltaMaxSize)
{
MOZ_ASSERT(maxSize_);
MOZ_RELEASE_ASSERT(deltaMaxSize % wasm::PageSize == 0);
CheckedInt<uint32_t> newMaxSize = maxSize_.value() + deltaMaxSize;
MOZ_RELEASE_ASSERT(newMaxSize.isValid());
MOZ_RELEASE_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
size_t newMappedSize = wasm::ComputeMappedSize(newMaxSize.value());
MOZ_ASSERT(newMappedSize >= mappedSize_);
bool extendMappedSize(uint32_t maxSize) {
size_t newMappedSize = wasm::ComputeMappedSize(maxSize);
MOZ_ASSERT(mappedSize_ <= newMappedSize);
if (mappedSize_ == newMappedSize)
return;
return true;
# ifdef XP_WIN
uint8_t* mappedEnd = dataPointer() + mappedSize_;
uint32_t delta = newMappedSize - mappedSize_;
if (!VirtualAlloc(mappedEnd, delta, MEM_RESERVE, PAGE_NOACCESS))
return;
return false;
# elif defined(XP_LINUX)
// Note this will not move memory (no MREMAP_MAYMOVE specified)
if (MAP_FAILED == mremap(dataPointer(), mappedSize_, newMappedSize, 0))
return;
return false;
# else
// No mechanism for remapping on MacOS and other Unices. Luckily
// shouldn't need it here as most of these are 64-bit.
return;
return false;
# endif
mappedSize_ = newMappedSize;
return true;
}
// Best-effort: try to extend the mapped region by deltaMaxSize bytes and, on
// success, raise the stored maximum size accordingly. Does not change the
// current (committed) size and never moves the memory; if the mapping cannot
// be extended in place, the maximum is simply left unchanged.
void tryGrowMaxSizeInPlace(uint32_t deltaMaxSize) {
CheckedInt<uint32_t> newMaxSize = maxSize_.value();
newMaxSize += deltaMaxSize;
MOZ_ASSERT(newMaxSize.isValid());
MOZ_ASSERT(newMaxSize.value() % wasm::PageSize == 0);
if (!extendMappedSize(newMaxSize.value()))
return;
maxSize_ = Some(newMaxSize.value());
return;
}
#endif // WASM_HUGE_MEMORY
};
@ -686,7 +687,7 @@ ArrayBufferObject::createForWasm(JSContext* cx, uint32_t initialSize, Maybe<uint
uint32_t cur = maxSize.value() / 2;
for (; cur > initialSize; cur = cur / 2) {
for (; cur > initialSize; cur /= 2) {
wasmBuf = WasmArrayRawBuffer::Allocate(initialSize, Some(ROUND_UP(cur, wasm::PageSize)));
if (wasmBuf)
break;
@ -699,7 +700,7 @@ ArrayBufferObject::createForWasm(JSContext* cx, uint32_t initialSize, Maybe<uint
// Try to grow our chunk as much as possible.
for (size_t d = cur / 2; d >= wasm::PageSize; d /= 2)
wasmBuf->tryGrowMaxSize(ROUND_UP(d, wasm::PageSize));
wasmBuf->tryGrowMaxSizeInPlace(ROUND_UP(d, wasm::PageSize));
#endif
}
@ -866,17 +867,6 @@ ArrayBufferObject::wasmMaxSize() const
return Some<uint32_t>(byteLength());
}
#ifndef WASM_HUGE_MEMORY
uint32_t
ArrayBufferObject::wasmBoundsCheckLimit() const
{
if (isWasmMapped())
return contents().wasmBuffer()->boundsCheckLimit();
else
return byteLength();
}
#endif
uint32_t
ArrayBufferObject::wasmActualByteLength() const
{
@ -886,7 +876,42 @@ ArrayBufferObject::wasmActualByteLength() const
return byteLength();
}
// Grows the buffer to newSize bytes by committing additional pages at the end
// of the existing mapping; the data pointer never moves.
bool
ArrayBufferObject::wasmGrowToSizeInPlace(uint32_t newSize)
{
return contents().wasmBuffer()->growToSizeInPlace(newSize);
}
#ifndef WASM_HUGE_MEMORY
// Grows the buffer to newSize bytes, preferring an in-place grow but falling
// back to allocating a fresh raw buffer and copying the contents (a "moving"
// grow). After a move, callers must re-patch any cached data pointers (see
// WasmMemoryObject::grow and Instance::onMovingGrow).
bool
ArrayBufferObject::wasmMovingGrowToSize(uint32_t newSize)
{
// In-place growth is possible if the new size still fits under the current
// bounds-check limit, or if the OS lets us extend the mapping without
// moving it.
WasmArrayRawBuffer* curBuf = contents().wasmBuffer();
if (newSize <= curBuf->boundsCheckLimit() || curBuf->extendMappedSize(newSize))
return curBuf->growToSizeInPlace(newSize);
WasmArrayRawBuffer* newBuf = WasmArrayRawBuffer::Allocate(newSize, Nothing());
if (!newBuf)
return false;
void* newData = newBuf->dataPointer();
// Only the committed bytes of the old buffer carry over.
memcpy(newData, curBuf->dataPointer(), curBuf->actualByteLength());
BufferContents newContents = BufferContents::create<WASM_MAPPED>(newData);
changeContents(GetJSContextFromMainThread(), newContents);
return true;
}
// The immediate compared against in generated bounds checks: the raw wasm
// buffer's limit for wasm-mapped buffers, otherwise the plain byte length.
uint32_t
ArrayBufferObject::wasmBoundsCheckLimit() const
{
if (isWasmMapped())
return contents().wasmBuffer()->boundsCheckLimit();
else
return byteLength();
}
uint32_t
ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
{
@ -897,22 +922,6 @@ ArrayBufferObjectMaybeShared::wasmBoundsCheckLimit() const
}
#endif
bool
ArrayBufferObject::growForWasm(uint32_t delta)
{
MOZ_ASSERT(isWasmMapped());
if (delta == 0)
return true;
// Should be guaranteed by Instance::growMemory
CheckedInt<uint32_t> curSize = wasmActualByteLength();
CheckedInt<uint32_t> newSize = curSize + CheckedInt<uint32_t>(delta) * wasm ::PageSize;
MOZ_RELEASE_ASSERT(newSize.isValid());
return contents().wasmBuffer()->growLength(delta * wasm::PageSize);
}
uint32_t
ArrayBufferObject::flags() const
{

View File

@ -347,8 +347,9 @@ class ArrayBufferObject : public ArrayBufferObjectMaybeShared
uint32_t wasmActualByteLength() const;
size_t wasmMappedSize() const;
mozilla::Maybe<uint32_t> wasmMaxSize() const;
MOZ_MUST_USE bool growForWasm(uint32_t delta);
MOZ_MUST_USE bool wasmGrowToSizeInPlace(uint32_t newSize);
#ifndef WASM_HUGE_MEMORY
MOZ_MUST_USE bool wasmMovingGrowToSize(uint32_t newSize);
uint32_t wasmBoundsCheckLimit() const;
#endif