Bug 1591047 part 2 - Split memCopy/memFill implementations for shared/non-shared modules. r=lth

Whether a module uses shared memory or not is fixed throughout its lifetime. We
can use this to specialize the implementation of memCopy/memFill and remove a
branch on the memory type. This will also be useful when acquiring the memory
length in a future commit, which will require different code per shared-ness.

Differential Revision: https://phabricator.services.mozilla.com/D50375

--HG--
extra : moz-landing-system : lando
This commit is contained in:
Ryan Hunt 2019-11-01 13:43:45 +00:00
parent ba528581e8
commit 5eb3ba3b97
8 changed files with 103 additions and 37 deletions

View File

@ -2742,6 +2742,8 @@ class BaseCompiler final : public BaseCompilerInterface {
operator MacroAssembler&() const { return masm; }
operator BaseRegAlloc&() { return ra; }
bool usesSharedMemory() const { return env_.usesSharedMemory(); }
private:
////////////////////////////////////////////////////////////
//
@ -10651,8 +10653,10 @@ bool BaseCompiler::emitMemOrTableCopy(bool isMem) {
if (isMem) {
MOZ_ASSERT(srcMemOrTableIndex == 0);
MOZ_ASSERT(dstMemOrTableIndex == 0);
if (!emitInstanceCall(lineOrBytecode, SASigMemCopy,
/*pushReturnedValue=*/false)) {
if (!emitInstanceCall(
lineOrBytecode,
usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy,
/*pushReturnedValue=*/false)) {
return false;
}
} else {
@ -10707,8 +10711,9 @@ bool BaseCompiler::emitMemFill() {
return true;
}
return emitInstanceCall(lineOrBytecode, SASigMemFill,
/*pushReturnedValue=*/false);
return emitInstanceCall(
lineOrBytecode, usesSharedMemory() ? SASigMemFillShared : SASigMemFill,
/*pushReturnedValue=*/false);
}
bool BaseCompiler::emitMemOrTableInit(bool isMem) {

View File

@ -122,6 +122,12 @@ const SymbolicAddressSignature SASigMemCopy = {SymbolicAddress::MemCopy,
_FailOnNegI32,
4,
{_PTR, _I32, _I32, _I32, _END}};
// Builtin signature for Instance::memCopyShared (see AddressOf:
// SymbolicAddress::MemCopyShared). Same arity/types as SASigMemCopy
// ((instance, dst, src, len) -> void, trap on negative i32), but bound to the
// shared-memory implementation.
const SymbolicAddressSignature SASigMemCopyShared = {
    SymbolicAddress::MemCopyShared,
    _VOID,
    _FailOnNegI32,
    4,
    {_PTR, _I32, _I32, _I32, _END}};
const SymbolicAddressSignature SASigDataDrop = {
SymbolicAddress::DataDrop, _VOID, _FailOnNegI32, 2, {_PTR, _I32, _END}};
const SymbolicAddressSignature SASigMemFill = {SymbolicAddress::MemFill,
@ -129,6 +135,12 @@ const SymbolicAddressSignature SASigMemFill = {SymbolicAddress::MemFill,
_FailOnNegI32,
4,
{_PTR, _I32, _I32, _I32, _END}};
// Builtin signature for Instance::memFillShared (see AddressOf:
// SymbolicAddress::MemFillShared). Same arity/types as SASigMemFill
// ((instance, offset, value, len) -> void, trap on negative i32), but bound
// to the shared-memory implementation.
const SymbolicAddressSignature SASigMemFillShared = {
    SymbolicAddress::MemFillShared,
    _VOID,
    _FailOnNegI32,
    4,
    {_PTR, _I32, _I32, _I32, _END}};
const SymbolicAddressSignature SASigMemInit = {
SymbolicAddress::MemInit,
_VOID,
@ -808,12 +820,18 @@ void* wasm::AddressOf(SymbolicAddress imm, ABIFunctionType* abiType) {
case SymbolicAddress::MemCopy:
*abiType = Args_General4;
return FuncCast(Instance::memCopy, *abiType);
case SymbolicAddress::MemCopyShared:
*abiType = Args_General4;
return FuncCast(Instance::memCopyShared, *abiType);
case SymbolicAddress::DataDrop:
*abiType = Args_General2;
return FuncCast(Instance::dataDrop, *abiType);
case SymbolicAddress::MemFill:
*abiType = Args_General4;
return FuncCast(Instance::memFill, *abiType);
case SymbolicAddress::MemFillShared:
*abiType = Args_General4;
return FuncCast(Instance::memFillShared, *abiType);
case SymbolicAddress::MemInit:
*abiType = Args_General5;
return FuncCast(Instance::memInit, *abiType);
@ -954,8 +972,10 @@ bool wasm::NeedsBuiltinThunk(SymbolicAddress sym) {
case SymbolicAddress::CoerceInPlace_JitEntry:
case SymbolicAddress::ReportInt64JSCall:
case SymbolicAddress::MemCopy:
case SymbolicAddress::MemCopyShared:
case SymbolicAddress::DataDrop:
case SymbolicAddress::MemFill:
case SymbolicAddress::MemFillShared:
case SymbolicAddress::MemInit:
case SymbolicAddress::TableCopy:
case SymbolicAddress::ElemDrop:

View File

@ -53,8 +53,10 @@ extern const SymbolicAddressSignature SASigWaitI32;
extern const SymbolicAddressSignature SASigWaitI64;
extern const SymbolicAddressSignature SASigWake;
extern const SymbolicAddressSignature SASigMemCopy;
extern const SymbolicAddressSignature SASigMemCopyShared;
extern const SymbolicAddressSignature SASigDataDrop;
extern const SymbolicAddressSignature SASigMemFill;
extern const SymbolicAddressSignature SASigMemFillShared;
extern const SymbolicAddressSignature SASigMemInit;
extern const SymbolicAddressSignature SASigTableCopy;
extern const SymbolicAddressSignature SASigElemDrop;

View File

@ -1352,10 +1352,12 @@ static const char* ThunkedNativeToDescription(SymbolicAddress func) {
case SymbolicAddress::ReportInt64JSCall:
return "jit call to int64 wasm function";
case SymbolicAddress::MemCopy:
case SymbolicAddress::MemCopyShared:
return "call to native memory.copy function";
case SymbolicAddress::DataDrop:
return "call to native data.drop function";
case SymbolicAddress::MemFill:
case SymbolicAddress::MemFillShared:
return "call to native memory.fill function";
case SymbolicAddress::MemInit:
return "call to native memory.init function";

View File

@ -437,14 +437,10 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
return int32_t(woken);
}
/* static */ int32_t Instance::memCopy(Instance* instance,
uint32_t dstByteOffset,
uint32_t srcByteOffset, uint32_t len) {
MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);
WasmMemoryObject* mem = instance->memory();
uint32_t memLen = mem->volatileMemoryLength();
template <typename T, typename F>
inline int32_t WasmMemoryCopy(T memBase, uint32_t memLen,
uint32_t dstByteOffset, uint32_t srcByteOffset,
uint32_t len, F memMove) {
if (len == 0) {
// Zero length copies that are out-of-bounds do not trap.
return 0;
@ -483,14 +479,7 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
// the trap that may happen without writing anything, the direction is not
// currently observable as there are no fences nor any read/write protect
// operation. So memmove is good enough to handle overlaps.
SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
if (mem->isShared()) {
AtomicOperations::memmoveSafeWhenRacy(
dataPtr + dstByteOffset, dataPtr + srcByteOffset, size_t(len));
} else {
uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
memmove(rawBuf + dstByteOffset, rawBuf + srcByteOffset, size_t(len));
}
memMove(memBase + dstByteOffset, memBase + srcByteOffset, size_t(len));
}
if (!mustTrap) {
@ -503,6 +492,34 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
return -1;
}
// memory.copy for non-shared memories. No other thread can observe the
// buffer, so a plain (overlap-safe) memmove on the unwrapped pointer is
// sufficient. Bounds checking and trap reporting happen in WasmMemoryCopy.
/* static */ int32_t Instance::memCopy(Instance* instance,
                                       uint32_t dstByteOffset,
                                       uint32_t srcByteOffset, uint32_t len) {
  MOZ_ASSERT(SASigMemCopy.failureMode == FailureMode::FailOnNegI32);

  WasmMemoryObject* memObj = instance->memory();
  uint8_t* rawBase = memObj->buffer().dataPointerEither().unwrap();
  uint32_t byteLength = memObj->volatileMemoryLength();

  return WasmMemoryCopy(rawBase, byteLength, dstByteOffset, srcByteOffset, len,
                        memmove);
}
// memory.copy for shared memories. Other threads may race on the buffer, so
// the copy must go through AtomicOperations::memmoveSafeWhenRacy on the
// SharedMem-tagged pointer rather than a plain memmove.
/* static */ int32_t Instance::memCopyShared(Instance* instance,
                                             uint32_t dstByteOffset,
                                             uint32_t srcByteOffset,
                                             uint32_t len) {
  // Assert against the signature this entry point is actually registered
  // under (SASigMemCopyShared) — the original asserted the non-shared
  // SASigMemCopy, which happens to share a failure mode but checks the
  // wrong constant.
  MOZ_ASSERT(SASigMemCopyShared.failureMode == FailureMode::FailOnNegI32);

  // Pin the template's F parameter to the racy-safe memmove's exact type so
  // overload/deduction picks the SharedMem flavor.
  typedef void (*RacyMemMove)(SharedMem<uint8_t*>, SharedMem<uint8_t*>, size_t);

  WasmMemoryObject* mem = instance->memory();
  uint32_t memLen = mem->volatileMemoryLength();

  return WasmMemoryCopy<SharedMem<uint8_t*>, RacyMemMove>(
      mem->buffer().dataPointerEither(), memLen, dstByteOffset, srcByteOffset,
      len, AtomicOperations::memmoveSafeWhenRacy);
}
/* static */ int32_t Instance::dataDrop(Instance* instance, uint32_t segIndex) {
MOZ_ASSERT(SASigDataDrop.failureMode == FailureMode::FailOnNegI32);
@ -523,13 +540,9 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
return 0;
}
/* static */ int32_t Instance::memFill(Instance* instance, uint32_t byteOffset,
uint32_t value, uint32_t len) {
MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);
WasmMemoryObject* mem = instance->memory();
uint32_t memLen = mem->volatileMemoryLength();
template <typename T, typename F>
inline int32_t WasmMemoryFill(T memBase, uint32_t memLen, uint32_t byteOffset,
uint32_t value, uint32_t len, F memSet) {
if (len == 0) {
// Zero length fills that are out-of-bounds do not trap.
return 0;
@ -554,14 +567,7 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
if (len > 0) {
// The required write direction is upward, but that is not currently
// observable as there are no fences nor any read/write protect operation.
SharedMem<uint8_t*> dataPtr = mem->buffer().dataPointerEither();
if (mem->isShared()) {
AtomicOperations::memsetSafeWhenRacy(dataPtr + byteOffset, int(value),
size_t(len));
} else {
uint8_t* rawBuf = dataPtr.unwrap(/*Unshared*/);
memset(rawBuf + byteOffset, int(value), size_t(len));
}
memSet(memBase + byteOffset, int(value), size_t(len));
}
if (!mustTrap) {
@ -574,6 +580,29 @@ static int32_t PerformWait(Instance* instance, uint32_t byteOffset, T value,
return -1;
}
// memory.fill for non-shared memories. The buffer is not visible to other
// threads, so an ordinary memset on the unwrapped pointer is safe. Bounds
// checking and trap reporting happen in WasmMemoryFill.
/* static */ int32_t Instance::memFill(Instance* instance, uint32_t byteOffset,
                                       uint32_t value, uint32_t len) {
  MOZ_ASSERT(SASigMemFill.failureMode == FailureMode::FailOnNegI32);

  WasmMemoryObject* memObj = instance->memory();
  uint8_t* rawBase = memObj->buffer().dataPointerEither().unwrap();
  uint32_t byteLength = memObj->volatileMemoryLength();

  return WasmMemoryFill(rawBase, byteLength, byteOffset, value, len, memset);
}
// memory.fill for shared memories. Other threads may race on the buffer, so
// the fill must go through AtomicOperations::memsetSafeWhenRacy on the
// SharedMem-tagged pointer rather than a plain memset.
/* static */ int32_t Instance::memFillShared(Instance* instance,
                                             uint32_t byteOffset,
                                             uint32_t value, uint32_t len) {
  // Assert against the signature this entry point is actually registered
  // under (SASigMemFillShared) — the original asserted the non-shared
  // SASigMemFill, which happens to share a failure mode but checks the
  // wrong constant.
  MOZ_ASSERT(SASigMemFillShared.failureMode == FailureMode::FailOnNegI32);

  WasmMemoryObject* mem = instance->memory();
  uint32_t memLen = mem->volatileMemoryLength();

  return WasmMemoryFill(mem->buffer().dataPointerEither(), memLen, byteOffset,
                        value, len, AtomicOperations::memsetSafeWhenRacy);
}
/* static */ int32_t Instance::memInit(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len,
uint32_t segIndex) {

View File

@ -198,9 +198,13 @@ class Instance {
static int32_t wake(Instance* instance, uint32_t byteOffset, int32_t count);
static int32_t memCopy(Instance* instance, uint32_t destByteOffset,
uint32_t srcByteOffset, uint32_t len);
static int32_t memCopyShared(Instance* instance, uint32_t destByteOffset,
uint32_t srcByteOffset, uint32_t len);
static int32_t dataDrop(Instance* instance, uint32_t segIndex);
static int32_t memFill(Instance* instance, uint32_t byteOffset,
uint32_t value, uint32_t len);
static int32_t memFillShared(Instance* instance, uint32_t byteOffset,
uint32_t value, uint32_t len);
static int32_t memInit(Instance* instance, uint32_t dstOffset,
uint32_t srcOffset, uint32_t len, uint32_t segIndex);
static int32_t tableCopy(Instance* instance, uint32_t dstOffset,

View File

@ -2880,7 +2880,8 @@ static bool EmitMemOrTableCopy(FunctionCompiler& f, bool isMem) {
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
const SymbolicAddressSignature& callee =
isMem ? SASigMemCopy : SASigTableCopy;
isMem ? (f.env().usesSharedMemory() ? SASigMemCopyShared : SASigMemCopy)
: SASigTableCopy;
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;
@ -2976,7 +2977,8 @@ static bool EmitMemFill(FunctionCompiler& f) {
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
const SymbolicAddressSignature& callee = SASigMemFill;
const SymbolicAddressSignature& callee =
f.env().usesSharedMemory() ? SASigMemFillShared : SASigMemFill;
CallCompileState args;
if (!f.passInstance(callee.argTypes[0], &args)) {
return false;

View File

@ -1922,8 +1922,10 @@ enum class SymbolicAddress {
WaitI64,
Wake,
MemCopy,
MemCopyShared,
DataDrop,
MemFill,
MemFillShared,
MemInit,
TableCopy,
ElemDrop,