Bug 1910194 - wasm: Speculative inlining of call_ref. r=jseward
This commit uses the CallRefHint and the cache of exported function objects to speculatively inline a call_ref that is monomorphic.

Differential Revision: https://phabricator.services.mozilla.com/D217880
parent 104fba1b32
commit a9f56370e0
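
For orientation, here is a minimal standalone C++ sketch (not part of this patch, and not SpiderMonkey API; all names are illustrative) of the guarded control-flow diamond the patch emits for a monomorphic call_ref: compare the dynamic callee against the speculated one, take an inlined fast path on a match, and otherwise fall back to a generic indirect call.

// Hedged sketch: models the speculative call_ref diamond in plain C++.
#include <cstdio>

static int expectedCallee(int x) { return x + 1; }
static int otherCallee(int x) { return x * 2; }

// What the compiled call site does after this patch, conceptually.
static int callRefSite(int (*actualCallee)(int), int arg) {
  if (actualCallee == &expectedCallee) {
    // 'then' arm: the speculated callee's body, inlined at the call site.
    return arg + 1;
  }
  // 'else' arm: generic call_ref fallback for any other callee.
  return actualCallee(arg);
}

int main() {
  std::printf("%d\n", callRefSite(expectedCallee, 41));  // fast path -> 42
  std::printf("%d\n", callRefSite(otherCallee, 21));     // fallback  -> 42
}

The guard is a simple pointer identity check, which is why the patch below caches the result of `ref.func` on the instance: it gives the compiled code a single canonical function object to compare against.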
@@ -1940,6 +1940,20 @@ class FunctionCompiler {
     return true;
   }
 
+  // Load the slot on the instance where the result of `ref.func` is cached.
+  // This may be null if a function reference for this function has not been
+  // asked for yet.
+  MDefinition* loadCachedRefFunc(uint32_t funcIndex) {
+    uint32_t exportedFuncIndex = codeMeta().findFuncExportIndex(funcIndex);
+    MWasmLoadInstanceDataField* refFunc = MWasmLoadInstanceDataField::New(
+        alloc(), MIRType::WasmAnyRef,
+        codeMeta_.offsetOfFuncExportInstanceData(exportedFuncIndex) +
+            offsetof(FuncExportInstanceData, func),
+        true, instancePointer_);
+    curBlock_->add(refFunc);
+    return refFunc;
+  }
+
   MDefinition* loadTableField(uint32_t tableIndex, unsigned fieldOffset,
                               MIRType type) {
     uint32_t instanceDataOffset = wasm::Instance::offsetInData(
@@ -7720,9 +7734,95 @@ static bool EmitBrOnNonNull(FunctionCompiler& f) {
   return f.brOnNonNull(relativeDepth, values, type, condition);
 }
 
+// Speculatively inline a call_ref that is likely to target the expected
+// function index in this module. A fallback is always generated for the
+// case where the actual callee is not the speculated one. This leads to a
+// control flow diamond that is roughly:
+//
+// if (ref.func $expectedFuncIndex) == actualCalleeFunc:
+//   (call_inline $expectedFuncIndex)
+// else:
+//   (call_ref actualCalleeFunc)
+static bool EmitSpeculativeInlineCallRef(
+    FunctionCompiler& f, uint32_t bytecodeOffset, const FuncType& funcType,
+    uint32_t expectedFuncIndex, MDefinition* actualCalleeFunc,
+    const DefVector& args, DefVector* results) {
+  // Perform an up-front null check on the callee function reference.
+  if (!f.refAsNonNull(actualCalleeFunc)) {
+    return false;
+  }
+
+  // Load the cached value of `ref.func $expectedFuncIndex` for comparison
+  // against `actualCalleeFunc`. This cached value may be null if the `ref.func`
+  // for the expected function has not been executed in this runtime session.
+  //
+  // This is okay because we have already performed a null check on
+  // `actualCalleeFunc`, so comparing it against a null expected callee func
+  // will return false and fall back to the general case. This can only happen
+  // if we've deserialized a cached module in a different session and then run
+  // the code without ever acquiring a reference to the expected function. In
+  // that case, the expected callee could never be the target of this call_ref,
+  // so taking the fallback path is the right thing to do anyway.
+  MDefinition* expectedCalleeFunc = f.loadCachedRefFunc(expectedFuncIndex);
+  if (!expectedCalleeFunc) {
+    return false;
+  }
+
+  // Check whether the callee funcref we have is equal to the expected callee
+  // funcref we're inlining.
+  MDefinition* isExpectedCallee =
+      f.compare(actualCalleeFunc, expectedCalleeFunc, JSOp::Eq,
+                MCompare::Compare_WasmAnyRef);
+  if (!isExpectedCallee) {
+    return false;
+  }
+
+  // Start the 'then' block, which will have the inlined code.
+  MBasicBlock* elseBlock;
+  if (!f.branchAndStartThen(isExpectedCallee, &elseBlock)) {
+    return false;
+  }
+
+  // Inline the expected callee as we do with direct calls.
+  DefVector inlineResults;
+  if (!EmitInlineCall(f, funcType, expectedFuncIndex, args, &inlineResults)) {
+    return false;
+  }
+
+  // Push the results for joining with the 'else' block.
+  if (!f.pushDefs(inlineResults)) {
+    return false;
+  }
+
+  // Switch to the 'else' block, which will have the fallback `call_ref`.
+  if (!f.switchToElse(elseBlock, &elseBlock)) {
+    return false;
+  }
+
+  // Perform a general indirect call to the callee func we have.
+  CallCompileState call;
+  if (!EmitCallArgs(f, funcType, args, &call)) {
+    return false;
+  }
+
+  DefVector callResults;
+  if (!f.callRef(funcType, actualCalleeFunc, bytecodeOffset, call,
+                 &callResults)) {
+    return false;
+  }
+
+  // Push the results for joining with the 'then' block.
+  if (!f.pushDefs(callResults)) {
+    return false;
+  }
+
+  // Join the two branches together.
+  return f.joinIfElse(elseBlock, results);
+}
+
 static bool EmitCallRef(FunctionCompiler& f) {
-  uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
-  f.readCallRefHint();
+  uint32_t bytecodeOffset = f.readBytecodeOffset();
+  CallRefHint hint = f.readCallRefHint();
   const FuncType* funcType;
   MDefinition* callee;
   DefVector args;
@@ -7735,13 +7835,25 @@ static bool EmitCallRef(FunctionCompiler& f) {
     return true;
   }
 
+  if (hint.isInlineFunc() &&
+      f.shouldInlineCallDirect(hint.inlineFuncIndex())) {
+    DefVector results;
+    if (!EmitSpeculativeInlineCallRef(f, bytecodeOffset, *funcType,
+                                      hint.inlineFuncIndex(), callee, args,
+                                      &results)) {
+      return false;
+    }
+    f.iter().setResults(results.length(), results);
+    return true;
+  }
+
   CallCompileState call;
   if (!EmitCallArgs(f, *funcType, args, &call)) {
     return false;
   }
 
   DefVector results;
-  if (!f.callRef(*funcType, callee, lineOrBytecode, call, &results)) {
+  if (!f.callRef(*funcType, callee, bytecodeOffset, call, &results)) {
     return false;
   }
 
@@ -184,6 +184,7 @@ struct CodeMetadata : public ShareableBase<CodeMetadata> {
         debugHash(),
         funcDefsOffsetStart(UINT32_MAX),
         funcImportsOffsetStart(UINT32_MAX),
+        funcExportsOffsetStart(UINT32_MAX),
         typeDefsOffsetStart(UINT32_MAX),
         memoriesOffsetStart(UINT32_MAX),
         tablesOffsetStart(UINT32_MAX),
@@ -300,6 +301,12 @@ struct CodeMetadata : public ShareableBase<CodeMetadata> {
     return funcImportsOffsetStart + funcIndex * sizeof(FuncImportInstanceData);
   }
 
+  uint32_t offsetOfFuncExportInstanceData(uint32_t funcExportIndex) const {
+    MOZ_ASSERT(funcExportIndex < exportedFuncIndices.length());
+    return funcExportsOffsetStart +
+           funcExportIndex * sizeof(FuncExportInstanceData);
+  }
+
   uint32_t offsetOfTypeDefInstanceData(uint32_t typeIndex) const {
     MOZ_ASSERT(typeIndex < types->length());
     return typeDefsOffsetStart + typeIndex * sizeof(TypeDefInstanceData);
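
As a side note on the new offsetOfFuncExportInstanceData accessor and on loadCachedRefFunc above: the cached funcref slot is located with plain base-plus-stride arithmetic over the instance data. A self-contained sketch of that arithmetic follows; the struct layout here is a hypothetical stand-in, not the real FuncExportInstanceData.

// Hedged sketch of the offset arithmetic used by the patch above.
#include <cstddef>
#include <cstdint>
#include <cstdio>

struct FuncExportInstanceData {  // illustrative layout only
  void* func;                    // cached result of `ref.func`, may be null
};

// Byte offset of the cached funcref for a given export, mirroring
// offsetOfFuncExportInstanceData(i) + offsetof(FuncExportInstanceData, func).
static uint32_t offsetOfCachedFunc(uint32_t funcExportsOffsetStart,
                                   uint32_t funcExportIndex) {
  return funcExportsOffsetStart +
         funcExportIndex * sizeof(FuncExportInstanceData) +
         offsetof(FuncExportInstanceData, func);
}

int main() {
  // E.g. exports starting at byte 128: export #3's cached funcref slot is at
  // 128 + 3 * sizeof(entry) + 0, i.e. 152 on a typical 64-bit target.
  std::printf("%u\n", static_cast<unsigned>(offsetOfCachedFunc(128, 3)));
}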