Bug 1313180 - Baldr: switch everything to using function indices (r=bbouvier)

MozReview-Commit-ID: 8q9PJZI9lHo

--HG--
extra : rebase_source : e494f2e176e32adee222bd85dea2da2c1c28af1a
Luke Wagner 2016-11-04 17:05:56 -05:00
parent 322851310b
commit ad907414db
29 changed files with 411 additions and 530 deletions
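
Before the per-file diffs, the shape of the change in brief: function definitions and function imports previously lived in separate index spaces (funcDefIndex vs. import index), and call sites, exports, and profiling all had to translate between them. After this patch there is a single function index space in which imports occupy the low indices and definitions follow. The sketch below is illustrative only (IndexSpace and its members are hypothetical names, not from the tree); it shows the invariant the rest of the diff relies on.

    #include <cassert>
    #include <cstdint>

    // Hypothetical model of the unified function index space: imports come
    // first, definitions follow, and funcIsImport() is just a range check.
    struct IndexSpace {
        uint32_t numImports; // indices [0, numImports) are imports
        uint32_t numDefs;    // indices [numImports, numFuncs()) are definitions

        uint32_t numFuncs() const { return numImports + numDefs; }
        bool funcIsImport(uint32_t funcIndex) const {
            assert(funcIndex < numFuncs());
            return funcIndex < numImports;
        }
    };

    int main() {
        IndexSpace space{1, 2};         // one import, two definitions
        assert(space.funcIsImport(0));  // the import
        assert(!space.funcIsImport(1)); // first definition: wasm-function[1]
        return 0;
    }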

View File

@ -13,7 +13,7 @@ var mod = wasmEvalText(code, {
// The middle one is for the wasm function.
var s = getBacktrace();
assertEq(s.split('\n').length, 4);
assertEq(s.split('\n')[1].startsWith("1 wasm-function[0]("), true);
assertEq(s.split('\n')[1].startsWith("1 wasm-function[1]("), true);
// Let's also run DumpBacktrace() to check that we don't crash.
backtrace();
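
The expected label shifts from wasm-function[0] to wasm-function[1] because the test module has one import (the function through which getBacktrace is reached): labels are now synthesized from the unified function index, so the import takes index 0 and the first definition takes index 1. A minimal sketch of that synthesis, using the same format string as the JS_smprintf call later in this patch:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t numImports = 1;                 // the test module's one import
        uint32_t firstDefFuncIndex = numImports; // definitions start after imports
        printf("wasm-function[%u]\n", (unsigned)firstDefFuncIndex); // wasm-function[1]
        return 0;
    }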

View File

@ -438,8 +438,8 @@ assertErrorMessage(() => i2v(5), Error, signatureMismatch);
disableSPSProfiling();
var inner = stack.indexOf("wasm-function[0]");
var outer = stack.indexOf("wasm-function[2]");
var inner = stack.indexOf("wasm-function[1]");
var outer = stack.indexOf("wasm-function[3]");
assertEq(inner === -1, false);
assertEq(outer === -1, false);
assertEq(inner < outer, true);

View File

@ -368,15 +368,16 @@ function runStackTraceTest(namesContent, expectedName) {
assertEq(result, expectedName);
};
runStackTraceTest(null, 'wasm-function[0]');
runStackTraceTest([{name: 'test'}], 'test');
runStackTraceTest([{name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
runStackTraceTest([{name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
runStackTraceTest([{name: 'test1'}, {name: 'test2'}], 'test1');
runStackTraceTest([{name: 'test☃'}], 'test☃');
runStackTraceTest([{name: 'te\xE0\xFF'}], 'te\xE0\xFF');
runStackTraceTest([], 'wasm-function[0]');
runStackTraceTest(null, 'wasm-function[1]');
runStackTraceTest([{name:'blah'}, {name: 'test'}], 'test');
runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{name: 'var1'}, {name: 'var2'}]}], 'test');
runStackTraceTest([{name:'blah'}, {name: 'test1'}, {name: 'test2'}], 'test1');
runStackTraceTest([{name:'blah'}, {name: 'test☃'}], 'test☃');
runStackTraceTest([{name:'blah'}, {name: 'te\xE0\xFF'}], 'te\xE0\xFF');
runStackTraceTest([{name:'blah'}], 'wasm-function[1]');
runStackTraceTest([], 'wasm-function[1]');
// Note that invalid content in the names section must not fail parsing
runStackTraceTest([{nameLen: 100, name: 'test'}], 'wasm-function[0]'); // invalid name size
runStackTraceTest([{name: 'test', locals: [{nameLen: 40, name: 'var1'}]}], 'wasm-function[0]'); // invalid variable name size
runStackTraceTest([{name: ''}], 'wasm-function[0]'); // empty name
runStackTraceTest([{name:'blah'}, {nameLen: 100, name: 'test'}], 'wasm-function[1]'); // invalid name size
runStackTraceTest([{name:'blah'}, {name: 'test', locals: [{nameLen: 40, name: 'var1'}]}], 'wasm-function[1]'); // invalid variable name size
runStackTraceTest([{name:'blah'}, {name: ''}], 'wasm-function[1]'); // empty name

View File

@ -7,7 +7,7 @@ assertEq(exp.code instanceof Uint8Array, true);
assertEq(Array.isArray(exp.segments), true);
var funcs = exp.segments.filter(s => s.kind === 0);
assertEq(funcs.length, 1);
assertEq(funcs[0].funcDefIndex, 0);
assertEq(funcs[0].funcIndex, 0);
assertEq(funcs[0].begin >= 0, true);
assertEq(funcs[0].begin <= funcs[0].funcBodyBegin, true);
assertEq(funcs[0].funcBodyBegin < funcs[0].funcBodyEnd, true);

View File

@ -218,7 +218,7 @@ WebAssembly.RuntimeError,
enableSPSProfiling();
enableSingleStepProfiling();
assertEq(e2.bar(), 42);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "0,>", "0,0,>", "0,>", ">", ""]);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "0,1,>", "1,>", ">", ""]);
disableSPSProfiling();
assertEq(e2.bar(), 42);
@ -228,7 +228,7 @@ WebAssembly.RuntimeError,
var e4 = new Instance(m2, {a:e3}).exports;
enableSingleStepProfiling();
assertEq(e4.bar(), 42);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "0,>", "0,0,>", "0,>", ">", ""]);
assertEqStacks(disableSingleStepProfiling(), ["", ">", "1,>", "0,1,>", "1,>", ">", ""]);
disableSPSProfiling();
assertEq(e4.bar(), 42);
})();
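
For reading these expected stacks: each string appears to list one label per frame, innermost first, where ">" is the entry trampoline and a number is a frame's function index (this reading of the harness format is an assumption). So the deepest stack "0,1,>" is function 0 called from function 1 called from the entry stub, and the renumbering from "0,0,>" reflects the caller definition now sitting at index 1 behind the import at index 0. A tiny sketch of that formatting:

    #include <cstdio>
    #include <string>
    #include <vector>

    // Join frame labels innermost-first with commas, as in "0,1,>".
    std::string formatStack(const std::vector<std::string>& innermostFirst) {
        std::string s;
        for (const std::string& frame : innermostFirst) {
            if (!s.empty())
                s += ',';
            s += frame;
        }
        return s;
    }

    int main() {
        printf("%s\n", formatStack({"0", "1", ">"}).c_str()); // prints 0,1,>
        return 0;
    }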

View File

@ -1507,8 +1507,8 @@ CodeGeneratorShared::emitWasmCallBase(LWasmCallBase* ins)
const wasm::CallSiteDesc& desc = mir->desc();
const wasm::CalleeDesc& callee = mir->callee();
switch (callee.which()) {
case wasm::CalleeDesc::Definition:
masm.call(desc, callee.funcDefIndex());
case wasm::CalleeDesc::Func:
masm.call(desc, callee.funcIndex());
break;
case wasm::CalleeDesc::Import:
masm.wasmCallImport(desc, callee);

View File

@ -715,7 +715,7 @@ class FunctionExtended : public JSFunction
* wasm/asm.js exported functions store the function index of the exported
* function in the original module.
*/
static const unsigned WASM_FUNC_DEF_INDEX_SLOT = 1;
static const unsigned WASM_FUNC_INDEX_SLOT = 1;
/*
* asm.js module functions store their WasmModuleObject in the first slot.

View File

@ -358,10 +358,13 @@ struct js::AsmJSMetadata : Metadata, AsmJSMetadataCacheablePod
ScriptSource* maybeScriptSource() const override {
return scriptSource.get();
}
bool getFuncDefName(JSContext* cx, const Bytes*, uint32_t funcDefIndex,
TwoByteName* name) const override
bool getFuncName(JSContext* cx, const Bytes*, uint32_t funcIndex,
TwoByteName* name) const override
{
const char* p = asmJSFuncNames[funcDefIndex].get();
// asm.js doesn't allow exporting imports or putting imports in tables
MOZ_ASSERT(funcIndex >= AsmJSFirstDefFuncIndex);
const char* p = asmJSFuncNames[funcIndex - AsmJSFirstDefFuncIndex].get();
UTF8Chars utf8(p, strlen(p));
size_t twoByteLength;
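
The subtraction works because asm.js reserves a fixed block of low function indices for its imports; the name table only has entries for definitions, so it is indexed relative to AsmJSFirstDefFuncIndex. A sketch of that arithmetic (the constant's value below is made up for illustration):

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // Hypothetical value; the real constant is defined by the asm.js frontend.
    static const uint32_t AsmJSFirstDefFuncIndex = 16;

    // Names exist per definition only, so translate the function index into a
    // definition-relative index before indexing the table.
    const std::string& lookupName(const std::vector<std::string>& asmJSFuncNames,
                                  uint32_t funcIndex) {
        // asm.js doesn't allow exporting imports or putting imports in tables,
        // so only definitions ever reach this lookup.
        assert(funcIndex >= AsmJSFirstDefFuncIndex);
        return asmJSFuncNames[funcIndex - AsmJSFirstDefFuncIndex];
    }

    int main() {
        std::vector<std::string> names = {"f", "g"};
        assert(lookupName(names, AsmJSFirstDefFuncIndex + 1) == "g");
        return 0;
    }
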
@ -1840,8 +1843,8 @@ class MOZ_STACK_CLASS ModuleValidator
auto genData = MakeUnique<ModuleGeneratorData>(ModuleKind::AsmJS);
if (!genData ||
!genData->sigs.resize(MaxSigs) ||
!genData->funcDefSigs.resize(MaxFuncs) ||
!genData->funcImports.resize(MaxImports) ||
!genData->funcSigs.resize(MaxFuncs) ||
!genData->funcImportGlobalDataOffsets.resize(AsmJSMaxImports) ||
!genData->tables.resize(MaxTables) ||
!genData->asmJSSigToTableIndex.resize(MaxSigs))
{
@ -2137,8 +2140,8 @@ class MOZ_STACK_CLASS ModuleValidator
return false;
// Declare which function is exported, which gives us an index into the
// module FuncDefExportVector.
if (!mg_.addFuncDefExport(Move(fieldChars), mg_.numFuncImports() + func.index()))
// module FuncExportVector.
if (!mg_.addFuncExport(Move(fieldChars), func.index()))
return false;
// The exported function might have already been exported in which case
@ -2151,10 +2154,10 @@ class MOZ_STACK_CLASS ModuleValidator
uint32_t sigIndex;
if (!declareSig(Move(sig), &sigIndex))
return false;
uint32_t funcIndex = numFunctions();
uint32_t funcIndex = AsmJSFirstDefFuncIndex + numFunctions();
if (funcIndex >= MaxFuncs)
return failCurrentOffset("too many functions");
mg_.initFuncDefSig(funcIndex, sigIndex);
mg_.initFuncSig(funcIndex, sigIndex);
Global* global = validationLifo_.new_<Global>(Global::Function);
if (!global)
return false;
@ -2190,23 +2193,23 @@ class MOZ_STACK_CLASS ModuleValidator
table.define();
return mg_.initSigTableElems(table.sigIndex(), Move(elems));
}
bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* importIndex) {
bool declareImport(PropertyName* name, Sig&& sig, unsigned ffiIndex, uint32_t* funcIndex) {
ImportMap::AddPtr p = importMap_.lookupForAdd(NamedSig::Lookup(name, sig));
if (p) {
*importIndex = p->value();
*funcIndex = p->value();
return true;
}
*importIndex = asmJSMetadata_->asmJSImports.length();
if (*importIndex >= MaxImports)
*funcIndex = asmJSMetadata_->asmJSImports.length();
if (*funcIndex > AsmJSMaxImports)
return failCurrentOffset("too many imports");
if (!asmJSMetadata_->asmJSImports.emplaceBack(ffiIndex))
return false;
uint32_t sigIndex;
if (!declareSig(Move(sig), &sigIndex))
return false;
if (!mg_.initImport(*importIndex, sigIndex))
if (!mg_.initImport(*funcIndex, sigIndex))
return false;
return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *importIndex);
return importMap_.add(p, NamedSig(name, mg_.sig(sigIndex)), *funcIndex);
}
bool tryConstantAccess(uint64_t start, uint64_t width) {
@ -2313,8 +2316,10 @@ class MOZ_STACK_CLASS ModuleValidator
Func* lookupFunction(PropertyName* name) {
if (GlobalMap::Ptr p = globalMap_.lookup(name)) {
Global* value = p->value();
if (value->which() == Global::Function)
return functions_[value->funcIndex()];
if (value->which() == Global::Function) {
MOZ_ASSERT(value->funcIndex() >= AsmJSFirstDefFuncIndex);
return functions_[value->funcIndex() - AsmJSFirstDefFuncIndex];
}
}
return nullptr;
}
@ -4753,7 +4758,7 @@ CheckFunctionSignature(ModuleValidator& m, ParseNode* usepn, Sig&& sig, Property
return m.addFunction(name, usepn->pn_pos.begin, Move(sig), func);
}
if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcDefSig(existing->index())))
if (!CheckSignatureAgainstExisting(m, usepn, sig, m.mg().funcSig(existing->index())))
return false;
*func = existing;
@ -4790,7 +4795,6 @@ CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calle
if (!f.writeCall(callNode, Expr::Call))
return false;
// Function's index, to find out the function's entry
if (!f.encoder().writeVarU32(callee->index()))
return false;
@ -4908,15 +4912,14 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, Type
Sig sig(Move(args), ret.canonicalToExprType());
uint32_t importIndex;
if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &importIndex))
uint32_t funcIndex;
if (!f.m().declareImport(calleeName, Move(sig), ffiIndex, &funcIndex))
return false;
if (!f.writeCall(callNode, Expr::OldCallImport))
if (!f.writeCall(callNode, Expr::Call))
return false;
// Import index
if (!f.encoder().writeVarU32(importIndex))
if (!f.encoder().writeVarU32(funcIndex))
return false;
*type = Type::ret(ret);
@ -7208,7 +7211,7 @@ CheckFuncPtrTable(ModuleValidator& m, ParseNode* var)
if (!func)
return m.fail(elem, "function-pointer table's elements must be names of functions");
const Sig& funcSig = m.mg().funcDefSig(func->index());
const Sig& funcSig = m.mg().funcSig(func->index());
if (sig) {
if (*sig != funcSig)
return m.fail(elem, "all functions in table must have same signature");
@ -7268,14 +7271,11 @@ CheckModuleExportFunction(ModuleValidator& m, ParseNode* pn, PropertyName* maybe
return m.fail(pn, "expected name of exported function");
PropertyName* funcName = pn->name();
const ModuleValidator::Global* global = m.lookupGlobal(funcName);
if (!global)
return m.failName(pn, "exported function name '%s' not found", funcName);
const ModuleValidator::Func* func = m.lookupFunction(funcName);
if (!func)
return m.failName(pn, "function '%s' not found", funcName);
if (global->which() != ModuleValidator::Global::Function)
return m.failName(pn, "'%s' is not a function", funcName);
return m.addExportField(pn, m.function(global->funcIndex()), maybeFieldName);
return m.addExportField(pn, *func, maybeFieldName);
}
static bool
@ -8929,7 +8929,7 @@ js::AsmJSFunctionToString(JSContext* cx, HandleFunction fun)
MOZ_ASSERT(IsAsmJSFunction(fun));
const AsmJSMetadata& metadata = ExportedFunctionToInstance(fun).metadata().asAsmJS();
const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToDefinitionIndex(fun));
const AsmJSExport& f = metadata.lookupAsmJSExport(ExportedFunctionToFuncIndex(fun));
uint32_t begin = metadata.srcStart + f.startOffsetInModule();
uint32_t end = metadata.srcStart + f.endOffsetInModule();

View File

@ -1974,7 +1974,7 @@ class BaseCompiler
void beginFunction() {
JitSpew(JitSpew_Codegen, "# Emitting wasm baseline code");
SigIdDesc sigId = mg_.funcDefSigs[func_.defIndex()]->id;
SigIdDesc sigId = mg_.funcSigs[func_.index()]->id;
GenerateFunctionPrologue(masm, localSize_, sigId, prologueTrapOffset_,
&compileResults_.offsets());
@ -2306,10 +2306,10 @@ class BaseCompiler
}
}
void callDefinition(uint32_t funcDefIndex, const FunctionCall& call)
void callDefinition(uint32_t funcIndex, const FunctionCall& call)
{
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::FuncDef);
masm.call(desc, funcDefIndex);
CallSiteDesc desc(call.lineOrBytecode, CallSiteDesc::Func);
masm.call(desc, funcIndex);
}
void callSymbolic(SymbolicAddress callee, const FunctionCall& call) {
@ -3592,9 +3592,7 @@ class BaseCompiler
MOZ_MUST_USE bool emitBrTable();
MOZ_MUST_USE bool emitReturn();
MOZ_MUST_USE bool emitCallArgs(const ValTypeVector& args, FunctionCall& baselineCall);
MOZ_MUST_USE bool emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex);
MOZ_MUST_USE bool emitCall();
MOZ_MUST_USE bool emitOldCallImport();
MOZ_MUST_USE bool emitCallIndirect(bool oldStyle);
MOZ_MUST_USE bool emitCommonMathCall(uint32_t lineOrBytecode, SymbolicAddress callee,
ValTypeVector& signature, ExprType retType);
@ -5348,72 +5346,28 @@ BaseCompiler::pushReturned(const FunctionCall& call, ExprType type)
// parallel assignment to the argument registers or onto the stack
// for outgoing arguments. A sync() is just simpler.
bool
BaseCompiler::emitCallImportCommon(uint32_t lineOrBytecode, uint32_t funcImportIndex)
{
const FuncImportGenDesc& funcImport = mg_.funcImports[funcImportIndex];
const Sig& sig = *funcImport.sig;
if (deadCode_)
return true;
sync();
uint32_t numArgs = sig.args().length();
size_t stackSpace = stackConsumed(numArgs);
FunctionCall baselineCall(lineOrBytecode);
beginCall(baselineCall, UseABI::Wasm, InterModule::True);
if (!emitCallArgs(sig.args(), baselineCall))
return false;
if (!iter_.readCallReturn(sig.ret()))
return false;
callImport(funcImport.globalDataOffset, baselineCall);
endCall(baselineCall);
// TODO / OPTIMIZE: It would be better to merge this freeStack()
// into the one in endCall, if we can.
popValueStackBy(numArgs);
masm.freeStack(stackSpace);
if (!IsVoid(sig.ret()))
pushReturned(baselineCall, sig.ret());
return true;
}
bool
BaseCompiler::emitCall()
{
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t calleeIndex;
if (!iter_.readCall(&calleeIndex))
uint32_t funcIndex;
if (!iter_.readCall(&funcIndex))
return false;
// For asm.js, imports are not part of the function index space so in
// these cases firstFuncDefIndex is fixed to 0, even if there are
// function imports.
if (calleeIndex < mg_.firstFuncDefIndex)
return emitCallImportCommon(lineOrBytecode, calleeIndex);
if (deadCode_)
return true;
sync();
uint32_t funcDefIndex = calleeIndex - mg_.firstFuncDefIndex;
const Sig& sig = *mg_.funcDefSigs[funcDefIndex];
const Sig& sig = *mg_.funcSigs[funcIndex];
bool import = mg_.funcIsImport(funcIndex);
uint32_t numArgs = sig.args().length();
size_t stackSpace = stackConsumed(numArgs);
FunctionCall baselineCall(lineOrBytecode);
beginCall(baselineCall, UseABI::Wasm, InterModule::False);
beginCall(baselineCall, UseABI::Wasm, import ? InterModule::True : InterModule::False);
if (!emitCallArgs(sig.args(), baselineCall))
return false;
@ -5421,7 +5375,10 @@ BaseCompiler::emitCall()
if (!iter_.readCallReturn(sig.ret()))
return false;
callDefinition(funcDefIndex, baselineCall);
if (import)
callImport(mg_.funcImportGlobalDataOffsets[funcIndex], baselineCall);
else
callDefinition(funcIndex, baselineCall);
endCall(baselineCall);
@ -5437,23 +5394,6 @@ BaseCompiler::emitCall()
return true;
}
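
The net effect of the emitCall changes above: one opcode and one index space, with the import/definition split reduced to a branch inside emitCall instead of a separate Expr::OldCallImport path. A condensed, hypothetical sketch (real emission goes through the MacroAssembler and FuncImportTls, which this omits):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct MiniCompiler {
        // One slot per import; any function index below this length is an import.
        std::vector<uint32_t> funcImportGlobalDataOffsets;

        bool funcIsImport(uint32_t funcIndex) const {
            return funcIndex < funcImportGlobalDataOffsets.size();
        }
        void emitCall(uint32_t funcIndex) {
            if (funcIsImport(funcIndex)) {
                // Imports are reached through their global-data (tls) slot.
                printf("call import, globalDataOffset=%u\n",
                       (unsigned)funcImportGlobalDataOffsets[funcIndex]);
            } else {
                // Definitions are called directly by function index.
                printf("direct call, funcIndex=%u\n", (unsigned)funcIndex);
            }
        }
    };

    int main() {
        MiniCompiler c{{64, 96}}; // two imports at made-up global-data offsets
        c.emitCall(0);            // import path
        c.emitCall(2);            // definition path
        return 0;
    }
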
bool
BaseCompiler::emitOldCallImport()
{
MOZ_ASSERT(!mg_.firstFuncDefIndex);
uint32_t lineOrBytecode = readCallSiteLineOrBytecode();
uint32_t funcImportIndex;
if (!iter_.readCall(&funcImportIndex))
return false;
if (deadCode_)
return true;
return emitCallImportCommon(lineOrBytecode, funcImportIndex);
}
bool
BaseCompiler::emitCallIndirect(bool oldStyle)
{
@ -6671,8 +6611,6 @@ BaseCompiler::emitBody()
CHECK_NEXT(emitCallIndirect(/* oldStyle = */ false));
case Expr::OldCallIndirect:
CHECK_NEXT(emitCallIndirect(/* oldStyle = */ true));
case Expr::OldCallImport:
CHECK_NEXT(emitOldCallImport());
// Locals and globals
case Expr::GetLocal:

View File

@ -389,11 +389,6 @@ enum class Expr : uint32_t // fix type so we can cast from any u16 in decoder
// asm.js-style call_indirect with the callee evaluated first.
OldCallIndirect,
// asm.js-style call to an import; asm.js imports are not (and cannot be,
// due to streaming compilation and lazy discovery) injected into the
// function index space so Expr::Call cannot be used.
OldCallImport,
// Atomics
I32AtomicsCompareExchange,
I32AtomicsExchange,

View File

@ -353,7 +353,6 @@ wasm::Classify(Expr expr)
case Expr::TeeGlobal:
return ExprKind::TeeGlobal;
case Expr::Call:
case Expr::OldCallImport:
return ExprKind::Call;
case Expr::CallIndirect:
return ExprKind::CallIndirect;

View File

@ -92,7 +92,7 @@ class AstDecodeContext
private:
AstModule& module_;
AstIndexVector funcSigs_;
AstIndexVector funcDefSigs_;
AstDecodeExprIter *iter_;
AstDecodeStack exprs_;
DepthStack depths_;
@ -110,7 +110,7 @@ class AstDecodeContext
d(d),
generateNames(generateNames),
module_(module),
funcSigs_(lifo),
funcDefSigs_(lifo),
iter_(nullptr),
exprs_(lifo),
depths_(lifo),
@ -121,7 +121,7 @@ class AstDecodeContext
{}
AstModule& module() { return module_; }
AstIndexVector& funcSigs() { return funcSigs_; }
AstIndexVector& funcDefSigs() { return funcDefSigs_; }
AstDecodeExprIter& iter() { return *iter_; }
AstDecodeStack& exprs() { return exprs_; }
DepthStack& depths() { return depths_; }
@ -322,8 +322,8 @@ AstDecodeDrop(AstDecodeContext& c)
static bool
AstDecodeCall(AstDecodeContext& c)
{
uint32_t calleeIndex;
if (!c.iter().readCall(&calleeIndex))
uint32_t funcIndex;
if (!c.iter().readCall(&funcIndex))
return false;
if (!c.iter().inReachableCode())
@ -331,18 +331,18 @@ AstDecodeCall(AstDecodeContext& c)
uint32_t sigIndex;
AstRef funcRef;
if (calleeIndex < c.module().funcImportNames().length()) {
AstImport* import = c.module().imports()[calleeIndex];
if (funcIndex < c.module().funcImportNames().length()) {
AstImport* import = c.module().imports()[funcIndex];
sigIndex = import->funcSig().index();
funcRef = AstRef(import->name());
} else {
uint32_t funcDefIndex = calleeIndex - c.module().funcImportNames().length();
if (funcDefIndex >= c.funcSigs().length())
uint32_t funcDefIndex = funcIndex - c.module().funcImportNames().length();
if (funcDefIndex >= c.funcDefSigs().length())
return c.iter().fail("callee index out of range");
sigIndex = c.funcSigs()[funcDefIndex];
sigIndex = c.funcDefSigs()[funcDefIndex];
if (!AstDecodeGenerateRef(c, AstName(u"func"), calleeIndex, &funcRef))
if (!AstDecodeGenerateRef(c, AstName(u"func"), funcIndex, &funcRef))
return false;
}
@ -1529,11 +1529,11 @@ AstDecodeFunctionSection(AstDecodeContext& c)
if (numDecls > MaxFuncs)
return c.d.fail("too many functions");
if (!c.funcSigs().resize(numDecls))
if (!c.funcDefSigs().resize(numDecls))
return false;
for (uint32_t i = 0; i < numDecls; i++) {
if (!AstDecodeSignatureIndex(c, &c.funcSigs()[i]))
if (!AstDecodeSignatureIndex(c, &c.funcDefSigs()[i]))
return false;
}
@ -1897,7 +1897,7 @@ AstDecodeExportSection(AstDecodeContext& c)
}
static bool
AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcIndex, AstFunc** func)
AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcDefIndex, AstFunc** func)
{
uint32_t offset = c.d.currentOffset();
uint32_t bodySize;
@ -1912,7 +1912,7 @@ AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcIndex, AstFunc** func)
AstDecodeExprIter iter(c.d);
uint32_t sigIndex = c.funcSigs()[funcIndex];
uint32_t sigIndex = c.funcDefSigs()[funcDefIndex];
const AstSig* sig = c.module().sigs()[sigIndex];
AstValTypeVector vars(c.lifo);
@ -1930,7 +1930,7 @@ AstDecodeFunctionBody(AstDecodeContext &c, uint32_t funcIndex, AstFunc** func)
AstName funcName;
if (!AstDecodeGenerateName(c, AstName(u"func"),
c.module().funcImportNames().length() + funcIndex,
c.module().funcImportNames().length() + funcDefIndex,
&funcName))
return false;
@ -2000,7 +2000,7 @@ AstDecodeCodeSection(AstDecodeContext &c)
return false;
if (sectionStart == Decoder::NotStarted) {
if (c.funcSigs().length() != 0)
if (c.funcDefSigs().length() != 0)
return c.d.fail("expected function bodies");
return false;
@ -2010,12 +2010,12 @@ AstDecodeCodeSection(AstDecodeContext &c)
if (!c.d.readVarU32(&numFuncBodies))
return c.d.fail("expected function body count");
if (numFuncBodies != c.funcSigs().length())
if (numFuncBodies != c.funcDefSigs().length())
return c.d.fail("function body count does not match function signature count");
for (uint32_t funcIndex = 0; funcIndex < numFuncBodies; funcIndex++) {
for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncBodies; funcDefIndex++) {
AstFunc* func;
if (!AstDecodeFunctionBody(c, funcIndex, &func))
if (!AstDecodeFunctionBody(c, funcDefIndex, &func))
return false;
if (!c.module().append(func))
return false;

View File

@ -164,7 +164,7 @@ SendCodeRangesToProfiler(JSContext* cx, CodeSegment& cs, const Bytes& bytecode,
uintptr_t size = end - start;
TwoByteName name(cx);
if (!metadata.getFuncDefName(cx, &bytecode, codeRange.funcDefIndex(), &name))
if (!metadata.getFuncName(cx, &bytecode, codeRange.funcIndex(), &name))
return false;
UniqueChars chars(
@ -280,14 +280,14 @@ CodeSegment::onMovingGrow(uint8_t* prevMemoryBase, const Metadata& metadata, Arr
}
size_t
FuncDefExport::serializedSize() const
FuncExport::serializedSize() const
{
return sig_.serializedSize() +
sizeof(pod);
}
uint8_t*
FuncDefExport::serialize(uint8_t* cursor) const
FuncExport::serialize(uint8_t* cursor) const
{
cursor = sig_.serialize(cursor);
cursor = WriteBytes(cursor, &pod, sizeof(pod));
@ -295,7 +295,7 @@ FuncDefExport::serialize(uint8_t* cursor) const
}
const uint8_t*
FuncDefExport::deserialize(const uint8_t* cursor)
FuncExport::deserialize(const uint8_t* cursor)
{
(cursor = sig_.deserialize(cursor)) &&
(cursor = ReadBytes(cursor, &pod, sizeof(pod)));
@ -303,7 +303,7 @@ FuncDefExport::deserialize(const uint8_t* cursor)
}
size_t
FuncDefExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
FuncExport::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
return sig_.sizeOfExcludingThis(mallocSizeOf);
}
@ -341,7 +341,7 @@ CodeRange::CodeRange(Kind kind, Offsets offsets)
: begin_(offsets.begin),
profilingReturn_(0),
end_(offsets.end),
funcDefIndex_(0),
funcIndex_(0),
funcLineOrBytecode_(0),
funcBeginToTableEntry_(0),
funcBeginToTableProfilingJump_(0),
@ -358,7 +358,7 @@ CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
: begin_(offsets.begin),
profilingReturn_(offsets.profilingReturn),
end_(offsets.end),
funcDefIndex_(0),
funcIndex_(0),
funcLineOrBytecode_(0),
funcBeginToTableEntry_(0),
funcBeginToTableProfilingJump_(0),
@ -372,11 +372,11 @@ CodeRange::CodeRange(Kind kind, ProfilingOffsets offsets)
MOZ_ASSERT(kind_ == ImportJitExit || kind_ == ImportInterpExit || kind_ == TrapExit);
}
CodeRange::CodeRange(uint32_t funcDefIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
CodeRange::CodeRange(uint32_t funcIndex, uint32_t funcLineOrBytecode, FuncOffsets offsets)
: begin_(offsets.begin),
profilingReturn_(offsets.profilingReturn),
end_(offsets.end),
funcDefIndex_(funcDefIndex),
funcIndex_(funcIndex),
funcLineOrBytecode_(funcLineOrBytecode),
funcBeginToTableEntry_(offsets.tableEntry - begin_),
funcBeginToTableProfilingJump_(offsets.tableProfilingJump - begin_),
@ -445,7 +445,7 @@ Metadata::serializedSize() const
{
return sizeof(pod()) +
SerializedVectorSize(funcImports) +
SerializedVectorSize(funcDefExports) +
SerializedVectorSize(funcExports) +
SerializedVectorSize(sigIds) +
SerializedPodVectorSize(globals) +
SerializedPodVectorSize(tables) +
@ -464,7 +464,7 @@ Metadata::serialize(uint8_t* cursor) const
{
cursor = WriteBytes(cursor, &pod(), sizeof(pod()));
cursor = SerializeVector(cursor, funcImports);
cursor = SerializeVector(cursor, funcDefExports);
cursor = SerializeVector(cursor, funcExports);
cursor = SerializeVector(cursor, sigIds);
cursor = SerializePodVector(cursor, globals);
cursor = SerializePodVector(cursor, tables);
@ -484,7 +484,7 @@ Metadata::deserialize(const uint8_t* cursor)
{
(cursor = ReadBytes(cursor, &pod(), sizeof(pod()))) &&
(cursor = DeserializeVector(cursor, &funcImports)) &&
(cursor = DeserializeVector(cursor, &funcDefExports)) &&
(cursor = DeserializeVector(cursor, &funcExports)) &&
(cursor = DeserializeVector(cursor, &sigIds)) &&
(cursor = DeserializePodVector(cursor, &globals)) &&
(cursor = DeserializePodVector(cursor, &tables)) &&
@ -503,7 +503,7 @@ size_t
Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
{
return SizeOfVectorExcludingThis(funcImports, mallocSizeOf) +
SizeOfVectorExcludingThis(funcDefExports, mallocSizeOf) +
SizeOfVectorExcludingThis(funcExports, mallocSizeOf) +
SizeOfVectorExcludingThis(sigIds, mallocSizeOf) +
globals.sizeOfExcludingThis(mallocSizeOf) +
tables.sizeOfExcludingThis(mallocSizeOf) +
@ -517,36 +517,36 @@ Metadata::sizeOfExcludingThis(MallocSizeOf mallocSizeOf) const
filename.sizeOfExcludingThis(mallocSizeOf);
}
struct ProjectIndex
struct ProjectFuncIndex
{
const FuncDefExportVector& funcDefExports;
const FuncExportVector& funcExports;
explicit ProjectIndex(const FuncDefExportVector& funcDefExports)
: funcDefExports(funcDefExports)
explicit ProjectFuncIndex(const FuncExportVector& funcExports)
: funcExports(funcExports)
{}
uint32_t operator[](size_t index) const {
return funcDefExports[index].funcDefIndex();
return funcExports[index].funcIndex();
}
};
const FuncDefExport&
Metadata::lookupFuncDefExport(uint32_t funcDefIndex) const
const FuncExport&
Metadata::lookupFuncExport(uint32_t funcIndex) const
{
size_t match;
if (!BinarySearch(ProjectIndex(funcDefExports), 0, funcDefExports.length(), funcDefIndex, &match))
if (!BinarySearch(ProjectFuncIndex(funcExports), 0, funcExports.length(), funcIndex, &match))
MOZ_CRASH("missing function export");
return funcDefExports[match];
return funcExports[match];
}
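
lookupFuncExport can binary-search directly on function index because finishFuncExports (later in this patch) stores the vector sorted by that key; ProjectFuncIndex merely adapts the vector so the search compares indices. An equivalent sketch using std::lower_bound in place of mozilla::BinarySearch:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct MiniFuncExport {
        uint32_t funcIndex;      // the sort key
        uint32_t codeRangeIndex;
    };

    // Requires funcExports sorted by funcIndex; lookup is O(log n).
    const MiniFuncExport& lookupFuncExport(const std::vector<MiniFuncExport>& funcExports,
                                           uint32_t funcIndex) {
        auto it = std::lower_bound(funcExports.begin(), funcExports.end(), funcIndex,
                                   [](const MiniFuncExport& fe, uint32_t key) {
                                       return fe.funcIndex < key;
                                   });
        assert(it != funcExports.end() && it->funcIndex == funcIndex);
        return *it;
    }

    int main() {
        std::vector<MiniFuncExport> exports = {{1, 10}, {4, 11}, {7, 12}};
        assert(lookupFuncExport(exports, 4).codeRangeIndex == 11);
        return 0;
    }
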
bool
Metadata::getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
TwoByteName* name) const
Metadata::getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
TwoByteName* name) const
{
if (funcDefIndex < funcNames.length()) {
if (funcIndex < funcNames.length()) {
MOZ_ASSERT(maybeBytecode, "NameInBytecode requires preserved bytecode");
const NameInBytecode& n = funcNames[funcDefIndex];
const NameInBytecode& n = funcNames[funcIndex];
MOZ_ASSERT(n.offset + n.length < maybeBytecode->length());
if (n.length == 0)
@ -572,7 +572,7 @@ Metadata::getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t fun
// For names that are out of range or invalid, synthesize a name.
UniqueChars chars(JS_smprintf("wasm-function[%u]", funcDefIndex));
UniqueChars chars(JS_smprintf("wasm-function[%u]", funcIndex));
if (!chars) {
ReportOutOfMemory(cx);
return false;
@ -657,17 +657,17 @@ Code::lookupMemoryAccess(void* pc) const
}
bool
Code::getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const
Code::getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const
{
const Bytes* maybeBytecode = maybeBytecode_ ? &maybeBytecode_.get()->bytes : nullptr;
return metadata_->getFuncDefName(cx, maybeBytecode, funcDefIndex, name);
return metadata_->getFuncName(cx, maybeBytecode, funcIndex, name);
}
JSAtom*
Code::getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const
Code::getFuncAtom(JSContext* cx, uint32_t funcIndex) const
{
TwoByteName name(cx);
if (!getFuncDefName(cx, funcDefIndex, &name))
if (!getFuncName(cx, funcIndex, &name))
return nullptr;
return AtomizeChars(cx, name.begin(), name.length());
@ -787,7 +787,7 @@ Code::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
continue;
TwoByteName name(cx);
if (!getFuncDefName(cx, codeRange.funcDefIndex(), &name))
if (!getFuncName(cx, codeRange.funcIndex(), &name))
return false;
if (!name.append('\0'))
return false;
@ -803,11 +803,11 @@ Code::ensureProfilingState(JSContext* cx, bool newProfilingEnabled)
return false;
}
if (codeRange.funcDefIndex() >= funcLabels_.length()) {
if (!funcLabels_.resize(codeRange.funcDefIndex() + 1))
if (codeRange.funcIndex() >= funcLabels_.length()) {
if (!funcLabels_.resize(codeRange.funcIndex() + 1))
return false;
}
funcLabels_[codeRange.funcDefIndex()] = Move(label);
funcLabels_[codeRange.funcIndex()] = Move(label);
}
} else {
funcLabels_.clear();

View File

@ -122,30 +122,30 @@ struct ShareableBytes : ShareableBase<ShareableBytes>
typedef RefPtr<ShareableBytes> MutableBytes;
typedef RefPtr<const ShareableBytes> SharedBytes;
// A FuncDefExport represents a single function definition inside a wasm Module
// that has been exported one or more times. A FuncDefExport represents an
// A FuncExport represents a single function definition inside a wasm Module
// that has been exported one or more times. A FuncExport represents an
// internal entry point that can be called via function definition index by
// Instance::callExport(). To allow O(log(n)) lookup of a FuncDefExport by
// function definition index, the FuncDefExportVector is stored sorted by
// Instance::callExport(). To allow O(log(n)) lookup of a FuncExport by
// function definition index, the FuncExportVector is stored sorted by
// function definition index.
class FuncDefExport
class FuncExport
{
Sig sig_;
MOZ_INIT_OUTSIDE_CTOR struct CacheablePod {
uint32_t funcDefIndex_;
uint32_t funcIndex_;
uint32_t codeRangeIndex_;
uint32_t entryOffset_;
} pod;
public:
FuncDefExport() = default;
explicit FuncDefExport(Sig&& sig,
uint32_t funcDefIndex,
uint32_t codeRangeIndex)
FuncExport() = default;
explicit FuncExport(Sig&& sig,
uint32_t funcIndex,
uint32_t codeRangeIndex)
: sig_(Move(sig))
{
pod.funcDefIndex_ = funcDefIndex;
pod.funcIndex_ = funcIndex;
pod.codeRangeIndex_ = codeRangeIndex;
pod.entryOffset_ = UINT32_MAX;
}
@ -157,8 +157,8 @@ class FuncDefExport
const Sig& sig() const {
return sig_;
}
uint32_t funcDefIndex() const {
return pod.funcDefIndex_;
uint32_t funcIndex() const {
return pod.funcIndex_;
}
uint32_t codeRangeIndex() const {
return pod.codeRangeIndex_;
@ -168,10 +168,10 @@ class FuncDefExport
return pod.entryOffset_;
}
WASM_DECLARE_SERIALIZABLE(FuncDefExport)
WASM_DECLARE_SERIALIZABLE(FuncExport)
};
typedef Vector<FuncDefExport, 0, SystemAllocPolicy> FuncDefExportVector;
typedef Vector<FuncExport, 0, SystemAllocPolicy> FuncExportVector;
// A FuncImport contains the runtime metadata needed to implement a call to an
// imported function. Each function import has two call stubs: an optimized path
@ -251,7 +251,7 @@ class CodeRange
uint32_t begin_;
uint32_t profilingReturn_;
uint32_t end_;
uint32_t funcDefIndex_;
uint32_t funcIndex_;
uint32_t funcLineOrBytecode_;
uint8_t funcBeginToTableEntry_;
uint8_t funcBeginToTableProfilingJump_;
@ -264,7 +264,7 @@ class CodeRange
CodeRange() = default;
CodeRange(Kind kind, Offsets offsets);
CodeRange(Kind kind, ProfilingOffsets offsets);
CodeRange(uint32_t funcDefIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
CodeRange(uint32_t funcIndex, uint32_t lineOrBytecode, FuncOffsets offsets);
// All CodeRanges have a begin and end.
@ -329,9 +329,9 @@ class CodeRange
MOZ_ASSERT(isFunction());
return profilingReturn_ - funcProfilingEpilogueToProfilingReturn_;
}
uint32_t funcDefIndex() const {
uint32_t funcIndex() const {
MOZ_ASSERT(isFunction());
return funcDefIndex_;
return funcIndex_;
}
uint32_t funcLineOrBytecode() const {
MOZ_ASSERT(isFunction());
@ -363,11 +363,11 @@ struct CallThunk
{
uint32_t offset;
union {
uint32_t funcDefIndex;
uint32_t funcIndex;
uint32_t codeRangeIndex;
} u;
CallThunk(uint32_t offset, uint32_t funcDefIndex) : offset(offset) { u.funcDefIndex = funcDefIndex; }
CallThunk(uint32_t offset, uint32_t funcIndex) : offset(offset) { u.funcIndex = funcIndex; }
CallThunk() = default;
};
@ -447,7 +447,7 @@ struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
const MetadataCacheablePod& pod() const { return *this; }
FuncImportVector funcImports;
FuncDefExportVector funcDefExports;
FuncExportVector funcExports;
SigWithIdVector sigIds;
GlobalDescVector globals;
TableDescVector tables;
@ -463,7 +463,7 @@ struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
bool usesMemory() const { return UsesMemory(memoryUsage); }
bool hasSharedMemory() const { return memoryUsage == MemoryUsage::Shared; }
const FuncDefExport& lookupFuncDefExport(uint32_t funcDefIndex) const;
const FuncExport& lookupFuncExport(uint32_t funcIndex) const;
// AsmJSMetadata derives Metadata iff isAsmJS(). Mostly this distinction is
// encapsulated within AsmJS.cpp, but the additional virtual functions allow
@ -486,8 +486,8 @@ struct Metadata : ShareableBase<Metadata>, MetadataCacheablePod
virtual ScriptSource* maybeScriptSource() const {
return nullptr;
}
virtual bool getFuncDefName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcDefIndex,
TwoByteName* name) const;
virtual bool getFuncName(JSContext* cx, const Bytes* maybeBytecode, uint32_t funcIndex,
TwoByteName* name) const;
WASM_DECLARE_SERIALIZABLE_VIRTUAL(Metadata);
};
@ -527,8 +527,8 @@ class Code
// Return the name associated with a given function index, or generate one
// if none was given by the module.
bool getFuncDefName(JSContext* cx, uint32_t funcDefIndex, TwoByteName* name) const;
JSAtom* getFuncDefAtom(JSContext* cx, uint32_t funcDefIndex) const;
bool getFuncName(JSContext* cx, uint32_t funcIndex, TwoByteName* name) const;
JSAtom* getFuncAtom(JSContext* cx, uint32_t funcIndex) const;
// If the source bytecode was saved when this Code was constructed, this
// method will render the binary as text. Otherwise, a diagnostic string
@ -545,7 +545,7 @@ class Code
MOZ_MUST_USE bool ensureProfilingState(JSContext* cx, bool enabled);
bool profilingEnabled() const { return profilingEnabled_; }
const char* profilingLabel(uint32_t funcDefIndex) const { return funcLabels_[funcDefIndex].get(); }
const char* profilingLabel(uint32_t funcIndex) const { return funcLabels_[funcIndex].get(); }
// about:memory reporting:

View File

@ -105,17 +105,17 @@ DecodeCallReturn(FunctionDecoder& f, const Sig& sig)
static bool
DecodeCall(FunctionDecoder& f)
{
uint32_t calleeIndex;
if (!f.iter().readCall(&calleeIndex))
uint32_t funcIndex;
if (!f.iter().readCall(&funcIndex))
return false;
if (calleeIndex >= f.mg().numFuncs())
if (funcIndex >= f.mg().numFuncs())
return f.iter().fail("callee index out of range");
if (!f.iter().inReachableCode())
return true;
const Sig* sig = &f.mg().funcSig(calleeIndex);
const Sig* sig = &f.mg().funcSig(funcIndex);
return DecodeCallArgs(f, *sig) &&
DecodeCallReturn(f, *sig);
@ -527,15 +527,19 @@ DecodeFunctionSection(Decoder& d, ModuleGeneratorData* init)
if (!d.readVarU32(&numDefs))
return d.fail("expected number of function definitions");
if (numDefs > MaxFuncs)
uint32_t numFuncs = init->funcSigs.length() + numDefs;
if (numFuncs > MaxFuncs)
return d.fail("too many functions");
if (!init->funcDefSigs.resize(numDefs))
if (!init->funcSigs.reserve(numFuncs))
return false;
for (uint32_t i = 0; i < numDefs; i++) {
if (!DecodeSignatureIndex(d, *init, &init->funcDefSigs[i]))
const SigWithId* sig;
if (!DecodeSignatureIndex(d, *init, &sig))
return false;
init->funcSigs.infallibleAppend(sig);
}
if (!d.finishSection(sectionStart, sectionSize, "function"))
@ -640,7 +644,7 @@ DecodeImport(Decoder& d, ModuleGeneratorData* init, ImportVector* imports)
const SigWithId* sig = nullptr;
if (!DecodeSignatureIndex(d, *init, &sig))
return false;
if (!init->funcImports.emplaceBack(sig))
if (!init->funcSigs.emplaceBack(sig))
return false;
break;
}
@ -693,6 +697,10 @@ DecodeImportSection(Decoder& d, ModuleGeneratorData* init, ImportVector* imports
return false;
}
// The global data offsets will be filled in by ModuleGenerator::init.
if (!init->funcImportGlobalDataOffsets.resize(init->funcSigs.length()))
return false;
if (!d.finishSection(sectionStart, sectionSize, "import"))
return false;
@ -820,7 +828,7 @@ DecodeExport(Decoder& d, ModuleGenerator& mg, CStringSet* dupSet)
if (funcIndex >= mg.numFuncs())
return d.fail("exported function index out of bounds");
return mg.addFuncDefExport(Move(fieldName), funcIndex);
return mg.addFuncExport(Move(fieldName), funcIndex);
}
case DefinitionKind::Table: {
uint32_t tableIndex;
@ -895,7 +903,7 @@ DecodeExportSection(Decoder& d, ModuleGenerator& mg)
}
static bool
DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcDefIndex)
DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcIndex)
{
uint32_t bodySize;
if (!d.readVarU32(&bodySize))
@ -912,7 +920,7 @@ DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcDefIndex)
return false;
ValTypeVector locals;
const Sig& sig = mg.funcDefSig(funcDefIndex);
const Sig& sig = mg.funcSig(funcIndex);
if (!locals.appendAll(sig.args()))
return false;
@ -943,7 +951,7 @@ DecodeFunctionBody(Decoder& d, ModuleGenerator& mg, uint32_t funcDefIndex)
memcpy(fg.bytes().begin(), bodyBegin, bodySize);
return mg.finishFuncDef(funcDefIndex, &fg);
return mg.finishFuncDef(funcIndex, &fg);
}
static bool
@ -1002,8 +1010,8 @@ DecodeCodeSection(Decoder& d, ModuleGenerator& mg)
if (numFuncDefs != mg.numFuncDefs())
return d.fail("function body count does not match function signature count");
for (uint32_t i = 0; i < numFuncDefs; i++) {
if (!DecodeFunctionBody(d, mg, i))
for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
if (!DecodeFunctionBody(d, mg, mg.numFuncImports() + funcDefIndex))
return false;
}
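
The loop above spells out the mapping: the binary's code section still numbers bodies from zero, but each body is now compiled under its full function index, offset past the imports. In miniature:

    #include <cstdint>
    #include <cstdio>

    int main() {
        uint32_t numFuncImports = 2; // a hypothetical module: two imports...
        uint32_t numFuncDefs = 3;    // ...followed by three definitions
        for (uint32_t funcDefIndex = 0; funcDefIndex < numFuncDefs; funcDefIndex++) {
            uint32_t funcIndex = numFuncImports + funcDefIndex;
            printf("body %u compiles as function index %u\n",
                   (unsigned)funcDefIndex, (unsigned)funcIndex);
        }
        return 0;
    }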

View File

@ -191,7 +191,7 @@ FrameIterator::functionDisplayAtom() const
MOZ_ASSERT(codeRange_);
JSAtom* atom = code_->getFuncDefAtom(cx, codeRange_->funcDefIndex());
JSAtom* atom = code_->getFuncAtom(cx, codeRange_->funcIndex());
if (!atom) {
cx->clearPendingException();
return cx->names().empty;
@ -785,7 +785,7 @@ ProfilingFrameIterator::label() const
}
switch (codeRange_->kind()) {
case CodeRange::Function: return code_->profilingLabel(codeRange_->funcDefIndex());
case CodeRange::Function: return code_->profilingLabel(codeRange_->funcIndex());
case CodeRange::Entry: return "entry trampoline (in asm.js)";
case CodeRange::ImportJitExit: return importJitDescription;
case CodeRange::ImportInterpExit: return importInterpDescription;
@ -803,7 +803,7 @@ ProfilingFrameIterator::label() const
void
wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
{
if (callSite.kind() != CallSite::FuncDef)
if (callSite.kind() != CallSite::Func)
return;
uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();

View File

@ -41,6 +41,7 @@ using mozilla::MakeEnumeratedRange;
static const unsigned GENERATOR_LIFO_DEFAULT_CHUNK_SIZE = 4 * 1024;
static const unsigned COMPILATION_LIFO_DEFAULT_CHUNK_SIZE = 64 * 1024;
static const uint32_t BAD_CODE_RANGE = UINT32_MAX;
ModuleGenerator::ModuleGenerator(ImportVector&& imports)
: alwaysBaseline_(false),
@ -56,7 +57,8 @@ ModuleGenerator::ModuleGenerator(ImportVector&& imports)
outstanding_(0),
activeFuncDef_(nullptr),
startedFuncDefs_(false),
finishedFuncDefs_(false)
finishedFuncDefs_(false),
numFinishedFuncDefs_(0)
{
MOZ_ASSERT(IsCompilingWasm());
}
@ -103,7 +105,10 @@ ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
shared_ = Move(shared);
alwaysBaseline_ = args.alwaysBaseline;
if (!exportedFuncDefs_.init())
if (!exportedFuncs_.init())
return false;
if (!funcToCodeRange_.appendN(BAD_CODE_RANGE, shared_->funcSigs.length()))
return false;
linkData_.globalDataLength = AlignBytes(InitialGlobalDataBytes, sizeof(void*));
@ -137,13 +142,10 @@ ModuleGenerator::init(UniqueModuleGeneratorData shared, const CompileArgs& args,
numSigs_ = shared_->sigs.length();
numTables_ = shared_->tables.length();
shared_->firstFuncDefIndex = shared_->funcImports.length();
for (FuncImportGenDesc& funcImport : shared_->funcImports) {
MOZ_ASSERT(!funcImport.globalDataOffset);
funcImport.globalDataOffset = linkData_.globalDataLength;
for (size_t i = 0; i < shared_->funcImportGlobalDataOffsets.length(); i++) {
shared_->funcImportGlobalDataOffsets[i] = linkData_.globalDataLength;
linkData_.globalDataLength += sizeof(FuncImportTls);
if (!addFuncImport(*funcImport.sig, funcImport.globalDataOffset))
if (!addFuncImport(*shared_->funcSigs[i], shared_->funcImportGlobalDataOffsets[i]))
return false;
}
@ -222,34 +224,23 @@ ModuleGenerator::finishOutstandingTask()
return finishTask(task);
}
static const uint32_t BadCodeRange = UINT32_MAX;
bool
ModuleGenerator::funcIndexIsDef(uint32_t funcIndex) const
ModuleGenerator::funcIsImport(uint32_t funcIndex) const
{
MOZ_ASSERT(funcIndex < numFuncImports() + numFuncDefs());
return funcIndex >= numFuncImports();
}
uint32_t
ModuleGenerator::funcIndexToDef(uint32_t funcIndex) const
{
MOZ_ASSERT(funcIndexIsDef(funcIndex));
return funcIndex - numFuncImports();
return funcIndex < shared_->funcImportGlobalDataOffsets.length();
}
bool
ModuleGenerator::funcIsDefined(uint32_t funcDefIndex) const
ModuleGenerator::funcIsCompiled(uint32_t funcIndex) const
{
return funcDefIndex < funcDefIndexToCodeRange_.length() &&
funcDefIndexToCodeRange_[funcDefIndex] != BadCodeRange;
return funcToCodeRange_[funcIndex] != BAD_CODE_RANGE;
}
const CodeRange&
ModuleGenerator::funcDefCodeRange(uint32_t funcDefIndex) const
ModuleGenerator::funcCodeRange(uint32_t funcIndex) const
{
MOZ_ASSERT(funcIsDefined(funcDefIndex));
const CodeRange& cr = metadata_->codeRanges[funcDefIndexToCodeRange_[funcDefIndex]];
MOZ_ASSERT(funcIsCompiled(funcIndex));
const CodeRange& cr = metadata_->codeRanges[funcToCodeRange_[funcIndex]];
MOZ_ASSERT(cr.isFunction());
return cr;
}
@ -290,9 +281,9 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
case CallSiteDesc::Dynamic:
case CallSiteDesc::Symbolic:
break;
case CallSiteDesc::FuncDef: {
if (funcIsDefined(cs.funcDefIndex())) {
uint32_t calleeOffset = funcDefCodeRange(cs.funcDefIndex()).funcNonProfilingEntry();
case CallSiteDesc::Func: {
if (funcIsCompiled(cs.funcIndex())) {
uint32_t calleeOffset = funcCodeRange(cs.funcIndex()).funcNonProfilingEntry();
MOZ_RELEASE_ASSERT(calleeOffset < INT32_MAX);
if (uint32_t(abs(int32_t(calleeOffset) - int32_t(callerOffset))) < JumpRange()) {
@ -301,7 +292,7 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
}
}
OffsetMap::AddPtr p = existingCallFarJumps.lookupForAdd(cs.funcDefIndex());
OffsetMap::AddPtr p = existingCallFarJumps.lookupForAdd(cs.funcIndex());
if (!p) {
Offsets offsets;
offsets.begin = masm_.currentOffset();
@ -312,12 +303,12 @@ ModuleGenerator::patchCallSites(TrapExitOffsetArray* maybeTrapExits)
if (!metadata_->codeRanges.emplaceBack(CodeRange::FarJumpIsland, offsets))
return false;
if (!existingCallFarJumps.add(p, cs.funcDefIndex(), offsets.begin))
if (!existingCallFarJumps.add(p, cs.funcIndex(), offsets.begin))
return false;
// Record calls' far jumps in metadata since they must be
// repatched at runtime when profiling mode is toggled.
if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcDefIndex()))
if (!metadata_->callThunks.emplaceBack(jumpOffset, cs.funcIndex()))
return false;
}
@ -380,17 +371,11 @@ ModuleGenerator::finishTask(IonCompileTask* task)
// Add the CodeRange for this function.
uint32_t funcCodeRangeIndex = metadata_->codeRanges.length();
if (!metadata_->codeRanges.emplaceBack(func.defIndex(), func.lineOrBytecode(), results.offsets()))
if (!metadata_->codeRanges.emplaceBack(func.index(), func.lineOrBytecode(), results.offsets()))
return false;
// Maintain a mapping from function index to CodeRange index.
if (func.defIndex() >= funcDefIndexToCodeRange_.length()) {
uint32_t n = func.defIndex() - funcDefIndexToCodeRange_.length() + 1;
if (!funcDefIndexToCodeRange_.appendN(BadCodeRange, n))
return false;
}
MOZ_ASSERT(!funcIsDefined(func.defIndex()));
funcDefIndexToCodeRange_[func.defIndex()] = funcCodeRangeIndex;
MOZ_ASSERT(!funcIsCompiled(func.index()));
funcToCodeRange_[func.index()] = funcCodeRangeIndex;
// Merge the compiled results into the whole-module masm.
mozilla::DebugOnly<size_t> sizeBefore = masm_.size();
@ -403,33 +388,32 @@ ModuleGenerator::finishTask(IonCompileTask* task)
}
bool
ModuleGenerator::finishFuncDefExports()
ModuleGenerator::finishFuncExports()
{
// ModuleGenerator::exportedFuncDefs_ is an unordered HashSet. The
// FuncDefExportVector stored in Metadata needs to be stored sorted by
// ModuleGenerator::exportedFuncs_ is an unordered HashSet. The
// FuncExportVector stored in Metadata needs to be stored sorted by
// function index to allow O(log(n)) lookup at runtime.
Uint32Vector funcDefIndices;
if (!funcDefIndices.reserve(exportedFuncDefs_.count()))
Uint32Vector sorted;
if (!sorted.reserve(exportedFuncs_.count()))
return false;
for (Uint32Set::Range r = exportedFuncDefs_.all(); !r.empty(); r.popFront())
funcDefIndices.infallibleAppend(r.front());
for (Uint32Set::Range r = exportedFuncs_.all(); !r.empty(); r.popFront())
sorted.infallibleAppend(r.front());
std::sort(funcDefIndices.begin(), funcDefIndices.end());
std::sort(sorted.begin(), sorted.end());
MOZ_ASSERT(metadata_->funcDefExports.empty());
if (!metadata_->funcDefExports.reserve(exportedFuncDefs_.count()))
MOZ_ASSERT(metadata_->funcExports.empty());
if (!metadata_->funcExports.reserve(sorted.length()))
return false;
for (uint32_t funcDefIndex : funcDefIndices) {
for (uint32_t funcIndex : sorted) {
Sig sig;
if (!sig.clone(funcDefSig(funcDefIndex)))
if (!sig.clone(funcSig(funcIndex)))
return false;
metadata_->funcDefExports.infallibleEmplaceBack(Move(sig),
funcDefIndex,
funcDefIndexToCodeRange_[funcDefIndex]);
uint32_t codeRangeIndex = funcToCodeRange_[funcIndex];
metadata_->funcExports.infallibleEmplaceBack(Move(sig), funcIndex, codeRangeIndex);
}
return true;
@ -444,8 +428,8 @@ ModuleGenerator::finishCodegen()
masm_.haltingAlign(CodeAlignment);
uint32_t offsetInWhole = masm_.size();
uint32_t numFuncDefExports = metadata_->funcDefExports.length();
MOZ_ASSERT(numFuncDefExports == exportedFuncDefs_.count());
uint32_t numFuncExports = metadata_->funcExports.length();
MOZ_ASSERT(numFuncExports == exportedFuncs_.count());
// Generate stubs in a separate MacroAssembler since, otherwise, for modules
// larger than the JumpImmediateRange, even local uses of Label will fail
@ -465,10 +449,10 @@ ModuleGenerator::finishCodegen()
MacroAssembler masm(MacroAssembler::WasmToken(), alloc);
Label throwLabel;
if (!entries.resize(numFuncDefExports))
if (!entries.resize(numFuncExports))
return false;
for (uint32_t i = 0; i < numFuncDefExports; i++)
entries[i] = GenerateEntry(masm, metadata_->funcDefExports[i]);
for (uint32_t i = 0; i < numFuncExports; i++)
entries[i] = GenerateEntry(masm, metadata_->funcExports[i]);
if (!interpExits.resize(numFuncImports()))
return false;
@ -494,9 +478,9 @@ ModuleGenerator::finishCodegen()
// Adjust each of the resulting Offsets (to account for being merged into
// masm_) and then create code ranges for all the stubs.
for (uint32_t i = 0; i < numFuncDefExports; i++) {
for (uint32_t i = 0; i < numFuncExports; i++) {
entries[i].offsetBy(offsetInWhole);
metadata_->funcDefExports[i].initEntryOffset(entries[i].begin);
metadata_->funcExports[i].initEntryOffset(entries[i].begin);
if (!metadata_->codeRanges.emplaceBack(CodeRange::Entry, entries[i]))
return false;
}
@ -548,10 +532,10 @@ ModuleGenerator::finishCodegen()
// Now that all code has been generated, patch far jumps to destinations.
for (CallThunk& callThunk : metadata_->callThunks) {
uint32_t funcDefIndex = callThunk.u.funcDefIndex;
callThunk.u.codeRangeIndex = funcDefIndexToCodeRange_[funcDefIndex];
uint32_t funcIndex = callThunk.u.funcIndex;
callThunk.u.codeRangeIndex = funcToCodeRange_[funcIndex];
CodeOffset farJump(callThunk.offset);
masm_.patchFarJump(farJump, funcDefCodeRange(funcDefIndex).funcNonProfilingEntry());
masm_.patchFarJump(farJump, funcCodeRange(funcIndex).funcNonProfilingEntry());
}
for (const TrapFarJump& farJump : masm_.trapFarJumps())
@ -713,12 +697,12 @@ ModuleGenerator::sig(uint32_t index) const
}
void
ModuleGenerator::initFuncDefSig(uint32_t funcDefIndex, uint32_t sigIndex)
ModuleGenerator::initFuncSig(uint32_t funcIndex, uint32_t sigIndex)
{
MOZ_ASSERT(isAsmJS());
MOZ_ASSERT(!shared_->funcDefSigs[funcDefIndex]);
MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
shared_->funcDefSigs[funcDefIndex] = &shared_->sigs[sigIndex];
shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
}
void
@ -739,68 +723,67 @@ ModuleGenerator::bumpMinMemoryLength(uint32_t newMinMemoryLength)
shared_->minMemoryLength = newMinMemoryLength;
}
const SigWithId&
ModuleGenerator::funcDefSig(uint32_t funcDefIndex) const
{
MOZ_ASSERT(shared_->funcDefSigs[funcDefIndex]);
return *shared_->funcDefSigs[funcDefIndex];
}
bool
ModuleGenerator::initImport(uint32_t funcImportIndex, uint32_t sigIndex)
ModuleGenerator::initImport(uint32_t funcIndex, uint32_t sigIndex)
{
MOZ_ASSERT(isAsmJS());
MOZ_ASSERT(!shared_->funcSigs[funcIndex]);
shared_->funcSigs[funcIndex] = &shared_->sigs[sigIndex];
uint32_t globalDataOffset;
if (!allocateGlobalBytes(sizeof(FuncImportTls), sizeof(void*), &globalDataOffset))
return false;
MOZ_ASSERT(funcImportIndex == metadata_->funcImports.length());
if (!addFuncImport(sig(sigIndex), globalDataOffset))
return false;
MOZ_ASSERT(!shared_->funcImportGlobalDataOffsets[funcIndex]);
shared_->funcImportGlobalDataOffsets[funcIndex] = globalDataOffset;
FuncImportGenDesc& funcImport = shared_->funcImports[funcImportIndex];
MOZ_ASSERT(!funcImport.sig);
funcImport.sig = &shared_->sigs[sigIndex];
funcImport.globalDataOffset = globalDataOffset;
return true;
MOZ_ASSERT(funcIndex == metadata_->funcImports.length());
return addFuncImport(sig(sigIndex), globalDataOffset);
}
uint32_t
ModuleGenerator::numFuncImports() const
{
// Until all functions have been validated, asm.js doesn't know the total
// number of imports.
MOZ_ASSERT_IF(isAsmJS(), finishedFuncDefs_);
return metadata_->funcImports.length();
}
const FuncImportGenDesc&
ModuleGenerator::funcImport(uint32_t funcImportIndex) const
uint32_t
ModuleGenerator::numFuncDefs() const
{
MOZ_ASSERT(shared_->funcImports[funcImportIndex].sig);
return shared_->funcImports[funcImportIndex];
// asm.js overallocates the length of funcSigs and in general does not know
// the number of function definitions until it's done compiling.
MOZ_ASSERT(!isAsmJS());
return shared_->funcSigs.length() - numFuncImports();
}
uint32_t
ModuleGenerator::numFuncs() const
{
return numFuncImports() + numFuncDefs();
// asm.js pre-reserves a bunch of function index space which is
// incrementally filled in during function-body validation. Thus, there are
// a few possible interpretations of numFuncs() (total index space size vs.
// exact number of imports/definitions encountered so far) and to simplify
// things we only define this quantity for wasm.
MOZ_ASSERT(!isAsmJS());
return shared_->funcSigs.length();
}
const SigWithId&
ModuleGenerator::funcSig(uint32_t funcIndex) const
{
MOZ_ASSERT(funcIndex < numFuncs());
if (funcIndex < numFuncImports())
return *funcImport(funcIndex).sig;
return funcDefSig(funcIndex - numFuncImports());
MOZ_ASSERT(shared_->funcSigs[funcIndex]);
return *shared_->funcSigs[funcIndex];
}
bool
ModuleGenerator::addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex)
ModuleGenerator::addFuncExport(UniqueChars fieldName, uint32_t funcIndex)
{
if (funcIndexIsDef(funcIndex)) {
if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
if (!funcIsImport(funcIndex)) {
if (!exportedFuncs_.put(funcIndex))
return false;
}
@ -831,8 +814,8 @@ ModuleGenerator::addGlobalExport(UniqueChars fieldName, uint32_t globalIndex)
bool
ModuleGenerator::setStartFunction(uint32_t funcIndex)
{
if (funcIndexIsDef(funcIndex)) {
if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
if (!funcIsImport(funcIndex)) {
if (!exportedFuncs_.put(funcIndex))
return false;
}
@ -848,7 +831,7 @@ ModuleGenerator::addElemSegment(InitExpr offset, Uint32Vector&& elemFuncIndices)
MOZ_ASSERT(shared_->tables.length() == 1);
for (uint32_t funcIndex : elemFuncIndices) {
if (!funcIndexIsDef(funcIndex)) {
if (funcIsImport(funcIndex)) {
shared_->tables[0].external = true;
break;
}
@ -879,10 +862,10 @@ ModuleGenerator::startFuncDefs()
continue;
for (uint32_t funcIndex : elems.elemFuncIndices) {
if (!funcIndexIsDef(funcIndex))
if (funcIsImport(funcIndex))
continue;
if (!exportedFuncDefs_.put(funcIndexToDef(funcIndex)))
if (!exportedFuncs_.put(funcIndex))
return false;
}
}
@ -954,13 +937,13 @@ ModuleGenerator::startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg)
}
bool
ModuleGenerator::finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg)
ModuleGenerator::finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg)
{
MOZ_ASSERT(activeFuncDef_ == fg);
auto func = js::MakeUnique<FuncBytes>(Move(fg->bytes_),
funcDefIndex,
funcDefSig(funcDefIndex),
funcIndex,
funcSig(funcIndex),
fg->lineOrBytecode_,
Move(fg->callSiteLineNums_));
if (!func)
@ -986,6 +969,7 @@ ModuleGenerator::finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg)
fg->m_ = nullptr;
fg->task_ = nullptr;
activeFuncDef_ = nullptr;
numFinishedFuncDefs_++;
return true;
}
@ -1001,9 +985,25 @@ ModuleGenerator::finishFuncDefs()
return false;
}
linkData_.functionCodeLength = masm_.size();
finishedFuncDefs_ = true;
// In this patch, imports never have an associated code range.
#ifdef DEBUG
for (uint32_t i = 0; i < funcDefIndexToCodeRange_.length(); i++)
MOZ_ASSERT(funcIsDefined(i));
if (isAsmJS()) {
MOZ_ASSERT(numFuncImports() < AsmJSFirstDefFuncIndex);
for (uint32_t i = 0; i < AsmJSFirstDefFuncIndex; i++)
MOZ_ASSERT(funcToCodeRange_[i] == BAD_CODE_RANGE);
for (uint32_t i = AsmJSFirstDefFuncIndex; i < numFinishedFuncDefs_; i++)
MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
} else {
MOZ_ASSERT(numFinishedFuncDefs_ == numFuncDefs());
for (uint32_t i = 0; i < numFuncImports(); i++)
MOZ_ASSERT(funcToCodeRange_[i] == BAD_CODE_RANGE);
for (uint32_t i = numFuncImports(); i < numFuncs(); i++)
MOZ_ASSERT(funcCodeRange(i).funcIndex() == i);
}
#endif
// Complete element segments with the code range index of every element, now
@ -1017,17 +1017,15 @@ ModuleGenerator::finishFuncDefs()
return false;
for (uint32_t funcIndex : elems.elemFuncIndices) {
if (!funcIndexIsDef(funcIndex)) {
if (funcIsImport(funcIndex)) {
codeRangeIndices.infallibleAppend(UINT32_MAX);
continue;
}
codeRangeIndices.infallibleAppend(funcDefIndexToCodeRange_[funcIndexToDef(funcIndex)]);
codeRangeIndices.infallibleAppend(funcToCodeRange_[funcIndex]);
}
}
linkData_.functionCodeLength = masm_.size();
finishedFuncDefs_ = true;
return true;
}
@ -1056,26 +1054,22 @@ ModuleGenerator::initSigTableLength(uint32_t sigIndex, uint32_t length)
}
bool
ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices)
ModuleGenerator::initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices)
{
MOZ_ASSERT(isAsmJS());
MOZ_ASSERT(finishedFuncDefs_);
uint32_t tableIndex = shared_->asmJSSigToTableIndex[sigIndex];
MOZ_ASSERT(shared_->tables[tableIndex].limits.initial == elemFuncDefIndices.length());
MOZ_ASSERT(shared_->tables[tableIndex].limits.initial == elemFuncIndices.length());
Uint32Vector codeRangeIndices;
if (!codeRangeIndices.resize(elemFuncDefIndices.length()))
if (!codeRangeIndices.resize(elemFuncIndices.length()))
return false;
for (size_t i = 0; i < elemFuncDefIndices.length(); i++) {
codeRangeIndices[i] = funcDefIndexToCodeRange_[elemFuncDefIndices[i]];
elemFuncDefIndices[i] += numFuncImports();
}
for (size_t i = 0; i < elemFuncIndices.length(); i++)
codeRangeIndices[i] = funcToCodeRange_[elemFuncIndices[i]];
// By adding numFuncImports to each element, elemFuncDefIndices is now a
// Vector of func indices.
InitExpr offset(Val(uint32_t(0)));
if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncDefIndices)))
if (!elemSegments_.emplaceBack(tableIndex, offset, Move(elemFuncIndices)))
return false;
elemSegments_.back().elemCodeRangeIndices = Move(codeRangeIndices);
@ -1093,7 +1087,7 @@ ModuleGenerator::finish(const ShareableBytes& bytecode)
if (isAsmJS() && !shared_->tables.resize(numTables_))
return nullptr;
if (!finishFuncDefExports())
if (!finishFuncExports())
return nullptr;
if (!finishCodegen())

View File

@ -36,28 +36,16 @@ class FunctionGenerator;
// any given datum before being read by a background thread. In particular,
// once created, the Vectors are never resized.
struct FuncImportGenDesc
{
const SigWithId* sig;
uint32_t globalDataOffset;
FuncImportGenDesc() : sig(nullptr), globalDataOffset(0) {}
explicit FuncImportGenDesc(const SigWithId* sig) : sig(sig), globalDataOffset(0) {}
};
typedef Vector<FuncImportGenDesc, 0, SystemAllocPolicy> FuncImportGenDescVector;
struct ModuleGeneratorData
{
ModuleKind kind;
MemoryUsage memoryUsage;
mozilla::Atomic<uint32_t> minMemoryLength;
Maybe<uint32_t> maxMemoryLength;
uint32_t firstFuncDefIndex;
SigWithIdVector sigs;
SigWithIdPtrVector funcDefSigs;
FuncImportGenDescVector funcImports;
SigWithIdPtrVector funcSigs;
Uint32Vector funcImportGlobalDataOffsets;
GlobalDescVector globals;
TableDescVector tables;
Uint32Vector asmJSSigToTableIndex;
@ -65,13 +53,15 @@ struct ModuleGeneratorData
explicit ModuleGeneratorData(ModuleKind kind = ModuleKind::Wasm)
: kind(kind),
memoryUsage(MemoryUsage::None),
minMemoryLength(0),
firstFuncDefIndex(0)
minMemoryLength(0)
{}
bool isAsmJS() const {
return kind == ModuleKind::AsmJS;
}
bool funcIsImport(uint32_t funcIndex) const {
return funcIndex < funcImportGlobalDataOffsets.length();
}
};
typedef UniquePtr<ModuleGeneratorData> UniqueModuleGeneratorData;
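The new funcIsImport predicate is the heart of the unified index space: imports occupy [0, numFuncImports) and definitions occupy [numFuncImports, numFuncs). A minimal sketch with standard-library stand-ins for the generator's vectors:

#include <cstdint>
#include <vector>

// Illustrative stand-in for ModuleGeneratorData's bookkeeping.
struct IndexSpace {
    std::vector<uint32_t> funcImportGlobalDataOffsets; // one entry per import
    uint32_t numFuncDefs = 0;                          // definitions follow imports

    uint32_t numFuncImports() const {
        return uint32_t(funcImportGlobalDataOffsets.size());
    }
    uint32_t numFuncs() const { return numFuncImports() + numFuncDefs; }

    // Mirrors ModuleGeneratorData::funcIsImport above.
    bool funcIsImport(uint32_t funcIndex) const {
        return funcIndex < numFuncImports();
    }
};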
@ -109,8 +99,8 @@ class MOZ_STACK_CLASS ModuleGenerator
jit::JitContext jcx_;
jit::TempAllocator masmAlloc_;
jit::MacroAssembler masm_;
Uint32Vector funcDefIndexToCodeRange_;
Uint32Set exportedFuncDefs_;
Uint32Vector funcToCodeRange_;
Uint32Set exportedFuncs_;
uint32_t lastPatchedCallsite_;
uint32_t startOfUnpatchedCallsites_;
@ -124,15 +114,15 @@ class MOZ_STACK_CLASS ModuleGenerator
DebugOnly<FunctionGenerator*> activeFuncDef_;
DebugOnly<bool> startedFuncDefs_;
DebugOnly<bool> finishedFuncDefs_;
DebugOnly<uint32_t> numFinishedFuncDefs_;
MOZ_MUST_USE bool finishOutstandingTask();
bool funcIndexIsDef(uint32_t funcIndex) const;
uint32_t funcIndexToDef(uint32_t funcIndex) const;
bool funcIsDefined(uint32_t funcDefIndex) const;
const CodeRange& funcDefCodeRange(uint32_t funcDefIndex) const;
bool funcIsImport(uint32_t funcIndex) const;
bool funcIsCompiled(uint32_t funcIndex) const;
const CodeRange& funcCodeRange(uint32_t funcIndex) const;
MOZ_MUST_USE bool patchCallSites(TrapExitOffsetArray* maybeTrapExits = nullptr);
MOZ_MUST_USE bool finishTask(IonCompileTask* task);
MOZ_MUST_USE bool finishFuncDefExports();
MOZ_MUST_USE bool finishFuncExports();
MOZ_MUST_USE bool finishCodegen();
MOZ_MUST_USE bool finishLinkData(Bytes& code);
MOZ_MUST_USE bool addFuncImport(const Sig& sig, uint32_t globalDataOffset);
@ -160,24 +150,18 @@ class MOZ_STACK_CLASS ModuleGenerator
// Signatures:
uint32_t numSigs() const { return numSigs_; }
const SigWithId& sig(uint32_t sigIndex) const;
// Function declarations:
uint32_t numFuncDefs() const { return shared_->funcDefSigs.length(); }
const SigWithId& funcDefSig(uint32_t funcDefIndex) const;
const SigWithId& funcSig(uint32_t funcIndex) const;
// Globals:
const GlobalDescVector& globals() const { return shared_->globals; }
// Imports:
// Function declarations:
uint32_t numFuncImports() const;
const FuncImportGenDesc& funcImport(uint32_t funcImportIndex) const;
// Function index space:
uint32_t numFuncDefs() const;
uint32_t numFuncs() const;
const SigWithId& funcSig(uint32_t funcIndex) const;
// Exports:
MOZ_MUST_USE bool addFuncDefExport(UniqueChars fieldName, uint32_t funcIndex);
MOZ_MUST_USE bool addFuncExport(UniqueChars fieldName, uint32_t funcIndex);
MOZ_MUST_USE bool addTableExport(UniqueChars fieldName);
MOZ_MUST_USE bool addMemoryExport(UniqueChars fieldName);
MOZ_MUST_USE bool addGlobalExport(UniqueChars fieldName, uint32_t globalIndex);
@ -185,7 +169,7 @@ class MOZ_STACK_CLASS ModuleGenerator
// Function definitions:
MOZ_MUST_USE bool startFuncDefs();
MOZ_MUST_USE bool startFuncDef(uint32_t lineOrBytecode, FunctionGenerator* fg);
MOZ_MUST_USE bool finishFuncDef(uint32_t funcDefIndex, FunctionGenerator* fg);
MOZ_MUST_USE bool finishFuncDef(uint32_t funcIndex, FunctionGenerator* fg);
MOZ_MUST_USE bool finishFuncDefs();
// Start function:
@ -200,10 +184,10 @@ class MOZ_STACK_CLASS ModuleGenerator
// asm.js lazy initialization:
void initSig(uint32_t sigIndex, Sig&& sig);
void initFuncDefSig(uint32_t funcIndex, uint32_t sigIndex);
MOZ_MUST_USE bool initImport(uint32_t importIndex, uint32_t sigIndex);
void initFuncSig(uint32_t funcIndex, uint32_t sigIndex);
MOZ_MUST_USE bool initImport(uint32_t funcIndex, uint32_t sigIndex);
MOZ_MUST_USE bool initSigTableLength(uint32_t sigIndex, uint32_t length);
MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncDefIndices);
MOZ_MUST_USE bool initSigTableElems(uint32_t sigIndex, Uint32Vector&& elemFuncIndices);
void initMemoryUsage(MemoryUsage memoryUsage);
void bumpMinMemoryLength(uint32_t newMinMemoryLength);
MOZ_MUST_USE bool addGlobal(ValType type, bool isConst, uint32_t* index);


@ -530,7 +530,7 @@ Instance::object() const
}
bool
Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
Instance::callExport(JSContext* cx, uint32_t funcIndex, CallArgs args)
{
// If there has been a moving grow, this Instance should have been notified.
MOZ_RELEASE_ASSERT(!memory_ || tlsData_.memoryBase == memory_->buffer().dataPointerEither());
@ -538,7 +538,7 @@ Instance::callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args)
if (!cx->compartment()->wasm.ensureProfilingState(cx))
return false;
const FuncDefExport& func = metadata().lookupFuncDefExport(funcDefIndex);
const FuncExport& func = metadata().lookupFuncExport(funcIndex);
// The calling convention for an external call into wasm is to pass an
// array of 16-byte values where each value contains either a coerced int32
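The comment above describes the external-entry convention: arguments travel in an array of fixed 16-byte cells that the entry stub decodes according to the export's signature. A hedged illustration; the union below is a stand-in, not SpiderMonkey's actual ExportArg layout:

#include <cstdint>

// One argument cell: big enough for any coerced scalar value.
union ExportArgCell {
    int32_t i32;
    int64_t i64;
    float f32;
    double f64;
    uint8_t raw[16]; // pads every cell to the 16 bytes the comment mentions
};
static_assert(sizeof(ExportArgCell) == 16, "one fixed-size cell per argument");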


@ -100,7 +100,7 @@ class Instance
// Execute the given export given the JS call arguments, storing the return
// value in args.rval.
MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcDefIndex, CallArgs args);
MOZ_MUST_USE bool callExport(JSContext* cx, uint32_t funcIndex, CallArgs args);
// Initially, calls to imports in wasm code call out through the generic
// callImport method. If the imported callee gets JIT compiled and the types


@ -978,17 +978,17 @@ class FunctionCompiler
return true;
}
bool callDefinition(const Sig& sig, uint32_t funcDefIndex, const CallCompileState& call,
MDefinition** def)
bool callDirect(const Sig& sig, uint32_t funcIndex, const CallCompileState& call,
MDefinition** def)
{
if (inDeadCode()) {
*def = nullptr;
return true;
}
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::FuncDef);
CallSiteDesc desc(call.lineOrBytecode_, CallSiteDesc::Func);
MIRType ret = ToMIRType(sig.ret());
auto callee = CalleeDesc::definition(funcDefIndex);
auto callee = CalleeDesc::function(funcIndex);
auto* ins = MWasmCall::New(alloc(), desc, callee, call.regArgs_, ret,
call.spIncrement_, MWasmCall::DontSaveTls);
if (!ins)
@ -1898,61 +1898,37 @@ EmitCallArgs(FunctionCompiler& f, const Sig& sig, TlsUsage tls, CallCompileState
return f.finishCall(call, tls);
}
static bool
EmitCallImportCommon(FunctionCompiler& f, uint32_t lineOrBytecode, uint32_t funcImportIndex)
{
const FuncImportGenDesc& funcImport = f.mg().funcImports[funcImportIndex];
const Sig& sig = *funcImport.sig;
CallCompileState call(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, TlsUsage::CallerSaved, &call))
return false;
if (!f.iter().readCallReturn(sig.ret()))
return false;
MDefinition* def;
if (!f.callImport(funcImport.globalDataOffset, call, sig.ret(), &def))
return false;
if (IsVoid(sig.ret()))
return true;
f.iter().setResult(def);
return true;
}
static bool
EmitCall(FunctionCompiler& f)
{
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
uint32_t calleeIndex;
if (!f.iter().readCall(&calleeIndex))
uint32_t funcIndex;
if (!f.iter().readCall(&funcIndex))
return false;
if (f.inDeadCode())
return true;
// For asm.js, imports are not part of the function index space so in
// these cases firstFuncDefIndex is fixed to 0, even if there are
// function imports.
if (calleeIndex < f.mg().firstFuncDefIndex)
return EmitCallImportCommon(f, lineOrBytecode, calleeIndex);
uint32_t funcDefIndex = calleeIndex - f.mg().firstFuncDefIndex;
const Sig& sig = *f.mg().funcDefSigs[funcDefIndex];
const Sig& sig = *f.mg().funcSigs[funcIndex];
bool import = f.mg().funcIsImport(funcIndex);
CallCompileState call(f, lineOrBytecode);
if (!EmitCallArgs(f, sig, TlsUsage::Need, &call))
if (!EmitCallArgs(f, sig, import ? TlsUsage::CallerSaved : TlsUsage::Need, &call))
return false;
if (!f.iter().readCallReturn(sig.ret()))
return false;
MDefinition* def;
if (!f.callDefinition(sig, funcDefIndex, call, &def))
return false;
if (import) {
uint32_t globalDataOffset = f.mg().funcImportGlobalDataOffsets[funcIndex];
if (!f.callImport(globalDataOffset, call, sig.ret(), &def))
return false;
} else {
if (!f.callDirect(sig, funcIndex, call, &def))
return false;
}
if (IsVoid(sig.ret()))
return true;
@ -1961,23 +1937,6 @@ EmitCall(FunctionCompiler& f)
return true;
}
static bool
EmitOldCallImport(FunctionCompiler& f)
{
MOZ_ASSERT(!f.mg().firstFuncDefIndex);
uint32_t lineOrBytecode = f.readCallSiteLineOrBytecode();
uint32_t funcImportIndex;
if (!f.iter().readCall(&funcImportIndex))
return false;
if (f.inDeadCode())
return true;
return EmitCallImportCommon(f, lineOrBytecode, funcImportIndex);
}
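With EmitOldCallImport deleted, a single EmitCall path covers both callee kinds by classifying the function index. A simplified sketch of that dispatch; the emit helpers are hypothetical stand-ins for the real MIR construction:

#include <cstdint>
#include <vector>

// Hypothetical stand-ins; the real compiler builds MWasmCall nodes instead.
static bool emitImportCall(uint32_t globalDataOffset) { (void)globalDataOffset; return true; }
static bool emitDirectCall(uint32_t funcIndex) { (void)funcIndex; return true; }

struct ModuleInfo {
    std::vector<uint32_t> funcImportGlobalDataOffsets; // one entry per import
    bool funcIsImport(uint32_t funcIndex) const {
        return funcIndex < funcImportGlobalDataOffsets.size();
    }
};

// One call path: the index, not a distinct opcode, selects the lowering.
static bool emitCall(const ModuleInfo& mg, uint32_t funcIndex) {
    if (mg.funcIsImport(funcIndex))
        return emitImportCall(mg.funcImportGlobalDataOffsets[funcIndex]);
    return emitDirectCall(funcIndex);
}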
static bool
EmitCallIndirect(FunctionCompiler& f, bool oldStyle)
{
@ -3182,8 +3141,6 @@ EmitExpr(FunctionCompiler& f)
return EmitCallIndirect(f, /* oldStyle = */ false);
case Expr::OldCallIndirect:
return EmitCallIndirect(f, /* oldStyle = */ true);
case Expr::OldCallImport:
return EmitOldCallImport(f);
// Locals and globals
case Expr::GetLocal:
@ -3813,7 +3770,7 @@ wasm::IonCompileFunction(IonCompileTask* task)
if (!lir)
return false;
SigIdDesc sigId = task->mg().funcDefSigs[func.defIndex()]->id;
SigIdDesc sigId = task->mg().funcSigs[func.index()]->id;
CodeGenerator codegen(&mir, lir, &results.masm());
if (!codegen.generateWasm(sigId, prologueTrapOffset, &results.offsets()))


@ -38,19 +38,19 @@ typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
class FuncBytes
{
Bytes bytes_;
uint32_t defIndex_;
uint32_t index_;
const SigWithId& sig_;
uint32_t lineOrBytecode_;
Uint32Vector callSiteLineNums_;
public:
FuncBytes(Bytes&& bytes,
uint32_t defIndex,
uint32_t index,
const SigWithId& sig,
uint32_t lineOrBytecode,
Uint32Vector&& callSiteLineNums)
: bytes_(Move(bytes)),
defIndex_(defIndex),
index_(index),
sig_(sig),
lineOrBytecode_(lineOrBytecode),
callSiteLineNums_(Move(callSiteLineNums))
@ -58,7 +58,7 @@ class FuncBytes
Bytes& bytes() { return bytes_; }
const Bytes& bytes() const { return bytes_; }
uint32_t defIndex() const { return defIndex_; }
uint32_t index() const { return index_; }
const SigWithId& sig() const { return sig_; }
uint32_t lineOrBytecode() const { return lineOrBytecode_; }
const Uint32Vector& callSiteLineNums() const { return callSiteLineNums_; }


@ -756,34 +756,34 @@ WasmCall(JSContext* cx, unsigned argc, Value* vp)
RootedFunction callee(cx, &args.callee().as<JSFunction>());
Instance& instance = ExportedFunctionToInstance(callee);
uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(callee);
return instance.callExport(cx, funcDefIndex, args);
uint32_t funcIndex = ExportedFunctionToFuncIndex(callee);
return instance.callExport(cx, funcIndex, args);
}
/* static */ bool
WasmInstanceObject::getExportedFunction(JSContext* cx, HandleWasmInstanceObject instanceObj,
uint32_t funcDefIndex, MutableHandleFunction fun)
uint32_t funcIndex, MutableHandleFunction fun)
{
if (ExportMap::Ptr p = instanceObj->exports().lookup(funcDefIndex)) {
if (ExportMap::Ptr p = instanceObj->exports().lookup(funcIndex)) {
fun.set(p->value());
return true;
}
const Instance& instance = instanceObj->instance();
RootedAtom name(cx, instance.code().getFuncDefAtom(cx, funcDefIndex));
RootedAtom name(cx, instance.code().getFuncAtom(cx, funcIndex));
if (!name)
return false;
unsigned numArgs = instance.metadata().lookupFuncDefExport(funcDefIndex).sig().args().length();
unsigned numArgs = instance.metadata().lookupFuncExport(funcIndex).sig().args().length();
fun.set(NewNativeConstructor(cx, WasmCall, numArgs, name, gc::AllocKind::FUNCTION_EXTENDED,
SingletonObject, JSFunction::WASM_CTOR));
if (!fun)
return false;
fun->setExtendedSlot(FunctionExtended::WASM_INSTANCE_SLOT, ObjectValue(*instanceObj));
fun->setExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT, Int32Value(funcDefIndex));
fun->setExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT, Int32Value(funcIndex));
if (!instanceObj->exports().putNew(funcDefIndex, fun)) {
if (!instanceObj->exports().putNew(funcIndex, fun)) {
ReportOutOfMemory(cx);
return false;
}
@ -794,10 +794,10 @@ WasmInstanceObject::getExportedFunction(JSContext* cx, HandleWasmInstanceObject
const CodeRange&
WasmInstanceObject::getExportedFunctionCodeRange(HandleFunction fun)
{
uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(fun);
MOZ_ASSERT(exports().lookup(funcDefIndex)->value() == fun);
uint32_t funcIndex = ExportedFunctionToFuncIndex(fun);
MOZ_ASSERT(exports().lookup(funcIndex)->value() == fun);
const Metadata& metadata = instance().metadata();
return metadata.codeRanges[metadata.lookupFuncDefExport(funcDefIndex).codeRangeIndex()];
return metadata.codeRanges[metadata.lookupFuncExport(funcIndex).codeRangeIndex()];
}
bool
@ -841,10 +841,10 @@ wasm::ExportedFunctionToInstanceObject(JSFunction* fun)
}
uint32_t
wasm::ExportedFunctionToDefinitionIndex(JSFunction* fun)
wasm::ExportedFunctionToFuncIndex(JSFunction* fun)
{
MOZ_ASSERT(IsExportedFunction(fun));
const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_DEF_INDEX_SLOT);
const Value& v = fun->getExtendedSlot(FunctionExtended::WASM_FUNC_INDEX_SLOT);
return v.toInt32();
}
@ -1279,7 +1279,7 @@ WasmTableObject::getImpl(JSContext* cx, const CallArgs& args)
RootedWasmInstanceObject instanceObj(cx, instance.object());
RootedFunction fun(cx);
if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange.funcDefIndex(), &fun))
if (!instanceObj->getExportedFunction(cx, instanceObj, codeRange.funcIndex(), &fun))
return false;
args.rval().setObject(*fun);
@ -1314,17 +1314,17 @@ WasmTableObject::setImpl(JSContext* cx, const CallArgs& args)
if (value) {
RootedWasmInstanceObject instanceObj(cx, ExportedFunctionToInstanceObject(value));
uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(value);
uint32_t funcIndex = ExportedFunctionToFuncIndex(value);
#ifdef DEBUG
RootedFunction f(cx);
MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &f));
MOZ_ASSERT(instanceObj->getExportedFunction(cx, instanceObj, funcIndex, &f));
MOZ_ASSERT(value == f);
#endif
Instance& instance = instanceObj->instance();
const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
const CodeRange& codeRange = instance.metadata().codeRanges[funcDefExport.codeRangeIndex()];
const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
const CodeRange& codeRange = instance.metadata().codeRanges[funcExport.codeRangeIndex()];
void* code = instance.codeSegment().base() + codeRange.funcTableEntry();
table.set(index, code, instance);
} else {


@ -97,7 +97,7 @@ extern WasmInstanceObject*
ExportedFunctionToInstanceObject(JSFunction* fun);
extern uint32_t
ExportedFunctionToDefinitionIndex(JSFunction* fun);
ExportedFunctionToFuncIndex(JSFunction* fun);
} // namespace wasm


@ -538,8 +538,8 @@ Module::extractCode(JSContext* cx, MutableHandleValue vp)
if (!JS_DefineProperty(cx, segment, "kind", value, JSPROP_ENUMERATE))
return false;
if (p->isFunction()) {
value.setNumber((uint32_t)p->funcDefIndex());
if (!JS_DefineProperty(cx, segment, "funcDefIndex", value, JSPROP_ENUMERATE))
value.setNumber((uint32_t)p->funcIndex());
if (!JS_DefineProperty(cx, segment, "funcIndex", value, JSPROP_ENUMERATE))
return false;
value.setNumber((uint32_t)p->funcNonProfilingEntry());
if (!JS_DefineProperty(cx, segment, "funcBodyBegin", value, JSPROP_ENUMERATE))
@ -693,11 +693,11 @@ Module::instantiateFunctions(JSContext* cx, Handle<FunctionVector> funcImports)
if (!IsExportedFunction(f) || ExportedFunctionToInstance(f).isAsmJS())
continue;
uint32_t funcDefIndex = ExportedFunctionToDefinitionIndex(f);
uint32_t funcIndex = ExportedFunctionToFuncIndex(f);
Instance& instance = ExportedFunctionToInstance(f);
const FuncDefExport& funcDefExport = instance.metadata().lookupFuncDefExport(funcDefIndex);
const FuncExport& funcExport = instance.metadata().lookupFuncExport(funcIndex);
if (funcDefExport.sig() != metadata_->funcImports[i].sig()) {
if (funcExport.sig() != metadata_->funcImports[i].sig()) {
JS_ReportErrorNumberASCII(cx, GetErrorMessage, nullptr, JSMSG_WASM_BAD_IMPORT_SIG);
return false;
}
@ -834,10 +834,8 @@ GetFunctionExport(JSContext* cx,
return true;
}
uint32_t funcDefIndex = exp.funcIndex() - funcImports.length();
RootedFunction fun(cx);
if (!instanceObj->getExportedFunction(cx, instanceObj, funcDefIndex, &fun))
if (!instanceObj->getExportedFunction(cx, instanceObj, exp.funcIndex(), &fun))
return false;
val.setObject(*fun);
@ -1052,8 +1050,7 @@ Module::instantiate(JSContext* cx,
if (!Call(cx, fval, thisv, args, &rval))
return false;
} else {
uint32_t funcDefIndex = startFuncIndex - funcImports.length();
if (!instance->instance().callExport(cx, funcDefIndex, args))
if (!instance->instance().callExport(cx, startFuncIndex, args))
return false;
}
}


@ -101,7 +101,7 @@ typedef Vector<Import, 0, SystemAllocPolicy> ImportVector;
// Export describes the export of a definition in a Module to a field in the
// export object. For functions, Export stores an index into the
// FuncDefExportVector in Metadata. For memory and table exports, there is
// FuncExportVector in Metadata. For memory and table exports, there is
// at most one (default) memory/table so no index is needed. Note: a single
// definition can be exported by multiple Exports in the ExportVector.
//
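Given that Exports for functions now carry a plain function index, a per-function lookup like lookupFuncExport can be a binary search, assuming the vector stays sorted by function index. A sketch with a simplified stand-in for FuncExport:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <vector>

struct FuncExportStub {
    uint32_t funcIndex; // the function this export entry describes
};

// Assumes `exports` is sorted by funcIndex and contains the target.
static const FuncExportStub&
lookupFuncExport(const std::vector<FuncExportStub>& exports, uint32_t funcIndex)
{
    auto it = std::lower_bound(exports.begin(), exports.end(), funcIndex,
                               [](const FuncExportStub& fe, uint32_t target) {
                                   return fe.funcIndex < target;
                               });
    assert(it != exports.end() && it->funcIndex == funcIndex);
    return *it;
}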


@ -96,7 +96,7 @@ static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of ExportFuncPtr to the export's signature's ABI.
Offsets
wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
wasm::GenerateEntry(MacroAssembler& masm, const FuncExport& fe)
{
masm.haltingAlign(CodeAlignment);
@ -160,11 +160,11 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
masm.andToStackPtr(Imm32(~(WasmStackAlignment - 1)));
// Bump the stack for the call.
masm.reserveStack(AlignBytes(StackArgBytes(func.sig().args()), WasmStackAlignment));
masm.reserveStack(AlignBytes(StackArgBytes(fe.sig().args()), WasmStackAlignment));
// Copy parameters out of argv and into the registers/stack-slots specified by
// the system ABI.
for (ABIArgValTypeIter iter(func.sig().args()); !iter.done(); iter++) {
for (ABIArgValTypeIter iter(fe.sig().args()); !iter.done(); iter++) {
unsigned argOffset = iter.index() * sizeof(ExportArg);
Address src(argv, argOffset);
MIRType type = iter.mirType();
@ -264,7 +264,7 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
// Call into the real function.
masm.assertStackAlignment(WasmStackAlignment);
masm.call(CallSiteDesc(CallSiteDesc::FuncDef), func.funcDefIndex());
masm.call(CallSiteDesc(CallSiteDesc::Func), fe.funcIndex());
// Recover the stack pointer value before dynamic alignment.
masm.loadWasmActivationFromTls(scratch);
@ -275,7 +275,7 @@ wasm::GenerateEntry(MacroAssembler& masm, const FuncDefExport& func)
masm.Pop(argv);
// Store the return value in argv[0]
switch (func.sig().ret()) {
switch (fe.sig().ret()) {
case ExprType::Void:
break;
case ExprType::I32:
@ -354,7 +354,7 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
if (type == MIRType::Int64)
masm.store64(i->gpr64(), dstAddr);
else
MOZ_CRASH("AsmJS uses hardfp for function calls.");
MOZ_CRASH("wasm uses hardfp for function calls.");
break;
#endif
case ABIArg::FPU: {
@ -437,8 +437,6 @@ ProfilingOffsets
wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,
Label* throwLabel)
{
const Sig& sig = fi.sig();
masm.setFramePushed(0);
// Argument types for Module::callImport_*:
@ -454,7 +452,7 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu
// The padding between stack args and argv ensures that argv is aligned. The
// padding between argv and retaddr ensures that sp is aligned.
unsigned argOffset = AlignBytes(StackArgBytes(invokeArgTypes), sizeof(double));
unsigned argBytes = Max<size_t>(1, sig.args().length()) * sizeof(Value);
unsigned argBytes = Max<size_t>(1, fi.sig().args().length()) * sizeof(Value);
unsigned framePushed = StackDecrementForCall(masm, ABIStackAlignment, argOffset + argBytes);
ProfilingOffsets offsets;
@ -463,7 +461,7 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu
// Fill the argument array.
unsigned offsetToCallerStackArgs = sizeof(Frame) + masm.framePushed();
Register scratch = ABINonArgReturnReg0;
FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(false));
// Prepare the arguments for the call to Module::callImport_*.
ABIArgMIRTypeIter i(invokeArgTypes);
@ -486,7 +484,7 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu
i++;
// argument 2: argc
unsigned argc = sig.args().length();
unsigned argc = fi.sig().args().length();
if (i->kind() == ABIArg::GPR)
masm.mov(ImmWord(argc), i->gpr());
else
@ -506,7 +504,7 @@ wasm::GenerateInterpExit(MacroAssembler& masm, const FuncImport& fi, uint32_t fu
// Make the call, test whether it succeeded, and extract the return value.
AssertStackAlignment(masm, ABIStackAlignment);
switch (sig.ret()) {
switch (fi.sig().ret()) {
case ExprType::Void:
masm.call(SymbolicAddress::CallImport_Void);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
@ -571,8 +569,6 @@ static const unsigned SavedTlsReg = sizeof(void*);
ProfilingOffsets
wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLabel)
{
const Sig& sig = fi.sig();
masm.setFramePushed(0);
// JIT calls use the following stack layout (sp grows to the left):
@ -583,7 +579,7 @@ wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// the return address.
static_assert(WasmStackAlignment >= JitStackAlignment, "subsumes");
unsigned sizeOfRetAddr = sizeof(void*);
unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + sig.args().length()) * sizeof(Value);
unsigned jitFrameBytes = 3 * sizeof(void*) + (1 + fi.sig().args().length()) * sizeof(Value);
unsigned totalJitFrameBytes = sizeOfRetAddr + jitFrameBytes + SavedTlsReg;
unsigned jitFramePushed = StackDecrementForCall(masm, JitStackAlignment, totalJitFrameBytes) -
sizeOfRetAddr;
@ -614,7 +610,7 @@ wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
masm.loadBaselineOrIonNoArgCheck(callee, callee, nullptr);
// 3. Argc
unsigned argc = sig.args().length();
unsigned argc = fi.sig().args().length();
masm.storePtr(ImmWord(uintptr_t(argc)), Address(masm.getStackPointer(), argOffset));
argOffset += sizeof(size_t);
@ -624,8 +620,8 @@ wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// 5. Fill the arguments
unsigned offsetToCallerStackArgs = jitFramePushed + sizeof(Frame);
FillArgumentArray(masm, sig.args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
argOffset += sig.args().length() * sizeof(Value);
FillArgumentArray(masm, fi.sig().args(), argOffset, offsetToCallerStackArgs, scratch, ToValue(true));
argOffset += fi.sig().args().length() * sizeof(Value);
MOZ_ASSERT(argOffset == jitFrameBytes);
// 6. Jit code will clobber all registers, even non-volatiles. WasmTlsReg
@ -704,7 +700,7 @@ wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
Label oolConvert;
switch (sig.ret()) {
switch (fi.sig().ret()) {
case ExprType::Void:
break;
case ExprType::I32:
@ -774,7 +770,7 @@ wasm::GenerateJitExit(MacroAssembler& masm, const FuncImport& fi, Label* throwLa
// Call coercion function
AssertStackAlignment(masm, ABIStackAlignment);
switch (sig.ret()) {
switch (fi.sig().ret()) {
case ExprType::I32:
masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);


@ -27,11 +27,11 @@ namespace jit { class MacroAssembler; class Label; }
namespace wasm {
class FuncDefExport;
class FuncExport;
class FuncImport;
extern Offsets
GenerateEntry(jit::MacroAssembler& masm, const FuncDefExport& func);
GenerateEntry(jit::MacroAssembler& masm, const FuncExport& fe);
extern ProfilingOffsets
GenerateInterpExit(jit::MacroAssembler& masm, const FuncImport& fi, uint32_t funcImportIndex,


@ -777,7 +777,7 @@ class CallSiteDesc
uint32_t kind_ : 2;
public:
enum Kind {
FuncDef, // pc-relative call to a specific function
Func, // pc-relative call to a specific function
Dynamic, // dynamic callee called via register
Symbolic, // call to a single symbolic callee
TrapExit // call to a trap exit
@ -833,12 +833,12 @@ class CallSiteAndTarget : public CallSite
explicit CallSiteAndTarget(CallSite cs)
: CallSite(cs)
{
MOZ_ASSERT(cs.kind() != FuncDef);
MOZ_ASSERT(cs.kind() != Func);
}
CallSiteAndTarget(CallSite cs, uint32_t funcDefIndex)
: CallSite(cs), index_(funcDefIndex)
CallSiteAndTarget(CallSite cs, uint32_t funcIndex)
: CallSite(cs), index_(funcIndex)
{
MOZ_ASSERT(cs.kind() == FuncDef);
MOZ_ASSERT(cs.kind() == Func);
}
CallSiteAndTarget(CallSite cs, Trap trap)
: CallSite(cs),
@ -847,7 +847,7 @@ class CallSiteAndTarget : public CallSite
MOZ_ASSERT(cs.kind() == TrapExit);
}
uint32_t funcDefIndex() const { MOZ_ASSERT(kind() == FuncDef); return index_; }
uint32_t funcIndex() const { MOZ_ASSERT(kind() == Func); return index_; }
Trap trap() const { MOZ_ASSERT(kind() == TrapExit); return Trap(index_); }
};
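CallSiteAndTarget is a small discriminated union: one raw index_ payload whose interpretation depends on kind(). An isolated sketch of the same pattern with placeholder names:

#include <cassert>
#include <cstdint>

enum class SiteKind { Func, TrapExit };

class SiteTarget {
    SiteKind kind_;
    uint32_t index_; // func index or trap number, depending on kind_
  public:
    SiteTarget(SiteKind kind, uint32_t index) : kind_(kind), index_(index) {}
    uint32_t funcIndex() const { assert(kind_ == SiteKind::Func); return index_; }
    uint32_t trap() const { assert(kind_ == SiteKind::TrapExit); return index_; }
};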
@ -1099,7 +1099,7 @@ class CalleeDesc
public:
enum Which {
// Calls a function defined in the same module by its index.
Definition,
Func,
// Calls the import identified by the offset of its FuncImportTls in
// thread-local data.
@ -1123,7 +1123,7 @@ class CalleeDesc
Which which_;
union U {
U() {}
uint32_t funcDefIndex_;
uint32_t funcIndex_;
struct {
uint32_t globalDataOffset_;
} import;
@ -1137,10 +1137,10 @@ class CalleeDesc
public:
CalleeDesc() {}
static CalleeDesc definition(uint32_t funcDefIndex) {
static CalleeDesc function(uint32_t funcIndex) {
CalleeDesc c;
c.which_ = Definition;
c.u.funcDefIndex_ = funcDefIndex;
c.which_ = Func;
c.u.funcIndex_ = funcIndex;
return c;
}
static CalleeDesc import(uint32_t globalDataOffset) {
@ -1178,9 +1178,9 @@ class CalleeDesc
Which which() const {
return which_;
}
uint32_t funcDefIndex() const {
MOZ_ASSERT(which_ == Definition);
return u.funcDefIndex_;
uint32_t funcIndex() const {
MOZ_ASSERT(which_ == Func);
return u.funcIndex_;
}
uint32_t importGlobalDataOffset() const {
MOZ_ASSERT(which_ == Import);
@ -1389,6 +1389,18 @@ static const unsigned MaxElemSegments = 64 * 1024;
static const unsigned MaxArgsPerFunc = 4 * 1024;
static const unsigned MaxBrTableElems = 4 * 1024 * 1024;
// To be able to assign function indices during compilation while the number of
// imports is still unknown, asm.js sets a maximum number of imports so it can
// immediately start handing out function indices starting at the maximum + 1.
// This means that there is a "hole" between the last import and the first
// definition, but that's fine.
static const unsigned AsmJSMaxImports = 4 * 1024;
static const unsigned AsmJSFirstDefFuncIndex = AsmJSMaxImports + 1;
static_assert(AsmJSMaxImports <= MaxImports, "conservative");
static_assert(AsmJSFirstDefFuncIndex < MaxFuncs, "conservative");
} // namespace wasm
} // namespace js
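A worked example of the layout the new comment describes, reusing the constants above (the import count of 10 is made up):

#include <cassert>
#include <cstdint>

static const uint32_t AsmJSMaxImports = 4 * 1024;
static const uint32_t AsmJSFirstDefFuncIndex = AsmJSMaxImports + 1;

int main() {
    uint32_t numImports = 10;               // indices 0..9 name imports
    assert(numImports <= AsmJSMaxImports);
    // Indices 10..4096 form the unused "hole"; the first definition is
    // handed index 4097 no matter how many imports actually exist.
    assert(AsmJSFirstDefFuncIndex > numImports);
    return 0;
}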