Backed out changeset ed20677d8a91 (bug 1577508) for build bustages on a CLOSED TREE

This commit is contained in:
Andreea Pavel 2019-10-03 16:33:02 +03:00
parent d8da468e74
commit ba6757eb39
14 changed files with 1009 additions and 2008 deletions

View File

@ -230,19 +230,10 @@ assertErrorMessage(() => wasmEval(moduleWithSections([
nameSection([moduleNameSubsection('hi')])])
).f(), RuntimeError, /unreachable/);
// Diagnose invalid block signature types.
for (var bad of [0xff, 1, 0x3f])
// Diagnose nonstandard block signature types.
for (var bad of [0xff, 0, 1, 0x3f])
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:[BlockCode, bad, EndCode]})])])), CompileError, /invalid .*block type/);
if (wasmMultiValueEnabled()) {
// In this test module, 0 denotes a void-to-void block type.
let binary = moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:[BlockCode, 0, EndCode]})])]);
assertEq(WebAssembly.validate(binary), true);
} else {
const bad = 0;
assertErrorMessage(() => wasmEval(moduleWithSections([sigSection([v2vSig]), declSection([0]), bodySection([funcBody({locals:[], body:[BlockCode, bad, EndCode]})])])), CompileError, /invalid .*block type/);
}
// Ensure all invalid opcodes rejected
for (let op of undefinedOpcodes) {
let binary = moduleWithSections([v2vSigSection, declSection([0]), bodySection([funcBody({locals:[], body:[op]})])]);

View File

@ -18,7 +18,7 @@ const invalidRefBlockType = funcBody({locals:[], body:[
0x42,
EndCode,
]});
checkInvalid(invalidRefBlockType, /ref/);
checkInvalid(invalidRefBlockType, /invalid inline block type/);
const invalidTooBigRefType = funcBody({locals:[], body:[
BlockCode,
@ -26,4 +26,4 @@ const invalidTooBigRefType = funcBody({locals:[], body:[
varU32(1000000),
EndCode,
]});
checkInvalid(invalidTooBigRefType, /ref/);
checkInvalid(invalidTooBigRefType, /invalid inline block type/);

View File

@ -202,7 +202,7 @@ wasmFailValidateText(`
(br_table 1 0 (i32.const 15))
)
)
)`, /br_table targets must all have the same arity/);
)`, /br_table operand must be subtype of all target types/);
wasmFailValidateText(`
(module
@ -212,7 +212,7 @@ wasmFailValidateText(`
(br_table 1 0 (i32.const 15))
)
)
)`, /br_table targets must all have the same arity/);
)`, /br_table operand must be subtype of all target types/);
wasmValidateText(`
(module

File diff suppressed because it is too large Load Diff

View File

@ -42,13 +42,7 @@ enum class SectionId {
GcFeatureOptIn = 42 // Arbitrary, but fits in 7 bits
};
// WebAssembly type encodings are all single-byte negative SLEB128s, hence:
// forall tc:TypeCode. ((tc & SLEB128SignMask) == SLEB128SignBit)
static const uint8_t SLEB128SignMask = 0xc0;
static const uint8_t SLEB128SignBit = 0x40;
enum class TypeCode {
I32 = 0x7f, // SLEB128(-0x01)
I64 = 0x7e, // SLEB128(-0x02)
F32 = 0x7d, // SLEB128(-0x03)
@ -69,7 +63,7 @@ enum class TypeCode {
// Type constructor for structure types - unofficial
Struct = 0x50, // SLEB128(-0x30)
// The 'empty' case of blocktype.
// Special code representing the block signature ()->()
BlockVoid = 0x40, // SLEB128(-0x40)
// Type designator for null - unofficial, will not appear in the binary format

View File

@ -43,12 +43,10 @@ using mozilla::Some;
namespace {
typedef Vector<MBasicBlock*, 8, SystemAllocPolicy> BlockVector;
typedef Vector<MDefinition*, 8, SystemAllocPolicy> DefVector;
struct IonCompilePolicy {
// We store SSA definitions in the value stack.
typedef MDefinition* Value;
typedef DefVector ValueVector;
// We store loop headers and then/else blocks in the control flow stack.
typedef MBasicBlock* ControlItem;
@ -1153,18 +1151,23 @@ class FunctionCompiler {
inline bool inDeadCode() const { return curBlock_ == nullptr; }
void returnValues(const DefVector& values) {
void returnExpr(MDefinition* operand) {
if (inDeadCode()) {
return;
}
MOZ_ASSERT(values.length() <= 1, "until multi-return");
MWasmReturn* ins = MWasmReturn::New(alloc(), operand);
curBlock_->end(ins);
curBlock_ = nullptr;
}
if (values.empty()) {
curBlock_->end(MWasmReturnVoid::New(alloc()));
} else {
curBlock_->end(MWasmReturn::New(alloc(), values[0]));
void returnVoid() {
if (inDeadCode()) {
return;
}
MWasmReturnVoid* ins = MWasmReturnVoid::New(alloc());
curBlock_->end(ins);
curBlock_ = nullptr;
}
@ -1180,42 +1183,39 @@ class FunctionCompiler {
}
private:
static uint32_t numPushed(MBasicBlock* block) {
return block->stackDepth() - block->info().firstStackSlot();
static bool hasPushed(MBasicBlock* block) {
uint32_t numPushed = block->stackDepth() - block->info().firstStackSlot();
MOZ_ASSERT(numPushed == 0 || numPushed == 1);
return numPushed;
}
public:
void pushDefs(const DefVector& defs) {
void pushDef(MDefinition* def) {
if (inDeadCode()) {
return;
}
MOZ_ASSERT(numPushed(curBlock_) == 0);
for (MDefinition* def : defs) {
MOZ_ASSERT(def->type() != MIRType::None);
MOZ_ASSERT(!hasPushed(curBlock_));
if (def && def->type() != MIRType::None) {
curBlock_->push(def);
}
}
bool popPushedDefs(DefVector* defs) {
size_t n = numPushed(curBlock_);
if (!defs->resizeUninitialized(n)) {
return false;
MDefinition* popDefIfPushed() {
if (!hasPushed(curBlock_)) {
return nullptr;
}
for (; n > 0; n--) {
MDefinition* def = curBlock_->pop();
MOZ_ASSERT(def->type() != MIRType::Value);
(*defs)[n - 1] = def;
}
return true;
MDefinition* def = curBlock_->pop();
MOZ_ASSERT(def->type() != MIRType::Value);
return def;
}
private:
void addJoinPredecessor(const DefVector& defs, MBasicBlock** joinPred) {
void addJoinPredecessor(MDefinition* def, MBasicBlock** joinPred) {
*joinPred = curBlock_;
if (inDeadCode()) {
return;
}
pushDefs(defs);
pushDef(def);
}
public:
@ -1241,15 +1241,15 @@ class FunctionCompiler {
}
bool switchToElse(MBasicBlock* elseBlock, MBasicBlock** thenJoinPred) {
DefVector values;
if (!finishBlock(&values)) {
MDefinition* ifDef;
if (!finishBlock(&ifDef)) {
return false;
}
if (!elseBlock) {
*thenJoinPred = nullptr;
} else {
addJoinPredecessor(values, thenJoinPred);
addJoinPredecessor(ifDef, thenJoinPred);
curBlock_ = elseBlock;
mirGraph().moveBlockToEnd(curBlock_);
@ -1258,44 +1258,47 @@ class FunctionCompiler {
return startBlock();
}
bool joinIfElse(MBasicBlock* thenJoinPred, DefVector* defs) {
DefVector values;
if (!finishBlock(&values)) {
bool joinIfElse(MBasicBlock* thenJoinPred, MDefinition** def) {
MDefinition* elseDef;
if (!finishBlock(&elseDef)) {
return false;
}
if (!thenJoinPred && inDeadCode()) {
return true;
}
*def = nullptr;
} else {
MBasicBlock* elseJoinPred;
addJoinPredecessor(elseDef, &elseJoinPred);
MBasicBlock* elseJoinPred;
addJoinPredecessor(values, &elseJoinPred);
mozilla::Array<MBasicBlock*, 2> blocks;
size_t numJoinPreds = 0;
if (thenJoinPred) {
blocks[numJoinPreds++] = thenJoinPred;
}
if (elseJoinPred) {
blocks[numJoinPreds++] = elseJoinPred;
}
mozilla::Array<MBasicBlock*, 2> blocks;
size_t numJoinPreds = 0;
if (thenJoinPred) {
blocks[numJoinPreds++] = thenJoinPred;
}
if (elseJoinPred) {
blocks[numJoinPreds++] = elseJoinPred;
}
if (numJoinPreds == 0) {
*def = nullptr;
return true;
}
if (numJoinPreds == 0) {
return true;
}
MBasicBlock* join;
if (!goToNewBlock(blocks[0], &join)) {
return false;
}
for (size_t i = 1; i < numJoinPreds; ++i) {
if (!goToExistingBlock(blocks[i], join)) {
MBasicBlock* join;
if (!goToNewBlock(blocks[0], &join)) {
return false;
}
for (size_t i = 1; i < numJoinPreds; ++i) {
if (!goToExistingBlock(blocks[i], join)) {
return false;
}
}
curBlock_ = join;
*def = popDefIfPushed();
}
curBlock_ = join;
return popPushedDefs(defs);
return true;
}
bool startBlock() {
@ -1305,10 +1308,10 @@ class FunctionCompiler {
return true;
}
bool finishBlock(DefVector* defs) {
bool finishBlock(MDefinition** def) {
MOZ_ASSERT(blockDepth_);
uint32_t topLabel = --blockDepth_;
return bindBranches(topLabel, defs);
return bindBranches(topLabel, def);
}
bool startLoop(MBasicBlock** loopHeader) {
@ -1398,7 +1401,7 @@ class FunctionCompiler {
}
public:
bool closeLoop(MBasicBlock* loopHeader, DefVector* loopResults) {
bool closeLoop(MBasicBlock* loopHeader, MDefinition** loopResult) {
MOZ_ASSERT(blockDepth_ >= 1);
MOZ_ASSERT(loopDepth_);
@ -1410,6 +1413,7 @@ class FunctionCompiler {
blockPatches_[headerLabel].empty());
blockDepth_--;
loopDepth_--;
*loopResult = nullptr;
return true;
}
@ -1424,7 +1428,7 @@ class FunctionCompiler {
// branches as forward jumps to a single backward jump. This is
// unfortunate but the optimizer is able to fold these into single jumps
// to backedges.
DefVector _;
MDefinition* _;
if (!bindBranches(headerLabel, &_)) {
return false;
}
@ -1433,7 +1437,7 @@ class FunctionCompiler {
if (curBlock_) {
// We're on the loop backedge block, created by bindBranches.
for (size_t i = 0, n = numPushed(curBlock_); i != n; i++) {
if (hasPushed(curBlock_)) {
curBlock_->pop();
}
@ -1458,7 +1462,8 @@ class FunctionCompiler {
}
blockDepth_ -= 1;
return inDeadCode() || popPushedDefs(loopResults);
*loopResult = inDeadCode() ? nullptr : popDefIfPushed();
return true;
}
bool addControlFlowPatch(MControlInstruction* ins, uint32_t relative,
@ -1474,7 +1479,7 @@ class FunctionCompiler {
return blockPatches_[absolute].append(ControlFlowPatch(ins, index));
}
bool br(uint32_t relativeDepth, const DefVector& values) {
bool br(uint32_t relativeDepth, MDefinition* maybeValue) {
if (inDeadCode()) {
return true;
}
@ -1484,14 +1489,14 @@ class FunctionCompiler {
return false;
}
pushDefs(values);
pushDef(maybeValue);
curBlock_->end(jump);
curBlock_ = nullptr;
return true;
}
bool brIf(uint32_t relativeDepth, const DefVector& values,
bool brIf(uint32_t relativeDepth, MDefinition* maybeValue,
MDefinition* condition) {
if (inDeadCode()) {
return true;
@ -1507,7 +1512,7 @@ class FunctionCompiler {
return false;
}
pushDefs(values);
pushDef(maybeValue);
curBlock_->end(test);
curBlock_ = joinBlock;
@ -1515,7 +1520,7 @@ class FunctionCompiler {
}
bool brTable(MDefinition* operand, uint32_t defaultDepth,
const Uint32Vector& depths, const DefVector& values) {
const Uint32Vector& depths, MDefinition* maybeValue) {
if (inDeadCode()) {
return true;
}
@ -1568,7 +1573,7 @@ class FunctionCompiler {
}
}
pushDefs(values);
pushDef(maybeValue);
curBlock_->end(table);
curBlock_ = nullptr;
@ -1616,9 +1621,10 @@ class FunctionCompiler {
return next->addPredecessor(alloc(), prev);
}
bool bindBranches(uint32_t absolute, DefVector* defs) {
bool bindBranches(uint32_t absolute, MDefinition** def) {
if (absolute >= blockPatches_.length() || blockPatches_[absolute].empty()) {
return inDeadCode() || popPushedDefs(defs);
*def = inDeadCode() ? nullptr : popDefIfPushed();
return true;
}
ControlFlowPatchVector& patches = blockPatches_[absolute];
@ -1658,9 +1664,7 @@ class FunctionCompiler {
curBlock_ = join;
if (!popPushedDefs(defs)) {
return false;
}
*def = popDefIfPushed();
patches.clear();
return true;
@ -1750,13 +1754,11 @@ static bool EmitF64Const(FunctionCompiler& f) {
}
static bool EmitBlock(FunctionCompiler& f) {
ResultType params;
return f.iter().readBlock(&params) && f.startBlock();
return f.iter().readBlock() && f.startBlock();
}
static bool EmitLoop(FunctionCompiler& f) {
ResultType params;
if (!f.iter().readLoop(&params)) {
if (!f.iter().readLoop()) {
return false;
}
@ -1772,9 +1774,8 @@ static bool EmitLoop(FunctionCompiler& f) {
}
static bool EmitIf(FunctionCompiler& f) {
ResultType params;
MDefinition* condition = nullptr;
if (!f.iter().readIf(&params, &condition)) {
if (!f.iter().readIf(&condition)) {
return false;
}
@ -1788,14 +1789,15 @@ static bool EmitIf(FunctionCompiler& f) {
}
static bool EmitElse(FunctionCompiler& f) {
ResultType paramType;
ResultType resultType;
DefVector thenValues;
if (!f.iter().readElse(&paramType, &resultType, &thenValues)) {
ExprType thenType;
MDefinition* thenValue;
if (!f.iter().readElse(&thenType, &thenValue)) {
return false;
}
f.pushDefs(thenValues);
if (!IsVoid(thenType)) {
f.pushDef(thenValue);
}
if (!f.switchToElse(f.iter().controlItem(), &f.iter().controlItem())) {
return false;
@ -1806,33 +1808,40 @@ static bool EmitElse(FunctionCompiler& f) {
static bool EmitEnd(FunctionCompiler& f) {
LabelKind kind;
ResultType type;
DefVector preJoinDefs;
if (!f.iter().readEnd(&kind, &type, &preJoinDefs)) {
ExprType type;
MDefinition* value;
if (!f.iter().readEnd(&kind, &type, &value)) {
return false;
}
MBasicBlock* block = f.iter().controlItem();
f.iter().popEnd();
f.pushDefs(preJoinDefs);
if (!IsVoid(type)) {
f.pushDef(value);
}
DefVector postJoinDefs;
MDefinition* def = nullptr;
switch (kind) {
case LabelKind::Body:
MOZ_ASSERT(f.iter().controlStackEmpty());
if (!f.finishBlock(&postJoinDefs)) {
if (!f.finishBlock(&def)) {
return false;
}
f.returnValues(postJoinDefs);
if (f.inDeadCode() || IsVoid(type)) {
f.returnVoid();
} else {
f.returnExpr(def);
}
return f.iter().readFunctionEnd(f.iter().end());
case LabelKind::Block:
if (!f.finishBlock(&postJoinDefs)) {
if (!f.finishBlock(&def)) {
return false;
}
break;
case LabelKind::Loop:
if (!f.closeLoop(block, &postJoinDefs)) {
if (!f.closeLoop(block, &def)) {
return false;
}
break;
@ -1843,54 +1852,76 @@ static bool EmitEnd(FunctionCompiler& f) {
return false;
}
if (!f.joinIfElse(block, &postJoinDefs)) {
if (!f.joinIfElse(block, &def)) {
return false;
}
break;
case LabelKind::Else:
if (!f.joinIfElse(block, &postJoinDefs)) {
if (!f.joinIfElse(block, &def)) {
return false;
}
break;
}
MOZ_ASSERT_IF(!f.inDeadCode(), postJoinDefs.length() == type.length());
f.iter().setResults(postJoinDefs.length(), postJoinDefs);
if (!IsVoid(type)) {
MOZ_ASSERT_IF(!f.inDeadCode(), def);
f.iter().setResult(def);
}
return true;
}
static bool EmitBr(FunctionCompiler& f) {
uint32_t relativeDepth;
ResultType type;
DefVector values;
if (!f.iter().readBr(&relativeDepth, &type, &values)) {
ExprType type;
MDefinition* value;
if (!f.iter().readBr(&relativeDepth, &type, &value)) {
return false;
}
return f.br(relativeDepth, values);
if (IsVoid(type)) {
if (!f.br(relativeDepth, nullptr)) {
return false;
}
} else {
if (!f.br(relativeDepth, value)) {
return false;
}
}
return true;
}
static bool EmitBrIf(FunctionCompiler& f) {
uint32_t relativeDepth;
ResultType type;
DefVector values;
ExprType type;
MDefinition* value;
MDefinition* condition;
if (!f.iter().readBrIf(&relativeDepth, &type, &values, &condition)) {
if (!f.iter().readBrIf(&relativeDepth, &type, &value, &condition)) {
return false;
}
return f.brIf(relativeDepth, values, condition);
if (IsVoid(type)) {
if (!f.brIf(relativeDepth, nullptr, condition)) {
return false;
}
} else {
if (!f.brIf(relativeDepth, value, condition)) {
return false;
}
}
return true;
}
static bool EmitBrTable(FunctionCompiler& f) {
Uint32Vector depths;
uint32_t defaultDepth;
ResultType branchValueType;
DefVector branchValues;
ExprType branchValueType;
MDefinition* branchValue;
MDefinition* index;
if (!f.iter().readBrTable(&depths, &defaultDepth, &branchValueType,
&branchValues, &index)) {
&branchValue, &index)) {
return false;
}
@ -1906,19 +1937,24 @@ static bool EmitBrTable(FunctionCompiler& f) {
}
if (allSameDepth) {
return f.br(defaultDepth, branchValues);
return f.br(defaultDepth, branchValue);
}
return f.brTable(index, defaultDepth, depths, branchValues);
return f.brTable(index, defaultDepth, depths, branchValue);
}
static bool EmitReturn(FunctionCompiler& f) {
DefVector values;
if (!f.iter().readReturn(&values)) {
MDefinition* value;
if (!f.iter().readReturn(&value)) {
return false;
}
f.returnValues(values);
if (f.funcType().results().length() == 0) {
f.returnVoid();
return true;
}
f.returnExpr(value);
return true;
}
@ -1931,6 +1967,8 @@ static bool EmitUnreachable(FunctionCompiler& f) {
return true;
}
typedef IonOpIter::ValueVector DefVector;
static bool EmitCallArgs(FunctionCompiler& f, const FuncType& funcType,
const DefVector& args, CallCompileState* call) {
for (size_t i = 0, n = funcType.args().length(); i < n; ++i) {

File diff suppressed because it is too large Load Diff

View File

@ -39,93 +39,6 @@ typedef Vector<jit::MIRType, 8, SystemAllocPolicy> MIRTypeVector;
typedef jit::ABIArgIter<MIRTypeVector> ABIArgMIRTypeIter;
typedef jit::ABIArgIter<ValTypeVector> ABIArgValTypeIter;
/*****************************************************************************/
// ABIResultIter implementation
static uint32_t ResultStackSize(ValType type) {
switch (type.code()) {
case ValType::I32:
return ABIResult::StackSizeOfInt32;
case ValType::I64:
return ABIResult::StackSizeOfInt64;
case ValType::F32:
return ABIResult::StackSizeOfFloat;
case ValType::F64:
return ABIResult::StackSizeOfDouble;
case ValType::Ref:
case ValType::FuncRef:
case ValType::AnyRef:
return ABIResult::StackSizeOfPtr;
case ValType::NullRef:
default:
MOZ_CRASH("Unexpected result type");
}
}
uint32_t ABIResult::size() const { return ResultStackSize(type()); }
void ABIResultIter::settleRegister(ValType type) {
MOZ_ASSERT(!done());
MOZ_ASSERT(index() < RegisterResultCount);
static_assert(RegisterResultCount == 1, "expected a single register result");
switch (type.code()) {
case ValType::I32:
cur_ = ABIResult(type, ReturnReg);
break;
case ValType::I64:
cur_ = ABIResult(type, ReturnReg64);
break;
case ValType::F32:
cur_ = ABIResult(type, ReturnFloat32Reg);
break;
case ValType::F64:
cur_ = ABIResult(type, ReturnDoubleReg);
break;
case ValType::Ref:
case ValType::FuncRef:
case ValType::AnyRef:
cur_ = ABIResult(type, ReturnReg);
break;
case ValType::NullRef:
default:
MOZ_CRASH("Unexpected result type");
}
}
void ABIResultIter::settleNext() {
MOZ_ASSERT(direction_ == Next);
MOZ_ASSERT(!done());
uint32_t typeIndex = count_ - index_ - 1;
ValType type = type_[typeIndex];
if (index_ < RegisterResultCount) {
settleRegister(type);
return;
}
cur_ = ABIResult(type, nextStackOffset_);
nextStackOffset_ += ResultStackSize(type);
}
void ABIResultIter::settlePrev() {
MOZ_ASSERT(direction_ == Prev);
MOZ_ASSERT(!done());
uint32_t typeIndex = index_;
ValType type = type_[typeIndex];
if (count_ - index_ - 1 < RegisterResultCount) {
settleRegister(type);
return;
}
uint32_t size = ResultStackSize(type);
MOZ_ASSERT(nextStackOffset_ >= size);
nextStackOffset_ -= size;
cur_ = ABIResult(type, nextStackOffset_);
}
#ifdef WASM_CODEGEN_DEBUG
template <class Closure>
static void GenPrint(DebugChannel channel, MacroAssembler& masm,

View File

@ -20,227 +20,10 @@
#define wasm_stubs_h
#include "wasm/WasmGenerator.h"
#include "wasm/WasmOpIter.h"
namespace js {
namespace wasm {
// ValType and location for a single result: either in a register or on the
// stack.
class ABIResult {
ValType type_;
enum class Location { Gpr, Gpr64, Fpr, Stack } loc_;
union {
Register gpr_;
Register64 gpr64_;
FloatRegister fpr_;
uint32_t stackOffset_;
};
void validate() {
#ifdef DEBUG
if (onStack()) {
return;
}
MOZ_ASSERT(inRegister());
switch (type_.code()) {
case ValType::I32:
MOZ_ASSERT(loc_ == Location::Gpr);
break;
case ValType::I64:
MOZ_ASSERT(loc_ == Location::Gpr64);
break;
case ValType::F32:
case ValType::F64:
MOZ_ASSERT(loc_ == Location::Fpr);
break;
case ValType::AnyRef:
case ValType::FuncRef:
case ValType::Ref:
MOZ_ASSERT(loc_ == Location::Gpr);
break;
default:
MOZ_CRASH("bad value type");
}
#endif
}
friend class ABIResultIter;
ABIResult(){};
public:
// Sizes of items in the stack area.
//
// The size values come from the implementations of Push() in
// MacroAssembler-x86-shared.cpp and MacroAssembler-arm-shared.cpp, and from
// VFPRegister::size() in Architecture-arm.h.
//
// On ARM, unlike on x86, we push a single-precision (32-bit) slot for a float.
static constexpr size_t StackSizeOfPtr = sizeof(intptr_t);
static constexpr size_t StackSizeOfInt32 = StackSizeOfPtr;
static constexpr size_t StackSizeOfInt64 = sizeof(int64_t);
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
static constexpr size_t StackSizeOfFloat = sizeof(float);
#else
static constexpr size_t StackSizeOfFloat = sizeof(double);
#endif
static constexpr size_t StackSizeOfDouble = sizeof(double);
ABIResult(ValType type, Register gpr)
: type_(type), loc_(Location::Gpr), gpr_(gpr) {
validate();
}
ABIResult(ValType type, Register64 gpr64)
: type_(type), loc_(Location::Gpr64), gpr64_(gpr64) {
validate();
}
ABIResult(ValType type, FloatRegister fpr)
: type_(type), loc_(Location::Fpr), fpr_(fpr) {
validate();
}
ABIResult(ValType type, uint32_t stackOffset)
: type_(type), loc_(Location::Stack), stackOffset_(stackOffset) {
validate();
}
ValType type() const { return type_; }
bool onStack() const { return loc_ == Location::Stack; }
bool inRegister() const { return !onStack(); }
Register gpr() const {
MOZ_ASSERT(loc_ == Location::Gpr);
return gpr_;
}
Register64 gpr64() const {
MOZ_ASSERT(loc_ == Location::Gpr64);
return gpr64_;
}
FloatRegister fpr() const {
MOZ_ASSERT(loc_ == Location::Fpr);
return fpr_;
}
// Offset from SP.
uint32_t stackOffset() const {
MOZ_ASSERT(loc_ == Location::Stack);
return stackOffset_;
}
uint32_t size() const;
};
// Just as WebAssembly functions can take multiple arguments, they can also
// return multiple results. As with a call, a limited number of results will be
// located in registers, and the rest will be stored in a stack area. The
// |ABIResultIter| computes result locations, given a |ResultType|.
//
// Recall that a |ResultType| represents a sequence of value types t1..tN,
// indexed from 1 to N. In principle it doesn't matter how we decide which
// results get to be in registers and which go to the stack. To better
// harmonize with WebAssembly's abstract stack machine, whose properties are
// taken advantage of by the baseline compiler, our strategy is to start
// allocating result locations in "reverse" order: from result N down to 1.
//
// If a result with index I is in a register, then all results with index J > I
// are also in registers. If a result I is on the stack, then all results with
// index K < I are also on the stack, farther away from the stack pointer than
// result I.
//
// Currently only a single result is ever stored in a register, though this may
// change in the future on register-rich platforms.
//
// NB: The baseline compiler also uses this ABI for locations of block
// parameters and return values, within individual WebAssembly functions.
class ABIResultIter {
ResultType type_;
uint32_t count_;
uint32_t index_;
uint32_t nextStackOffset_;
enum { Next, Prev } direction_;
ABIResult cur_;
void settleRegister(ValType type);
void settleNext();
void settlePrev();
static constexpr size_t RegisterResultCount = 1;
public:
explicit ABIResultIter(const ResultType& type)
: type_(type), count_(type.length()) {
reset();
}
void reset() {
index_ = nextStackOffset_ = 0;
direction_ = Next;
if (!done()) {
settleNext();
}
}
bool done() const { return index_ == count_; }
uint32_t index() const { return index_; }
uint32_t count() const { return count_; }
uint32_t remaining() const { return count_ - index_; }
void switchToNext() {
MOZ_ASSERT(direction_ == Prev);
if (!done() && cur().onStack()) {
nextStackOffset_ += cur().size();
}
index_ = count_ - index_;
direction_ = Next;
if (!done()) {
settleNext();
}
}
void switchToPrev() {
MOZ_ASSERT(direction_ == Next);
if (!done() && cur().onStack()) {
nextStackOffset_ -= cur().size();
}
index_ = count_ - index_;
direction_ = Prev;
if (!done()) settlePrev();
}
void next() {
MOZ_ASSERT(direction_ == Next);
MOZ_ASSERT(!done());
index_++;
if (!done()) {
settleNext();
}
}
void prev() {
MOZ_ASSERT(direction_ == Prev);
MOZ_ASSERT(!done());
index_++;
if (!done()) {
settlePrev();
}
}
const ABIResult& cur() const {
MOZ_ASSERT(!done());
return cur_;
}
uint32_t stackBytesConsumedSoFar() const { return nextStackOffset_; }
static inline bool HasStackResults(const ResultType& type) {
return type.length() > RegisterResultCount;
}
static uint32_t MeasureStackBytes(const ResultType& type) {
if (!HasStackResults(type)) {
return 0;
}
ABIResultIter iter(type);
while (!iter.done()) {
iter.next();
}
return iter.stackBytesConsumedSoFar();
}
};
extern bool GenerateBuiltinThunk(jit::MacroAssembler& masm,
jit::ABIFunctionType abiType,
ExitReason exitReason, void* funcPtr,

View File

@ -6178,23 +6178,12 @@ static bool EncodeExprList(Encoder& e, const AstExprVector& v) {
return true;
}
static bool EncodeBlockType(Encoder& e, AstExprType& t) {
ExprType type = t.type();
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
MOZ_ASSERT(size_t(type.code()) < size_t(TypeCode::Limit));
if (type.isRef()) {
return e.writeFixedU8(uint8_t(ExprType::Ref)) &&
e.writeVarU32(type.refTypeIndex());
}
return e.writeFixedU8(uint8_t(type.code()));
}
static bool EncodeBlock(Encoder& e, AstBlock& b) {
if (!e.writeOp(b.op())) {
return false;
}
if (!EncodeBlockType(e, b.type())) {
if (!e.writeBlockType(b.type().type())) {
return false;
}
@ -6379,7 +6368,7 @@ static bool EncodeIf(Encoder& e, AstIf& i) {
return false;
}
if (!EncodeBlockType(e, i.type())) {
if (!e.writeBlockType(i.type().type())) {
return false;
}

View File

@ -227,6 +227,21 @@ uint8_t* FuncType::serialize(uint8_t* cursor) const {
return cursor;
}
namespace js {
namespace wasm {
// ExprType is not POD while ReadScalar requires POD, so specialize.
template <>
inline const uint8_t* ReadScalar<ExprType>(const uint8_t* src, ExprType* dst) {
static_assert(sizeof(PackedTypeCode) == sizeof(ExprType),
"ExprType must carry only a PackedTypeCode");
memcpy(dst->packedPtr(), src, sizeof(PackedTypeCode));
return src + sizeof(*dst);
}
} // namespace wasm
} // namespace js
const uint8_t* FuncType::deserialize(const uint8_t* cursor) {
cursor = DeserializePodVector(cursor, &results_);
if (!cursor) {

View File

@ -213,28 +213,26 @@ static_assert(std::is_pod<PackedTypeCode>::value,
"must be POD to be simply serialized/deserialized");
const uint32_t NoTypeCode = 0xFF; // Only use these
const uint32_t NoRefTypeIndex = 0x3FFFFF; // with PackedTypeCode
const uint32_t NoRefTypeIndex = 0xFFFFFF; // with PackedTypeCode
static inline PackedTypeCode InvalidPackedTypeCode() {
return PackedTypeCode((NoRefTypeIndex << 8) | NoTypeCode);
}
static inline PackedTypeCode PackTypeCode(TypeCode tc) {
MOZ_ASSERT(uint32_t(tc) <= 0xFF);
MOZ_ASSERT(tc != TypeCode::Ref);
return PackedTypeCode((NoRefTypeIndex << 8) | uint32_t(tc));
}
static inline PackedTypeCode PackTypeCode(TypeCode tc, uint32_t refTypeIndex) {
MOZ_ASSERT(uint32_t(tc) <= 0xFF);
MOZ_ASSERT_IF(tc != TypeCode::Ref, refTypeIndex == NoRefTypeIndex);
MOZ_ASSERT_IF(tc == TypeCode::Ref, refTypeIndex <= MaxTypes);
// A PackedTypeCode should be representable in a single word, so in the
// smallest case, 32 bits. However sometimes 2 bits of the word may be taken
// by a pointer tag; for that reason, limit to 30 bits; and then there's the
// 8-bit typecode, so 22 bits left for the type index.
static_assert(MaxTypes < (1 << (30 - 8)), "enough bits");
static_assert(MaxTypes < (1 << (32 - 8)), "enough bits");
return PackedTypeCode((refTypeIndex << 8) | uint32_t(tc));
}
static inline PackedTypeCode PackTypeCode(TypeCode tc) {
return PackTypeCode(tc, NoRefTypeIndex);
}
static inline PackedTypeCode InvalidPackedTypeCode() {
return PackedTypeCode(NoTypeCode);
}
static inline PackedTypeCode PackedTypeCodeFromBits(uint32_t bits) {
return PackTypeCode(TypeCode(bits & 255), bits >> 8);
}

View File

@ -444,24 +444,15 @@ bool wasm::DecodeValidatedLocalEntries(Decoder& d, ValTypeVector* locals) {
// Function body validation.
class NothingVector {
Nothing unused_;
public:
bool resize(size_t length) { return true; }
Nothing& operator[](size_t) { return unused_; }
Nothing& back() { return unused_; }
};
struct ValidatingPolicy {
typedef Nothing Value;
typedef NothingVector ValueVector;
typedef Nothing ControlItem;
};
typedef OpIter<ValidatingPolicy> ValidatingOpIter;
static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
// FIXME(1401675): Replace with BlockType.
uint32_t funcIndex,
const ValTypeVector& locals,
const uint8_t* bodyEnd, Decoder* d) {
@ -482,13 +473,12 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
}
Nothing nothing;
NothingVector nothings;
ResultType unusedType;
switch (op.b0) {
case uint16_t(Op::End): {
LabelKind unusedKind;
if (!iter.readEnd(&unusedKind, &unusedType, &nothings)) {
ExprType unusedType;
if (!iter.readEnd(&unusedKind, &unusedType, &nothing)) {
return false;
}
iter.popEnd();
@ -503,12 +493,12 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
CHECK(iter.readDrop());
case uint16_t(Op::Call): {
uint32_t unusedIndex;
NothingVector unusedArgs;
ValidatingOpIter::ValueVector unusedArgs;
CHECK(iter.readCall(&unusedIndex, &unusedArgs));
}
case uint16_t(Op::CallIndirect): {
uint32_t unusedIndex, unusedIndex2;
NothingVector unusedArgs;
ValidatingOpIter::ValueVector unusedArgs;
CHECK(iter.readCallIndirect(&unusedIndex, &unusedIndex2, &nothing,
&unusedArgs));
}
@ -578,13 +568,15 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
&nothing));
}
case uint16_t(Op::Block):
CHECK(iter.readBlock(&unusedType));
CHECK(iter.readBlock());
case uint16_t(Op::Loop):
CHECK(iter.readLoop(&unusedType));
CHECK(iter.readLoop());
case uint16_t(Op::If):
CHECK(iter.readIf(&unusedType, &nothing));
case uint16_t(Op::Else):
CHECK(iter.readElse(&unusedType, &unusedType, &nothings));
CHECK(iter.readIf(&nothing));
case uint16_t(Op::Else): {
ExprType type;
CHECK(iter.readElse(&type, &nothing));
}
case uint16_t(Op::I32Clz):
case uint16_t(Op::I32Ctz):
case uint16_t(Op::I32Popcnt):
@ -823,20 +815,23 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
CHECK(iter.readMemorySize());
case uint16_t(Op::Br): {
uint32_t unusedDepth;
CHECK(iter.readBr(&unusedDepth, &unusedType, &nothings));
ExprType unusedType;
CHECK(iter.readBr(&unusedDepth, &unusedType, &nothing));
}
case uint16_t(Op::BrIf): {
uint32_t unusedDepth;
CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothings, &nothing));
ExprType unusedType;
CHECK(iter.readBrIf(&unusedDepth, &unusedType, &nothing, &nothing));
}
case uint16_t(Op::BrTable): {
Uint32Vector unusedDepths;
uint32_t unusedDefault;
ExprType unusedType;
CHECK(iter.readBrTable(&unusedDepths, &unusedDefault, &unusedType,
&nothings, &nothing));
&nothing, &nothing));
}
case uint16_t(Op::Return):
CHECK(iter.readReturn(&nothings));
CHECK(iter.readReturn(&nothing));
case uint16_t(Op::Unreachable):
CHECK(iter.readUnreachable());
case uint16_t(Op::MiscPrefix): {
@ -963,7 +958,7 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
return iter.unrecognizedOpcode(&op);
}
uint32_t unusedUint;
NothingVector unusedArgs;
ValidatingOpIter::ValueVector unusedArgs;
CHECK(iter.readStructNew(&unusedUint, &unusedArgs));
}
case uint32_t(MiscOp::StructGet): {
@ -1215,8 +1210,10 @@ static bool DecodeFunctionBodyExprs(const ModuleEnvironment& env,
bool wasm::ValidateFunctionBody(const ModuleEnvironment& env,
uint32_t funcIndex, uint32_t bodySize,
Decoder& d) {
const FuncType& funcType = *env.funcTypes[funcIndex];
ValTypeVector locals;
if (!locals.appendAll(env.funcTypes[funcIndex]->args())) {
if (!locals.appendAll(funcType.args())) {
return false;
}

View File

@ -396,6 +396,15 @@ class Encoder {
}
return writeFixedU8(uint8_t(type.code()));
}
MOZ_MUST_USE bool writeBlockType(ExprType type) {
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
MOZ_ASSERT(size_t(type.code()) < size_t(TypeCode::Limit));
if (type.isRef()) {
return writeFixedU8(uint8_t(ExprType::Ref)) &&
writeVarU32(type.refTypeIndex());
}
return writeFixedU8(uint8_t(type.code()));
}
MOZ_MUST_USE bool writeOp(Op op) {
static_assert(size_t(Op::Limit) == 256, "fits");
MOZ_ASSERT(size_t(op) < size_t(Op::Limit));
@ -617,16 +626,6 @@ class Decoder {
const uint8_t* begin() const { return beg_; }
const uint8_t* end() const { return end_; }
// Peek at the next byte, if it exists, without advancing the position.
bool peekByte(uint8_t* byte) {
if (done()) {
return false;
}
*byte = *cur_;
return true;
}
// Fixed-size encoding operations simply copy the literal bytes (without
// attempting to align).
@ -709,6 +708,20 @@ class Decoder {
}
return true;
}
MOZ_MUST_USE bool readBlockType(uint8_t* code, uint32_t* refTypeIndex) {
static_assert(size_t(TypeCode::Limit) <= UINT8_MAX, "fits");
if (!readFixedU8(code)) {
return false;
}
if (*code == uint8_t(TypeCode::Ref)) {
if (!readVarU32(refTypeIndex)) {
return false;
}
} else {
*refTypeIndex = NoRefTypeIndex;
}
return true;
}
MOZ_MUST_USE bool readOp(OpBytes* op) {
static_assert(size_t(Op::Limit) == 256, "fits");
uint8_t u8;