diff --git a/js/src/asmjs/AsmJS.cpp b/js/src/asmjs/AsmJS.cpp index e333e4d87776..384c8a9f0f04 100644 --- a/js/src/asmjs/AsmJS.cpp +++ b/js/src/asmjs/AsmJS.cpp @@ -2687,17 +2687,16 @@ class MOZ_STACK_CLASS FunctionValidator return fg_.addSourceCoords(encoder().bytecodeOffset(), line, column); } - template MOZ_WARN_UNUSED_RESULT - bool writeOp(T op) { - static_assert(sizeof(T) == sizeof(uint8_t), "opcodes must be uint8"); + bool writeOp(Expr op) { + static_assert(sizeof(Expr) == sizeof(uint8_t), "opcodes must be uint8"); return encoder().writeU8(uint8_t(op)); } MOZ_WARN_UNUSED_RESULT bool writeDebugCheckPoint() { #ifdef DEBUG - return writeOp(Stmt::DebugCheckPoint); + return writeOp(Expr::DebugCheckPoint); #endif return true; } @@ -2716,7 +2715,7 @@ class MOZ_STACK_CLASS FunctionValidator } MOZ_WARN_UNUSED_RESULT bool writeInt32Lit(int32_t i) { - return writeOp(I32::Literal) && encoder().writeI32(i); + return writeOp(Expr::I32Literal) && encoder().writeI32(i); } MOZ_WARN_UNUSED_RESULT @@ -2727,25 +2726,24 @@ class MOZ_STACK_CLASS FunctionValidator case NumLit::BigUnsigned: return writeInt32Lit(lit.toInt32()); case NumLit::Float: - return writeOp(F32::Literal) && encoder().writeF32(lit.toFloat()); + return writeOp(Expr::F32Literal) && encoder().writeF32(lit.toFloat()); case NumLit::Double: - return writeOp(F64::Literal) && encoder().writeF64(lit.toDouble()); + return writeOp(Expr::F64Literal) && encoder().writeF64(lit.toDouble()); case NumLit::Int32x4: - return writeOp(I32X4::Literal) && encoder().writeI32X4(lit.simdValue().asInt32x4()); + return writeOp(Expr::I32X4Literal) && encoder().writeI32X4(lit.simdValue().asInt32x4()); case NumLit::Float32x4: - return writeOp(F32X4::Literal) && encoder().writeF32X4(lit.simdValue().asFloat32x4()); + return writeOp(Expr::F32X4Literal) && encoder().writeF32X4(lit.simdValue().asFloat32x4()); case NumLit::Bool32x4: // Boolean vectors use the Int32x4 memory representation. 
- return writeOp(B32X4::Literal) && encoder().writeI32X4(lit.simdValue().asInt32x4()); + return writeOp(Expr::B32X4Literal) && encoder().writeI32X4(lit.simdValue().asInt32x4()); case NumLit::OutOfRangeInt: break; } MOZ_CRASH("unexpected literal type"); } - template - void patchOp(size_t pos, T stmt) { - static_assert(sizeof(T) == sizeof(uint8_t), "opcodes must be uint8"); + void patchOp(size_t pos, Expr stmt) { + static_assert(sizeof(Expr) == sizeof(uint8_t), "opcodes must be uint8"); encoder().patchU8(pos, uint8_t(stmt)); } void patchU8(size_t pos, uint8_t u8) { @@ -2762,7 +2760,7 @@ class MOZ_STACK_CLASS FunctionValidator MOZ_WARN_UNUSED_RESULT bool tempU8(size_t* offset) { - return encoder().writeU8(uint8_t(Stmt::Bad), offset); + return encoder().writeU8(uint8_t(Expr::Bad), offset); } MOZ_WARN_UNUSED_RESULT bool tempOp(size_t* offset) { @@ -2770,20 +2768,20 @@ class MOZ_STACK_CLASS FunctionValidator } MOZ_WARN_UNUSED_RESULT bool temp32(size_t* offset) { - if (!encoder().writeU8(uint8_t(Stmt::Bad), offset)) + if (!encoder().writeU8(uint8_t(Expr::Bad), offset)) return false; for (size_t i = 1; i < 4; i++) { - if (!encoder().writeU8(uint8_t(Stmt::Bad))) + if (!encoder().writeU8(uint8_t(Expr::Bad))) return false; } return true; } MOZ_WARN_UNUSED_RESULT bool tempPtr(size_t* offset) { - if (!encoder().writeU8(uint8_t(Stmt::Bad), offset)) + if (!encoder().writeU8(uint8_t(Expr::Bad), offset)) return false; for (size_t i = 1; i < sizeof(intptr_t); i++) { - if (!encoder().writeU8(uint8_t(Stmt::Bad))) + if (!encoder().writeU8(uint8_t(Expr::Bad))) return false; } return true; @@ -3396,22 +3394,21 @@ CheckFinalReturn(FunctionValidator& f, ParseNode* lastNonEmptyStmt) { if (!f.hasAlreadyReturned()) { f.setReturnedType(ExprType::Void); - return f.writeOp(Stmt::Ret); + return f.writeOp(Expr::Ret); } if (!lastNonEmptyStmt->isKind(PNK_RETURN)) { if (!IsVoid(f.returnedType())) return f.fail(lastNonEmptyStmt, "void incompatible with previous return type"); - return 
f.writeOp(Stmt::Ret); + return f.writeOp(Expr::Ret); } return true; } -template static bool -SetLocal(FunctionValidator& f, Stmt exprStmt, T setLocal, NumLit lit) +SetLocal(FunctionValidator& f, Expr exprStmt, Expr setLocal, NumLit lit) { return f.writeOp(exprStmt) && f.writeOp(setLocal) && @@ -3445,32 +3442,32 @@ CheckVariable(FunctionValidator& f, ParseNode* var) case NumLit::Fixnum: case NumLit::NegativeInt: case NumLit::BigUnsigned: - if (lit.toInt32() != 0 && !SetLocal(f, Stmt::I32Expr, I32::SetLocal, lit)) + if (lit.toInt32() != 0 && !SetLocal(f, Expr::I32Expr, Expr::I32SetLocal, lit)) return false; break; case NumLit::Double: if ((lit.toDouble() != 0.0 || IsNegativeZero(lit.toDouble())) && - !SetLocal(f, Stmt::F64Expr, F64::SetLocal, lit)) + !SetLocal(f, Expr::F64Expr, Expr::F64SetLocal, lit)) return false; break; case NumLit::Float: if ((lit.toFloat() != 0.f || !IsNegativeZero(lit.toFloat())) && - !SetLocal(f, Stmt::F32Expr, F32::SetLocal, lit)) + !SetLocal(f, Expr::F32Expr, Expr::F32SetLocal, lit)) return false; break; case NumLit::Int32x4: if (lit.simdValue() != SimdConstant::SplatX4(0) && - !SetLocal(f, Stmt::I32X4Expr, I32X4::SetLocal, lit)) + !SetLocal(f, Expr::I32X4Expr, Expr::I32X4SetLocal, lit)) return false; break; case NumLit::Float32x4: if (lit.simdValue() != SimdConstant::SplatX4(0.f) && - !SetLocal(f, Stmt::F32X4Expr, F32X4::SetLocal, lit)) + !SetLocal(f, Expr::F32X4Expr, Expr::F32X4SetLocal, lit)) return false; break; case NumLit::Bool32x4: if (lit.simdValue() != SimdConstant::SplatX4(0) && - !SetLocal(f, Stmt::B32X4Expr, B32X4::SetLocal, lit)) + !SetLocal(f, Expr::B32X4Expr, Expr::B32X4SetLocal, lit)) return false; break; case NumLit::OutOfRangeInt: @@ -3516,13 +3513,13 @@ CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type) if (const FunctionValidator::Local* local = f.lookupLocal(name)) { switch (local->type) { - case ValType::I32: if (!f.writeOp(I32::GetLocal)) return false; break; + case ValType::I32: if 
(!f.writeOp(Expr::I32GetLocal)) return false; break; case ValType::I64: MOZ_CRASH("no int64 in asm.js"); - case ValType::F32: if (!f.writeOp(F32::GetLocal)) return false; break; - case ValType::F64: if (!f.writeOp(F64::GetLocal)) return false; break; - case ValType::I32x4: if (!f.writeOp(I32X4::GetLocal)) return false; break; - case ValType::F32x4: if (!f.writeOp(F32X4::GetLocal)) return false; break; - case ValType::B32x4: if (!f.writeOp(B32X4::GetLocal)) return false; break; + case ValType::F32: if (!f.writeOp(Expr::F32GetLocal)) return false; break; + case ValType::F64: if (!f.writeOp(Expr::F64GetLocal)) return false; break; + case ValType::I32x4: if (!f.writeOp(Expr::I32X4GetLocal)) return false; break; + case ValType::F32x4: if (!f.writeOp(Expr::F32X4GetLocal)) return false; break; + case ValType::B32x4: if (!f.writeOp(Expr::B32X4GetLocal)) return false; break; } if (!f.writeU32(local->slot)) return false; @@ -3539,12 +3536,12 @@ CheckVarRef(FunctionValidator& f, ParseNode* varRef, Type* type) case ModuleValidator::Global::Variable: { *type = global->varOrConstType(); switch (type->which()) { - case Type::Int: if (!f.writeOp(I32::GetGlobal)) return false; break; - case Type::Double: if (!f.writeOp(F64::GetGlobal)) return false; break; - case Type::Float: if (!f.writeOp(F32::GetGlobal)) return false; break; - case Type::Int32x4: if (!f.writeOp(I32X4::GetGlobal)) return false; break; - case Type::Float32x4: if (!f.writeOp(F32X4::GetGlobal)) return false; break; - case Type::Bool32x4: if (!f.writeOp(B32X4::GetGlobal)) return false; break; + case Type::Int: if (!f.writeOp(Expr::I32GetGlobal)) return false; break; + case Type::Double: if (!f.writeOp(Expr::F64GetGlobal)) return false; break; + case Type::Float: if (!f.writeOp(Expr::F32GetGlobal)) return false; break; + case Type::Int32x4: if (!f.writeOp(Expr::I32X4GetGlobal)) return false; break; + case Type::Float32x4: if (!f.writeOp(Expr::F32X4GetGlobal)) return false; break; + case Type::Bool32x4: if 
(!f.writeOp(Expr::B32X4GetGlobal)) return false; break; default: MOZ_CRASH("unexpected global type"); } if (!f.writeU32(global->varOrConstGlobalDataOffset())) @@ -3702,11 +3699,11 @@ CheckAndPrepareArrayAccess(FunctionValidator& f, ParseNode* viewName, ParseNode* // Don't generate the mask op if there is no need for it which could happen for // a shift of zero or a SIMD access. if (*mask != NoMask) { - f.patchOp(prepareAt, I32::BitAnd); + f.patchOp(prepareAt, Expr::I32BitAnd); return f.writeInt32Lit(*mask); } - f.patchOp(prepareAt, I32::Id); + f.patchOp(prepareAt, Expr::I32Id); return true; } @@ -3726,14 +3723,14 @@ CheckLoadArray(FunctionValidator& f, ParseNode* elem, Type* type) return false; switch (viewType) { - case Scalar::Int8: f.patchOp(opcodeAt, I32::SLoad8); break; - case Scalar::Int16: f.patchOp(opcodeAt, I32::SLoad16); break; - case Scalar::Int32: f.patchOp(opcodeAt, I32::SLoad32); break; - case Scalar::Uint8: f.patchOp(opcodeAt, I32::ULoad8); break; - case Scalar::Uint16: f.patchOp(opcodeAt, I32::ULoad16); break; - case Scalar::Uint32: f.patchOp(opcodeAt, I32::ULoad32); break; - case Scalar::Float32: f.patchOp(opcodeAt, F32::Load); break; - case Scalar::Float64: f.patchOp(opcodeAt, F64::Load); break; + case Scalar::Int8: f.patchOp(opcodeAt, Expr::I32SLoad8); break; + case Scalar::Int16: f.patchOp(opcodeAt, Expr::I32SLoad16); break; + case Scalar::Int32: f.patchOp(opcodeAt, Expr::I32SLoad32); break; + case Scalar::Uint8: f.patchOp(opcodeAt, Expr::I32ULoad8); break; + case Scalar::Uint16: f.patchOp(opcodeAt, Expr::I32ULoad16); break; + case Scalar::Uint32: f.patchOp(opcodeAt, Expr::I32ULoad32); break; + case Scalar::Float32: f.patchOp(opcodeAt, Expr::F32Load); break; + case Scalar::Float64: f.patchOp(opcodeAt, Expr::F64Load); break; default: MOZ_CRASH("unexpected scalar type"); } @@ -3803,27 +3800,27 @@ CheckStoreArray(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type switch (viewType) { case Scalar::Int8: case Scalar::Uint8: - 
f.patchOp(opcodeAt, I32::Store8); + f.patchOp(opcodeAt, Expr::I32Store8); break; case Scalar::Int16: case Scalar::Uint16: - f.patchOp(opcodeAt, I32::Store16); + f.patchOp(opcodeAt, Expr::I32Store16); break; case Scalar::Int32: case Scalar::Uint32: - f.patchOp(opcodeAt, I32::Store32); + f.patchOp(opcodeAt, Expr::I32Store32); break; case Scalar::Float32: if (rhsType.isFloatish()) - f.patchOp(opcodeAt, F32::StoreF32); + f.patchOp(opcodeAt, Expr::F32StoreF32); else - f.patchOp(opcodeAt, F64::StoreF32); + f.patchOp(opcodeAt, Expr::F64StoreF32); break; case Scalar::Float64: if (rhsType.isFloatish()) - f.patchOp(opcodeAt, F32::StoreF64); + f.patchOp(opcodeAt, Expr::F32StoreF64); else - f.patchOp(opcodeAt, F64::StoreF64); + f.patchOp(opcodeAt, Expr::F64StoreF64); break; default: MOZ_CRASH("unexpected scalar type"); } @@ -3855,13 +3852,13 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type } switch (lhsVar->type) { - case ValType::I32: f.patchOp(opcodeAt, I32::SetLocal); break; + case ValType::I32: f.patchOp(opcodeAt, Expr::I32SetLocal); break; case ValType::I64: MOZ_CRASH("no int64 in asm.js"); - case ValType::F64: f.patchOp(opcodeAt, F64::SetLocal); break; - case ValType::F32: f.patchOp(opcodeAt, F32::SetLocal); break; - case ValType::I32x4: f.patchOp(opcodeAt, I32X4::SetLocal); break; - case ValType::F32x4: f.patchOp(opcodeAt, F32X4::SetLocal); break; - case ValType::B32x4: f.patchOp(opcodeAt, B32X4::SetLocal); break; + case ValType::F64: f.patchOp(opcodeAt, Expr::F64SetLocal); break; + case ValType::F32: f.patchOp(opcodeAt, Expr::F32SetLocal); break; + case ValType::I32x4: f.patchOp(opcodeAt, Expr::I32X4SetLocal); break; + case ValType::F32x4: f.patchOp(opcodeAt, Expr::F32X4SetLocal); break; + case ValType::B32x4: f.patchOp(opcodeAt, Expr::B32X4SetLocal); break; } f.patch32(indexAt, lhsVar->slot); @@ -3879,12 +3876,12 @@ CheckAssignName(FunctionValidator& f, ParseNode* lhs, ParseNode* rhs, Type* type } switch 
(global->varOrConstType().which()) { - case Type::Int: f.patchOp(opcodeAt, I32::SetGlobal); break; - case Type::Float: f.patchOp(opcodeAt, F32::SetGlobal); break; - case Type::Double: f.patchOp(opcodeAt, F64::SetGlobal); break; - case Type::Int32x4: f.patchOp(opcodeAt, I32X4::SetGlobal); break; - case Type::Float32x4: f.patchOp(opcodeAt, F32X4::SetGlobal); break; - case Type::Bool32x4: f.patchOp(opcodeAt, B32X4::SetGlobal); break; + case Type::Int: f.patchOp(opcodeAt, Expr::I32SetGlobal); break; + case Type::Float: f.patchOp(opcodeAt, Expr::F32SetGlobal); break; + case Type::Double: f.patchOp(opcodeAt, Expr::F64SetGlobal); break; + case Type::Int32x4: f.patchOp(opcodeAt, Expr::I32X4SetGlobal); break; + case Type::Float32x4: f.patchOp(opcodeAt, Expr::F32X4SetGlobal); break; + case Type::Bool32x4: f.patchOp(opcodeAt, Expr::B32X4SetGlobal); break; default: MOZ_CRASH("unexpected global type"); } @@ -3922,7 +3919,7 @@ CheckMathIMul(FunctionValidator& f, ParseNode* call, Type* type) ParseNode* lhs = CallArgList(call); ParseNode* rhs = NextNode(lhs); - if (!f.writeOp(I32::Mul)) + if (!f.writeOp(Expr::I32Mul)) return false; Type lhsType; @@ -3948,7 +3945,7 @@ CheckMathClz32(FunctionValidator& f, ParseNode* call, Type* type) if (CallArgListLength(call) != 1) return f.fail(call, "Math.clz32 must be passed 1 argument"); - if (!f.writeOp(I32::Clz)) + if (!f.writeOp(Expr::I32Clz)) return false; ParseNode* arg = CallArgList(call); @@ -3981,19 +3978,19 @@ CheckMathAbs(FunctionValidator& f, ParseNode* call, Type* type) return false; if (argType.isSigned()) { - f.patchOp(opcodeAt, I32::Abs); + f.patchOp(opcodeAt, Expr::I32Abs); *type = Type::Unsigned; return true; } if (argType.isMaybeDouble()) { - f.patchOp(opcodeAt, F64::Abs); + f.patchOp(opcodeAt, Expr::F64Abs); *type = Type::Double; return true; } if (argType.isMaybeFloat()) { - f.patchOp(opcodeAt, F32::Abs); + f.patchOp(opcodeAt, Expr::F32Abs); *type = Type::Floatish; return true; } @@ -4018,13 +4015,13 @@ 
CheckMathSqrt(FunctionValidator& f, ParseNode* call, Type* type) return false; if (argType.isMaybeDouble()) { - f.patchOp(opcodeAt, F64::Sqrt); + f.patchOp(opcodeAt, Expr::F64Sqrt); *type = Type::Double; return true; } if (argType.isMaybeFloat()) { - f.patchOp(opcodeAt, F32::Sqrt); + f.patchOp(opcodeAt, Expr::F32Sqrt); *type = Type::Floatish; return true; } @@ -4051,15 +4048,15 @@ CheckMathMinMax(FunctionValidator& f, ParseNode* callNode, bool isMax, Type* typ if (firstType.isMaybeDouble()) { *type = Type::Double; firstType = Type::MaybeDouble; - f.patchOp(opcodeAt, isMax ? F64::Max : F64::Min); + f.patchOp(opcodeAt, isMax ? Expr::F64Max : Expr::F64Min); } else if (firstType.isMaybeFloat()) { *type = Type::Float; firstType = Type::MaybeFloat; - f.patchOp(opcodeAt, isMax ? F32::Max : F32::Min); + f.patchOp(opcodeAt, isMax ? Expr::F32Max : Expr::F32Min); } else if (firstType.isSigned()) { *type = Type::Signed; firstType = Type::Signed; - f.patchOp(opcodeAt, isMax ? I32::Max : I32::Min); + f.patchOp(opcodeAt, isMax ? Expr::I32Max : Expr::I32Min); } else { return f.failf(firstArg, "%s is not a subtype of double?, float? 
or signed", firstType.toChars()); @@ -4117,12 +4114,11 @@ CheckAtomicsFence(FunctionValidator& f, ParseNode* call, Type* type) return f.fail(call, "Atomics.fence must be passed 0 arguments"); *type = Type::Void; - return f.writeOp(Stmt::AtomicsFence); + return f.writeOp(Expr::AtomicsFence); } -template static bool -WriteAtomicOperator(FunctionValidator& f, T opcode, size_t* needsBoundsCheckAt, size_t* viewTypeAt) +WriteAtomicOperator(FunctionValidator& f, Expr opcode, size_t* needsBoundsCheckAt, size_t* viewTypeAt) { return f.writeOp(opcode) && f.tempU8(needsBoundsCheckAt) && @@ -4140,7 +4136,7 @@ CheckAtomicsLoad(FunctionValidator& f, ParseNode* call, Type* type) size_t needsBoundsCheckAt; size_t viewTypeAt; - if (!WriteAtomicOperator(f, I32::AtomicsLoad, &needsBoundsCheckAt, &viewTypeAt)) + if (!WriteAtomicOperator(f, Expr::I32AtomicsLoad, &needsBoundsCheckAt, &viewTypeAt)) return false; Scalar::Type viewType; @@ -4168,7 +4164,7 @@ CheckAtomicsStore(FunctionValidator& f, ParseNode* call, Type* type) size_t needsBoundsCheckAt; size_t viewTypeAt; - if (!WriteAtomicOperator(f, I32::AtomicsStore, &needsBoundsCheckAt, &viewTypeAt)) + if (!WriteAtomicOperator(f, Expr::I32AtomicsStore, &needsBoundsCheckAt, &viewTypeAt)) return false; Scalar::Type viewType; @@ -4203,7 +4199,7 @@ CheckAtomicsBinop(FunctionValidator& f, ParseNode* call, Type* type, AtomicOp op size_t needsBoundsCheckAt; size_t viewTypeAt; - if (!WriteAtomicOperator(f, I32::AtomicsBinOp, &needsBoundsCheckAt, &viewTypeAt)) + if (!WriteAtomicOperator(f, Expr::I32AtomicsBinOp, &needsBoundsCheckAt, &viewTypeAt)) return false; if (!f.writeU8(uint8_t(op))) return false; @@ -4257,7 +4253,7 @@ CheckAtomicsCompareExchange(FunctionValidator& f, ParseNode* call, Type* type) size_t needsBoundsCheckAt; size_t viewTypeAt; - if (!WriteAtomicOperator(f, I32::AtomicsCompareExchange, &needsBoundsCheckAt, &viewTypeAt)) + if (!WriteAtomicOperator(f, Expr::I32AtomicsCompareExchange, &needsBoundsCheckAt, &viewTypeAt)) return 
false; Scalar::Type viewType; @@ -4299,7 +4295,7 @@ CheckAtomicsExchange(FunctionValidator& f, ParseNode* call, Type* type) size_t needsBoundsCheckAt; size_t viewTypeAt; - if (!WriteAtomicOperator(f, I32::AtomicsExchange, &needsBoundsCheckAt, &viewTypeAt)) + if (!WriteAtomicOperator(f, Expr::I32AtomicsExchange, &needsBoundsCheckAt, &viewTypeAt)) return false; Scalar::Type viewType; @@ -4432,14 +4428,14 @@ CheckInternalCall(FunctionValidator& f, ParseNode* callNode, PropertyName* calle ExprType ret, Type* type) { switch (ret) { - case ExprType::Void: if (!f.writeOp(Stmt::CallInternal)) return false; break; - case ExprType::I32: if (!f.writeOp(I32::CallInternal)) return false; break; + case ExprType::Void: if (!f.writeOp(Expr::CallInternal)) return false; break; + case ExprType::I32: if (!f.writeOp(Expr::I32CallInternal)) return false; break; case ExprType::I64: MOZ_CRASH("no int64 in asm.js"); - case ExprType::F32: if (!f.writeOp(F32::CallInternal)) return false; break; - case ExprType::F64: if (!f.writeOp(F64::CallInternal)) return false; break; - case ExprType::I32x4: if (!f.writeOp(I32X4::CallInternal)) return false; break; - case ExprType::F32x4: if (!f.writeOp(F32X4::CallInternal)) return false; break; - case ExprType::B32x4: if (!f.writeOp(B32X4::CallInternal)) return false; break; + case ExprType::F32: if (!f.writeOp(Expr::F32CallInternal)) return false; break; + case ExprType::F64: if (!f.writeOp(Expr::F64CallInternal)) return false; break; + case ExprType::I32x4: if (!f.writeOp(Expr::I32X4CallInternal)) return false; break; + case ExprType::F32x4: if (!f.writeOp(Expr::F32X4CallInternal)) return false; break; + case ExprType::B32x4: if (!f.writeOp(Expr::B32X4CallInternal)) return false; break; } // Function's index, to find out the function's entry @@ -4525,14 +4521,14 @@ CheckFuncPtrCall(FunctionValidator& f, ParseNode* callNode, ExprType ret, Type* // Opcode switch (ret) { - case ExprType::Void: if (!f.writeOp(Stmt::CallIndirect)) return false; break; - 
case ExprType::I32: if (!f.writeOp(I32::CallIndirect)) return false; break; + case ExprType::Void: if (!f.writeOp(Expr::CallIndirect)) return false; break; + case ExprType::I32: if (!f.writeOp(Expr::I32CallIndirect)) return false; break; case ExprType::I64: MOZ_CRASH("no in64 in asm.js"); - case ExprType::F32: if (!f.writeOp(F32::CallIndirect)) return false; break; - case ExprType::F64: if (!f.writeOp(F64::CallIndirect)) return false; break; - case ExprType::I32x4: if (!f.writeOp(I32X4::CallIndirect)) return false; break; - case ExprType::F32x4: if (!f.writeOp(F32X4::CallIndirect)) return false; break; - case ExprType::B32x4: if (!f.writeOp(B32X4::CallIndirect)) return false; break; + case ExprType::F32: if (!f.writeOp(Expr::F32CallIndirect)) return false; break; + case ExprType::F64: if (!f.writeOp(Expr::F64CallIndirect)) return false; break; + case ExprType::I32x4: if (!f.writeOp(Expr::I32X4CallIndirect)) return false; break; + case ExprType::F32x4: if (!f.writeOp(Expr::F32X4CallIndirect)) return false; break; + case ExprType::B32x4: if (!f.writeOp(Expr::B32X4CallIndirect)) return false; break; } // Table's mask if (!f.writeU32(mask)) @@ -4593,14 +4589,14 @@ CheckFFICall(FunctionValidator& f, ParseNode* callNode, unsigned ffiIndex, ExprT return f.fail(callNode, "FFI calls can't return SIMD values"); switch (ret) { - case ExprType::Void: if (!f.writeOp(Stmt::CallImport)) return false; break; - case ExprType::I32: if (!f.writeOp(I32::CallImport)) return false; break; + case ExprType::Void: if (!f.writeOp(Expr::CallImport)) return false; break; + case ExprType::I32: if (!f.writeOp(Expr::I32CallImport)) return false; break; case ExprType::I64: MOZ_CRASH("no int64 in asm.js"); - case ExprType::F32: if (!f.writeOp(F32::CallImport)) return false; break; - case ExprType::F64: if (!f.writeOp(F64::CallImport)) return false; break; - case ExprType::I32x4: if (!f.writeOp(I32X4::CallImport)) return false; break; - case ExprType::F32x4: if (!f.writeOp(F32X4::CallImport)) 
return false; break; - case ExprType::B32x4: if (!f.writeOp(B32X4::CallImport)) return false; break; + case ExprType::F32: if (!f.writeOp(Expr::F32CallImport)) return false; break; + case ExprType::F64: if (!f.writeOp(Expr::F64CallImport)) return false; break; + case ExprType::I32x4: if (!f.writeOp(Expr::I32X4CallImport)) return false; break; + case ExprType::F32x4: if (!f.writeOp(Expr::F32X4CallImport)) return false; break; + case ExprType::B32x4: if (!f.writeOp(Expr::B32X4CallImport)) return false; break; } // Global data offset @@ -4637,19 +4633,19 @@ CheckFloatCoercionArg(FunctionValidator& f, ParseNode* inputNode, Type inputType size_t opcodeAt) { if (inputType.isMaybeDouble()) { - f.patchOp(opcodeAt, F32::FromF64); + f.patchOp(opcodeAt, Expr::F32FromF64); return true; } if (inputType.isSigned()) { - f.patchOp(opcodeAt, F32::FromS32); + f.patchOp(opcodeAt, Expr::F32FromS32); return true; } if (inputType.isUnsigned()) { - f.patchOp(opcodeAt, F32::FromU32); + f.patchOp(opcodeAt, Expr::F32FromU32); return true; } if (inputType.isFloatish()) { - f.patchOp(opcodeAt, F32::Id); + f.patchOp(opcodeAt, Expr::F32Id); return true; } @@ -4685,17 +4681,17 @@ CheckCoercionArg(FunctionValidator& f, ParseNode* arg, ValType expected, Type* t case ValType::I32x4: if (!argType.isInt32x4()) return f.fail(arg, "argument to SIMD int32x4 coercion isn't int32x4"); - f.patchOp(opcodeAt, I32X4::Id); + f.patchOp(opcodeAt, Expr::I32X4Id); break; case ValType::F32x4: if (!argType.isFloat32x4()) return f.fail(arg, "argument to SIMD float32x4 coercion isn't float32x4"); - f.patchOp(opcodeAt, F32X4::Id); + f.patchOp(opcodeAt, Expr::F32X4Id); break; case ValType::B32x4: if (!argType.isBool32x4()) return f.fail(arg, "argument to SIMD bool32x4 coercion isn't bool32x4"); - f.patchOp(opcodeAt, B32X4::Id); + f.patchOp(opcodeAt, Expr::B32X4Id); break; case ValType::I32: case ValType::F64: @@ -4727,8 +4723,8 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin Type* 
type) { unsigned arity = 0; - F32 f32; - F64 f64; + Expr f32; + Expr f64; switch (func) { case AsmJSMathBuiltin_imul: return CheckMathIMul(f, callNode, type); case AsmJSMathBuiltin_clz32: return CheckMathClz32(f, callNode, type); @@ -4737,18 +4733,18 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin case AsmJSMathBuiltin_fround: return CheckMathFRound(f, callNode, type); case AsmJSMathBuiltin_min: return CheckMathMinMax(f, callNode, /* isMax = */ false, type); case AsmJSMathBuiltin_max: return CheckMathMinMax(f, callNode, /* isMax = */ true, type); - case AsmJSMathBuiltin_ceil: arity = 1; f64 = F64::Ceil; f32 = F32::Ceil; break; - case AsmJSMathBuiltin_floor: arity = 1; f64 = F64::Floor; f32 = F32::Floor; break; - case AsmJSMathBuiltin_sin: arity = 1; f64 = F64::Sin; f32 = F32::Bad; break; - case AsmJSMathBuiltin_cos: arity = 1; f64 = F64::Cos; f32 = F32::Bad; break; - case AsmJSMathBuiltin_tan: arity = 1; f64 = F64::Tan; f32 = F32::Bad; break; - case AsmJSMathBuiltin_asin: arity = 1; f64 = F64::Asin; f32 = F32::Bad; break; - case AsmJSMathBuiltin_acos: arity = 1; f64 = F64::Acos; f32 = F32::Bad; break; - case AsmJSMathBuiltin_atan: arity = 1; f64 = F64::Atan; f32 = F32::Bad; break; - case AsmJSMathBuiltin_exp: arity = 1; f64 = F64::Exp; f32 = F32::Bad; break; - case AsmJSMathBuiltin_log: arity = 1; f64 = F64::Log; f32 = F32::Bad; break; - case AsmJSMathBuiltin_pow: arity = 2; f64 = F64::Pow; f32 = F32::Bad; break; - case AsmJSMathBuiltin_atan2: arity = 2; f64 = F64::Atan2; f32 = F32::Bad; break; + case AsmJSMathBuiltin_ceil: arity = 1; f64 = Expr::F64Ceil; f32 = Expr::F32Ceil; break; + case AsmJSMathBuiltin_floor: arity = 1; f64 = Expr::F64Floor; f32 = Expr::F32Floor; break; + case AsmJSMathBuiltin_sin: arity = 1; f64 = Expr::F64Sin; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_cos: arity = 1; f64 = Expr::F64Cos; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_tan: arity = 1; f64 = Expr::F64Tan; f32 = Expr::Bad; break; + case 
AsmJSMathBuiltin_asin: arity = 1; f64 = Expr::F64Asin; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_acos: arity = 1; f64 = Expr::F64Acos; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_atan: arity = 1; f64 = Expr::F64Atan; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_exp: arity = 1; f64 = Expr::F64Exp; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_log: arity = 1; f64 = Expr::F64Log; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_pow: arity = 2; f64 = Expr::F64Pow; f32 = Expr::Bad; break; + case AsmJSMathBuiltin_atan2: arity = 2; f64 = Expr::F64Atan2; f32 = Expr::Bad; break; default: MOZ_CRASH("unexpected mathBuiltin function"); } @@ -4772,7 +4768,7 @@ CheckMathBuiltinCall(FunctionValidator& f, ParseNode* callNode, AsmJSMathBuiltin return f.fail(argNode, "arguments to math call should be a subtype of double? or float?"); bool opIsDouble = firstType.isMaybeDouble(); - if (!opIsDouble && f32 == F32::Bad) + if (!opIsDouble && f32 == Expr::Bad) return f.fail(callNode, "math builtin cannot be used as float"); if (opIsDouble) @@ -4901,7 +4897,7 @@ class CheckSimdScalarArgs // We emitted a double literal and actually want a float32. 
MOZ_ASSERT(patchAt != size_t(-1)); - f.patchOp(patchAt, F32::FromF64); + f.patchOp(patchAt, Expr::F32FromF64); return true; } @@ -4910,8 +4906,8 @@ class CheckSimdScalarArgs switch (simdType_) { case AsmJSSimdType_bool32x4: - case AsmJSSimdType_int32x4: f.patchOp(patchAt, I32::Id); return true; - case AsmJSSimdType_float32x4: f.patchOp(patchAt, F32::Id); return true; + case AsmJSSimdType_int32x4: f.patchOp(patchAt, Expr::I32Id); return true; + case AsmJSSimdType_float32x4: f.patchOp(patchAt, Expr::F32Id); return true; } MOZ_CRASH("unexpected simd type"); @@ -4964,9 +4960,9 @@ class CheckSimdVectorScalarArgs return true; switch (formalSimdType_) { - case AsmJSSimdType_int32x4: f.patchOp(patchAt, I32X4::Id); return true; - case AsmJSSimdType_float32x4: f.patchOp(patchAt, F32X4::Id); return true; - case AsmJSSimdType_bool32x4: f.patchOp(patchAt, B32X4::Id); return true; + case AsmJSSimdType_int32x4: f.patchOp(patchAt, Expr::I32X4Id); return true; + case AsmJSSimdType_float32x4: f.patchOp(patchAt, Expr::F32X4Id); return true; + case AsmJSSimdType_bool32x4: f.patchOp(patchAt, Expr::B32X4Id); return true; } MOZ_CRASH("unexpected simd type"); @@ -5026,9 +5022,9 @@ class CheckSimdReplaceLaneArgs Type(formalSimdType_).toChars()); } switch (formalSimdType_) { - case AsmJSSimdType_int32x4: f.patchOp(patchAt, I32X4::Id); break; - case AsmJSSimdType_float32x4: f.patchOp(patchAt, F32X4::Id); break; - case AsmJSSimdType_bool32x4: f.patchOp(patchAt, B32X4::Id); break; + case AsmJSSimdType_int32x4: f.patchOp(patchAt, Expr::I32X4Id); break; + case AsmJSSimdType_float32x4: f.patchOp(patchAt, Expr::F32X4Id); break; + case AsmJSSimdType_bool32x4: f.patchOp(patchAt, Expr::B32X4Id); break; } return true; case 1: @@ -5037,7 +5033,7 @@ class CheckSimdReplaceLaneArgs return f.failf(arg, "lane selector should be a constant integer literal"); if (u32 >= SimdTypeToLength(formalSimdType_)) return f.failf(arg, "lane selector should be in bounds"); - f.patchOp(patchAt, I32::Id); + 
f.patchOp(patchAt, Expr::I32Id); return true; case 2: // Third argument is the scalar @@ -5050,7 +5046,7 @@ class CheckSimdReplaceLaneArgs } // namespace static bool -SwitchPackOp(FunctionValidator& f, AsmJSSimdType type, I32X4 i32x4, F32X4 f32x4, B32X4 b32x4) +SwitchPackOp(FunctionValidator& f, AsmJSSimdType type, Expr i32x4, Expr f32x4, Expr b32x4) { switch (type) { case AsmJSSimdType_int32x4: return f.writeOp(i32x4); @@ -5064,7 +5060,7 @@ static bool CheckSimdUnary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, MSimdUnaryArith::Operation op, Type* type) { - if (!SwitchPackOp(f, opType, I32X4::Unary, F32X4::Unary, B32X4::Unary)) + if (!SwitchPackOp(f, opType, Expr::I32X4Unary, Expr::F32X4Unary, Expr::B32X4Unary)) return false; if (!f.writeU8(uint8_t(op))) return false; @@ -5091,7 +5087,7 @@ static bool CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, MSimdBinaryArith::Operation op, Type* type) { - return SwitchPackOp(f, opType, I32X4::Binary, F32X4::Binary, B32X4::Binary) && + return SwitchPackOp(f, opType, Expr::I32X4Binary, Expr::F32X4Binary, Expr::B32X4Binary) && CheckSimdBinaryGuts(f, call, opType, op, type); } @@ -5099,7 +5095,7 @@ static bool CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, MSimdBinaryBitwise::Operation op, Type* type) { - return SwitchPackOp(f, opType, I32X4::BinaryBitwise, F32X4::Bad, B32X4::BinaryBitwise) && + return SwitchPackOp(f, opType, Expr::I32X4BinaryBitwise, Expr::Bad, Expr::B32X4BinaryBitwise) && CheckSimdBinaryGuts(f, call, opType, op, type); } @@ -5109,11 +5105,11 @@ CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, { switch (opType) { case AsmJSSimdType_int32x4: - if (!f.writeOp(B32X4::BinaryCompI32X4)) + if (!f.writeOp(Expr::B32X4BinaryCompI32X4)) return false; break; case AsmJSSimdType_float32x4: - if (!f.writeOp(B32X4::BinaryCompF32X4)) + if (!f.writeOp(Expr::B32X4BinaryCompF32X4)) return false; break; case 
AsmJSSimdType_bool32x4: @@ -5131,7 +5127,7 @@ static bool CheckSimdBinary(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, MSimdShift::Operation op, Type* type) { - if (!f.writeOp(I32X4::BinaryShift) || !f.writeU8(uint8_t(op))) + if (!f.writeOp(Expr::I32X4BinaryShift) || !f.writeU8(uint8_t(op))) return false; if (!CheckSimdCallArgs(f, call, 2, CheckSimdVectorScalarArgs(opType))) return false; @@ -5144,17 +5140,17 @@ CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType { switch (opType) { case AsmJSSimdType_int32x4: - if (!f.writeOp(I32::I32X4ExtractLane)) + if (!f.writeOp(Expr::I32I32X4ExtractLane)) return false; *type = Type::Signed; break; case AsmJSSimdType_float32x4: - if (!f.writeOp(F32::F32X4ExtractLane)) + if (!f.writeOp(Expr::F32F32X4ExtractLane)) return false; *type = Type::Float; break; case AsmJSSimdType_bool32x4: - if (!f.writeOp(I32::B32X4ExtractLane)) + if (!f.writeOp(Expr::I32B32X4ExtractLane)) return false; *type = Type::Int; break; @@ -5165,7 +5161,7 @@ CheckSimdExtractLane(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType static bool CheckSimdReplaceLane(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type) { - if (!SwitchPackOp(f, opType, I32X4::ReplaceLane, F32X4::ReplaceLane, B32X4::ReplaceLane)) + if (!SwitchPackOp(f, opType, Expr::I32X4ReplaceLane, Expr::F32X4ReplaceLane, Expr::B32X4ReplaceLane)) return false; if (!CheckSimdCallArgsPatchable(f, call, 3, CheckSimdReplaceLaneArgs(opType))) return false; @@ -5183,9 +5179,9 @@ CheckSimdCast(FunctionValidator& f, ParseNode* call, AsmJSSimdType fromType, Asm bool bitcast, Type* type) { if (!SwitchPackOp(f, toType, - bitcast ? I32X4::FromF32X4Bits : I32X4::FromF32X4, - bitcast ? F32X4::FromI32X4Bits : F32X4::FromI32X4, - B32X4::Bad)) + bitcast ? Expr::I32X4FromF32X4Bits : Expr::I32X4FromF32X4, + bitcast ? 
Expr::F32X4FromI32X4Bits : Expr::F32X4FromI32X4, + Expr::Bad)) { return false; } @@ -5218,7 +5214,7 @@ CheckSimdSwizzle(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Ty if (numArgs != 5) return f.failf(call, "expected 5 arguments to SIMD swizzle, got %u", numArgs); - if (!SwitchPackOp(f, opType, I32X4::Swizzle, F32X4::Swizzle, B32X4::Bad)) + if (!SwitchPackOp(f, opType, Expr::I32X4Swizzle, Expr::F32X4Swizzle, Expr::Bad)) return false; Type retType = opType; @@ -5249,7 +5245,7 @@ CheckSimdShuffle(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Ty if (numArgs != 6) return f.failf(call, "expected 6 arguments to SIMD shuffle, got %u", numArgs); - if (!SwitchPackOp(f, opType, I32X4::Shuffle, F32X4::Shuffle, B32X4::Bad)) + if (!SwitchPackOp(f, opType, Expr::I32X4Shuffle, Expr::F32X4Shuffle, Expr::Bad)) return false; Type retType = opType; @@ -5327,7 +5323,7 @@ CheckSimdLoad(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, if (numArgs != 2) return f.failf(call, "expected 2 arguments to SIMD load, got %u", numArgs); - if (!SwitchPackOp(f, opType, I32X4::Load, F32X4::Load, B32X4::Bad)) + if (!SwitchPackOp(f, opType, Expr::I32X4Load, Expr::F32X4Load, Expr::Bad)) return false; size_t viewTypeAt; @@ -5355,7 +5351,7 @@ CheckSimdStore(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, if (numArgs != 3) return f.failf(call, "expected 3 arguments to SIMD store, got %u", numArgs); - if (!SwitchPackOp(f, opType, I32X4::Store, F32X4::Store, B32X4::Bad)) + if (!SwitchPackOp(f, opType, Expr::I32X4Store, Expr::F32X4Store, Expr::Bad)) return false; size_t viewTypeAt; @@ -5386,7 +5382,7 @@ CheckSimdStore(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, static bool CheckSimdSelect(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type) { - if (!SwitchPackOp(f, opType, I32X4::Select, F32X4::Select, B32X4::Bad)) + if (!SwitchPackOp(f, opType, Expr::I32X4Select, Expr::F32X4Select, Expr::Bad)) return false; if 
(!CheckSimdCallArgs(f, call, 3, CheckSimdSelectArgs(opType))) return false; @@ -5399,7 +5395,7 @@ CheckSimdAllTrue(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Ty { switch (opType) { case AsmJSSimdType_bool32x4: - if (!f.writeOp(I32::B32X4AllTrue)) + if (!f.writeOp(Expr::I32B32X4AllTrue)) return false; break; case AsmJSSimdType_int32x4: @@ -5417,7 +5413,7 @@ CheckSimdAnyTrue(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Ty { switch (opType) { case AsmJSSimdType_bool32x4: - if (!f.writeOp(I32::B32X4AnyTrue)) + if (!f.writeOp(Expr::I32B32X4AnyTrue)) return false; break; case AsmJSSimdType_int32x4: @@ -5443,7 +5439,7 @@ CheckSimdCheck(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type static bool CheckSimdSplat(FunctionValidator& f, ParseNode* call, AsmJSSimdType opType, Type* type) { - if (!SwitchPackOp(f, opType, I32X4::Splat, F32X4::Splat, B32X4::Splat)) + if (!SwitchPackOp(f, opType, Expr::I32X4Splat, Expr::F32X4Splat, Expr::B32X4Splat)) return false; if (!CheckSimdCallArgsPatchable(f, call, 1, CheckSimdScalarArgs(opType))) return false; @@ -5571,7 +5567,7 @@ CheckSimdCtorCall(FunctionValidator& f, ParseNode* call, const ModuleValidator:: MOZ_ASSERT(call->isKind(PNK_CALL)); AsmJSSimdType simdType = global->simdCtorType(); - if (!SwitchPackOp(f, simdType, I32X4::Ctor, F32X4::Ctor, B32X4::Ctor)) + if (!SwitchPackOp(f, simdType, Expr::I32X4Ctor, Expr::F32X4Ctor, Expr::B32X4Ctor)) return false; unsigned length = SimdTypeToLength(simdType); @@ -5614,26 +5610,26 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, ExprType expected, Type actu switch (expected) { case ExprType::Void: if (actual.isIntish()) - f.patchOp(patchAt, Stmt::I32Expr); + f.patchOp(patchAt, Expr::I32Expr); else if (actual.isFloatish()) - f.patchOp(patchAt, Stmt::F32Expr); + f.patchOp(patchAt, Expr::F32Expr); else if (actual.isMaybeDouble()) - f.patchOp(patchAt, Stmt::F64Expr); + f.patchOp(patchAt, Expr::F64Expr); else if (actual.isInt32x4()) - 
f.patchOp(patchAt, Stmt::I32X4Expr); + f.patchOp(patchAt, Expr::I32X4Expr); else if (actual.isFloat32x4()) - f.patchOp(patchAt, Stmt::F32X4Expr); + f.patchOp(patchAt, Expr::F32X4Expr); else if (actual.isBool32x4()) - f.patchOp(patchAt, Stmt::B32X4Expr); + f.patchOp(patchAt, Expr::B32X4Expr); else if (actual.isVoid()) - f.patchOp(patchAt, Stmt::Id); + f.patchOp(patchAt, Expr::Id); else MOZ_CRASH("unhandled return type"); break; case ExprType::I32: if (!actual.isIntish()) return f.failf(expr, "%s is not a subtype of intish", actual.toChars()); - f.patchOp(patchAt, I32::Id); + f.patchOp(patchAt, Expr::I32Id); break; case ExprType::I64: MOZ_CRASH("no int64 in asm.js"); @@ -5643,30 +5639,30 @@ CoerceResult(FunctionValidator& f, ParseNode* expr, ExprType expected, Type actu break; case ExprType::F64: if (actual.isMaybeDouble()) - f.patchOp(patchAt, F64::Id); + f.patchOp(patchAt, Expr::F64Id); else if (actual.isMaybeFloat()) - f.patchOp(patchAt, F64::FromF32); + f.patchOp(patchAt, Expr::F64FromF32); else if (actual.isSigned()) - f.patchOp(patchAt, F64::FromS32); + f.patchOp(patchAt, Expr::F64FromS32); else if (actual.isUnsigned()) - f.patchOp(patchAt, F64::FromU32); + f.patchOp(patchAt, Expr::F64FromU32); else return f.failf(expr, "%s is not a subtype of double?, float?, signed or unsigned", actual.toChars()); break; case ExprType::I32x4: if (!actual.isInt32x4()) return f.failf(expr, "%s is not a subtype of int32x4", actual.toChars()); - f.patchOp(patchAt, I32X4::Id); + f.patchOp(patchAt, Expr::I32X4Id); break; case ExprType::F32x4: if (!actual.isFloat32x4()) return f.failf(expr, "%s is not a subtype of float32x4", actual.toChars()); - f.patchOp(patchAt, F32X4::Id); + f.patchOp(patchAt, Expr::F32X4Id); break; case ExprType::B32x4: if (!actual.isBool32x4()) return f.failf(expr, "%s is not a subtype of bool32x4", actual.toChars()); - f.patchOp(patchAt, B32X4::Id); + f.patchOp(patchAt, Expr::B32X4Id); break; } @@ -5799,7 +5795,7 @@ CheckNot(FunctionValidator& f, ParseNode* 
expr, Type* type) MOZ_ASSERT(expr->isKind(PNK_NOT)); ParseNode* operand = UnaryKid(expr); - if (!f.writeOp(I32::Not)) + if (!f.writeOp(Expr::I32Not)) return false; Type operandType; @@ -5828,19 +5824,19 @@ CheckNeg(FunctionValidator& f, ParseNode* expr, Type* type) return false; if (operandType.isInt()) { - f.patchOp(opcodeAt, I32::Neg); + f.patchOp(opcodeAt, Expr::I32Neg); *type = Type::Intish; return true; } if (operandType.isMaybeDouble()) { - f.patchOp(opcodeAt, F64::Neg); + f.patchOp(opcodeAt, Expr::F64Neg); *type = Type::Double; return true; } if (operandType.isMaybeFloat()) { - f.patchOp(opcodeAt, F32::Neg); + f.patchOp(opcodeAt, Expr::F32Neg); *type = Type::Floatish; return true; } @@ -5863,7 +5859,7 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type) return false; if (operandType.isMaybeDouble() || operandType.isMaybeFloat()) { - f.patchOp(opcodeAt, operandType.isMaybeDouble() ? I32::FromF64 : I32::FromF32); + f.patchOp(opcodeAt, operandType.isMaybeDouble() ? Expr::I32FromF64 : Expr::I32FromF32); *type = Type::Signed; return true; } @@ -5871,7 +5867,7 @@ CheckCoerceToInt(FunctionValidator& f, ParseNode* expr, Type* type) if (!operandType.isIntish()) return f.failf(operand, "%s is not a subtype of double?, float? 
or intish", operandType.toChars()); - f.patchOp(opcodeAt, I32::Id); + f.patchOp(opcodeAt, Expr::I32Id); *type = Type::Signed; return true; } @@ -5885,7 +5881,7 @@ CheckBitNot(FunctionValidator& f, ParseNode* neg, Type* type) if (operand->isKind(PNK_BITNOT)) return CheckCoerceToInt(f, operand, type); - if (!f.writeOp(I32::BitNot)) + if (!f.writeOp(Expr::I32BitNot)) return false; Type operandType; @@ -5922,17 +5918,17 @@ CheckComma(FunctionValidator& f, ParseNode* comma, Type* type) return false; if (type->isIntish()) - f.patchOp(commaAt, I32::Comma); + f.patchOp(commaAt, Expr::I32Comma); else if (type->isFloatish()) - f.patchOp(commaAt, F32::Comma); + f.patchOp(commaAt, Expr::F32Comma); else if (type->isMaybeDouble()) - f.patchOp(commaAt, F64::Comma); + f.patchOp(commaAt, Expr::F64Comma); else if (type->isInt32x4()) - f.patchOp(commaAt, I32X4::Comma); + f.patchOp(commaAt, Expr::I32X4Comma); else if (type->isFloat32x4()) - f.patchOp(commaAt, F32X4::Comma); + f.patchOp(commaAt, Expr::F32X4Comma); else if (type->isBool32x4()) - f.patchOp(commaAt, B32X4::Comma); + f.patchOp(commaAt, Expr::B32X4Comma); else MOZ_CRASH("unexpected or unimplemented expression statement"); @@ -5968,22 +5964,22 @@ CheckConditional(FunctionValidator& f, ParseNode* ternary, Type* type) return false; if (thenType.isInt() && elseType.isInt()) { - f.patchOp(opcodeAt, I32::Conditional); + f.patchOp(opcodeAt, Expr::I32Conditional); *type = Type::Int; } else if (thenType.isDouble() && elseType.isDouble()) { - f.patchOp(opcodeAt, F64::Conditional); + f.patchOp(opcodeAt, Expr::F64Conditional); *type = Type::Double; } else if (thenType.isFloat() && elseType.isFloat()) { - f.patchOp(opcodeAt, F32::Conditional); + f.patchOp(opcodeAt, Expr::F32Conditional); *type = Type::Float; } else if (elseType.isInt32x4() && thenType.isInt32x4()) { - f.patchOp(opcodeAt, I32X4::Conditional); + f.patchOp(opcodeAt, Expr::I32X4Conditional); *type = Type::Int32x4; } else if (elseType.isFloat32x4() && thenType.isFloat32x4()) 
{ - f.patchOp(opcodeAt, F32X4::Conditional); + f.patchOp(opcodeAt, Expr::F32X4Conditional); *type = Type::Float32x4; } else if (elseType.isBool32x4() && thenType.isBool32x4()) { - f.patchOp(opcodeAt, B32X4::Conditional); + f.patchOp(opcodeAt, Expr::B32X4Conditional); *type = Type::Bool32x4; } else { return f.failf(ternary, "then/else branches of conditional must both produce int, float, " @@ -6042,19 +6038,19 @@ CheckMultiply(FunctionValidator& f, ParseNode* star, Type* type) if (lhsType.isInt() && rhsType.isInt()) { if (!IsValidIntMultiplyConstant(f.m(), lhs) && !IsValidIntMultiplyConstant(f.m(), rhs)) return f.fail(star, "one arg to int multiply must be a small (-2^20, 2^20) int literal"); - f.patchOp(opcodeAt, I32::Mul); + f.patchOp(opcodeAt, Expr::I32Mul); *type = Type::Intish; return true; } if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.patchOp(opcodeAt, F64::Mul); + f.patchOp(opcodeAt, Expr::F64Mul); *type = Type::Double; return true; } if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { - f.patchOp(opcodeAt, F32::Mul); + f.patchOp(opcodeAt, Expr::F32Mul); *type = Type::Floatish; return true; } @@ -6105,13 +6101,13 @@ CheckAddOrSub(FunctionValidator& f, ParseNode* expr, Type* type, unsigned* numAd return f.fail(expr, "too many + or - without intervening coercion"); if (lhsType.isInt() && rhsType.isInt()) { - f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? I32::Add : I32::Sub); + f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? Expr::I32Add : Expr::I32Sub); *type = Type::Intish; } else if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? F64::Add : F64::Sub); + f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? Expr::F64Add : Expr::F64Sub); *type = Type::Double; } else if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { - f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? F32::Add : F32::Sub); + f.patchOp(opcodeAt, expr->isKind(PNK_ADD) ? 
Expr::F32Add : Expr::F32Sub); *type = Type::Floatish; } else { return f.failf(expr, "operands to + or - must both be int, float? or double?, got %s and %s", @@ -6142,14 +6138,14 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type) return false; if (lhsType.isMaybeDouble() && rhsType.isMaybeDouble()) { - f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? F64::Div : F64::Mod); + f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? Expr::F64Div : Expr::F64Mod); *type = Type::Double; return true; } if (lhsType.isMaybeFloat() && rhsType.isMaybeFloat()) { if (expr->isKind(PNK_DIV)) - f.patchOp(opcodeAt, F32::Div); + f.patchOp(opcodeAt, Expr::F32Div); else return f.fail(expr, "modulo cannot receive float arguments"); *type = Type::Floatish; @@ -6157,13 +6153,13 @@ CheckDivOrMod(FunctionValidator& f, ParseNode* expr, Type* type) } if (lhsType.isSigned() && rhsType.isSigned()) { - f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? I32::SDiv : I32::SMod); + f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? Expr::I32SDiv : Expr::I32SMod); *type = Type::Intish; return true; } if (lhsType.isUnsigned() && rhsType.isUnsigned()) { - f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? I32::UDiv : I32::UMod); + f.patchOp(opcodeAt, expr->isKind(PNK_DIV) ? 
Expr::I32UDiv : Expr::I32UMod); *type = Type::Intish; return true; } @@ -6200,45 +6196,45 @@ CheckComparison(FunctionValidator& f, ParseNode* comp, Type* type) "%s and %s are given", lhsType.toChars(), rhsType.toChars()); } - I32 stmt; + Expr stmt; if (lhsType.isSigned() && rhsType.isSigned()) { switch (comp->getOp()) { - case JSOP_EQ: stmt = I32::EqI32; break; - case JSOP_NE: stmt = I32::NeI32; break; - case JSOP_LT: stmt = I32::SLtI32; break; - case JSOP_LE: stmt = I32::SLeI32; break; - case JSOP_GT: stmt = I32::SGtI32; break; - case JSOP_GE: stmt = I32::SGeI32; break; + case JSOP_EQ: stmt = Expr::I32EqI32; break; + case JSOP_NE: stmt = Expr::I32NeI32; break; + case JSOP_LT: stmt = Expr::I32SLtI32; break; + case JSOP_LE: stmt = Expr::I32SLeI32; break; + case JSOP_GT: stmt = Expr::I32SGtI32; break; + case JSOP_GE: stmt = Expr::I32SGeI32; break; default: MOZ_CRASH("unexpected comparison op"); } } else if (lhsType.isUnsigned() && rhsType.isUnsigned()) { switch (comp->getOp()) { - case JSOP_EQ: stmt = I32::EqI32; break; - case JSOP_NE: stmt = I32::NeI32; break; - case JSOP_LT: stmt = I32::ULtI32; break; - case JSOP_LE: stmt = I32::ULeI32; break; - case JSOP_GT: stmt = I32::UGtI32; break; - case JSOP_GE: stmt = I32::UGeI32; break; + case JSOP_EQ: stmt = Expr::I32EqI32; break; + case JSOP_NE: stmt = Expr::I32NeI32; break; + case JSOP_LT: stmt = Expr::I32ULtI32; break; + case JSOP_LE: stmt = Expr::I32ULeI32; break; + case JSOP_GT: stmt = Expr::I32UGtI32; break; + case JSOP_GE: stmt = Expr::I32UGeI32; break; default: MOZ_CRASH("unexpected comparison op"); } } else if (lhsType.isDouble()) { switch (comp->getOp()) { - case JSOP_EQ: stmt = I32::EqF64; break; - case JSOP_NE: stmt = I32::NeF64; break; - case JSOP_LT: stmt = I32::LtF64; break; - case JSOP_LE: stmt = I32::LeF64; break; - case JSOP_GT: stmt = I32::GtF64; break; - case JSOP_GE: stmt = I32::GeF64; break; + case JSOP_EQ: stmt = Expr::I32EqF64; break; + case JSOP_NE: stmt = Expr::I32NeF64; break; + case JSOP_LT: 
stmt = Expr::I32LtF64; break; + case JSOP_LE: stmt = Expr::I32LeF64; break; + case JSOP_GT: stmt = Expr::I32GtF64; break; + case JSOP_GE: stmt = Expr::I32GeF64; break; default: MOZ_CRASH("unexpected comparison op"); } } else if (lhsType.isFloat()) { switch (comp->getOp()) { - case JSOP_EQ: stmt = I32::EqF32; break; - case JSOP_NE: stmt = I32::NeF32; break; - case JSOP_LT: stmt = I32::LtF32; break; - case JSOP_LE: stmt = I32::LeF32; break; - case JSOP_GT: stmt = I32::GtF32; break; - case JSOP_GE: stmt = I32::GeF32; break; + case JSOP_EQ: stmt = Expr::I32EqF32; break; + case JSOP_NE: stmt = Expr::I32NeF32; break; + case JSOP_LT: stmt = Expr::I32LtF32; break; + case JSOP_LE: stmt = Expr::I32LeF32; break; + case JSOP_GT: stmt = Expr::I32GtF32; break; + case JSOP_GE: stmt = Expr::I32GeF32; break; default: MOZ_CRASH("unexpected comparison op"); } } else { @@ -6291,12 +6287,12 @@ CheckBitwise(FunctionValidator& f, ParseNode* bitwise, Type* type) } switch (bitwise->getKind()) { - case PNK_BITOR: if (!f.writeOp(I32::BitOr)) return false; break; - case PNK_BITAND: if (!f.writeOp(I32::BitAnd)) return false; break; - case PNK_BITXOR: if (!f.writeOp(I32::BitXor)) return false; break; - case PNK_LSH: if (!f.writeOp(I32::Lsh)) return false; break; - case PNK_RSH: if (!f.writeOp(I32::ArithRsh)) return false; break; - case PNK_URSH: if (!f.writeOp(I32::LogicRsh)) return false; break; + case PNK_BITOR: if (!f.writeOp(Expr::I32BitOr)) return false; break; + case PNK_BITAND: if (!f.writeOp(Expr::I32BitAnd)) return false; break; + case PNK_BITXOR: if (!f.writeOp(Expr::I32BitXor)) return false; break; + case PNK_LSH: if (!f.writeOp(Expr::I32Lsh)) return false; break; + case PNK_RSH: if (!f.writeOp(Expr::I32ArithRsh)) return false; break; + case PNK_URSH: if (!f.writeOp(Expr::I32LogicRsh)) return false; break; default: MOZ_CRASH("not a bitwise op"); } @@ -6383,17 +6379,17 @@ CheckAsExprStatement(FunctionValidator& f, ParseNode* expr) return false; if (type.isIntish()) - 
f.patchOp(opcodeAt, Stmt::I32Expr); + f.patchOp(opcodeAt, Expr::I32Expr); else if (type.isFloatish()) - f.patchOp(opcodeAt, Stmt::F32Expr); + f.patchOp(opcodeAt, Expr::F32Expr); else if (type.isMaybeDouble()) - f.patchOp(opcodeAt, Stmt::F64Expr); + f.patchOp(opcodeAt, Expr::F64Expr); else if (type.isInt32x4()) - f.patchOp(opcodeAt, Stmt::I32X4Expr); + f.patchOp(opcodeAt, Expr::I32X4Expr); else if (type.isFloat32x4()) - f.patchOp(opcodeAt, Stmt::F32X4Expr); + f.patchOp(opcodeAt, Expr::F32X4Expr); else if (type.isBool32x4()) - f.patchOp(opcodeAt, Stmt::B32X4Expr); + f.patchOp(opcodeAt, Expr::B32X4Expr); else MOZ_CRASH("unexpected or unimplemented expression statement"); @@ -6406,7 +6402,7 @@ CheckExprStatement(FunctionValidator& f, ParseNode* exprStmt) MOZ_ASSERT(exprStmt->isKind(PNK_SEMI)); ParseNode* expr = UnaryKid(exprStmt); if (!expr) - return f.writeOp(Stmt::Noop); + return f.writeOp(Expr::Noop); return CheckAsExprStatement(f, expr); } @@ -6423,11 +6419,11 @@ MaybeAddInterruptCheck(FunctionValidator& f, InterruptCheckPosition pos, ParseNo switch (pos) { case InterruptCheckPosition::Head: - if (!f.writeOp(Stmt::InterruptCheckHead)) + if (!f.writeOp(Expr::InterruptCheckHead)) return false; break; case InterruptCheckPosition::Loop: - if (!f.writeOp(Stmt::InterruptCheckLoop)) + if (!f.writeOp(Expr::InterruptCheckLoop)) return false; break; } @@ -6445,7 +6441,7 @@ CheckWhile(FunctionValidator& f, ParseNode* whileStmt) ParseNode* cond = BinaryLeft(whileStmt); ParseNode* body = BinaryRight(whileStmt); - if (!f.writeOp(Stmt::While)) + if (!f.writeOp(Expr::While)) return false; Type condType; @@ -6472,8 +6468,8 @@ CheckFor(FunctionValidator& f, ParseNode* forStmt) ParseNode* maybeCond = TernaryKid2(forHead); ParseNode* maybeInc = TernaryKid3(forHead); - Stmt stmt = maybeInit ? (maybeInc ? Stmt::ForInitInc : Stmt::ForInitNoInc) - : (maybeInc ? Stmt::ForNoInitInc : Stmt::ForNoInitNoInc); + Expr stmt = maybeInit ? (maybeInc ? 
Expr::ForInitInc : Expr::ForInitNoInc) + : (maybeInc ? Expr::ForNoInitInc : Expr::ForNoInitNoInc); if (!f.writeOp(stmt)) return false; @@ -6509,7 +6505,7 @@ CheckDoWhile(FunctionValidator& f, ParseNode* whileStmt) ParseNode* body = BinaryLeft(whileStmt); ParseNode* cond = BinaryRight(whileStmt); - if (!f.writeOp(Stmt::DoWhile)) + if (!f.writeOp(Expr::DoWhile)) return false; if (!MaybeAddInterruptCheck(f, InterruptCheckPosition::Loop, cond)) @@ -6534,7 +6530,7 @@ CheckLabel(FunctionValidator& f, ParseNode* labeledStmt) PropertyName* label = LabeledStatementLabel(labeledStmt); ParseNode* stmt = LabeledStatementStatement(labeledStmt); - if (!f.writeOp(Stmt::Label)) + if (!f.writeOp(Expr::Label)) return false; uint32_t labelId; @@ -6574,9 +6570,9 @@ CheckIf(FunctionValidator& f, ParseNode* ifStmt) return false; if (!elseStmt) { - f.patchOp(opcodeAt, Stmt::IfThen); + f.patchOp(opcodeAt, Expr::IfThen); } else { - f.patchOp(opcodeAt, Stmt::IfElse); + f.patchOp(opcodeAt, Expr::IfElse); if (elseStmt->isKind(PNK_IF)) { ifStmt = elseStmt; @@ -6680,7 +6676,7 @@ CheckSwitch(FunctionValidator& f, ParseNode* switchStmt) { MOZ_ASSERT(switchStmt->isKind(PNK_SWITCH)); - if (!f.writeOp(Stmt::Switch)) + if (!f.writeOp(Expr::Switch)) return false; // Has default @@ -6779,7 +6775,7 @@ CheckReturn(FunctionValidator& f, ParseNode* returnStmt) { ParseNode* expr = ReturnExpr(returnStmt); - if (!f.writeOp(Stmt::Ret)) + if (!f.writeOp(Expr::Ret)) return false; if (!expr) @@ -6815,7 +6811,7 @@ CheckStatementList(FunctionValidator& f, ParseNode* stmtList) { MOZ_ASSERT(stmtList->isKind(PNK_STATEMENTLIST)); - if (!f.writeOp(Stmt::Block) || !f.writeU32(ListLength(stmtList))) + if (!f.writeOp(Expr::Block) || !f.writeU32(ListLength(stmtList))) return false; for (ParseNode* stmt = ListHead(stmtList); stmt; stmt = NextNode(stmt)) { @@ -6828,7 +6824,7 @@ CheckStatementList(FunctionValidator& f, ParseNode* stmtList) static bool CheckBreakOrContinue(FunctionValidator& f, PropertyName* maybeLabel, - Stmt 
withoutLabel, Stmt withLabel) + Expr withoutLabel, Expr withLabel) { if (!maybeLabel) return f.writeOp(withoutLabel); @@ -6858,9 +6854,9 @@ CheckStatement(FunctionValidator& f, ParseNode* stmt) case PNK_RETURN: return CheckReturn(f, stmt); case PNK_STATEMENTLIST: return CheckStatementList(f, stmt); case PNK_BREAK: return CheckBreakOrContinue(f, LoopControlMaybeLabel(stmt), - Stmt::Break, Stmt::BreakLabel); + Expr::Break, Expr::BreakLabel); case PNK_CONTINUE: return CheckBreakOrContinue(f, LoopControlMaybeLabel(stmt), - Stmt::Continue, Stmt::ContinueLabel); + Expr::Continue, Expr::ContinueLabel); default:; } diff --git a/js/src/asmjs/WasmBinary.h b/js/src/asmjs/WasmBinary.h index 78e1fa31fdf6..2f4695406194 100644 --- a/js/src/asmjs/WasmBinary.h +++ b/js/src/asmjs/WasmBinary.h @@ -27,7 +27,7 @@ class PropertyName; namespace wasm { -enum class Stmt : uint8_t +enum class Expr : uint8_t { Ret, @@ -57,8 +57,6 @@ enum class Stmt : uint8_t AtomicsFence, - // asm.js specific - // Expression statements (to be removed in the future) I32Expr, F32Expr, F64Expr, @@ -66,354 +64,334 @@ enum class Stmt : uint8_t F32X4Expr, B32X4Expr, + // asm.js specific Id, Noop, InterruptCheckHead, InterruptCheckLoop, DebugCheckPoint, + Bad, - Bad -}; + // I32 opcodes + I32GetLocal, + I32SetLocal, + I32GetGlobal, + I32SetGlobal, -enum class I32 : uint8_t -{ - // Common opcodes - GetLocal, - SetLocal, - GetGlobal, - SetGlobal, + I32CallInternal, + I32CallIndirect, + I32CallImport, - CallInternal, - CallIndirect, - CallImport, + I32Conditional, + I32Comma, - Conditional, - Comma, - - Literal, + I32Literal, // Binary arith opcodes - Add, - Sub, - Mul, - SDiv, - SMod, - UDiv, - UMod, - Min, - Max, + I32Add, + I32Sub, + I32Mul, + I32SDiv, + I32SMod, + I32UDiv, + I32UMod, + I32Min, + I32Max, // Unary arith opcodes - Not, - Neg, + I32Not, + I32Neg, // Bitwise opcodes - BitOr, - BitAnd, - BitXor, - BitNot, + I32BitOr, + I32BitAnd, + I32BitXor, + I32BitNot, - Lsh, - ArithRsh, - LogicRsh, + I32Lsh, + 
I32ArithRsh, + I32LogicRsh, // Conversion opcodes - FromF32, - FromF64, + I32FromF32, + I32FromF64, // Math builtin opcodes - Clz, - Abs, + I32Clz, + I32Abs, // Comparison opcodes // Ordering matters (EmitComparison expects signed opcodes to be placed // before unsigned opcodes) - EqI32, - NeI32, - SLtI32, - SLeI32, - SGtI32, - SGeI32, - ULtI32, - ULeI32, - UGtI32, - UGeI32, + I32EqI32, + I32NeI32, + I32SLtI32, + I32SLeI32, + I32SGtI32, + I32SGeI32, + I32ULtI32, + I32ULeI32, + I32UGtI32, + I32UGeI32, - EqF32, - NeF32, - LtF32, - LeF32, - GtF32, - GeF32, + I32EqF32, + I32NeF32, + I32LtF32, + I32LeF32, + I32GtF32, + I32GeF32, - EqF64, - NeF64, - LtF64, - LeF64, - GtF64, - GeF64, + I32EqF64, + I32NeF64, + I32LtF64, + I32LeF64, + I32GtF64, + I32GeF64, // Heap accesses opcodes - SLoad8, - SLoad16, - SLoad32, - ULoad8, - ULoad16, - ULoad32, - Store8, - Store16, - Store32, + I32SLoad8, + I32SLoad16, + I32SLoad32, + I32ULoad8, + I32ULoad16, + I32ULoad32, + I32Store8, + I32Store16, + I32Store32, // Atomics opcodes - AtomicsCompareExchange, - AtomicsExchange, - AtomicsLoad, - AtomicsStore, - AtomicsBinOp, + I32AtomicsCompareExchange, + I32AtomicsExchange, + I32AtomicsLoad, + I32AtomicsStore, + I32AtomicsBinOp, // SIMD opcodes - I32X4ExtractLane, - B32X4ExtractLane, - B32X4AllTrue, - B32X4AnyTrue, + I32I32X4ExtractLane, + I32B32X4ExtractLane, + I32B32X4AllTrue, + I32B32X4AnyTrue, // Specific to AsmJS - Id, + I32Id, - Bad -}; - -enum class F32 : uint8_t -{ + // F32 opcodes // Common opcodes - GetLocal, - SetLocal, - GetGlobal, - SetGlobal, + F32GetLocal, + F32SetLocal, + F32GetGlobal, + F32SetGlobal, - CallInternal, - CallIndirect, - CallImport, + F32CallInternal, + F32CallIndirect, + F32CallImport, - Conditional, - Comma, + F32Conditional, + F32Comma, - Literal, + F32Literal, // Binary arith opcodes - Add, - Sub, - Mul, - Div, - Min, - Max, - Neg, + F32Add, + F32Sub, + F32Mul, + F32Div, + F32Min, + F32Max, + F32Neg, // Math builtin opcodes - Abs, - Sqrt, - Ceil, - Floor, + 
F32Abs, + F32Sqrt, + F32Ceil, + F32Floor, // Conversion opcodes - FromF64, - FromS32, - FromU32, + F32FromF64, + F32FromS32, + F32FromU32, // Heap accesses opcodes - Load, - StoreF32, - StoreF64, + F32Load, + F32StoreF32, + F32StoreF64, // SIMD opcodes - F32X4ExtractLane, + F32F32X4ExtractLane, // asm.js specific - Id, - Bad -}; + F32Id, -enum class F64 : uint8_t -{ + // F64 opcodes // Common opcodes - GetLocal, - SetLocal, - GetGlobal, - SetGlobal, + F64GetLocal, + F64SetLocal, + F64GetGlobal, + F64SetGlobal, - CallInternal, - CallIndirect, - CallImport, + F64CallInternal, + F64CallIndirect, + F64CallImport, - Conditional, - Comma, + F64Conditional, + F64Comma, - Literal, + F64Literal, // Binary arith opcodes - Add, - Sub, - Mul, - Div, - Min, - Max, - Mod, - Neg, + F64Add, + F64Sub, + F64Mul, + F64Div, + F64Min, + F64Max, + F64Mod, + F64Neg, // Math builtin opcodes - Abs, - Sqrt, - Ceil, - Floor, - Sin, - Cos, - Tan, - Asin, - Acos, - Atan, - Exp, - Log, - Pow, - Atan2, + F64Abs, + F64Sqrt, + F64Ceil, + F64Floor, + F64Sin, + F64Cos, + F64Tan, + F64Asin, + F64Acos, + F64Atan, + F64Exp, + F64Log, + F64Pow, + F64Atan2, // Conversions opcodes - FromF32, - FromS32, - FromU32, + F64FromF32, + F64FromS32, + F64FromU32, // Heap accesses opcodes - Load, - StoreF32, - StoreF64, + F64Load, + F64StoreF32, + F64StoreF64, // asm.js specific - Id, - Bad -}; + F64Id, -enum class I32X4 : uint8_t -{ + // I32X4 opcodes // Common opcodes - GetLocal, - SetLocal, + I32X4GetLocal, + I32X4SetLocal, - GetGlobal, - SetGlobal, + I32X4GetGlobal, + I32X4SetGlobal, - CallInternal, - CallIndirect, - CallImport, + I32X4CallInternal, + I32X4CallIndirect, + I32X4CallImport, - Conditional, - Comma, + I32X4Conditional, + I32X4Comma, - Literal, + I32X4Literal, // Specific opcodes - Ctor, + I32X4Ctor, - Unary, + I32X4Unary, - Binary, - BinaryBitwise, - BinaryShift, + I32X4Binary, + I32X4BinaryBitwise, + I32X4BinaryShift, - ReplaceLane, + I32X4ReplaceLane, - FromF32X4, - FromF32X4Bits, + 
I32X4FromF32X4, + I32X4FromF32X4Bits, - Swizzle, - Shuffle, - Select, - Splat, + I32X4Swizzle, + I32X4Shuffle, + I32X4Select, + I32X4Splat, - Load, - Store, + I32X4Load, + I32X4Store, // asm.js specific - Id, - Bad -}; + I32X4Id, -enum class F32X4 : uint8_t -{ + // F32X4 opcodes // Common opcodes - GetLocal, - SetLocal, + F32X4GetLocal, + F32X4SetLocal, - GetGlobal, - SetGlobal, + F32X4GetGlobal, + F32X4SetGlobal, - CallInternal, - CallIndirect, - CallImport, + F32X4CallInternal, + F32X4CallIndirect, + F32X4CallImport, - Conditional, - Comma, + F32X4Conditional, + F32X4Comma, - Literal, + F32X4Literal, // Specific opcodes - Ctor, + F32X4Ctor, - Unary, + F32X4Unary, - Binary, + F32X4Binary, - ReplaceLane, + F32X4ReplaceLane, - FromI32X4, - FromI32X4Bits, - Swizzle, - Shuffle, - Select, - Splat, + F32X4FromI32X4, + F32X4FromI32X4Bits, + F32X4Swizzle, + F32X4Shuffle, + F32X4Select, + F32X4Splat, - Load, - Store, + F32X4Load, + F32X4Store, // asm.js specific - Id, - Bad -}; + F32X4Id, -enum class B32X4 : uint8_t -{ + // B32X4 opcodes // Common opcodes - GetLocal, - SetLocal, + B32X4GetLocal, + B32X4SetLocal, - GetGlobal, - SetGlobal, + B32X4GetGlobal, + B32X4SetGlobal, - CallInternal, - CallIndirect, - CallImport, + B32X4CallInternal, + B32X4CallIndirect, + B32X4CallImport, - Conditional, - Comma, + B32X4Conditional, + B32X4Comma, - Literal, + B32X4Literal, // Specific opcodes - Ctor, + B32X4Ctor, - Unary, + B32X4Unary, - Binary, - BinaryCompI32X4, - BinaryCompF32X4, - BinaryBitwise, + B32X4Binary, + B32X4BinaryCompI32X4, + B32X4BinaryCompF32X4, + B32X4BinaryBitwise, - ReplaceLane, + B32X4ReplaceLane, - Splat, + B32X4Splat, // asm.js specific - Id, - Bad + B32X4Id }; enum NeedsBoundsCheck : uint8_t @@ -501,7 +479,7 @@ class Encoder bool pcIsPatchable(size_t pc, unsigned size) const { bool patchable = true; for (unsigned i = 0; patchable && i < size; i++) - patchable &= Stmt((*bytecode_)[pc]) == Stmt::Bad; + patchable &= Expr((*bytecode_)[pc]) == Expr::Bad; return 
patchable; } #endif diff --git a/js/src/asmjs/WasmIonCompile.cpp b/js/src/asmjs/WasmIonCompile.cpp index a5c9541ddc7e..cf7e5840ea19 100644 --- a/js/src/asmjs/WasmIonCompile.cpp +++ b/js/src/asmjs/WasmIonCompile.cpp @@ -1177,7 +1177,7 @@ class FunctionCompiler SimdConstant readI32X4() { return decoder_.uncheckedReadI32X4(); } SimdConstant readF32X4() { return decoder_.uncheckedReadF32X4(); } - Stmt readStmtOp() { return Stmt(readU8()); } + Expr readOpcode() { return Expr(readU8()); } void readCallLineCol(uint32_t* line, uint32_t* column) { const SourceCoords& sc = func_.sourceCoords(lastReadCallSite_++); @@ -1187,7 +1187,7 @@ class FunctionCompiler } void assertDebugCheckPoint() { - MOZ_ASSERT(Stmt(readU8()) == Stmt::DebugCheckPoint); + MOZ_ASSERT(readOpcode() == Expr::DebugCheckPoint); } bool done() const { return decoder_.done(); } @@ -1629,9 +1629,9 @@ EmitFFICall(FunctionCompiler& f, ExprType ret, MDefinition** def) } static bool -EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def) +EmitF32MathBuiltinCall(FunctionCompiler& f, Expr f32, MDefinition** def) { - MOZ_ASSERT(f32 == F32::Ceil || f32 == F32::Floor); + MOZ_ASSERT(f32 == Expr::F32Ceil || f32 == Expr::F32Floor); uint32_t lineno, column; f.readCallLineCol(&lineno, &column); @@ -1645,12 +1645,12 @@ EmitMathBuiltinCall(FunctionCompiler& f, F32 f32, MDefinition** def) f.finishCallArgs(&call); - SymbolicAddress callee = f32 == F32::Ceil ? SymbolicAddress::CeilF : SymbolicAddress::FloorF; + SymbolicAddress callee = f32 == Expr::F32Ceil ? 
SymbolicAddress::CeilF : SymbolicAddress::FloorF; return f.builtinCall(callee, call, ValType::F32, def); } static bool -EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def) +EmitF64MathBuiltinCall(FunctionCompiler& f, Expr f64, MDefinition** def) { uint32_t lineno, column; f.readCallLineCol(&lineno, &column); @@ -1662,7 +1662,7 @@ EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def) if (!EmitF64Expr(f, &firstArg) || !f.passArg(firstArg, ValType::F64, &call)) return false; - if (f64 == F64::Pow || f64 == F64::Atan2) { + if (f64 == Expr::F64Pow || f64 == Expr::F64Atan2) { MDefinition* secondArg; if (!EmitF64Expr(f, &secondArg) || !f.passArg(secondArg, ValType::F64, &call)) return false; @@ -1670,18 +1670,18 @@ EmitMathBuiltinCall(FunctionCompiler& f, F64 f64, MDefinition** def) SymbolicAddress callee; switch (f64) { - case F64::Ceil: callee = SymbolicAddress::CeilD; break; - case F64::Floor: callee = SymbolicAddress::FloorD; break; - case F64::Sin: callee = SymbolicAddress::SinD; break; - case F64::Cos: callee = SymbolicAddress::CosD; break; - case F64::Tan: callee = SymbolicAddress::TanD; break; - case F64::Asin: callee = SymbolicAddress::ASinD; break; - case F64::Acos: callee = SymbolicAddress::ACosD; break; - case F64::Atan: callee = SymbolicAddress::ATanD; break; - case F64::Exp: callee = SymbolicAddress::ExpD; break; - case F64::Log: callee = SymbolicAddress::LogD; break; - case F64::Pow: callee = SymbolicAddress::PowD; break; - case F64::Atan2: callee = SymbolicAddress::ATan2D; break; + case Expr::F64Ceil: callee = SymbolicAddress::CeilD; break; + case Expr::F64Floor: callee = SymbolicAddress::FloorD; break; + case Expr::F64Sin: callee = SymbolicAddress::SinD; break; + case Expr::F64Cos: callee = SymbolicAddress::CosD; break; + case Expr::F64Tan: callee = SymbolicAddress::TanD; break; + case Expr::F64Asin: callee = SymbolicAddress::ASinD; break; + case Expr::F64Acos: callee = SymbolicAddress::ACosD; break; + case Expr::F64Atan: 
callee = SymbolicAddress::ATanD; break; + case Expr::F64Exp: callee = SymbolicAddress::ExpD; break; + case Expr::F64Log: callee = SymbolicAddress::LogD; break; + case Expr::F64Pow: callee = SymbolicAddress::PowD; break; + case Expr::F64Atan2: callee = SymbolicAddress::ATan2D; break; default: MOZ_CRASH("unexpected double math builtin callee"); } @@ -2132,45 +2132,45 @@ EmitDivOrMod(FunctionCompiler& f, ValType type, bool isDiv, MDefinition** def) } static bool -EmitComparison(FunctionCompiler& f, I32 stmt, MDefinition** def) +EmitComparison(FunctionCompiler& f, Expr stmt, MDefinition** def) { MDefinition *lhs, *rhs; MCompare::CompareType compareType; switch (stmt) { - case I32::EqI32: - case I32::NeI32: - case I32::SLeI32: - case I32::SLtI32: - case I32::ULeI32: - case I32::ULtI32: - case I32::SGeI32: - case I32::SGtI32: - case I32::UGeI32: - case I32::UGtI32: + case Expr::I32EqI32: + case Expr::I32NeI32: + case Expr::I32SLeI32: + case Expr::I32SLtI32: + case Expr::I32ULeI32: + case Expr::I32ULtI32: + case Expr::I32SGeI32: + case Expr::I32SGtI32: + case Expr::I32UGeI32: + case Expr::I32UGtI32: if (!EmitI32Expr(f, &lhs) || !EmitI32Expr(f, &rhs)) return false; // The list of opcodes is sorted such that all signed comparisons // stand before ULtI32. - compareType = stmt < I32::ULtI32 + compareType = stmt < Expr::I32ULtI32 ? 
MCompare::Compare_Int32 : MCompare::Compare_UInt32; break; - case I32::EqF32: - case I32::NeF32: - case I32::LeF32: - case I32::LtF32: - case I32::GeF32: - case I32::GtF32: + case Expr::I32EqF32: + case Expr::I32NeF32: + case Expr::I32LeF32: + case Expr::I32LtF32: + case Expr::I32GeF32: + case Expr::I32GtF32: if (!EmitF32Expr(f, &lhs) || !EmitF32Expr(f, &rhs)) return false; compareType = MCompare::Compare_Float32; break; - case I32::EqF64: - case I32::NeF64: - case I32::LeF64: - case I32::LtF64: - case I32::GeF64: - case I32::GtF64: + case Expr::I32EqF64: + case Expr::I32NeF64: + case Expr::I32LeF64: + case Expr::I32LtF64: + case Expr::I32GeF64: + case Expr::I32GtF64: if (!EmitF64Expr(f, &lhs) || !EmitF64Expr(f, &rhs)) return false; compareType = MCompare::Compare_Double; @@ -2180,38 +2180,38 @@ EmitComparison(FunctionCompiler& f, I32 stmt, MDefinition** def) JSOp compareOp; switch (stmt) { - case I32::EqI32: - case I32::EqF32: - case I32::EqF64: + case Expr::I32EqI32: + case Expr::I32EqF32: + case Expr::I32EqF64: compareOp = JSOP_EQ; break; - case I32::NeI32: - case I32::NeF32: - case I32::NeF64: + case Expr::I32NeI32: + case Expr::I32NeF32: + case Expr::I32NeF64: compareOp = JSOP_NE; break; - case I32::SLeI32: - case I32::ULeI32: - case I32::LeF32: - case I32::LeF64: + case Expr::I32SLeI32: + case Expr::I32ULeI32: + case Expr::I32LeF32: + case Expr::I32LeF64: compareOp = JSOP_LE; break; - case I32::SLtI32: - case I32::ULtI32: - case I32::LtF32: - case I32::LtF64: + case Expr::I32SLtI32: + case Expr::I32ULtI32: + case Expr::I32LtF32: + case Expr::I32LtF64: compareOp = JSOP_LT; break; - case I32::SGeI32: - case I32::UGeI32: - case I32::GeF32: - case I32::GeF64: + case Expr::I32SGeI32: + case Expr::I32UGeI32: + case Expr::I32GeF32: + case Expr::I32GeF64: compareOp = JSOP_GE; break; - case I32::SGtI32: - case I32::UGtI32: - case I32::GtF32: - case I32::GtF64: + case Expr::I32SGtI32: + case Expr::I32UGtI32: + case Expr::I32GtF32: + case Expr::I32GtF64: compareOp = 
JSOP_GT; break; default: MOZ_CRASH("unexpected comparison opcode"); @@ -2318,13 +2318,13 @@ EmitWhile(FunctionCompiler& f, const LabelVector* maybeLabels) } static bool -EmitFor(FunctionCompiler& f, Stmt stmt, const LabelVector* maybeLabels) +EmitFor(FunctionCompiler& f, Expr stmt, const LabelVector* maybeLabels) { - MOZ_ASSERT(stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc || - stmt == Stmt::ForNoInitInc || stmt == Stmt::ForNoInitNoInc); + MOZ_ASSERT(stmt == Expr::ForInitInc || stmt == Expr::ForInitNoInc || + stmt == Expr::ForNoInitInc || stmt == Expr::ForNoInitNoInc); size_t headId = f.nextId(); - if (stmt == Stmt::ForInitInc || stmt == Stmt::ForInitNoInc) { + if (stmt == Expr::ForInitInc || stmt == Expr::ForInitNoInc) { if (!EmitStatement(f)) return false; } @@ -2347,7 +2347,7 @@ EmitFor(FunctionCompiler& f, Stmt stmt, const LabelVector* maybeLabels) if (!f.bindContinues(headId, maybeLabels)) return false; - if (stmt == Stmt::ForInitInc || stmt == Stmt::ForNoInitInc) { + if (stmt == Expr::ForInitInc || stmt == Expr::ForNoInitInc) { if (!EmitStatement(f)) return false; } @@ -2400,7 +2400,7 @@ EmitLabel(FunctionCompiler& f, LabelVector* maybeLabels) return f.bindLabeledBreaks(&labels); } -static bool EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels = nullptr); +static bool EmitStatement(FunctionCompiler& f, Expr stmt, LabelVector* maybeLabels = nullptr); typedef bool HasElseBlock; @@ -2432,12 +2432,12 @@ EmitIfElse(FunctionCompiler& f, bool hasElse) if (hasElse) { f.switchToElse(elseOrJoinBlock); - Stmt nextStmt(f.readStmtOp()); - if (nextStmt == Stmt::IfThen) { + Expr nextStmt(f.readOpcode()); + if (nextStmt == Expr::IfThen) { hasElse = false; goto recurse; } - if (nextStmt == Stmt::IfElse) { + if (nextStmt == Expr::IfElse) { hasElse = true; goto recurse; } @@ -2545,45 +2545,45 @@ EmitBreak(FunctionCompiler& f, bool hasLabel) } static bool -EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels /*= nullptr */) 
+EmitStatement(FunctionCompiler& f, Expr stmt, LabelVector* maybeLabels /*= nullptr */) { if (!f.mirGen().ensureBallast()) return false; MDefinition* _; switch (stmt) { - case Stmt::Block: return EmitBlock(f); - case Stmt::IfThen: return EmitIfElse(f, HasElseBlock(false)); - case Stmt::IfElse: return EmitIfElse(f, HasElseBlock(true)); - case Stmt::Switch: return EmitSwitch(f); - case Stmt::While: return EmitWhile(f, maybeLabels); - case Stmt::DoWhile: return EmitDoWhile(f, maybeLabels); - case Stmt::ForInitInc: - case Stmt::ForInitNoInc: - case Stmt::ForNoInitNoInc: - case Stmt::ForNoInitInc: return EmitFor(f, stmt, maybeLabels); - case Stmt::Label: return EmitLabel(f, maybeLabels); - case Stmt::Continue: return EmitContinue(f, HasLabel(false)); - case Stmt::ContinueLabel: return EmitContinue(f, HasLabel(true)); - case Stmt::Break: return EmitBreak(f, HasLabel(false)); - case Stmt::BreakLabel: return EmitBreak(f, HasLabel(true)); - case Stmt::Ret: return EmitRet(f); - case Stmt::I32Expr: return EmitI32Expr(f, &_); - case Stmt::F32Expr: return EmitF32Expr(f, &_); - case Stmt::F64Expr: return EmitF64Expr(f, &_); - case Stmt::I32X4Expr: return EmitI32X4Expr(f, &_); - case Stmt::F32X4Expr: return EmitF32X4Expr(f, &_); - case Stmt::B32X4Expr: return EmitB32X4Expr(f, &_); - case Stmt::CallInternal: return EmitInternalCall(f, ExprType::Void, &_); - case Stmt::CallIndirect: return EmitFuncPtrCall(f, ExprType::Void, &_); - case Stmt::CallImport: return EmitFFICall(f, ExprType::Void, &_); - case Stmt::AtomicsFence: f.memoryBarrier(MembarFull); return true; - case Stmt::Noop: return true; - case Stmt::Id: return EmitStatement(f); - case Stmt::InterruptCheckHead: return EmitInterruptCheck(f); - case Stmt::InterruptCheckLoop: return EmitInterruptCheckLoop(f); - case Stmt::DebugCheckPoint: - case Stmt::Bad: break; + case Expr::Block: return EmitBlock(f); + case Expr::IfThen: return EmitIfElse(f, HasElseBlock(false)); + case Expr::IfElse: return EmitIfElse(f, HasElseBlock(true)); 
+ case Expr::Switch: return EmitSwitch(f); + case Expr::While: return EmitWhile(f, maybeLabels); + case Expr::DoWhile: return EmitDoWhile(f, maybeLabels); + case Expr::ForInitInc: + case Expr::ForInitNoInc: + case Expr::ForNoInitNoInc: + case Expr::ForNoInitInc: return EmitFor(f, stmt, maybeLabels); + case Expr::Label: return EmitLabel(f, maybeLabels); + case Expr::Continue: return EmitContinue(f, HasLabel(false)); + case Expr::ContinueLabel: return EmitContinue(f, HasLabel(true)); + case Expr::Break: return EmitBreak(f, HasLabel(false)); + case Expr::BreakLabel: return EmitBreak(f, HasLabel(true)); + case Expr::Ret: return EmitRet(f); + case Expr::I32Expr: return EmitI32Expr(f, &_); + case Expr::F32Expr: return EmitF32Expr(f, &_); + case Expr::F64Expr: return EmitF64Expr(f, &_); + case Expr::I32X4Expr: return EmitI32X4Expr(f, &_); + case Expr::F32X4Expr: return EmitF32X4Expr(f, &_); + case Expr::B32X4Expr: return EmitB32X4Expr(f, &_); + case Expr::CallInternal: return EmitInternalCall(f, ExprType::Void, &_); + case Expr::CallIndirect: return EmitFuncPtrCall(f, ExprType::Void, &_); + case Expr::CallImport: return EmitFFICall(f, ExprType::Void, &_); + case Expr::AtomicsFence: f.memoryBarrier(MembarFull); return true; + case Expr::Noop: return true; + case Expr::Id: return EmitStatement(f); + case Expr::InterruptCheckHead: return EmitInterruptCheck(f); + case Expr::InterruptCheckLoop: return EmitInterruptCheckLoop(f); + case Expr::DebugCheckPoint: + default: break; } MOZ_CRASH("unexpected statement"); } @@ -2591,139 +2591,139 @@ EmitStatement(FunctionCompiler& f, Stmt stmt, LabelVector* maybeLabels /*= nullp static bool EmitStatement(FunctionCompiler& f, LabelVector* maybeLabels /* = nullptr */) { - Stmt stmt(f.readStmtOp()); + Expr stmt(f.readOpcode()); return EmitStatement(f, stmt, maybeLabels); } static bool EmitI32Expr(FunctionCompiler& f, MDefinition** def) { - I32 op = I32(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case I32::Id: + case Expr::I32Id: 
return EmitI32Expr(f, def); - case I32::Literal: + case Expr::I32Literal: return EmitLiteral(f, ValType::I32, def); - case I32::GetLocal: + case Expr::I32GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Int32), def); - case I32::SetLocal: + case Expr::I32SetLocal: return EmitSetLoc(f, ValType::I32, def); - case I32::GetGlobal: + case Expr::I32GetGlobal: return EmitGetGlo(f, MIRType_Int32, def); - case I32::SetGlobal: + case Expr::I32SetGlobal: return EmitSetGlo(f, ValType::I32, def); - case I32::CallInternal: + case Expr::I32CallInternal: return EmitInternalCall(f, ExprType::I32, def); - case I32::CallIndirect: + case Expr::I32CallIndirect: return EmitFuncPtrCall(f, ExprType::I32, def); - case I32::CallImport: + case Expr::I32CallImport: return EmitFFICall(f, ExprType::I32, def); - case I32::Conditional: + case Expr::I32Conditional: return EmitConditional(f, ValType::I32, def); - case I32::Comma: + case Expr::I32Comma: return EmitComma(f, ValType::I32, def); - case I32::Add: + case Expr::I32Add: return EmitAddOrSub(f, ValType::I32, IsAdd(true), def); - case I32::Sub: + case Expr::I32Sub: return EmitAddOrSub(f, ValType::I32, IsAdd(false), def); - case I32::Mul: + case Expr::I32Mul: return EmitMultiply(f, ValType::I32, def); - case I32::UDiv: - case I32::SDiv: - return EmitDivOrMod(f, ValType::I32, IsDiv(true), IsUnsigned(op == I32::UDiv), def); - case I32::UMod: - case I32::SMod: - return EmitDivOrMod(f, ValType::I32, IsDiv(false), IsUnsigned(op == I32::UMod), def); - case I32::Min: + case Expr::I32UDiv: + case Expr::I32SDiv: + return EmitDivOrMod(f, ValType::I32, IsDiv(true), IsUnsigned(op == Expr::I32UDiv), def); + case Expr::I32UMod: + case Expr::I32SMod: + return EmitDivOrMod(f, ValType::I32, IsDiv(false), IsUnsigned(op == Expr::I32UMod), def); + case Expr::I32Min: return EmitMathMinMax(f, ValType::I32, IsMax(false), def); - case I32::Max: + case Expr::I32Max: return EmitMathMinMax(f, ValType::I32, IsMax(true), def); - case I32::Not: + case Expr::I32Not: return 
EmitUnary(f, ValType::I32, def); - case I32::FromF32: + case Expr::I32FromF32: return EmitUnary(f, ValType::F32, def); - case I32::FromF64: + case Expr::I32FromF64: return EmitUnary(f, ValType::F64, def); - case I32::Clz: + case Expr::I32Clz: return EmitUnary(f, ValType::I32, def); - case I32::Abs: + case Expr::I32Abs: return EmitUnaryMir(f, ValType::I32, def); - case I32::Neg: + case Expr::I32Neg: return EmitUnaryMir(f, ValType::I32, def); - case I32::BitOr: + case Expr::I32BitOr: return EmitBitwise(f, def); - case I32::BitAnd: + case Expr::I32BitAnd: return EmitBitwise(f, def); - case I32::BitXor: + case Expr::I32BitXor: return EmitBitwise(f, def); - case I32::Lsh: + case Expr::I32Lsh: return EmitBitwise(f, def); - case I32::ArithRsh: + case Expr::I32ArithRsh: return EmitBitwise(f, def); - case I32::LogicRsh: + case Expr::I32LogicRsh: return EmitBitwise(f, def); - case I32::BitNot: + case Expr::I32BitNot: return EmitBitwise(f, def); - case I32::SLoad8: + case Expr::I32SLoad8: return EmitLoadArray(f, Scalar::Int8, def); - case I32::SLoad16: + case Expr::I32SLoad16: return EmitLoadArray(f, Scalar::Int16, def); - case I32::SLoad32: + case Expr::I32SLoad32: return EmitLoadArray(f, Scalar::Int32, def); - case I32::ULoad8: + case Expr::I32ULoad8: return EmitLoadArray(f, Scalar::Uint8, def); - case I32::ULoad16: + case Expr::I32ULoad16: return EmitLoadArray(f, Scalar::Uint16, def); - case I32::ULoad32: + case Expr::I32ULoad32: return EmitLoadArray(f, Scalar::Uint32, def); - case I32::Store8: + case Expr::I32Store8: return EmitStore(f, Scalar::Int8, def); - case I32::Store16: + case Expr::I32Store16: return EmitStore(f, Scalar::Int16, def); - case I32::Store32: + case Expr::I32Store32: return EmitStore(f, Scalar::Int32, def); - case I32::EqI32: - case I32::NeI32: - case I32::SLtI32: - case I32::SLeI32: - case I32::SGtI32: - case I32::SGeI32: - case I32::ULtI32: - case I32::ULeI32: - case I32::UGtI32: - case I32::UGeI32: - case I32::EqF32: - case I32::NeF32: - case 
I32::LtF32: - case I32::LeF32: - case I32::GtF32: - case I32::GeF32: - case I32::EqF64: - case I32::NeF64: - case I32::LtF64: - case I32::LeF64: - case I32::GtF64: - case I32::GeF64: + case Expr::I32EqI32: + case Expr::I32NeI32: + case Expr::I32SLtI32: + case Expr::I32SLeI32: + case Expr::I32SGtI32: + case Expr::I32SGeI32: + case Expr::I32ULtI32: + case Expr::I32ULeI32: + case Expr::I32UGtI32: + case Expr::I32UGeI32: + case Expr::I32EqF32: + case Expr::I32NeF32: + case Expr::I32LtF32: + case Expr::I32LeF32: + case Expr::I32GtF32: + case Expr::I32GeF32: + case Expr::I32EqF64: + case Expr::I32NeF64: + case Expr::I32LtF64: + case Expr::I32LeF64: + case Expr::I32GtF64: + case Expr::I32GeF64: return EmitComparison(f, op, def); - case I32::AtomicsCompareExchange: + case Expr::I32AtomicsCompareExchange: return EmitAtomicsCompareExchange(f, def); - case I32::AtomicsExchange: + case Expr::I32AtomicsExchange: return EmitAtomicsExchange(f, def); - case I32::AtomicsLoad: + case Expr::I32AtomicsLoad: return EmitAtomicsLoad(f, def); - case I32::AtomicsStore: + case Expr::I32AtomicsStore: return EmitAtomicsStore(f, def); - case I32::AtomicsBinOp: + case Expr::I32AtomicsBinOp: return EmitAtomicsBinOp(f, def); - case I32::I32X4ExtractLane: + case Expr::I32I32X4ExtractLane: return EmitExtractLane(f, ValType::I32x4, def); - case I32::B32X4ExtractLane: + case Expr::I32B32X4ExtractLane: return EmitExtractLane(f, ValType::B32x4, def); - case I32::B32X4AllTrue: + case Expr::I32B32X4AllTrue: return EmitSimdAllTrue(f, ValType::B32x4, def); - case I32::B32X4AnyTrue: + case Expr::I32B32X4AnyTrue: return EmitSimdAnyTrue(f, ValType::B32x4, def); - case I32::Bad: + default: break; } MOZ_CRASH("unexpected i32 expression"); @@ -2732,66 +2732,66 @@ EmitI32Expr(FunctionCompiler& f, MDefinition** def) static bool EmitF32Expr(FunctionCompiler& f, MDefinition** def) { - F32 op = F32(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case F32::Id: + case Expr::F32Id: return EmitF32Expr(f, def); - 
case F32::Literal: + case Expr::F32Literal: return EmitLiteral(f, ValType::F32, def); - case F32::GetLocal: + case Expr::F32GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Float32), def); - case F32::SetLocal: + case Expr::F32SetLocal: return EmitSetLoc(f, ValType::F32, def); - case F32::GetGlobal: + case Expr::F32GetGlobal: return EmitGetGlo(f, MIRType_Float32, def); - case F32::SetGlobal: + case Expr::F32SetGlobal: return EmitSetGlo(f, ValType::F32, def); - case F32::CallInternal: + case Expr::F32CallInternal: return EmitInternalCall(f, ExprType::F32, def); - case F32::CallIndirect: + case Expr::F32CallIndirect: return EmitFuncPtrCall(f, ExprType::F32, def); - case F32::CallImport: + case Expr::F32CallImport: return EmitFFICall(f, ExprType::F32, def); - case F32::Conditional: + case Expr::F32Conditional: return EmitConditional(f, ValType::F32, def); - case F32::Comma: + case Expr::F32Comma: return EmitComma(f, ValType::F32, def); - case F32::Add: + case Expr::F32Add: return EmitAddOrSub(f, ValType::F32, IsAdd(true), def); - case F32::Sub: + case Expr::F32Sub: return EmitAddOrSub(f, ValType::F32, IsAdd(false), def); - case F32::Mul: + case Expr::F32Mul: return EmitMultiply(f, ValType::F32, def); - case F32::Div: + case Expr::F32Div: return EmitDivOrMod(f, ValType::F32, IsDiv(true), def); - case F32::Min: + case Expr::F32Min: return EmitMathMinMax(f, ValType::F32, IsMax(false), def); - case F32::Max: + case Expr::F32Max: return EmitMathMinMax(f, ValType::F32, IsMax(true), def); - case F32::Neg: + case Expr::F32Neg: return EmitUnaryMir(f, ValType::F32, def); - case F32::Abs: + case Expr::F32Abs: return EmitUnaryMir(f, ValType::F32, def); - case F32::Sqrt: + case Expr::F32Sqrt: return EmitUnaryMir(f, ValType::F32, def); - case F32::Ceil: - case F32::Floor: - return EmitMathBuiltinCall(f, op, def); - case F32::FromF64: + case Expr::F32Ceil: + case Expr::F32Floor: + return EmitF32MathBuiltinCall(f, op, def); + case Expr::F32FromF64: return EmitUnary(f, ValType::F64, 
def); - case F32::FromS32: + case Expr::F32FromS32: return EmitUnary(f, ValType::I32, def); - case F32::FromU32: + case Expr::F32FromU32: return EmitUnary(f, ValType::I32, def); - case F32::Load: + case Expr::F32Load: return EmitLoadArray(f, Scalar::Float32, def); - case F32::StoreF32: + case Expr::F32StoreF32: return EmitStore(f, Scalar::Float32, def); - case F32::StoreF64: + case Expr::F32StoreF64: return EmitStoreWithCoercion(f, Scalar::Float32, Scalar::Float64, def); - case F32::F32X4ExtractLane: + case Expr::F32F32X4ExtractLane: return EmitExtractLane(f, ValType::F32x4, def); - case F32::Bad: + default: break; } MOZ_CRASH("unexpected f32 expression"); @@ -2800,76 +2800,76 @@ EmitF32Expr(FunctionCompiler& f, MDefinition** def) static bool EmitF64Expr(FunctionCompiler& f, MDefinition** def) { - F64 op = F64(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case F64::Id: + case Expr::F64Id: return EmitF64Expr(f, def); - case F64::GetLocal: + case Expr::F64GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Double), def); - case F64::SetLocal: + case Expr::F64SetLocal: return EmitSetLoc(f, ValType::F64, def); - case F64::GetGlobal: + case Expr::F64GetGlobal: return EmitGetGlo(f, MIRType_Double, def); - case F64::SetGlobal: + case Expr::F64SetGlobal: return EmitSetGlo(f, ValType::F64, def); - case F64::Literal: + case Expr::F64Literal: return EmitLiteral(f, ValType::F64, def); - case F64::Add: + case Expr::F64Add: return EmitAddOrSub(f, ValType::F64, IsAdd(true), def); - case F64::Sub: + case Expr::F64Sub: return EmitAddOrSub(f, ValType::F64, IsAdd(false), def); - case F64::Mul: + case Expr::F64Mul: return EmitMultiply(f, ValType::F64, def); - case F64::Div: + case Expr::F64Div: return EmitDivOrMod(f, ValType::F64, IsDiv(true), def); - case F64::Mod: + case Expr::F64Mod: return EmitDivOrMod(f, ValType::F64, IsDiv(false), def); - case F64::Min: + case Expr::F64Min: return EmitMathMinMax(f, ValType::F64, IsMax(false), def); - case F64::Max: + case Expr::F64Max: 
return EmitMathMinMax(f, ValType::F64, IsMax(true), def); - case F64::Neg: + case Expr::F64Neg: return EmitUnaryMir(f, ValType::F64, def); - case F64::Abs: + case Expr::F64Abs: return EmitUnaryMir(f, ValType::F64, def); - case F64::Sqrt: + case Expr::F64Sqrt: return EmitUnaryMir(f, ValType::F64, def); - case F64::Ceil: - case F64::Floor: - case F64::Sin: - case F64::Cos: - case F64::Tan: - case F64::Asin: - case F64::Acos: - case F64::Atan: - case F64::Exp: - case F64::Log: - case F64::Pow: - case F64::Atan2: - return EmitMathBuiltinCall(f, op, def); - case F64::FromF32: + case Expr::F64Ceil: + case Expr::F64Floor: + case Expr::F64Sin: + case Expr::F64Cos: + case Expr::F64Tan: + case Expr::F64Asin: + case Expr::F64Acos: + case Expr::F64Atan: + case Expr::F64Exp: + case Expr::F64Log: + case Expr::F64Pow: + case Expr::F64Atan2: + return EmitF64MathBuiltinCall(f, op, def); + case Expr::F64FromF32: return EmitUnary(f, ValType::F32, def); - case F64::FromS32: + case Expr::F64FromS32: return EmitUnary(f, ValType::I32, def); - case F64::FromU32: + case Expr::F64FromU32: return EmitUnary(f, ValType::I32, def); - case F64::Load: + case Expr::F64Load: return EmitLoadArray(f, Scalar::Float64, def); - case F64::StoreF64: + case Expr::F64StoreF64: return EmitStore(f, Scalar::Float64, def); - case F64::StoreF32: + case Expr::F64StoreF32: return EmitStoreWithCoercion(f, Scalar::Float64, Scalar::Float32, def); - case F64::CallInternal: + case Expr::F64CallInternal: return EmitInternalCall(f, ExprType::F64, def); - case F64::CallIndirect: + case Expr::F64CallIndirect: return EmitFuncPtrCall(f, ExprType::F64, def); - case F64::CallImport: + case Expr::F64CallImport: return EmitFFICall(f, ExprType::F64, def); - case F64::Conditional: + case Expr::F64Conditional: return EmitConditional(f, ValType::F64, def); - case F64::Comma: + case Expr::F64Comma: return EmitComma(f, ValType::F64, def); - case F64::Bad: + default: break; } MOZ_CRASH("unexpected f64 expression"); @@ -2878,59 +2878,59 
@@ EmitF64Expr(FunctionCompiler& f, MDefinition** def) static bool EmitI32X4Expr(FunctionCompiler& f, MDefinition** def) { - I32X4 op = I32X4(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case I32X4::Id: + case Expr::I32X4Id: return EmitI32X4Expr(f, def); - case I32X4::GetLocal: + case Expr::I32X4GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Int32x4), def); - case I32X4::SetLocal: + case Expr::I32X4SetLocal: return EmitSetLoc(f, ValType::I32x4, def); - case I32X4::GetGlobal: + case Expr::I32X4GetGlobal: return EmitGetGlo(f, MIRType_Int32x4, def); - case I32X4::SetGlobal: + case Expr::I32X4SetGlobal: return EmitSetGlo(f, ValType::I32x4, def); - case I32X4::Comma: + case Expr::I32X4Comma: return EmitComma(f, ValType::I32x4, def); - case I32X4::Conditional: + case Expr::I32X4Conditional: return EmitConditional(f, ValType::I32x4, def); - case I32X4::CallInternal: + case Expr::I32X4CallInternal: return EmitInternalCall(f, ExprType::I32x4, def); - case I32X4::CallIndirect: + case Expr::I32X4CallIndirect: return EmitFuncPtrCall(f, ExprType::I32x4, def); - case I32X4::CallImport: + case Expr::I32X4CallImport: return EmitFFICall(f, ExprType::I32x4, def); - case I32X4::Literal: + case Expr::I32X4Literal: return EmitLiteral(f, ValType::I32x4, def); - case I32X4::Ctor: + case Expr::I32X4Ctor: return EmitSimdCtor(f, ValType::I32x4, def); - case I32X4::Unary: + case Expr::I32X4Unary: return EmitSimdUnary(f, ValType::I32x4, def); - case I32X4::Binary: + case Expr::I32X4Binary: return EmitSimdBinaryArith(f, ValType::I32x4, def); - case I32X4::BinaryBitwise: + case Expr::I32X4BinaryBitwise: return EmitSimdBinaryBitwise(f, ValType::I32x4, def); - case I32X4::BinaryShift: + case Expr::I32X4BinaryShift: return EmitSimdBinaryShift(f, def); - case I32X4::ReplaceLane: + case Expr::I32X4ReplaceLane: return EmitSimdReplaceLane(f, ValType::I32x4, def); - case I32X4::FromF32X4: + case Expr::I32X4FromF32X4: return EmitSimdCast(f, ValType::F32x4, ValType::I32x4, def); - case 
I32X4::FromF32X4Bits: + case Expr::I32X4FromF32X4Bits: return EmitSimdCast(f, ValType::F32x4, ValType::I32x4, def); - case I32X4::Swizzle: + case Expr::I32X4Swizzle: return EmitSimdSwizzle(f, ValType::I32x4, def); - case I32X4::Shuffle: + case Expr::I32X4Shuffle: return EmitSimdShuffle(f, ValType::I32x4, def); - case I32X4::Select: + case Expr::I32X4Select: return EmitSimdSelect(f, ValType::I32x4, def); - case I32X4::Splat: + case Expr::I32X4Splat: return EmitSimdSplat(f, ValType::I32x4, def); - case I32X4::Load: + case Expr::I32X4Load: return EmitSimdLoad(f, ValType::I32x4, def); - case I32X4::Store: + case Expr::I32X4Store: return EmitSimdStore(f, ValType::I32x4, def); - case I32X4::Bad: + default: break; } MOZ_CRASH("unexpected int32x4 expression"); @@ -2939,55 +2939,55 @@ EmitI32X4Expr(FunctionCompiler& f, MDefinition** def) static bool EmitF32X4Expr(FunctionCompiler& f, MDefinition** def) { - F32X4 op = F32X4(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case F32X4::Id: + case Expr::F32X4Id: return EmitF32X4Expr(f, def); - case F32X4::GetLocal: + case Expr::F32X4GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Float32x4), def); - case F32X4::SetLocal: + case Expr::F32X4SetLocal: return EmitSetLoc(f, ValType::F32x4, def); - case F32X4::GetGlobal: + case Expr::F32X4GetGlobal: return EmitGetGlo(f, MIRType_Float32x4, def); - case F32X4::SetGlobal: + case Expr::F32X4SetGlobal: return EmitSetGlo(f, ValType::F32x4, def); - case F32X4::Comma: + case Expr::F32X4Comma: return EmitComma(f, ValType::F32x4, def); - case F32X4::Conditional: + case Expr::F32X4Conditional: return EmitConditional(f, ValType::F32x4, def); - case F32X4::CallInternal: + case Expr::F32X4CallInternal: return EmitInternalCall(f, ExprType::F32x4, def); - case F32X4::CallIndirect: + case Expr::F32X4CallIndirect: return EmitFuncPtrCall(f, ExprType::F32x4, def); - case F32X4::CallImport: + case Expr::F32X4CallImport: return EmitFFICall(f, ExprType::F32x4, def); - case F32X4::Literal: + case 
Expr::F32X4Literal: return EmitLiteral(f, ValType::F32x4, def); - case F32X4::Ctor: + case Expr::F32X4Ctor: return EmitSimdCtor(f, ValType::F32x4, def); - case F32X4::Unary: + case Expr::F32X4Unary: return EmitSimdUnary(f, ValType::F32x4, def); - case F32X4::Binary: + case Expr::F32X4Binary: return EmitSimdBinaryArith(f, ValType::F32x4, def); - case F32X4::ReplaceLane: + case Expr::F32X4ReplaceLane: return EmitSimdReplaceLane(f, ValType::F32x4, def); - case F32X4::FromI32X4: + case Expr::F32X4FromI32X4: return EmitSimdCast(f, ValType::I32x4, ValType::F32x4, def); - case F32X4::FromI32X4Bits: + case Expr::F32X4FromI32X4Bits: return EmitSimdCast(f, ValType::I32x4, ValType::F32x4, def); - case F32X4::Swizzle: + case Expr::F32X4Swizzle: return EmitSimdSwizzle(f, ValType::F32x4, def); - case F32X4::Shuffle: + case Expr::F32X4Shuffle: return EmitSimdShuffle(f, ValType::F32x4, def); - case F32X4::Select: + case Expr::F32X4Select: return EmitSimdSelect(f, ValType::F32x4, def); - case F32X4::Splat: + case Expr::F32X4Splat: return EmitSimdSplat(f, ValType::F32x4, def); - case F32X4::Load: + case Expr::F32X4Load: return EmitSimdLoad(f, ValType::F32x4, def); - case F32X4::Store: + case Expr::F32X4Store: return EmitSimdStore(f, ValType::F32x4, def); - case F32X4::Bad: + default: break; } MOZ_CRASH("unexpected float32x4 expression"); @@ -2996,47 +2996,47 @@ EmitF32X4Expr(FunctionCompiler& f, MDefinition** def) static bool EmitB32X4Expr(FunctionCompiler& f, MDefinition** def) { - B32X4 op = B32X4(f.readU8()); + Expr op(f.readOpcode()); switch (op) { - case B32X4::Id: + case Expr::B32X4Id: return EmitB32X4Expr(f, def); - case B32X4::GetLocal: + case Expr::B32X4GetLocal: return EmitGetLoc(f, DebugOnly(MIRType_Bool32x4), def); - case B32X4::SetLocal: + case Expr::B32X4SetLocal: return EmitSetLoc(f, ValType::B32x4, def); - case B32X4::GetGlobal: + case Expr::B32X4GetGlobal: return EmitGetGlo(f, MIRType_Bool32x4, def); - case B32X4::SetGlobal: + case Expr::B32X4SetGlobal: return 
EmitSetGlo(f, ValType::B32x4, def); - case B32X4::Comma: + case Expr::B32X4Comma: return EmitComma(f, ValType::B32x4, def); - case B32X4::Conditional: + case Expr::B32X4Conditional: return EmitConditional(f, ValType::B32x4, def); - case B32X4::CallInternal: + case Expr::B32X4CallInternal: return EmitInternalCall(f, ExprType::B32x4, def); - case B32X4::CallIndirect: + case Expr::B32X4CallIndirect: return EmitFuncPtrCall(f, ExprType::B32x4, def); - case B32X4::CallImport: + case Expr::B32X4CallImport: return EmitFFICall(f, ExprType::B32x4, def); - case B32X4::Literal: + case Expr::B32X4Literal: return EmitLiteral(f, ValType::B32x4, def); - case B32X4::Ctor: + case Expr::B32X4Ctor: return EmitSimdCtor(f, ValType::B32x4, def); - case B32X4::Unary: + case Expr::B32X4Unary: return EmitSimdUnary(f, ValType::B32x4, def); - case B32X4::Binary: + case Expr::B32X4Binary: return EmitSimdBinaryArith(f, ValType::B32x4, def); - case B32X4::BinaryBitwise: + case Expr::B32X4BinaryBitwise: return EmitSimdBinaryBitwise(f, ValType::B32x4, def); - case B32X4::BinaryCompI32X4: + case Expr::B32X4BinaryCompI32X4: return EmitSimdBinaryComp(f, ValType::I32x4, def); - case B32X4::BinaryCompF32X4: + case Expr::B32X4BinaryCompF32X4: return EmitSimdBinaryComp(f, ValType::F32x4, def); - case B32X4::ReplaceLane: + case Expr::B32X4ReplaceLane: return EmitSimdReplaceLane(f, ValType::B32x4, def); - case B32X4::Splat: + case Expr::B32X4Splat: return EmitSimdBooleanSplat(f, def); - case B32X4::Bad: + default: break; } MOZ_CRASH("unexpected bool32x4 expression");