Bug 1251225 - Implement wasm i64 binary arithmetic operators. r=sunfish

This commit is contained in:
Jan de Mooij 2016-03-01 14:20:45 +01:00
parent 90bbd027ca
commit b14c9d9cba
29 changed files with 594 additions and 36 deletions

View File

@ -478,7 +478,6 @@ DecodeExpr(FunctionDecoder& f, ExprType expected)
case Expr::I64DivU:
case Expr::I64RemS:
case Expr::I64RemU:
return f.fail("NYI: i64");
case Expr::I64And:
case Expr::I64Or:
case Expr::I64Xor:

View File

@ -2199,7 +2199,8 @@ EmitDivOrMod(FunctionCompiler& f, ExprType type, bool isDiv, bool isUnsigned, MD
static bool
EmitDivOrMod(FunctionCompiler& f, ExprType type, bool isDiv, MDefinition** def)
{
MOZ_ASSERT(type != ExprType::I32, "int div or mod must precise signedness");
MOZ_ASSERT(type != ExprType::I32 && type != ExprType::I64,
"int div or mod must indicate signedness");
return EmitDivOrMod(f, type, isDiv, false, def);
}
@ -2912,6 +2913,18 @@ EmitExpr(FunctionCompiler& f, ExprType type, MDefinition** def, LabelVector* may
return EmitBitwise<MRsh>(f, ExprType::I64, def);
case Expr::I64ShrU:
return EmitBitwise<MUrsh>(f, ExprType::I64, def);
case Expr::I64Add:
return EmitAddOrSub(f, ExprType::I64, IsAdd(true), def);
case Expr::I64Sub:
return EmitAddOrSub(f, ExprType::I64, IsAdd(false), def);
case Expr::I64Mul:
return EmitMultiply(f, ExprType::I64, def);
case Expr::I64DivS:
case Expr::I64DivU:
return EmitDivOrMod(f, ExprType::I64, IsDiv(true), IsUnsigned(op == Expr::I64DivU), def);
case Expr::I64RemS:
case Expr::I64RemU:
return EmitDivOrMod(f, ExprType::I64, IsDiv(false), IsUnsigned(op == Expr::I64RemU), def);
// F32
case Expr::F32Const:
return EmitLiteral(f, ExprType::F32, def);
@ -3081,13 +3094,6 @@ EmitExpr(FunctionCompiler& f, ExprType type, MDefinition** def, LabelVector* may
case Expr::I64Clz:
case Expr::I64Ctz:
case Expr::I64Popcnt:
case Expr::I64Add:
case Expr::I64Sub:
case Expr::I64Mul:
case Expr::I64DivS:
case Expr::I64DivU:
case Expr::I64RemS:
case Expr::I64RemU:
MOZ_CRASH("NYI");
case Expr::Unreachable:
break;

View File

@ -83,15 +83,38 @@ testComparison('i32', 'ge_u', 40, 40, 1);
//testUnary('i64', 'ctz', 40, 0); // TODO: NYI
//testUnary('i64', 'popcnt', 40, 0); // TODO: NYI
//testBinary('i64', 'add', 40, 2, 42); // TODO: NYI
//testBinary('i64', 'sub', 40, 2, 38); // TODO: NYI
//testBinary('i64', 'mul', 40, 2, 80); // TODO: NYI
//testBinary('i64', 'div_s', -40, 2, -20); // TODO: NYI
//testBinary('i64', 'div_u', -40, 2, 2147483628); // TODO: NYI
//testBinary('i64', 'rem_s', 40, -3, 1); // TODO: NYI
//testBinary('i64', 'rem_u', 40, -3, 40); // TODO: NYI
if (getBuildConfiguration().x64) {
testBinary('i64', 'add', 40, 2, 42);
testBinary('i64', 'add', "0x1234567887654321", -1, "0x1234567887654320");
testBinary('i64', 'add', "0xffffffffffffffff", 1, 0);
testBinary('i64', 'sub', 40, 2, 38);
testBinary('i64', 'sub', "0x1234567887654321", "0x123456789", "0x12345677641fdb98");
testBinary('i64', 'sub', 3, 5, -2);
testBinary('i64', 'mul', 40, 2, 80);
testBinary('i64', 'mul', -1, 2, -2);
testBinary('i64', 'mul', 0x123456, "0x9876543210", "0xad77d2c5f941160");
testBinary('i64', 'div_s', -40, 2, -20);
testBinary('i64', 'div_s', "0x1234567887654321", 2, "0x91a2b3c43b2a190");
testBinary('i64', 'div_s', "0x1234567887654321", "0x1000000000", "0x1234567");
testBinary('i64', 'div_u', -40, 2, "0x7fffffffffffffec");
testBinary('i64', 'div_u', "0x1234567887654321", 9, "0x205d0b80f0b4059");
testBinary('i64', 'rem_s', 40, -3, 1);
testBinary('i64', 'rem_s', "0x1234567887654321", "0x1000000000", "0x887654321");
testBinary('i64', 'rem_s', "0x7fffffffffffffff", -1, 0);
testBinary('i64', 'rem_s', "0x8000000000000001", 1000, -807);
testBinary('i64', 'rem_s', "0x8000000000000000", -1, 0);
testBinary('i64', 'rem_u', 40, -3, 40);
testBinary('i64', 'rem_u', "0x1234567887654321", "0x1000000000", "0x887654321");
testBinary('i64', 'rem_u', "0x8000000000000000", -1, "0x8000000000000000");
testBinary('i64', 'rem_u', "0x8ff00ff00ff00ff0", "0x100000001", "0x80000001");
// These should trap, but for now we match the i32 version.
testBinary('i64', 'div_s', 10, 0, 0);
testBinary('i64', 'div_s', "0x8000000000000000", -1, "0x8000000000000000");
testBinary('i64', 'div_u', 0, 0, 0);
testBinary('i64', 'rem_s', 10, 0, 0);
testBinary('i64', 'rem_u', 10, 0, 0);
testBinary('i64', 'and', 42, 6, 2);
testBinary('i64', 'or', 42, 6, 46);
testBinary('i64', 'xor', 42, 2, 40);

View File

@ -1555,17 +1555,29 @@ LIRGenerator::visitAdd(MAdd* ins)
return;
}
if (ins->specialization() == MIRType_Int64) {
MOZ_ASSERT(lhs->type() == MIRType_Int64);
ReorderCommutative(&lhs, &rhs, ins);
LAddI64* lir = new(alloc()) LAddI64;
lowerForALUInt64(lir, ins, lhs, rhs);
return;
}
if (ins->specialization() == MIRType_Double) {
MOZ_ASSERT(lhs->type() == MIRType_Double);
ReorderCommutative(&lhs, &rhs, ins);
lowerForFPU(new(alloc()) LMathD(JSOP_ADD), ins, lhs, rhs);
} else if (ins->specialization() == MIRType_Float32) {
return;
}
if (ins->specialization() == MIRType_Float32) {
MOZ_ASSERT(lhs->type() == MIRType_Float32);
ReorderCommutative(&lhs, &rhs, ins);
lowerForFPU(new(alloc()) LMathF(JSOP_ADD), ins, lhs, rhs);
} else {
lowerBinaryV(JSOP_ADD, ins);
return;
}
lowerBinaryV(JSOP_ADD, ins);
}
void
@ -1588,15 +1600,27 @@ LIRGenerator::visitSub(MSub* ins)
return;
}
if (ins->specialization() == MIRType_Int64) {
MOZ_ASSERT(lhs->type() == MIRType_Int64);
ReorderCommutative(&lhs, &rhs, ins);
LSubI64* lir = new(alloc()) LSubI64;
lowerForALUInt64(lir, ins, lhs, rhs);
return;
}
if (ins->specialization() == MIRType_Double) {
MOZ_ASSERT(lhs->type() == MIRType_Double);
lowerForFPU(new(alloc()) LMathD(JSOP_SUB), ins, lhs, rhs);
} else if (ins->specialization() == MIRType_Float32) {
return;
}
if (ins->specialization() == MIRType_Float32) {
MOZ_ASSERT(lhs->type() == MIRType_Float32);
lowerForFPU(new(alloc()) LMathF(JSOP_SUB), ins, lhs, rhs);
} else {
lowerBinaryV(JSOP_SUB, ins);
return;
}
lowerBinaryV(JSOP_SUB, ins);
}
void
@ -1616,7 +1640,18 @@ LIRGenerator::visitMul(MMul* ins)
defineReuseInput(new(alloc()) LNegI(useRegisterAtStart(lhs)), ins, 0);
else
lowerMulI(ins, lhs, rhs);
} else if (ins->specialization() == MIRType_Double) {
return;
}
if (ins->specialization() == MIRType_Int64) {
MOZ_ASSERT(lhs->type() == MIRType_Int64);
ReorderCommutative(&lhs, &rhs, ins);
LMulI64* lir = new(alloc()) LMulI64;
lowerForALUInt64(lir, ins, lhs, rhs);
return;
}
if (ins->specialization() == MIRType_Double) {
MOZ_ASSERT(lhs->type() == MIRType_Double);
ReorderCommutative(&lhs, &rhs, ins);
@ -1625,7 +1660,10 @@ LIRGenerator::visitMul(MMul* ins)
defineReuseInput(new(alloc()) LNegD(useRegisterAtStart(lhs)), ins, 0);
else
lowerForFPU(new(alloc()) LMathD(JSOP_MUL), ins, lhs, rhs);
} else if (ins->specialization() == MIRType_Float32) {
return;
}
if (ins->specialization() == MIRType_Float32) {
MOZ_ASSERT(lhs->type() == MIRType_Float32);
ReorderCommutative(&lhs, &rhs, ins);
@ -1634,9 +1672,10 @@ LIRGenerator::visitMul(MMul* ins)
defineReuseInput(new(alloc()) LNegF(useRegisterAtStart(lhs)), ins, 0);
else
lowerForFPU(new(alloc()) LMathF(JSOP_MUL), ins, lhs, rhs);
} else {
lowerBinaryV(JSOP_MUL, ins);
return;
}
lowerBinaryV(JSOP_MUL, ins);
}
void
@ -1649,15 +1688,28 @@ LIRGenerator::visitDiv(MDiv* ins)
if (ins->specialization() == MIRType_Int32) {
MOZ_ASSERT(lhs->type() == MIRType_Int32);
lowerDivI(ins);
} else if (ins->specialization() == MIRType_Double) {
return;
}
if (ins->specialization() == MIRType_Int64) {
MOZ_ASSERT(lhs->type() == MIRType_Int64);
lowerDivI64(ins);
return;
}
if (ins->specialization() == MIRType_Double) {
MOZ_ASSERT(lhs->type() == MIRType_Double);
lowerForFPU(new(alloc()) LMathD(JSOP_DIV), ins, lhs, rhs);
} else if (ins->specialization() == MIRType_Float32) {
return;
}
if (ins->specialization() == MIRType_Float32) {
MOZ_ASSERT(lhs->type() == MIRType_Float32);
lowerForFPU(new(alloc()) LMathF(JSOP_DIV), ins, lhs, rhs);
} else {
lowerBinaryV(JSOP_DIV, ins);
return;
}
lowerBinaryV(JSOP_DIV, ins);
}
void
@ -1669,7 +1721,17 @@ LIRGenerator::visitMod(MMod* ins)
MOZ_ASSERT(ins->type() == MIRType_Int32);
MOZ_ASSERT(ins->lhs()->type() == MIRType_Int32);
lowerModI(ins);
} else if (ins->specialization() == MIRType_Double) {
return;
}
if (ins->specialization() == MIRType_Int64) {
MOZ_ASSERT(ins->type() == MIRType_Int64);
MOZ_ASSERT(ins->lhs()->type() == MIRType_Int64);
lowerModI64(ins);
return;
}
if (ins->specialization() == MIRType_Double) {
MOZ_ASSERT(ins->type() == MIRType_Double);
MOZ_ASSERT(ins->lhs()->type() == MIRType_Double);
MOZ_ASSERT(ins->rhs()->type() == MIRType_Double);
@ -1678,9 +1740,10 @@ LIRGenerator::visitMod(MMod* ins)
LModD* lir = new(alloc()) LModD(useRegisterAtStart(ins->lhs()), useRegisterAtStart(ins->rhs()),
tempFixed(CallTempReg0));
defineReturn(lir, ins);
} else {
lowerBinaryV(JSOP_MOD, ins);
return;
}
lowerBinaryV(JSOP_MOD, ins);
}
void

View File

@ -2704,6 +2704,9 @@ MBinaryArithInstruction::foldsTo(TempAllocator& alloc)
if (specialization_ == MIRType_None)
return this;
if (specialization_ == MIRType_Int64)
return this;
MDefinition* lhs = getOperand(0);
MDefinition* rhs = getOperand(1);
if (MConstant* folded = EvaluateConstantOperands(alloc, this)) {
@ -2916,6 +2919,9 @@ MDiv::foldsTo(TempAllocator& alloc)
if (specialization_ == MIRType_None)
return this;
if (specialization_ == MIRType_Int64)
return this;
if (MDefinition* folded = EvaluateConstantOperands(alloc, this))
return folded;
@ -2978,6 +2984,9 @@ MMod::foldsTo(TempAllocator& alloc)
if (specialization_ == MIRType_None)
return this;
if (specialization_ == MIRType_Int64)
return this;
if (MDefinition* folded = EvaluateConstantOperands(alloc, this))
return folded;

View File

@ -6728,13 +6728,13 @@ class MMod : public MBinaryArithInstruction
}
bool canBeNegativeDividend() const {
MOZ_ASSERT(specialization_ == MIRType_Int32);
MOZ_ASSERT(specialization_ == MIRType_Int32 || specialization_ == MIRType_Int64);
MOZ_ASSERT(!unsigned_);
return canBeNegativeDividend_;
}
bool canBeDivideByZero() const {
MOZ_ASSERT(specialization_ == MIRType_Int32);
MOZ_ASSERT(specialization_ == MIRType_Int32 || specialization_ == MIRType_Int64);
return canBeDivideByZero_;
}

View File

@ -745,6 +745,7 @@ class MacroAssembler : public MacroAssemblerSpecific
inline void subPtr(Register src, Register dest) PER_ARCH;
inline void subPtr(Register src, const Address& dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
inline void subPtr(const Address& addr, Register dest) DEFINED_ON(mips_shared, arm, arm64, x86, x64);
inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

View File

@ -338,6 +338,18 @@ LIRGeneratorARM::lowerModI(MMod* mod)
defineFixed(lir, mod, LAllocation(AnyRegister(r1)));
}
// 64-bit integer division is not yet implemented for ARM; only x64 has a
// lowering so far (see LIRGeneratorX64::lowerDivI64).
void
LIRGeneratorARM::lowerDivI64(MDiv* div)
{
MOZ_CRASH("NYI");
}
// 64-bit integer modulus is not yet implemented for ARM.
void
LIRGeneratorARM::lowerModI64(MMod* mod)
{
MOZ_CRASH("NYI");
}
void
LIRGeneratorARM::visitPowHalf(MPowHalf* ins)
{

View File

@ -75,6 +75,8 @@ class LIRGeneratorARM : public LIRGeneratorShared
void lowerTruncateFToInt32(MTruncateToInt32* ins);
void lowerDivI(MDiv* div);
void lowerModI(MMod* mod);
void lowerDivI64(MDiv* div);
void lowerModI64(MMod* mod);
void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
void lowerUDiv(MDiv* div);
void lowerUMod(MMod* mod);

View File

@ -146,6 +146,18 @@ LIRGeneratorARM64::lowerModI(MMod* mod)
MOZ_CRASH("lowerModI");
}
// 64-bit integer division is not yet implemented for ARM64.
void
LIRGeneratorARM64::lowerDivI64(MDiv* div)
{
MOZ_CRASH("NYI");
}
// 64-bit integer modulus is not yet implemented for ARM64.
void
LIRGeneratorARM64::lowerModI64(MMod* mod)
{
MOZ_CRASH("NYI");
}
void
LIRGeneratorARM64::visitPowHalf(MPowHalf* ins)
{

View File

@ -77,6 +77,8 @@ class LIRGeneratorARM64 : public LIRGeneratorShared
void lowerTruncateFToInt32(MTruncateToInt32* ins);
void lowerDivI(MDiv* div);
void lowerModI(MMod* mod);
void lowerDivI64(MDiv* div);
void lowerModI64(MMod* mod);
void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
void lowerUDiv(MDiv* div);
void lowerUMod(MMod* mod);

View File

@ -188,6 +188,18 @@ LIRGeneratorMIPSShared::lowerModI(MMod* mod)
define(lir, mod);
}
// 64-bit integer division is not yet implemented for MIPS.
void
LIRGeneratorMIPSShared::lowerDivI64(MDiv* div)
{
MOZ_CRASH("NYI");
}
// 64-bit integer modulus is not yet implemented for MIPS.
void
LIRGeneratorMIPSShared::lowerModI64(MMod* mod)
{
MOZ_CRASH("NYI");
}
void
LIRGeneratorMIPSShared::visitPowHalf(MPowHalf* ins)
{

View File

@ -63,6 +63,8 @@ class LIRGeneratorMIPSShared : public LIRGeneratorShared
MDefinition* lhs, MDefinition* rhs);
void lowerDivI(MDiv* div);
void lowerModI(MMod* mod);
void lowerDivI64(MDiv* div);
void lowerModI64(MMod* mod);
void lowerMulI(MMul* mul, MDefinition* lhs, MDefinition* rhs);
void lowerUDiv(MDiv* div);
void lowerUMod(MMod* mod);

View File

@ -61,6 +61,8 @@ class LIRGeneratorNone : public LIRGeneratorShared
void lowerTruncateFToInt32(MTruncateToInt32*) { MOZ_CRASH(); }
void lowerDivI(MDiv*) { MOZ_CRASH(); }
void lowerModI(MMod*) { MOZ_CRASH(); }
void lowerDivI64(MDiv*) { MOZ_CRASH(); }
void lowerModI64(MMod*) { MOZ_CRASH(); }
void lowerMulI(MMul*, MDefinition*, MDefinition*) { MOZ_CRASH(); }
void lowerUDiv(MDiv*) { MOZ_CRASH(); }
void lowerUMod(MMod*) { MOZ_CRASH(); }

View File

@ -3418,6 +3418,12 @@ class LAddI : public LBinaryMath<0>
}
};
// Adds two 64-bit integers. Operands and the result each occupy
// INT64_PIECES allocations (one register on 64-bit targets, a low/high
// pair on 32-bit targets).
class LAddI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
{
public:
LIR_HEADER(AddI64)
};
// Subtracts two integers, returning an integer value.
class LSubI : public LBinaryMath<0>
{
@ -3445,6 +3451,18 @@ class LSubI : public LBinaryMath<0>
}
};
// Subtracts two 64-bit integers, yielding a 64-bit result.
class LSubI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
{
public:
LIR_HEADER(SubI64)
};
// Multiplies two 64-bit integers, yielding a 64-bit result.
class LMulI64 : public LInstructionHelper<INT64_PIECES, 2 * INT64_PIECES, 0>
{
public:
LIR_HEADER(MulI64)
};
// Performs an add, sub, mul, or div on two double values.
class LMathD : public LBinaryMath<0>
{

View File

@ -158,8 +158,11 @@
_(NotO) \
_(NotV) \
_(AddI) \
_(AddI64) \
_(SubI) \
_(SubI64) \
_(MulI) \
_(MulI64) \
_(MathD) \
_(MathF) \
_(DivI) \

View File

@ -73,6 +73,35 @@ LIRGeneratorShared::defineFixed(LInstructionHelper<1, X, Y>* lir, MDefinition* m
define(lir, mir, def);
}
// Define an int64-valued instruction whose output must live in the specific
// register(s) named by |output| (e.g. rax for a division quotient). On 64-bit
// targets this is a single fixed GENERAL definition; on 32-bit targets the
// value is split into a low/high pair occupying two consecutive virtual
// registers.
template <size_t Ops, size_t Temps> void
LIRGeneratorShared::defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
const LInt64Allocation& output)
{
uint32_t vreg = getVirtualRegister();
#if JS_BITS_PER_WORD == 64
LDefinition def(LDefinition::GENERAL, LDefinition::FIXED);
def.setOutput(output.value());
lir->setDef(0, def);
lir->getDef(0)->setVirtualRegister(vreg);
#else
LDefinition def0(LDefinition::GENERAL, LDefinition::FIXED);
def0.setOutput(output.low());
lir->setDef(0, def0);
lir->getDef(0)->setVirtualRegister(vreg);
// Reserve the second virtual register for the high word; it must be
// vreg + 1 so the pair can be recovered from the low vreg.
getVirtualRegister();
LDefinition def1(LDefinition::GENERAL, LDefinition::FIXED);
def1.setOutput(output.high());
lir->setDef(1, def1);
lir->getDef(1)->setVirtualRegister(vreg + 1);
#endif
lir->setMir(mir);
mir->setVirtualRegister(vreg);
add(lir);
}
template <size_t Ops, size_t Temps> void
LIRGeneratorShared::defineReuseInput(LInstructionHelper<1, Ops, Temps>* lir, MDefinition* mir, uint32_t operand)
{

View File

@ -146,6 +146,10 @@ class LIRGeneratorShared : public MDefinitionVisitor
inline void defineInt64(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
LDefinition::Policy policy = LDefinition::REGISTER);
template <size_t Ops, size_t Temps>
inline void defineInt64Fixed(LInstructionHelper<INT64_PIECES, Ops, Temps>* lir, MDefinition* mir,
const LInt64Allocation& output);
template <size_t Ops, size_t Temps>
inline void defineSinCos(LInstructionHelper<2, Ops, Temps> *lir, MDefinition *mir,
LDefinition::Policy policy = LDefinition::REGISTER);

View File

@ -617,10 +617,40 @@ class Assembler : public AssemblerX86Shared
// Signed 64-bit multiply: dest <- dest * src (register form).
void imulq(Register src, Register dest) {
masm.imulq_rr(src.encoding(), dest.encoding());
}
// Signed 64-bit multiply: dest <- dest * src, where src may be a register
// or a base+displacement memory operand. Absolute-address operands are not
// needed yet and crash if encountered.
void imulq(const Operand& src, Register dest) {
switch (src.kind()) {
case Operand::REG:
masm.imulq_rr(src.reg(), dest.encoding());
break;
case Operand::MEM_REG_DISP:
masm.imulq_mr(src.disp(), src.base(), dest.encoding());
break;
case Operand::MEM_ADDRESS32:
MOZ_CRASH("NYI");
break;
default:
MOZ_CRASH("unexpected operand kind");
}
}
// Sign-extend rax into rdx:rax (used to set up the dividend for idivq).
void cqo() {
masm.cqo();
}
// Signed 64-bit divide of rdx:rax by |divisor|; quotient -> rax,
// remainder -> rdx.
void idivq(Register divisor) {
masm.idivq_r(divisor.encoding());
}
// Unsigned 64-bit divide of rdx:rax by |divisor|; quotient -> rax,
// remainder -> rdx.
void udivq(Register divisor) {
masm.divq_r(divisor.encoding());
}
// Convert a signed 64-bit integer in |src| to double in |dest|.
void vcvtsi2sdq(Register src, FloatRegister dest) {
masm.vcvtsi2sdq_rr(src.encoding(), dest.encoding());
}
// Two's-complement negate of a 64-bit register in place.
void negq(Register reg) {
masm.negq_r(reg.encoding());
}
void mov(ImmWord word, Register dest) {
// Use xor for setting registers to zero, as it is specially optimized
// for this purpose on modern hardware. Note that it does clobber FLAGS

View File

@ -293,6 +293,30 @@ class BaseAssemblerX64 : public BaseAssembler
m_formatter.twoByteOp64(OP2_IMUL_GvEv, src, dst);
}
// Emit 'imulq [base+offset], dst' (two-byte opcode 0F AF with REX.W).
void imulq_mr(int32_t offset, RegisterID base, RegisterID dst)
{
spew("imulq " MEM_ob ", %s", ADDR_ob(offset, base), GPReg64Name(dst));
m_formatter.twoByteOp64(OP2_IMUL_GvEv, offset, base, dst);
}
// Emit 'cqo': sign-extend rax into rdx:rax. Encoded as CDQ (99) with a
// REX.W prefix.
void cqo()
{
spew("cqo ");
m_formatter.oneByteOp64(OP_CDQ);
}
// Emit 'idivq divisor': signed divide of rdx:rax (group-3 opcode F7 /7
// with REX.W).
void idivq_r(RegisterID divisor)
{
spew("idivq %s", GPReg64Name(divisor));
m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_IDIV);
}
// Emit 'divq divisor': unsigned divide of rdx:rax (group-3 opcode F7 /6
// with REX.W).
void divq_r(RegisterID divisor)
{
spew("divq %s", GPReg64Name(divisor));
m_formatter.oneByteOp64(OP_GROUP3_Ev, divisor, GROUP3_OP_DIV);
}
// Comparisons:
void cmpq_rr(RegisterID rhs, RegisterID lhs)

View File

@ -6,6 +6,8 @@
#include "jit/x64/CodeGenerator-x64.h"
#include "mozilla/MathAlgorithms.h"
#include "jit/IonCaches.h"
#include "jit/MIR.h"
@ -321,6 +323,160 @@ CodeGeneratorX64::visitShiftI64(LShiftI64* lir)
}
}
// Codegen for 64-bit integer addition. The lowering reuses the lhs register
// as the output (asserted below), so this is a two-operand add with either
// a 64-bit immediate or a register/memory rhs.
void
CodeGeneratorX64::visitAddI64(LAddI64* lir)
{
Register lhs = ToRegister(lir->getOperand(0));
const LAllocation* rhs = lir->getOperand(1);
// The result is written into lhs, so the definition must alias it.
MOZ_ASSERT(ToRegister(lir->getDef(0)) == lhs);
if (rhs->isConstant())
masm.addPtr(ImmWord(ToInt64(rhs)), lhs);
else
masm.addq(ToOperand(rhs), lhs);
}
// Codegen for 64-bit integer subtraction: lhs <- lhs - rhs. subPtr(ImmWord)
// handles immediates wider than 32 bits via the scratch register.
void
CodeGeneratorX64::visitSubI64(LSubI64* lir)
{
Register lhs = ToRegister(lir->getOperand(0));
const LAllocation* rhs = lir->getOperand(1);
// The result is written into lhs, so the definition must alias it.
MOZ_ASSERT(ToRegister(lir->getDef(0)) == lhs);
if (rhs->isConstant())
masm.subPtr(ImmWord(ToInt64(rhs)), lhs);
else
masm.subq(ToOperand(rhs), lhs);
}
// Codegen for 64-bit integer multiplication: lhs <- lhs * rhs.
// Constant right-hand sides are strength-reduced: -1 -> neg, 0 -> xor,
// 1 -> nop, 2 -> add, positive powers of two -> shift; anything else uses
// a full 64-bit multiply.
void
CodeGeneratorX64::visitMulI64(LMulI64* lir)
{
    Register lhs = ToRegister(lir->getOperand(0));
    const LAllocation* rhs = lir->getOperand(1);

    // The result is written into lhs, so the definition must alias it.
    MOZ_ASSERT(ToRegister(lir->getDef(0)) == lhs);

    if (rhs->isConstant()) {
        int64_t constant = ToInt64(rhs);
        switch (constant) {
          case -1:
            masm.negq(lhs);
            return;
          case 0:
            // xorl zero-extends, clearing the full 64-bit register.
            masm.xorl(lhs, lhs);
            return;
          case 1:
            // nop
            return;
          case 2:
            masm.addq(lhs, lhs);
            return;
          default:
            if (constant > 0) {
                // Use shift if constant is power of 2. The 1 must be shifted
                // as an int64_t: (1 << shift) is a 32-bit shift and is
                // undefined behavior for shift >= 31, so large powers of two
                // would never be detected (or worse).
                int32_t shift = mozilla::FloorLog2(constant);
                if ((int64_t(1) << shift) == constant) {
                    masm.shlq(Imm32(shift), lhs);
                    return;
                }
            }
            masm.mul64(Imm64(constant), Register64(lhs));
        }
    } else {
        masm.imulq(ToOperand(rhs), lhs);
    }
}
// Codegen for signed 64-bit division/modulus via idivq. idivq divides
// rdx:rax by its operand, leaving the quotient in rax and the remainder in
// rdx; the lowering fixes output/remainder to those registers (asserted
// below). idivq raises #DE on division by zero and on INT64_MIN / -1, so
// both cases are guarded explicitly before the divide.
void
CodeGeneratorX64::visitDivOrModI64(LDivOrModI64* lir)
{
Register lhs = ToRegister(lir->lhs());
Register rhs = ToRegister(lir->rhs());
Register output = ToRegister(lir->output());
// rax and rdx are clobbered by cqo/idivq, so rhs must not live there
// (except that lhs==rhs==rax is harmless since lhs is moved to rax anyway).
MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
MOZ_ASSERT(rhs != rdx);
// Exactly one of {rax, rdx} is the output; the other is the remainder temp.
MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);
Label done;
// Put the lhs in rax.
if (lhs != rax)
masm.mov(lhs, rax);
// Handle divide by zero. For now match asm.js and return 0, but
// eventually this should trap.
if (lir->canBeDivideByZero()) {
Label nonZero;
masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
masm.xorl(output, output);
masm.jump(&done);
masm.bind(&nonZero);
}
// Handle an integer overflow exception from INT64_MIN / -1. Eventually
// signed integer division should trap, instead of returning the
// LHS (INT64_MIN).
if (lir->canBeNegativeOverflow()) {
Label notmin;
masm.branchPtr(Assembler::NotEqual, lhs, ImmWord(INT64_MIN), &notmin);
masm.branchPtr(Assembler::NotEqual, rhs, ImmWord(-1), &notmin);
if (lir->mir()->isMod()) {
// INT64_MIN % -1 is 0.
masm.xorl(output, output);
} else {
// INT64_MIN / -1 overflows; return the lhs (INT64_MIN) for now.
if (lhs != output)
masm.mov(lhs, output);
}
masm.jump(&done);
masm.bind(&notmin);
}
// Sign extend the lhs into rdx to make rdx:rax.
masm.cqo();
masm.idivq(rhs);
masm.bind(&done);
}
// Codegen for unsigned 64-bit division/modulus via divq. divq divides
// rdx:rax by its operand (quotient -> rax, remainder -> rdx); rdx is
// zeroed rather than sign-extended since the dividend is unsigned.
// Unlike the signed path, only divide-by-zero needs a guard — unsigned
// division cannot overflow.
void
CodeGeneratorX64::visitUDivOrMod64(LUDivOrMod64* lir)
{
Register lhs = ToRegister(lir->lhs());
Register rhs = ToRegister(lir->rhs());
Register output = ToRegister(lir->output());
// rax and rdx are clobbered by the divide, so rhs must not live there.
MOZ_ASSERT_IF(lhs != rhs, rhs != rax);
MOZ_ASSERT(rhs != rdx);
// Exactly one of {rax, rdx} is the output; the other is the remainder temp.
MOZ_ASSERT_IF(output == rax, ToRegister(lir->remainder()) == rdx);
MOZ_ASSERT_IF(output == rdx, ToRegister(lir->remainder()) == rax);
// Put the lhs in rax.
if (lhs != rax)
masm.mov(lhs, rax);
Label done;
// Prevent divide by zero. For now match asm.js and return 0, but
// eventually this should trap.
if (lir->canBeDivideByZero()) {
Label nonZero;
masm.branchTestPtr(Assembler::NonZero, rhs, rhs, &nonZero);
masm.xorl(output, output);
masm.jump(&done);
masm.bind(&nonZero);
}
// Zero extend the lhs into rdx to make (rdx:rax).
masm.xorl(rdx, rdx);
masm.udivq(rhs);
masm.bind(&done);
}
void
CodeGeneratorX64::visitAsmJSUInt32ToDouble(LAsmJSUInt32ToDouble* lir)
{

View File

@ -46,6 +46,11 @@ class CodeGeneratorX64 : public CodeGeneratorX86Shared
void visitCompare64AndBranch(LCompare64AndBranch* lir);
void visitBitOpI64(LBitOpI64* lir);
void visitShiftI64(LShiftI64* lir);
void visitAddI64(LAddI64* lir);
void visitSubI64(LSubI64* lir);
void visitMulI64(LMulI64* lir);
void visitDivOrModI64(LDivOrModI64* lir);
void visitUDivOrMod64(LUDivOrMod64* lir);
void visitTruncateDToInt32(LTruncateDToInt32* ins);
void visitTruncateFToInt32(LTruncateFToInt32* ins);
void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);

View File

@ -99,6 +99,71 @@ class LAsmJSLoadFuncPtr : public LInstructionHelper<1, 1, 1>
}
};
// Signed 64-bit division or modulus, implemented with x64 idivq. The
// output is fixed to rax (quotient, for div) or rdx (remainder, for mod)
// by the lowering; |temp| holds whichever of rax/rdx is not the output.
class LDivOrModI64 : public LBinaryMath<1>
{
public:
LIR_HEADER(DivOrModI64)
LDivOrModI64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
setOperand(0, lhs);
setOperand(1, rhs);
setTemp(0, temp);
}
// The register idivq writes the non-output half of its result into.
const LDefinition* remainder() {
return getTemp(0);
}
// This instruction is shared between MDiv and MMod.
MBinaryArithInstruction* mir() const {
MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
return static_cast<MBinaryArithInstruction*>(mir_);
}
bool canBeDivideByZero() const {
if (mir_->isMod())
return mir_->toMod()->canBeDivideByZero();
return mir_->toDiv()->canBeDivideByZero();
}
// Whether INT64_MIN / -1 (which raises #DE on x64) is possible.
bool canBeNegativeOverflow() const {
if (mir_->isMod())
return mir_->toMod()->canBeNegativeDividend();
return mir_->toDiv()->canBeNegativeOverflow();
}
};
// This class performs an unsigned 64-bit x64 'divq', yielding either a
// quotient or remainder depending on whether this instruction is defined
// to output rax (quotient) or rdx (remainder). |temp| holds whichever of
// rax/rdx is not the output.
class LUDivOrMod64 : public LBinaryMath<1>
{
public:
LIR_HEADER(UDivOrMod64);
LUDivOrMod64(const LAllocation& lhs, const LAllocation& rhs, const LDefinition& temp) {
setOperand(0, lhs);
setOperand(1, rhs);
setTemp(0, temp);
}
// The register divq writes the non-output half of its result into.
const LDefinition* remainder() {
return getTemp(0);
}
const char* extraName() const {
return mir()->isTruncated() ? "Truncated" : nullptr;
}
// This instruction is shared between MDiv and MMod.
MBinaryArithInstruction* mir() const {
MOZ_ASSERT(mir_->isDiv() || mir_->isMod());
return static_cast<MBinaryArithInstruction*>(mir_);
}
bool canBeDivideByZero() const {
if (mir_->isMod())
return mir_->toMod()->canBeDivideByZero();
return mir_->toDiv()->canBeDivideByZero();
}
};
} // namespace jit
} // namespace js

View File

@ -11,6 +11,8 @@
#define LIR_CPU_OPCODE_LIST(_) \
_(DivOrModConstantI) \
_(DivOrModI64) \
_(UDivOrMod64) \
_(SimdValueInt32x4) \
_(SimdValueFloat32x4) \
_(UDivOrMod) \

View File

@ -341,3 +341,47 @@ LIRGeneratorX64::visitRandom(MRandom* ins)
temp());
defineFixed(lir, ins, LFloatReg(ReturnDoubleReg));
}
// Lower a signed 64-bit division. idivq leaves the quotient in rax and the
// remainder in rdx, so the output is pinned to rax and rdx is reserved as a
// temp. Unsigned divisions are routed to lowerUDiv64.
void
LIRGeneratorX64::lowerDivI64(MDiv* div)
{
if (div->isUnsigned()) {
lowerUDiv64(div);
return;
}
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(div->lhs()), useRegister(div->rhs()),
tempFixed(rdx));
defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}
// Lower a signed 64-bit modulus. idivq leaves the remainder in rdx, so the
// output is pinned to rdx and rax is reserved as a temp. Unsigned moduli
// are routed to lowerUMod64.
void
LIRGeneratorX64::lowerModI64(MMod* mod)
{
if (mod->isUnsigned()) {
lowerUMod64(mod);
return;
}
LDivOrModI64* lir = new(alloc()) LDivOrModI64(useRegister(mod->lhs()), useRegister(mod->rhs()),
tempFixed(rax));
defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}
// Lower an unsigned 64-bit division: quotient fixed to rax, rdx reserved
// for the remainder divq produces.
void
LIRGeneratorX64::lowerUDiv64(MDiv* div)
{
LUDivOrMod64* lir = new(alloc()) LUDivOrMod64(useRegister(div->lhs()),
useRegister(div->rhs()),
tempFixed(rdx));
defineInt64Fixed(lir, div, LInt64Allocation(LAllocation(AnyRegister(rax))));
}
// Lower an unsigned 64-bit modulus: remainder fixed to rdx, rax reserved
// for the quotient divq produces.
void
LIRGeneratorX64::lowerUMod64(MMod* mod)
{
LUDivOrMod64* lir = new(alloc()) LUDivOrMod64(useRegister(mod->lhs()),
useRegister(mod->rhs()),
tempFixed(rax));
defineInt64Fixed(lir, mod, LInt64Allocation(LAllocation(AnyRegister(rdx))));
}

View File

@ -36,6 +36,11 @@ class LIRGeneratorX64 : public LIRGeneratorX86Shared
bool needTempForPostBarrier() { return false; }
void lowerDivI64(MDiv* div);
void lowerModI64(MMod* mod);
void lowerUDiv64(MDiv* div);
void lowerUMod64(MMod* mod);
public:
void visitBox(MBox* box);
void visitUnbox(MUnbox* unbox);

View File

@ -189,6 +189,19 @@ MacroAssembler::subPtr(Imm32 imm, Register dest)
subq(imm, dest);
}
// Subtract a 64-bit immediate from |dest|. x64 'sub' only accepts a
// sign-extended 32-bit immediate, so values outside [INT32_MIN, INT32_MAX]
// are first materialized in the scratch register.
void
MacroAssembler::subPtr(ImmWord imm, Register dest)
{
ScratchRegisterScope scratch(*this);
// The scratch register would be clobbered before the subtract otherwise.
MOZ_ASSERT(dest != scratch);
if ((intptr_t)imm.value <= INT32_MAX && (intptr_t)imm.value >= INT32_MIN) {
subq(Imm32((int32_t)imm.value), dest);
} else {
mov(imm, scratch);
subq(scratch, dest);
}
}
void
MacroAssembler::subPtr(const Address& addr, Register dest)
{

View File

@ -420,6 +420,18 @@ LIRGeneratorX86::visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr* ins)
define(new(alloc()) LAsmJSLoadFuncPtr(useRegisterAtStart(ins->index())), ins);
}
// 64-bit integer division is not yet implemented for 32-bit x86.
void
LIRGeneratorX86::lowerDivI64(MDiv* div)
{
MOZ_CRASH("NYI");
}
// 64-bit integer modulus is not yet implemented for 32-bit x86.
void
LIRGeneratorX86::lowerModI64(MMod* mod)
{
MOZ_CRASH("NYI");
}
void
LIRGeneratorX86::visitSubstr(MSubstr* ins)
{

View File

@ -42,6 +42,9 @@ class LIRGeneratorX86 : public LIRGeneratorX86Shared
void lowerUntypedPhiInput(MPhi* phi, uint32_t inputPosition, LBlock* block, size_t lirIndex);
void defineUntypedPhi(MPhi* phi, size_t lirIndex);
void lowerDivI64(MDiv* div);
void lowerModI64(MMod* mod);
public:
void visitBox(MBox* box);
void visitUnbox(MUnbox* unbox);