Bug 1202650 - split common code into platform variants. r=nbp

Lars T Hansen 2015-09-14 11:37:33 +02:00
parent 7fd39128eb
commit 712c5d1581
10 changed files with 694 additions and 363 deletions
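The commit message is terse, so here is the overall shape of the change: the template methods MacroAssembler::atomicBinopToTypedIntArray (with and without a result) move out of the shared MacroAssembler and CodeGenerator into the platform back ends (CodeGeneratorX86Shared and CodeGeneratorARM), and the asm.js call sites switch from calling through masm to calling the code generator's own copy. A minimal miniature of that pattern, using stand-in names rather than the real SpiderMonkey classes:

    // Illustration only: names and signatures are simplified stand-ins.
    struct MacroAssembler {
        // Before this commit, atomicBinopToTypedIntArray() was defined here
        // once and shared by every platform.
    };

    struct CodeGeneratorX86Shared {
        MacroAssembler masm;
        // After: each platform class owns its copy of the lowering logic.
        void atomicBinopToTypedIntArray() { /* emit x86 sequences via masm */ }
        void visitAtomicBinop() { atomicBinopToTypedIntArray(); }
    };

    struct CodeGeneratorARM {
        MacroAssembler masm;
        void atomicBinopToTypedIntArray() { /* emit ARM sequences via masm */ }
        void visitAtomicBinop() { atomicBinopToTypedIntArray(); }
    };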

View File

@@ -9395,71 +9395,6 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem,
Register temp1, Register temp2, AnyRegister output)
{
if (value->isConstant())
masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
else
masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
}
void
CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
{
MOZ_ASSERT(lir->mir()->hasUses());
AnyRegister output = ToAnyRegister(lir->output());
Register elements = ToRegister(lir->elements());
Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem)
{
if (value->isConstant())
masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
else
masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
}
void
CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
{
MOZ_ASSERT(!lir->mir()->hasUses());
Register elements = ToRegister(lir->elements());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem);
}
}
void
CodeGenerator::visitClampIToUint8(LClampIToUint8* lir)
{

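A note on the two memory operands in the removed visitors above: a constant index folds the element offset into an Address displacement, while a dynamic index uses a BaseIndex whose scale encodes the element width. A sketch of the arithmetic both forms denote, using plain integers rather than the jit operand types (assumed semantics, illustration only):

    #include <cstdint>

    // Effective address of element `index` in an Int32Array whose storage
    // starts at `elements`; width = Scalar::byteSize(Scalar::Int32) = 4.
    uint64_t elementAddress(uint64_t elements, uint64_t index) {
        const uint64_t width = 4;
        // Constant index: Address(elements, ToInt32(index) * width)
        //   -> the displacement is computed when the code is generated.
        // Dynamic index:  BaseIndex(elements, reg, ScaleFromElemWidth(4))
        //   -> elements + reg * 4 is computed at run time.
        return elements + index * width;   // both denote this address
    }
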
View File

@@ -283,8 +283,6 @@ class CodeGenerator : public CodeGeneratorSpecific
void visitAtomicIsLockFree(LAtomicIsLockFree* lir);
void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
- void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
- void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
void visitClampIToUint8(LClampIToUint8* lir);
void visitClampDToUint8(LClampDToUint8* lir);
void visitClampVToUint8(LClampVToUint8* lir);

View File

@@ -535,257 +535,6 @@ template void
MacroAssembler::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, AnyRegister output);
template<typename S, typename T>
void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem, Register temp1, Register temp2, AnyRegister output)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
atomicFetchSub32(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
atomicFetchAnd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
atomicFetchOr32(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
atomicFetchXor32(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
switch (op) {
case AtomicFetchAddOp:
atomicFetchAdd32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchSubOp:
atomicFetchSub32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchAndOp:
atomicFetchAnd32(value, mem, temp2, temp1);
break;
case AtomicFetchOrOp:
atomicFetchOr32(value, mem, temp2, temp1);
break;
case AtomicFetchXorOp:
atomicFetchXor32(value, mem, temp2, temp1);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
convertUInt32ToDouble(temp1, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
// Binary operation for effect, result discarded.
template<typename S, typename T>
void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
atomicAdd8(value, mem);
break;
case AtomicFetchSubOp:
atomicSub8(value, mem);
break;
case AtomicFetchAndOp:
atomicAnd8(value, mem);
break;
case AtomicFetchOrOp:
atomicOr8(value, mem);
break;
case AtomicFetchXorOp:
atomicXor8(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
atomicAdd16(value, mem);
break;
case AtomicFetchSubOp:
atomicSub16(value, mem);
break;
case AtomicFetchAndOp:
atomicAnd16(value, mem);
break;
case AtomicFetchOrOp:
atomicOr16(value, mem);
break;
case AtomicFetchXorOp:
atomicXor16(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
case Scalar::Uint32:
switch (op) {
case AtomicFetchAddOp:
atomicAdd32(value, mem);
break;
case AtomicFetchSubOp:
atomicSub32(value, mem);
break;
case AtomicFetchAndOp:
atomicAnd32(value, mem);
break;
case AtomicFetchOrOp:
atomicOr32(value, mem);
break;
case AtomicFetchXorOp:
atomicXor32(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem);
template void
MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem);
template <typename T>
void
MacroAssembler::loadUnboxedProperty(T address, JSValueType type, TypedOrValueRegister output)

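Why the Scalar::Uint32 arm above ends with convertUInt32ToDouble: as the comment citing bug 1077305 says, the output must be a double for uint32 arrays. A worked example of what would go wrong with an int32 result (illustration only, not from the patch):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Suppose the atomic fetch-op loads this uint32 value.
        uint32_t fetched = 0x90000000u;       // 2415919104
        // Reinterpreted as int32 it would look negative...
        int32_t asInt32 = (int32_t)fetched;   // -1879048192: wrong value
        // ...so the jit widens it to double, which represents it exactly.
        double asDouble = (double)fetched;    // 2415919104.0: correct
        printf("%d vs %.1f\n", asInt32, asDouble);
        return 0;
    }
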
View File

@@ -1059,15 +1059,6 @@ class MacroAssembler : public MacroAssemblerSpecific
void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
Register temp, AnyRegister output);
- // Generating a result.
- template<typename S, typename T>
- void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
- const T& mem, Register temp1, Register temp2, AnyRegister output);
- // Generating no result.
- template<typename S, typename T>
- void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
unsigned numElems = 0);
void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,

View File

@@ -1690,6 +1690,325 @@ CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStati
MOZ_CRASH("NYI");
}
template<typename S, typename T>
void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const S& value, const T& mem, Register temp1,
Register temp2, AnyRegister output)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, temp2, temp1);
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, temp2, temp1);
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, temp2, temp1);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
masm.convertUInt32ToDouble(temp1, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
// Binary operation for effect, result discarded.
template<typename S, typename T>
void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd8(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub8(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd8(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr8(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor8(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd16(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub16(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd16(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr16(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor16(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
case Scalar::Uint32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd32(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub32(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd32(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr32(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor32(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem);
template void
CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem);
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem,
Register temp1, Register temp2, AnyRegister output)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
}
void
CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
{
MOZ_ASSERT(lir->mir()->hasUses());
AnyRegister output = ToAnyRegister(lir->output());
Register elements = ToRegister(lir->elements());
Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
}
void
CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
{
MOZ_ASSERT(!lir->mir()->hasUses());
Register elements = ToRegister(lir->elements());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
}
}
void
CodeGeneratorARM::visitAsmJSCall(LAsmJSCall* ins)
{
@@ -2008,13 +2327,13 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
}
if (value->isConstant())
- masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
- Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
+ ToAnyRegister(ins->output()));
else
- masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
- ToRegister(value), srcAddr, temp, InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+ ToRegister(value), srcAddr, temp, InvalidReg,
+ ToAnyRegister(ins->output()));
if (mir->needsBoundsCheck())
masm.append(AsmJSHeapAccess(maybeCmpOffset));
@@ -2043,9 +2362,9 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
}
if (value->isConstant())
- masm.atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr);
+ atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr);
else
- masm.atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr);
+ atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr);
if (mir->needsBoundsCheck())
masm.append(AsmJSHeapAccess(maybeCmpOffset));

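Both asm.js hunks above map Scalar::Uint32 to Scalar::Int32 before emitting the binop. A plausible reading (my inference, not stated in the patch): the 32-bit read-modify-write instruction is the same for either signedness, and the asm.js consumer treats the result bits as int32 itself, so the uint32-to-double conversion used by the typed-array visitors is not needed here. As a sketch, with hypothetical helper names:

    enum class ScalarType { Int8, Uint8, Int16, Uint16, Int32, Uint32 };

    // Only the Uint32 -> Int32 mapping itself comes from the patch above.
    ScalarType asmJSAtomicOpType(ScalarType vt) {
        // Same machine operation either way; emit the Int32 variant and
        // let the caller interpret the 32 result bits.
        return vt == ScalarType::Uint32 ? ScalarType::Int32 : vt;
    }
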
View File

@@ -194,6 +194,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
void visitNegF(LNegF* lir);
void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+ void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
void visitAsmJSCall(LAsmJSCall* ins);
void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
@@ -216,6 +218,15 @@ class CodeGeneratorARM : public CodeGeneratorShared
void visitRandom(LRandom* ins);
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register temp1, Register temp2, AnyRegister output);
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
protected:
void visitEffectiveAddress(LEffectiveAddress* ins);
void visitUDiv(LUDiv* ins);

View File

@@ -673,19 +673,19 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
}
uint32_t before = masm.size();
if (value->isConstant()) {
- masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
- Imm32(ToInt32(value)),
- srcAddr,
- temp,
- InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ Imm32(ToInt32(value)),
+ srcAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
} else {
- masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
- ToRegister(value),
- srcAddr,
- temp,
- InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ ToRegister(value),
+ srcAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
}
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
@@ -718,9 +718,9 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
uint32_t before = masm.size();
if (value->isConstant())
- masm.atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
else
- masm.atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
MOZ_ASSERT(mir->offset() == 0,
"The AsmJS signal handler doesn't yet support emulating "
"atomic accesses in the case of a fault from an unwrapped offset");

View File

@@ -3320,6 +3320,323 @@ CodeGeneratorX86Shared::visitSimdSelect(LSimdSelect* ins)
masm.bitwiseOrX4(Operand(temp), output);
}
template<typename S, typename T>
void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem, Register temp1, Register temp2, AnyRegister output)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, temp1, output.gpr());
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, temp1, output.gpr());
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, temp1, output.gpr());
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Uint32:
// At the moment, the code in MCallOptimize.cpp requires the output
// type to be double for uint32 arrays. See bug 1077305.
MOZ_ASSERT(output.isFloat());
switch (op) {
case AtomicFetchAddOp:
masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchSubOp:
masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
break;
case AtomicFetchAndOp:
masm.atomicFetchAnd32(value, mem, temp2, temp1);
break;
case AtomicFetchOrOp:
masm.atomicFetchOr32(value, mem, temp2, temp1);
break;
case AtomicFetchXorOp:
masm.atomicFetchXor32(value, mem, temp2, temp1);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
masm.convertUInt32ToDouble(temp1, output.fpu());
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem,
Register temp1, Register temp2, AnyRegister output);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem,
Register temp1, Register temp2, AnyRegister output);
// Binary operation for effect, result discarded.
template<typename S, typename T>
void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
const T& mem)
{
// Uint8Clamped is explicitly not supported here
switch (arrayType) {
case Scalar::Int8:
case Scalar::Uint8:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd8(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub8(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd8(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr8(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor8(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int16:
case Scalar::Uint16:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd16(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub16(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd16(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr16(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor16(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
case Scalar::Int32:
case Scalar::Uint32:
switch (op) {
case AtomicFetchAddOp:
masm.atomicAdd32(value, mem);
break;
case AtomicFetchSubOp:
masm.atomicSub32(value, mem);
break;
case AtomicFetchAndOp:
masm.atomicAnd32(value, mem);
break;
case AtomicFetchOrOp:
masm.atomicOr32(value, mem);
break;
case AtomicFetchXorOp:
masm.atomicXor32(value, mem);
break;
default:
MOZ_CRASH("Invalid typed array atomic operation");
}
break;
default:
MOZ_CRASH("Invalid typed array type");
}
}
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const Address& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Imm32& value, const BaseIndex& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const Address& mem);
template void
CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
const Register& value, const BaseIndex& mem);
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem,
Register temp1, Register temp2, AnyRegister output)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
}
void
CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
{
MOZ_ASSERT(lir->mir()->hasUses());
AnyRegister output = ToAnyRegister(lir->output());
Register elements = ToRegister(lir->elements());
Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
}
}
template <typename T>
static inline void
AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
Scalar::Type arrayType, const LAllocation* value, const T& mem)
{
if (value->isConstant())
cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
else
cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
}
void
CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
{
MOZ_ASSERT(!lir->mir()->hasUses());
Register elements = ToRegister(lir->elements());
const LAllocation* value = lir->value();
Scalar::Type arrayType = lir->mir()->arrayType();
int width = Scalar::byteSize(arrayType);
if (lir->index()->isConstant()) {
Address mem(elements, ToInt32(lir->index()) * width);
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
} else {
BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
}
}
void
CodeGeneratorX86Shared::visitMemoryBarrier(LMemoryBarrier* ins)
{

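The blocks of "template void CodeGeneratorX86Shared::atomicBinopToTypedIntArray(...)" above are explicit instantiations: the template bodies are defined in a .cpp file, so every (value, memory-operand) combination used from other translation units must be instantiated there, or the linker would report undefined symbols. A self-contained illustration of the mechanism, with toy types in place of Imm32/Register and Address/BaseIndex:

    // In a header (hypothetically): declaration only, no body visible.
    template <typename S>
    void emit(const S& value);

    // In one .cpp file: the body lives here...
    struct Imm32Like { int value; };
    struct RegisterLike { int code; };

    template <typename S>
    void emit(const S& value) { /* generate code for `value` */ }

    // ...so each S used elsewhere must be forced into existence here:
    template void emit<Imm32Like>(const Imm32Like&);
    template void emit<RegisterLike>(const RegisterLike&);
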
View File

@@ -241,6 +241,8 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
virtual void visitUDivOrModConstant(LUDivOrModConstant *ins);
virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins);
virtual void visitMemoryBarrier(LMemoryBarrier* ins);
+ virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+ virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool);
void visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck);
@@ -289,6 +291,15 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
void generateInvalidateEpilogue();
+ // Generating a result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+ const T& mem, Register temp1, Register temp2, AnyRegister output);
+ // Generating no result.
+ template<typename S, typename T>
+ void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
};
// An out-of-line bailout thunk.

View File

@@ -734,19 +734,19 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
Address memAddr(addrTemp, mir->offset());
if (value->isConstant()) {
- masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
- Imm32(ToInt32(value)),
- memAddr,
- temp,
- InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ Imm32(ToInt32(value)),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
} else {
- masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
- ToRegister(value),
- memAddr,
- temp,
- InvalidReg,
- ToAnyRegister(ins->output()));
+ atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+ ToRegister(value),
+ memAddr,
+ temp,
+ InvalidReg,
+ ToAnyRegister(ins->output()));
}
}
@@ -767,9 +767,9 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
Address memAddr(addrTemp, mir->offset());
if (value->isConstant())
- masm.atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
+ atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
else
- masm.atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
+ atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
}
void