From 712c5d158166759c8d684b1e4065dc9747571893 Mon Sep 17 00:00:00 2001
From: Lars T Hansen
Date: Mon, 14 Sep 2015 11:37:33 +0200
Subject: [PATCH] Bug 1202650 - split common code into platform variants. r=nbp

---
 js/src/jit/CodeGenerator.cpp                  |  65 ----
 js/src/jit/CodeGenerator.h                    |   2 -
 js/src/jit/MacroAssembler.cpp                 | 251 -------
 js/src/jit/MacroAssembler.h                   |   9 -
 js/src/jit/arm/CodeGenerator-arm.cpp          | 335 +++++++++++++++++-
 js/src/jit/arm/CodeGenerator-arm.h            |  11 +
 js/src/jit/x64/CodeGenerator-x64.cpp          |  28 +-
 .../x86-shared/CodeGenerator-x86-shared.cpp   | 317 +++++++++++++++++
 .../jit/x86-shared/CodeGenerator-x86-shared.h |  11 +
 js/src/jit/x86/CodeGenerator-x86.cpp          |  28 +-
 10 files changed, 694 insertions(+), 363 deletions(-)

diff --git a/js/src/jit/CodeGenerator.cpp b/js/src/jit/CodeGenerator.cpp
index e4a45c3f86c0..18d01e8b746f 100644
--- a/js/src/jit/CodeGenerator.cpp
+++ b/js/src/jit/CodeGenerator.cpp
@@ -9395,71 +9395,6 @@ CodeGenerator::visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayEle
     }
 }
 
-template<typename T>
-static inline void
-AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,
-                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
-                        Register temp1, Register temp2, AnyRegister output)
-{
-    if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
-    else
-        masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
-}
-
-void
-CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
-{
-    MOZ_ASSERT(lir->mir()->hasUses());
-
-    AnyRegister output = ToAnyRegister(lir->output());
-    Register elements = ToRegister(lir->elements());
-    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
-    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
-    const LAllocation* value = lir->value();
-
-    Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
-
-    if (lir->index()->isConstant()) {
-        Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
-    } else {
-        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
-    }
-}
-
-template<typename T>
-static inline void
-AtomicBinopToTypedArray(MacroAssembler& masm, AtomicOp op,
-                        Scalar::Type arrayType, const LAllocation* value, const T& mem)
-{
-    if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
-    else
-        masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
-}
-
-void
-CodeGenerator::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
-{
-    MOZ_ASSERT(!lir->mir()->hasUses());
-
-    Register elements = ToRegister(lir->elements());
-    const LAllocation* value = lir->value();
-    Scalar::Type arrayType = lir->mir()->arrayType();
-    int width = Scalar::byteSize(arrayType);
-
-    if (lir->index()->isConstant()) {
-        Address mem(elements, ToInt32(lir->index()) * width);
-        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem);
-    } else {
-        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
-        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem);
-    }
-}
-
 void
 CodeGenerator::visitClampIToUint8(LClampIToUint8* lir)
 {
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 899852c898f4..a3d5b326ca23 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -283,8 +283,6 @@ class CodeGenerator : public CodeGeneratorSpecific
     void visitAtomicIsLockFree(LAtomicIsLockFree* lir);
     void visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement* lir);
     void visitAtomicExchangeTypedArrayElement(LAtomicExchangeTypedArrayElement* lir);
-    void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
-    void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
     void visitClampIToUint8(LClampIToUint8* lir);
     void visitClampDToUint8(LClampDToUint8* lir);
     void visitClampVToUint8(LClampVToUint8* lir);
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index c741b64300fb..04090ab44226 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -535,257 +535,6 @@ template void
 MacroAssembler::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
                                               Register value, Register temp, AnyRegister output);
 
-template<typename S, typename T>
-void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
-                                           const T& mem, Register temp1, Register temp2, AnyRegister output)
-{
-    // Uint8Clamped is explicitly not supported here
-    switch (arrayType) {
-      case Scalar::Int8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd32(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub32(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd32(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr32(value, mem, temp1, output.gpr());
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor32(value, mem, temp1, output.gpr());
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Uint32:
-        // At the moment, the code in MCallOptimize.cpp requires the output
-        // type to be double for uint32 arrays. See bug 1077305.
-        MOZ_ASSERT(output.isFloat());
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicFetchAdd32(value, mem, InvalidReg, temp1);
-            break;
-          case AtomicFetchSubOp:
-            atomicFetchSub32(value, mem, InvalidReg, temp1);
-            break;
-          case AtomicFetchAndOp:
-            atomicFetchAnd32(value, mem, temp2, temp1);
-            break;
-          case AtomicFetchOrOp:
-            atomicFetchOr32(value, mem, temp2, temp1);
-            break;
-          case AtomicFetchXorOp:
-            atomicFetchXor32(value, mem, temp2, temp1);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        convertUInt32ToDouble(temp1, output.fpu());
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Imm32& value, const Address& mem,
-                                           Register temp1, Register temp2, AnyRegister output);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Imm32& value, const BaseIndex& mem,
-                                           Register temp1, Register temp2, AnyRegister output);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Register& value, const Address& mem,
-                                           Register temp1, Register temp2, AnyRegister output);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Register& value, const BaseIndex& mem,
-                                           Register temp1, Register temp2, AnyRegister output);
-
-// Binary operation for effect, result discarded.
-template<typename S, typename T>
-void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
-                                           const T& mem)
-{
-    // Uint8Clamped is explicitly not supported here
-    switch (arrayType) {
-      case Scalar::Int8:
-      case Scalar::Uint8:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicAdd8(value, mem);
-            break;
-          case AtomicFetchSubOp:
-            atomicSub8(value, mem);
-            break;
-          case AtomicFetchAndOp:
-            atomicAnd8(value, mem);
-            break;
-          case AtomicFetchOrOp:
-            atomicOr8(value, mem);
-            break;
-          case AtomicFetchXorOp:
-            atomicXor8(value, mem);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int16:
-      case Scalar::Uint16:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicAdd16(value, mem);
-            break;
-          case AtomicFetchSubOp:
-            atomicSub16(value, mem);
-            break;
-          case AtomicFetchAndOp:
-            atomicAnd16(value, mem);
-            break;
-          case AtomicFetchOrOp:
-            atomicOr16(value, mem);
-            break;
-          case AtomicFetchXorOp:
-            atomicXor16(value, mem);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      case Scalar::Int32:
-      case Scalar::Uint32:
-        switch (op) {
-          case AtomicFetchAddOp:
-            atomicAdd32(value, mem);
-            break;
-          case AtomicFetchSubOp:
-            atomicSub32(value, mem);
-            break;
-          case AtomicFetchAndOp:
-            atomicAnd32(value, mem);
-            break;
-          case AtomicFetchOrOp:
-            atomicOr32(value, mem);
-            break;
-          case AtomicFetchXorOp:
-            atomicXor32(value, mem);
-            break;
-          default:
-            MOZ_CRASH("Invalid typed array atomic operation");
-        }
-        break;
-      default:
-        MOZ_CRASH("Invalid typed array type");
-    }
-}
-
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Imm32& value, const Address& mem);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Imm32& value, const BaseIndex& mem);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Register& value, const Address& mem);
-template void
-MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
-                                           const Register& value, const BaseIndex& mem);
-
 template <typename T>
 void
 MacroAssembler::loadUnboxedProperty(T address, JSValueType type,
                                     TypedOrValueRegister output)
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index 2be545e1da7d..1436fcefc28e 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -1059,15 +1059,6 @@ class MacroAssembler : public MacroAssemblerSpecific
     void atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem, Register value,
                                        Register temp, AnyRegister output);
 
-    // Generating a result.
-    template<typename S, typename T>
-    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
-                                    const T& mem, Register temp1, Register temp2, AnyRegister output);
-
-    // Generating no result.
-    template<typename S, typename T>
-    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
-
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex& dest,
                                 unsigned numElems = 0);
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address& dest,
diff --git a/js/src/jit/arm/CodeGenerator-arm.cpp b/js/src/jit/arm/CodeGenerator-arm.cpp
index 3f6a19071246..cc444a2de1f3 100644
--- a/js/src/jit/arm/CodeGenerator-arm.cpp
+++ b/js/src/jit/arm/CodeGenerator-arm.cpp
@@ -1690,6 +1690,325 @@ CodeGeneratorARM::visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStati
     MOZ_CRASH("NYI");
 }
 
+
+template<typename S, typename T>
+void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const S& value, const T& mem, Register temp1,
+                                             Register temp2, AnyRegister output)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor32(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays. See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor32(value, mem, temp2, temp1);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Imm32& value, const Address& mem,
+                                             Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Imm32& value, const BaseIndex& mem,
+                                             Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Register& value, const Address& mem,
+                                             Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Register& value, const BaseIndex& mem,
+                                             Register temp1, Register temp2, AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+                                             const T& mem)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd8(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub8(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd8(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr8(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor8(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd16(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub16(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd16(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr16(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor16(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd32(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub32(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd32(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr32(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor32(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Imm32& value, const Address& mem);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Imm32& value, const BaseIndex& mem);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Register& value, const Address& mem);
+template void
+CodeGeneratorARM::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                             const Register& value, const BaseIndex& mem);
+
+
+template<typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
+                        Register temp1, Register temp2, AnyRegister output)
+{
+    if (value->isConstant())
+        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
+    else
+        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
+}
+
+void
+CodeGeneratorARM::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+    MOZ_ASSERT(lir->mir()->hasUses());
+
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register elements = ToRegister(lir->elements());
+    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+    const LAllocation* value = lir->value();
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    }
+}
+
+template<typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorARM* cg, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation* value, const T& mem)
+{
+    if (value->isConstant())
+        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
+    else
+        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
+}
+
+void
+CodeGeneratorARM::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+    MOZ_ASSERT(!lir->mir()->hasUses());
+
+    Register elements = ToRegister(lir->elements());
+    const LAllocation* value = lir->value();
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+    }
+}
+
 void
 CodeGeneratorARM::visitAsmJSCall(LAsmJSCall* ins)
 {
@@ -2008,13 +2327,13 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
     }
 
     if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                        Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                   Imm32(ToInt32(value)), srcAddr, temp, InvalidReg,
+                                   ToAnyRegister(ins->output()));
     else
-        masm.atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
-                                        ToRegister(value), srcAddr, temp, InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, vt == Scalar::Uint32 ? Scalar::Int32 : vt,
+                                   ToRegister(value), srcAddr, temp, InvalidReg,
+                                   ToAnyRegister(ins->output()));
 
     if (mir->needsBoundsCheck())
         masm.append(AsmJSHeapAccess(maybeCmpOffset));
@@ -2043,9 +2362,9 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
     }
 
     if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr);
+        atomicBinopToTypedIntArray(op, vt, Imm32(ToInt32(value)), srcAddr);
     else
-        masm.atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr);
+        atomicBinopToTypedIntArray(op, vt, ToRegister(value), srcAddr);
 
     if (mir->needsBoundsCheck())
         masm.append(AsmJSHeapAccess(maybeCmpOffset));
diff --git a/js/src/jit/arm/CodeGenerator-arm.h b/js/src/jit/arm/CodeGenerator-arm.h
index 8f8a6379353a..9154152dc1dd 100644
--- a/js/src/jit/arm/CodeGenerator-arm.h
+++ b/js/src/jit/arm/CodeGenerator-arm.h
@@ -194,6 +194,8 @@ class CodeGeneratorARM : public CodeGeneratorShared
     void visitNegF(LNegF* lir);
     void visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins);
     void visitStoreTypedArrayElementStatic(LStoreTypedArrayElementStatic* ins);
+    void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+    void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
     void visitAsmJSCall(LAsmJSCall* ins);
     void visitAsmJSLoadHeap(LAsmJSLoadHeap* ins);
     void visitAsmJSStoreHeap(LAsmJSStoreHeap* ins);
@@ -216,6 +218,15 @@ class CodeGeneratorARM : public CodeGeneratorShared
 
     void visitRandom(LRandom* ins);
 
+    // Generating a result.
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+                                    const T& mem, Register temp1, Register temp2, AnyRegister output);
+
+    // Generating no result.
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
+
   protected:
     void visitEffectiveAddress(LEffectiveAddress* ins);
     void visitUDiv(LUDiv* ins);
diff --git a/js/src/jit/x64/CodeGenerator-x64.cpp b/js/src/jit/x64/CodeGenerator-x64.cpp
index 09d98633226d..b2743a38897c 100644
--- a/js/src/jit/x64/CodeGenerator-x64.cpp
+++ b/js/src/jit/x64/CodeGenerator-x64.cpp
@@ -673,19 +673,19 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
     }
     uint32_t before = masm.size();
     if (value->isConstant()) {
-        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                        Imm32(ToInt32(value)),
-                                        srcAddr,
-                                        temp,
-                                        InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+                                   Imm32(ToInt32(value)),
+                                   srcAddr,
+                                   temp,
+                                   InvalidReg,
+                                   ToAnyRegister(ins->output()));
     } else {
-        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                        ToRegister(value),
-                                        srcAddr,
-                                        temp,
-                                        InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+                                   ToRegister(value),
+                                   srcAddr,
+                                   temp,
+                                   InvalidReg,
+                                   ToAnyRegister(ins->output()));
     }
 
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
@@ -718,9 +718,9 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
 
     uint32_t before = masm.size();
     if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
+        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), srcAddr);
     else
-        masm.atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
+        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), srcAddr);
     MOZ_ASSERT(mir->offset() == 0,
                "The AsmJS signal handler doesn't yet support emulating "
                "atomic accesses in the case of a fault from an unwrapped offset");
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
index ff9039affba5..31e494fa6b6c 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.cpp
@@ -3320,6 +3320,323 @@ CodeGeneratorX86Shared::visitSimdSelect(LSimdSelect* ins)
     masm.bitwiseOrX4(Operand(temp), output);
 }
 
+template<typename S, typename T>
+void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+                                                   const T& mem, Register temp1, Register temp2, AnyRegister output)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor32(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays. See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicFetchAdd32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicFetchSub32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicFetchAnd32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicFetchOr32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicFetchXor32(value, mem, temp2, temp1);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        masm.convertUInt32ToDouble(temp1, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Imm32& value, const Address& mem,
+                                                   Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Imm32& value, const BaseIndex& mem,
+                                                   Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Register& value, const Address& mem,
+                                                   Register temp1, Register temp2, AnyRegister output);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Register& value, const BaseIndex& mem,
+                                                   Register temp1, Register temp2, AnyRegister output);
+
+// Binary operation for effect, result discarded.
+template<typename S, typename T>
+void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+                                                   const T& mem)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd8(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub8(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd8(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr8(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor8(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd16(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub16(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd16(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr16(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor16(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+      case Scalar::Uint32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            masm.atomicAdd32(value, mem);
+            break;
+          case AtomicFetchSubOp:
+            masm.atomicSub32(value, mem);
+            break;
+          case AtomicFetchAndOp:
+            masm.atomicAnd32(value, mem);
+            break;
+          case AtomicFetchOrOp:
+            masm.atomicOr32(value, mem);
+            break;
+          case AtomicFetchXorOp:
+            masm.atomicXor32(value, mem);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Imm32& value, const Address& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Imm32& value, const BaseIndex& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Register& value, const Address& mem);
+template void
+CodeGeneratorX86Shared::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                                   const Register& value, const BaseIndex& mem);
+
+
+template<typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation* value, const T& mem,
+                        Register temp1, Register temp2, AnyRegister output)
+{
+    if (value->isConstant())
+        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
+    else
+        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
+}
+
+void
+CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir)
+{
+    MOZ_ASSERT(lir->mir()->hasUses());
+
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register elements = ToRegister(lir->elements());
+    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+    const LAllocation* value = lir->value();
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    }
+}
+
+template<typename T>
+static inline void
+AtomicBinopToTypedArray(CodeGeneratorX86Shared* cg, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation* value, const T& mem)
+{
+    if (value->isConstant())
+        cg->atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem);
+    else
+        cg->atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem);
+}
+
+void
+CodeGeneratorX86Shared::visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir)
+{
+    MOZ_ASSERT(!lir->mir()->hasUses());
+
+    Register elements = ToRegister(lir->elements());
+    const LAllocation* value = lir->value();
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(this, lir->mir()->operation(), arrayType, value, mem);
+    }
+}
+
 void
 CodeGeneratorX86Shared::visitMemoryBarrier(LMemoryBarrier* ins)
 {
diff --git a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
index 848a45c7992f..366746478d5e 100644
--- a/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
+++ b/js/src/jit/x86-shared/CodeGenerator-x86-shared.h
@@ -241,6 +241,8 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
     virtual void visitUDivOrModConstant(LUDivOrModConstant *ins);
     virtual void visitAsmJSPassStackArg(LAsmJSPassStackArg* ins);
     virtual void visitMemoryBarrier(LMemoryBarrier* ins);
+    virtual void visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop* lir);
+    virtual void visitAtomicTypedArrayElementBinopForEffect(LAtomicTypedArrayElementBinopForEffect* lir);
     void visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds* ool);
     void visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck);
 
@@ -289,6 +291,15 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
     void visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool);
     void visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIntCheck* ool);
     void generateInvalidateEpilogue();
+
+    // Generating a result.
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value,
+                                    const T& mem, Register temp1, Register temp2, AnyRegister output);
+
+    // Generating no result.
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S& value, const T& mem);
 };
 
 // An out-of-line bailout thunk.
diff --git a/js/src/jit/x86/CodeGenerator-x86.cpp b/js/src/jit/x86/CodeGenerator-x86.cpp
index 33dcaa26a049..4bcbb6034753 100644
--- a/js/src/jit/x86/CodeGenerator-x86.cpp
+++ b/js/src/jit/x86/CodeGenerator-x86.cpp
@@ -734,19 +734,19 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
 
     Address memAddr(addrTemp, mir->offset());
     if (value->isConstant()) {
-        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                        Imm32(ToInt32(value)),
-                                        memAddr,
-                                        temp,
-                                        InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+                                   Imm32(ToInt32(value)),
+                                   memAddr,
+                                   temp,
+                                   InvalidReg,
+                                   ToAnyRegister(ins->output()));
     } else {
-        masm.atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
-                                        ToRegister(value),
-                                        memAddr,
-                                        temp,
-                                        InvalidReg,
-                                        ToAnyRegister(ins->output()));
+        atomicBinopToTypedIntArray(op, accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
+                                   ToRegister(value),
+                                   memAddr,
+                                   temp,
+                                   InvalidReg,
+                                   ToAnyRegister(ins->output()));
     }
 }
 
@@ -767,9 +767,9 @@ CodeGeneratorX86::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
 
     Address memAddr(addrTemp, mir->offset());
     if (value->isConstant())
-        masm.atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
+        atomicBinopToTypedIntArray(op, accessType, Imm32(ToInt32(value)), memAddr);
     else
-        masm.atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
+        atomicBinopToTypedIntArray(op, accessType, ToRegister(value), memAddr);
 }
 
 void