Mirror of https://github.com/mozilla/gecko-dev.git, synced 2024-10-13 21:35:39 +00:00
Bug 979594 - JIT implementations of the important Atomics. r=sstangl
This commit is contained in:
parent dd2a241361
commit a088077703
@@ -312,6 +312,20 @@ function testInt16Extremes(a) {
     assertEq(a[11], 0);
 }
 
+function testUint32(a) {
+    var k = 0;
+    for ( var i=0 ; i < 20 ; i++ ) {
+        a[i] = i+5;
+        k += a[i];
+    }
+
+    var sum = 0;
+    for ( var i=0 ; i < 20 ; i++ )
+        sum += Atomics.add(a, i, 1);
+
+    assertEq(sum, k);
+}
+
 function isLittleEndian() {
     var xxx = new ArrayBuffer(2);
     var xxa = new Int16Array(xxx);
@@ -395,6 +409,7 @@ function runTests() {
     testInt8Extremes(new SharedInt8Array(sab));
     testUint8Extremes(new SharedUint8Array(sab));
     testInt16Extremes(new SharedInt16Array(sab));
+    testUint32(new SharedUint32Array(sab));
 }
 
 if (this.Atomics && this.SharedArrayBuffer && this.SharedInt32Array)
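Why the final assertion holds (a sketch, not part of the patch): Atomics.add returns the value the cell held before the addition, so summing the returned values over the same twenty cells reproduces k exactly. The C++ analogue of that fetch-and-add contract:

    #include <atomic>
    #include <cassert>

    int main() {
        std::atomic<int> cell(5);
        int old = cell.fetch_add(1); // like Atomics.add: returns the OLD value
        assert(old == 5);            // value observed before the add
        assert(cell.load() == 6);    // the cell itself was incremented
        return 0;
    }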
js/src/jit-test/tests/atomics/inline-add.js (new file, 31 lines)
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+// - the add operation is inlined as it should be
+// - loads and stores are not moved across the add
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function add(ta) {
+    var x = ta[0];
+    Atomics.add(ta, 86, 6);
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += add(ia);
+//print(v);
js/src/jit-test/tests/atomics/inline-add2.js (new file, 31 lines)
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// Like inline-add, but with SharedUint32Array, which is a special
+// case because the value is representable only as a Number.
+// All this tests is that the Uint32 path is being triggered.
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+// - the add operation is inlined as it should be, with
+//   a return type 'Double'
+// - loads and stores are not moved across the add
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function add(ta) {
+    return Atomics.add(ta, 86, 6);
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedUint32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedUint32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 0xdeadbeef; // Important: Not an int32-capable value
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += add(ia);
+//print(v);
js/src/jit-test/tests/atomics/inline-cmpxchg.js (new file, 31 lines)
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+// - the cmpxchg operation is inlined as it should be
+// - loads and stores are not moved across the cmpxchg
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function cmpxchg(ta) {
+    var x = ta[0];
+    Atomics.compareExchange(ta, 86, 37, 42);
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += cmpxchg(ia);
+//print(v);
js/src/jit-test/tests/atomics/inline-fence.js (new file, 31 lines)
@@ -0,0 +1,31 @@
+// |jit-test| slow;
+//
+// This is intended to be run manually with IONFLAGS=logs and
+// postprocessing by iongraph to verify manually (by inspecting the
+// MIR) that:
+//
+// - the fence operation is inlined as it should be
+// - loads and stores are not moved across the fence
+//
+// Be sure to run with --ion-eager --ion-offthread-compile=off.
+
+function fence(ta) {
+    var x = ta[0];
+    Atomics.fence();
+    var y = ta[1];
+    var z = y + 1;
+    var w = x + z;
+    return w;
+}
+
+if (!this.SharedArrayBuffer || !this.Atomics || !this.SharedInt32Array)
+    quit(0);
+
+var sab = new SharedArrayBuffer(4096);
+var ia = new SharedInt32Array(sab);
+for ( var i=0, limit=ia.length ; i < limit ; i++ )
+    ia[i] = 37;
+var v = 0;
+for ( var i=0 ; i < 1000 ; i++ )
+    v += fence(ia);
+//print(v);
js/src/jit/AtomicOp.h (new file, 54 lines)
@@ -0,0 +1,54 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef jit_AtomicOp_h
+#define jit_AtomicOp_h
+
+namespace js {
+namespace jit {
+
+// Types of atomic operation, shared by MIR and LIR.
+
+enum AtomicOp {
+    AtomicFetchAddOp,
+    AtomicFetchSubOp,
+    AtomicFetchAndOp,
+    AtomicFetchOrOp,
+    AtomicFetchXorOp
+};
+
+// Memory barrier types, shared by MIR and LIR.
+//
+// MembarSynchronizing is here because some platforms can make the
+// distinction (DSB vs DMB on ARM, SYNC vs parameterized SYNC on MIPS)
+// but there's been no reason to use it yet.
+
+enum MemoryBarrierBits {
+    MembarLoadLoad = 1,
+    MembarLoadStore = 2,
+    MembarStoreStore = 4,
+    MembarStoreLoad = 8,
+
+    MembarSynchronizing = 16,
+
+    // For validity testing
+    MembarAllbits = 31,
+};
+
+// Standard barrier bits for a full barrier.
+static const int MembarFull = MembarLoadLoad|MembarLoadStore|MembarStoreLoad|MembarStoreStore;
+
+// Standard sets of barrier bits for atomic loads and stores.
+// See http://gee.cs.oswego.edu/dl/jmm/cookbook.html for more.
+static const int MembarBeforeLoad = 0;
+static const int MembarAfterLoad = MembarLoadLoad|MembarLoadStore;
+static const int MembarBeforeStore = MembarStoreStore;
+static const int MembarAfterStore = MembarStoreLoad;
+
+} // namespace jit
+} // namespace js
+
+#endif /* jit_AtomicOp_h */
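How the barrier bits compose (a standalone sketch, not part of the patch; the constants are copied from the header above):

    #include <cassert>

    enum MemoryBarrierBits {
        MembarLoadLoad = 1, MembarLoadStore = 2,
        MembarStoreStore = 4, MembarStoreLoad = 8,
        MembarSynchronizing = 16, MembarAllbits = 31
    };

    int main() {
        int full = MembarLoadLoad | MembarLoadStore | MembarStoreLoad | MembarStoreStore;
        assert(full == 15);                        // all four orderings, no sync bit
        int afterLoad = MembarLoadLoad | MembarLoadStore;
        assert((afterLoad & ~MembarAllbits) == 0); // the validity check LMemoryBarrier performs
        return 0;
    }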
@@ -8963,6 +8963,68 @@ CodeGenerator::visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir)
     return true;
 }
 
+bool
+CodeGenerator::visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir)
+{
+    Register elements = ToRegister(lir->elements());
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register temp = lir->temp()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp());
+
+    MOZ_ASSERT(lir->oldval()->isRegister());
+    MOZ_ASSERT(lir->newval()->isRegister());
+
+    Register oldval = ToRegister(lir->oldval());
+    Register newval = ToRegister(lir->newval());
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address dest(elements, ToInt32(lir->index()) * width);
+        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+    } else {
+        BaseIndex dest(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        masm.compareExchangeToTypedIntArray(arrayType, dest, oldval, newval, temp, output);
+    }
+
+    return true;
+}
+
+template <typename T>
+static inline void
+AtomicBinopToTypedArray(MacroAssembler &masm, AtomicOp op,
+                        Scalar::Type arrayType, const LAllocation *value, const T &mem,
+                        Register temp1, Register temp2, AnyRegister output)
+{
+    if (value->isConstant())
+        masm.atomicBinopToTypedIntArray(op, arrayType, Imm32(ToInt32(value)), mem, temp1, temp2, output);
+    else
+        masm.atomicBinopToTypedIntArray(op, arrayType, ToRegister(value), mem, temp1, temp2, output);
+}
+
+bool
+CodeGenerator::visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir)
+{
+    AnyRegister output = ToAnyRegister(lir->output());
+    Register elements = ToRegister(lir->elements());
+    Register temp1 = lir->temp1()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp1());
+    Register temp2 = lir->temp2()->isBogusTemp() ? InvalidReg : ToRegister(lir->temp2());
+    const LAllocation* value = lir->value();
+
+    Scalar::Type arrayType = lir->mir()->arrayType();
+    int width = Scalar::byteSize(arrayType);
+
+    if (lir->index()->isConstant()) {
+        Address mem(elements, ToInt32(lir->index()) * width);
+        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    } else {
+        BaseIndex mem(elements, ToRegister(lir->index()), ScaleFromElemWidth(width));
+        AtomicBinopToTypedArray(masm, lir->mir()->operation(), arrayType, value, mem, temp1, temp2, output);
+    }
+
+    return true;
+}
+
 bool
 CodeGenerator::visitClampIToUint8(LClampIToUint8 *lir)
 {
@@ -265,6 +265,8 @@ class CodeGenerator : public CodeGeneratorSpecific
     bool visitLoadTypedArrayElementHole(LLoadTypedArrayElementHole *lir);
     bool visitStoreTypedArrayElement(LStoreTypedArrayElement *lir);
     bool visitStoreTypedArrayElementHole(LStoreTypedArrayElementHole *lir);
+    bool visitCompareExchangeTypedArrayElement(LCompareExchangeTypedArrayElement *lir);
+    bool visitAtomicTypedArrayElementBinop(LAtomicTypedArrayElementBinop *lir);
     bool visitClampIToUint8(LClampIToUint8 *lir);
     bool visitClampDToUint8(LClampDToUint8 *lir);
     bool visitClampVToUint8(LClampVToUint8 *lir);
@@ -727,6 +727,13 @@ class IonBuilder
     InliningStatus inlineRegExpExec(CallInfo &callInfo);
     InliningStatus inlineRegExpTest(CallInfo &callInfo);
 
+    // Atomics natives.
+    InliningStatus inlineAtomicsCompareExchange(CallInfo &callInfo);
+    InliningStatus inlineAtomicsLoad(CallInfo &callInfo);
+    InliningStatus inlineAtomicsStore(CallInfo &callInfo);
+    InliningStatus inlineAtomicsFence(CallInfo &callInfo);
+    InliningStatus inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target);
+
     // Array intrinsics.
     InliningStatus inlineUnsafePutElements(CallInfo &callInfo);
     bool inlineUnsafeSetDenseArrayElement(CallInfo &callInfo, uint32_t base);
@@ -791,6 +798,9 @@ class IonBuilder
                             MTypeObjectDispatch *dispatch, MGetPropertyCache *cache,
                             MBasicBlock **fallbackTarget);
 
+    bool atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayElementType);
+    void atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index);
+
     bool testNeedsArgumentCheck(JSFunction *target, CallInfo &callInfo);
 
     MDefinition *makeCallsiteClone(JSFunction *target, MDefinition *fun);
@@ -11,6 +11,7 @@
 
 #include "builtin/TypedObject.h"
 #include "gc/GCTrace.h"
+#include "jit/AtomicOp.h"
 #include "jit/Bailouts.h"
 #include "jit/BaselineFrame.h"
 #include "jit/BaselineIC.h"
@@ -397,6 +398,211 @@ template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const A
 template void MacroAssembler::loadFromTypedArray(Scalar::Type arrayType, const BaseIndex &src, const ValueOperand &dest,
                                                  bool allowDouble, Register temp, Label *fail);
 
+template<typename T>
+void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem,
+                                               Register oldval, Register newval,
+                                               Register temp, AnyRegister output)
+{
+    switch (arrayType) {
+      case Scalar::Int8:
+        compareExchange8SignExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint8:
+        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint8Clamped:
+        compareExchange8ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Int16:
+        compareExchange16SignExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint16:
+        compareExchange16ZeroExtend(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Int32:
+        compareExchange32(mem, oldval, newval, output.gpr());
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays.  See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        compareExchange32(mem, oldval, newval, temp);
+        convertUInt32ToDouble(temp, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const Address &mem,
+                                               Register oldval, Register newval, Register temp,
+                                               AnyRegister output);
+template void
+MacroAssembler::compareExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex &mem,
+                                               Register oldval, Register newval, Register temp,
+                                               AnyRegister output);
+
+template<typename S, typename T>
+void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
+                                           const T &mem, Register temp1, Register temp2, AnyRegister output)
+{
+    // Uint8Clamped is explicitly not supported here
+    switch (arrayType) {
+      case Scalar::Int8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor8SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint8:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor8ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor16SignExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint16:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor16ZeroExtend(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Int32:
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr32(value, mem, temp1, output.gpr());
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor32(value, mem, temp1, output.gpr());
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        break;
+      case Scalar::Uint32:
+        // At the moment, the code in MCallOptimize.cpp requires the output
+        // type to be double for uint32 arrays.  See bug 1077305.
+        MOZ_ASSERT(output.isFloat());
+        switch (op) {
+          case AtomicFetchAddOp:
+            atomicFetchAdd32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchSubOp:
+            atomicFetchSub32(value, mem, InvalidReg, temp1);
+            break;
+          case AtomicFetchAndOp:
+            atomicFetchAnd32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchOrOp:
+            atomicFetchOr32(value, mem, temp2, temp1);
+            break;
+          case AtomicFetchXorOp:
+            atomicFetchXor32(value, mem, temp2, temp1);
+            break;
+          default:
+            MOZ_CRASH("Invalid typed array atomic operation");
+        }
+        convertUInt32ToDouble(temp1, output.fpu());
+        break;
+      default:
+        MOZ_CRASH("Invalid typed array type");
+    }
+}
+
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Imm32 &value, const Address &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Imm32 &value, const BaseIndex &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Register &value, const Address &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+template void
+MacroAssembler::atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType,
+                                           const Register &value, const BaseIndex &mem,
+                                           Register temp1, Register temp2, AnyRegister output);
+
 // Inlined version of gc::CheckAllocatorState that checks the bare essentials
 // and bails for anything that cannot be handled with our jit allocators.
 void
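The Uint32 special-casing above exists because an unsigned 32-bit result need not fit in an int32, while a double represents every uint32 value exactly; hence the compareExchange32/convertUInt32ToDouble pairing. A standalone sketch (not part of the patch):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint32_t v = 0xdeadbeef;           // the value inline-add2.js stores
        assert(v > 0x7fffffffu);           // exceeds INT32_MAX: not int32-capable
        double d = static_cast<double>(v); // what convertUInt32ToDouble produces
        assert(d == 3735928559.0);         // represented exactly as a double
        return 0;
    }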
@@ -22,6 +22,7 @@
 #else
 # error "Unknown architecture!"
 #endif
+#include "jit/AtomicOp.h"
 #include "jit/IonInstrumentation.h"
 #include "jit/JitCompartment.h"
 #include "jit/VMFunctions.h"
@@ -738,6 +739,14 @@ class MacroAssembler : public MacroAssemblerSpecific
         }
     }
 
+    template<typename T>
+    void compareExchangeToTypedIntArray(Scalar::Type arrayType, const T &mem, Register oldval, Register newval,
+                                        Register temp, AnyRegister output);
+
+    template<typename S, typename T>
+    void atomicBinopToTypedIntArray(AtomicOp op, Scalar::Type arrayType, const S &value,
+                                    const T &mem, Register temp1, Register temp2, AnyRegister output);
+
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const BaseIndex &dest);
     void storeToTypedFloatArray(Scalar::Type arrayType, FloatRegister value, const Address &dest);
 
@@ -4871,6 +4871,80 @@ class LStoreTypedArrayElementStatic : public LInstructionHelper<0, 2, 0>
     }
 };
 
+class LCompareExchangeTypedArrayElement : public LInstructionHelper<1, 4, 1>
+{
+  public:
+    LIR_HEADER(CompareExchangeTypedArrayElement)
+
+    LCompareExchangeTypedArrayElement(const LAllocation &elements, const LAllocation &index,
+                                      const LAllocation &oldval, const LAllocation &newval,
+                                      const LDefinition &temp)
+    {
+        setOperand(0, elements);
+        setOperand(1, index);
+        setOperand(2, oldval);
+        setOperand(3, newval);
+        setTemp(0, temp);
+    }
+
+    const LAllocation *elements() {
+        return getOperand(0);
+    }
+    const LAllocation *index() {
+        return getOperand(1);
+    }
+    const LAllocation *oldval() {
+        return getOperand(2);
+    }
+    const LAllocation *newval() {
+        return getOperand(3);
+    }
+    const LDefinition *temp() {
+        return getTemp(0);
+    }
+
+    const MCompareExchangeTypedArrayElement *mir() const {
+        return mir_->toCompareExchangeTypedArrayElement();
+    }
+};
+
+class LAtomicTypedArrayElementBinop : public LInstructionHelper<1, 3, 2>
+{
+  public:
+    LIR_HEADER(AtomicTypedArrayElementBinop)
+
+    LAtomicTypedArrayElementBinop(const LAllocation &elements, const LAllocation &index,
+                                  const LAllocation &value, const LDefinition &temp1,
+                                  const LDefinition &temp2)
+    {
+        setOperand(0, elements);
+        setOperand(1, index);
+        setOperand(2, value);
+        setTemp(0, temp1);
+        setTemp(1, temp2);
+    }
+
+    const LAllocation *elements() {
+        return getOperand(0);
+    }
+    const LAllocation *index() {
+        return getOperand(1);
+    }
+    const LAllocation *value() {
+        return getOperand(2);
+    }
+    const LDefinition *temp1() {
+        return getTemp(0);
+    }
+    const LDefinition *temp2() {
+        return getTemp(1);
+    }
+
+    const MAtomicTypedArrayElementBinop *mir() const {
+        return mir_->toAtomicTypedArrayElementBinop();
+    }
+};
+
 class LEffectiveAddress : public LInstructionHelper<1, 2, 0>
 {
   public:
@@ -6628,6 +6702,30 @@ class LThrowUninitializedLexical : public LCallInstructionHelper<0, 0, 0>
     }
 };
 
+class LMemoryBarrier : public LInstructionHelper<0, 0, 0>
+{
+  private:
+    const int type_;
+
+  public:
+    LIR_HEADER(MemoryBarrier)
+
+    // The parameter 'type' is a bitwise 'or' of the barrier types needed,
+    // see AtomicOp.h.
+    explicit LMemoryBarrier(int type) : type_(type)
+    {
+        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
+    }
+
+    int type() const {
+        return type_;
+    }
+
+    const MMemoryBarrier *mir() const {
+        return mir_->toMemoryBarrier();
+    }
+};
+
 } // namespace jit
 } // namespace js
 
@@ -658,7 +658,8 @@ class LNode
     virtual void setOperand(size_t index, const LAllocation &a) = 0;
 
     // Returns information about temporary registers needed. Each temporary
-    // register is an LUse with a TEMPORARY policy, or a fixed register.
+    // register is an LDefinition with a fixed or virtual register and
+    // either GENERAL, FLOAT32, or DOUBLE type.
    virtual size_t numTemps() const = 0;
     virtual LDefinition *getTemp(size_t index) = 0;
     virtual void setTemp(size_t index, const LDefinition &a) = 0;
@@ -234,6 +234,8 @@
     _(StoreTypedArrayElement)           \
     _(StoreTypedArrayElementHole)       \
     _(StoreTypedArrayElementStatic)     \
+    _(CompareExchangeTypedArrayElement) \
+    _(AtomicTypedArrayElementBinop)     \
     _(EffectiveAddress)                 \
     _(ClampIToUint8)                    \
     _(ClampDToUint8)                    \
@@ -327,6 +329,7 @@
     _(AsmJSCall)                        \
     _(InterruptCheckPar)                \
     _(RecompileCheck)                   \
+    _(MemoryBarrier)                    \
     _(AssertRangeI)                     \
     _(AssertRangeD)                     \
     _(AssertRangeF)                     \
@@ -211,7 +211,7 @@ LinearScanAllocator::allocateRegisters()
  *
  * The algorithm is based on the one published in "Linear Scan Register
  * Allocation on SSA Form" by C. Wimmer et al., for which the full citation
- * appears above.
+ * appears in LiveRangeAllocator.cpp.
  */
 bool
 LinearScanAllocator::resolveControlFlow()
@@ -2859,10 +2859,22 @@ LIRGenerator::visitLoadTypedArrayElement(MLoadTypedArrayElement *ins)
     if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type()))
         tempDef = temp();
 
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeLoad);
+        if (!add(fence, ins))
+            return false;
+    }
     LLoadTypedArrayElement *lir = new(alloc()) LLoadTypedArrayElement(elements, index, tempDef);
     if (ins->fallible() && !assignSnapshot(lir, Bailout_Overflow))
         return false;
-    return define(lir, ins);
+    if (!define(lir, ins))
+        return false;
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterLoad);
+        if (!add(fence, ins))
+            return false;
+    }
+    return true;
 }
 
 bool
@@ -2946,7 +2958,24 @@ LIRGenerator::visitStoreTypedArrayElement(MStoreTypedArrayElement *ins)
         value = useByteOpRegisterOrNonDoubleConstant(ins->value());
     else
         value = useRegisterOrNonDoubleConstant(ins->value());
-    return add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins);
+
+    // Optimization opportunity for atomics: on some platforms there
+    // is a store instruction that incorporates the necessary
+    // barriers, and we could use that instead of separate barrier and
+    // store instructions.  See bug #1077027.
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarBeforeStore);
+        if (!add(fence, ins))
+            return false;
+    }
+    if (!add(new(alloc()) LStoreTypedArrayElement(elements, index, value), ins))
+        return false;
+    if (ins->requiresMemoryBarrier()) {
+        LMemoryBarrier *fence = new(alloc()) LMemoryBarrier(MembarAfterStore);
+        if (!add(fence, ins))
+            return false;
+    }
+    return true;
 }
 
 bool
@@ -3713,6 +3742,13 @@ LIRGenerator::visitRecompileCheck(MRecompileCheck *ins)
     return assignSafepoint(lir, ins);
 }
 
+bool
+LIRGenerator::visitMemoryBarrier(MMemoryBarrier *ins)
+{
+    LMemoryBarrier *lir = new(alloc()) LMemoryBarrier(ins->type());
+    return add(lir, ins);
+}
+
 bool
 LIRGenerator::visitSimdConstant(MSimdConstant *ins)
 {
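For intuition, the fence placement generated above has this C++ shape (a sketch, not from the patch; the acquire/release fences only approximate the MembarAfterLoad and MembarBeforeStore bit sets):

    #include <atomic>

    int barriered_load(std::atomic<int> &cell) {
        // MembarBeforeLoad == 0: nothing is emitted before the load.
        int v = cell.load(std::memory_order_relaxed);
        // MembarAfterLoad = MembarLoadLoad | MembarLoadStore, roughly an acquire fence.
        std::atomic_thread_fence(std::memory_order_acquire);
        return v;
    }

    void barriered_store(std::atomic<int> &cell, int v) {
        // MembarBeforeStore = MembarStoreStore, roughly a release fence.
        std::atomic_thread_fence(std::memory_order_release);
        cell.store(v, std::memory_order_relaxed);
        // MembarAfterStore = MembarStoreLoad, the expensive one: a full fence.
        std::atomic_thread_fence(std::memory_order_seq_cst);
    }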
@@ -269,6 +269,7 @@ class LIRGenerator : public LIRGeneratorSpecific
     bool visitGetDOMProperty(MGetDOMProperty *ins);
     bool visitGetDOMMember(MGetDOMMember *ins);
     bool visitRecompileCheck(MRecompileCheck *ins);
+    bool visitMemoryBarrier(MMemoryBarrier *ins);
     bool visitSimdExtractElement(MSimdExtractElement *ins);
     bool visitSimdInsertElement(MSimdInsertElement *ins);
     bool visitSimdSignMask(MSimdSignMask *ins);
@@ -6,6 +6,7 @@
 
 #include "jsmath.h"
 
+#include "builtin/AtomicsObject.h"
 #include "builtin/TestingFunctions.h"
 #include "builtin/TypedObject.h"
 #include "jit/BaselineInspector.h"
@@ -34,6 +35,24 @@ IonBuilder::inlineNativeCall(CallInfo &callInfo, JSFunction *target)
     if (!optimizationInfo().inlineNative())
         return InliningStatus_NotInlined;
 
+    // Atomic natives.
+    if (native == atomics_compareExchange)
+        return inlineAtomicsCompareExchange(callInfo);
+    if (native == atomics_load)
+        return inlineAtomicsLoad(callInfo);
+    if (native == atomics_store)
+        return inlineAtomicsStore(callInfo);
+    if (native == atomics_fence)
+        return inlineAtomicsFence(callInfo);
+    if (native == atomics_add ||
+        native == atomics_sub ||
+        native == atomics_and ||
+        native == atomics_or ||
+        native == atomics_xor)
+    {
+        return inlineAtomicsBinop(callInfo, target);
+    }
+
     // Array natives.
     if (native == js_Array)
         return inlineArray(callInfo);
@@ -2235,6 +2254,225 @@ IonBuilder::inlineBoundFunction(CallInfo &nativeCallInfo, JSFunction *target)
     return InliningStatus_Inlined;
 }
 
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsCompareExchange(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 4 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *oldval = callInfo.getArg(2);
+    if (!(oldval->type() == MIRType_Int32 || oldval->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    MDefinition *newval = callInfo.getArg(3);
+    if (!(newval->type() == MIRType_Int32 || newval->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MDefinition *oldvalToWrite = oldval;
+    if (oldval->type() == MIRType_Double) {
+        oldvalToWrite = MTruncateToInt32::New(alloc(), oldval);
+        current->add(oldvalToWrite->toInstruction());
+    }
+
+    MDefinition *newvalToWrite = newval;
+    if (newval->type() == MIRType_Double) {
+        newvalToWrite = MTruncateToInt32::New(alloc(), newval);
+        current->add(newvalToWrite->toInstruction());
+    }
+
+    MCompareExchangeTypedArrayElement *cas =
+        MCompareExchangeTypedArrayElement::New(alloc(), elements, index, arrayType,
+                                               oldvalToWrite, newvalToWrite);
+    cas->setResultType(getInlineReturnType());
+    current->add(cas);
+    current->push(cas);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsLoad(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 2 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MLoadTypedArrayElement *load =
+        MLoadTypedArrayElement::New(alloc(), elements, index, arrayType,
+                                    DoesRequireMemoryBarrier);
+    load->setResultType(getInlineReturnType());
+    current->add(load);
+    current->push(load);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsStore(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 3 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *value = callInfo.getArg(2);
+    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    MDefinition *toWrite = value;
+    if (value->type() == MIRType_Double) {
+        toWrite = MTruncateToInt32::New(alloc(), value);
+        current->add(toWrite->toInstruction());
+    }
+    MStoreTypedArrayElement *store =
+        MStoreTypedArrayElement::New(alloc(), elements, index, toWrite, arrayType,
+                                     DoesRequireMemoryBarrier);
+    current->add(store);
+    current->push(value);
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsFence(CallInfo &callInfo)
+{
+    if (callInfo.argc() != 0 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MMemoryBarrier *fence = MMemoryBarrier::New(alloc());
+    current->add(fence);
+    pushConstant(UndefinedValue());
+
+    return InliningStatus_Inlined;
+}
+
+IonBuilder::InliningStatus
+IonBuilder::inlineAtomicsBinop(CallInfo &callInfo, JSFunction *target)
+{
+    if (callInfo.argc() != 3 || callInfo.constructing())
+        return InliningStatus_NotInlined;
+
+    Scalar::Type arrayType;
+    if (!atomicsMeetsPreconditions(callInfo, &arrayType))
+        return InliningStatus_NotInlined;
+
+    MDefinition *value = callInfo.getArg(2);
+    if (!(value->type() == MIRType_Int32 || value->type() == MIRType_Double))
+        return InliningStatus_NotInlined;
+
+    callInfo.setImplicitlyUsedUnchecked();
+
+    MInstruction *elements;
+    MDefinition *index;
+    atomicsCheckBounds(callInfo, &elements, &index);
+
+    JSNative native = target->native();
+    AtomicOp k = AtomicFetchAddOp;
+    if (native == atomics_add)
+        k = AtomicFetchAddOp;
+    else if (native == atomics_sub)
+        k = AtomicFetchSubOp;
+    else if (native == atomics_and)
+        k = AtomicFetchAndOp;
+    else if (native == atomics_or)
+        k = AtomicFetchOrOp;
+    else if (native == atomics_xor)
+        k = AtomicFetchXorOp;
+    else
+        MOZ_CRASH("Bad atomic operation");
+
+    MDefinition *toWrite = value;
+    if (value->type() == MIRType_Double) {
+        toWrite = MTruncateToInt32::New(alloc(), value);
+        current->add(toWrite->toInstruction());
+    }
+    MAtomicTypedArrayElementBinop *binop =
+        MAtomicTypedArrayElementBinop::New(alloc(), k, elements, index, arrayType, toWrite);
+    binop->setResultType(getInlineReturnType());
+    current->add(binop);
+    current->push(binop);
+
+    return InliningStatus_Inlined;
+}
+
+bool
+IonBuilder::atomicsMeetsPreconditions(CallInfo &callInfo, Scalar::Type *arrayType)
+{
+    if (callInfo.getArg(0)->type() != MIRType_Object)
+        return false;
+
+    if (callInfo.getArg(1)->type() != MIRType_Int32)
+        return false;
+
+    // Ensure that the first argument is a valid SharedTypedArray.
+    //
+    // Then check both that the element type is something we can
+    // optimize and that the return type is suitable for that element
+    // type.
+
+    types::TemporaryTypeSet *arg0Types = callInfo.getArg(0)->resultTypeSet();
+    if (!arg0Types)
+        return false;
+
+    *arrayType = arg0Types->getSharedTypedArrayType();
+    switch (*arrayType) {
+      case Scalar::Int8:
+      case Scalar::Uint8:
+      case Scalar::Int16:
+      case Scalar::Uint16:
+      case Scalar::Int32:
+        return getInlineReturnType() == MIRType_Int32;
+      case Scalar::Uint32:
+        // Bug 1077305: it would be attractive to allow inlining even
+        // if the inline return type is Int32, which it will frequently
+        // be.
+        return getInlineReturnType() == MIRType_Double;
+      default:
+        // Excludes floating types and Uint8Clamped
+        return false;
+    }
+}
+
+void
+IonBuilder::atomicsCheckBounds(CallInfo &callInfo, MInstruction **elements, MDefinition **index)
+{
+    // Perform bounds checking and extract the elements vector.
+    MDefinition *obj = callInfo.getArg(0);
+    MInstruction *length = nullptr;
+    *index = callInfo.getArg(1);
+    *elements = nullptr;
+    addTypedArrayLengthAndData(obj, DoBoundsCheck, index, &length, elements);
+}
+
 IonBuilder::InliningStatus
 IonBuilder::inlineIsConstructing(CallInfo &callInfo)
 {
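Note the MTruncateToInt32 nodes above: a double-typed operand is narrowed with JS ToInt32 semantics (truncate, reduce modulo 2^32, reinterpret as signed) before reaching the atomic instruction. A standalone sketch of that conversion (not from the patch):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // ToInt32-style truncation for doubles, as MTruncateToInt32 performs.
    static int32_t truncate_to_int32(double d) {
        if (!std::isfinite(d))
            return 0;
        double m = std::fmod(std::trunc(d), 4294967296.0); // reduce mod 2^32
        if (m < 0)
            m += 4294967296.0;
        return static_cast<int32_t>(static_cast<uint32_t>(m)); // two's-complement wrap
    }

    int main() {
        assert(truncate_to_int32(6.7) == 6);                  // fraction dropped
        assert(truncate_to_int32(2147483648.0) == INT32_MIN); // 2^31 wraps
        return 0;
    }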
js/src/jit/MIR.h (212 lines)
@@ -15,6 +15,7 @@
 #include "mozilla/Array.h"
 #include "mozilla/DebugOnly.h"
 
+#include "jit/AtomicOp.h"
 #include "jit/FixedList.h"
 #include "jit/InlineList.h"
 #include "jit/IonAllocPolicy.h"
@@ -8037,17 +8038,33 @@ class MArrayJoin
     MDefinition *foldsTo(TempAllocator &alloc);
 };
 
+// See comments above MMemoryBarrier, below.
+
+enum MemoryBarrierRequirement
+{
+    DoesNotRequireMemoryBarrier,
+    DoesRequireMemoryBarrier
+};
+
+// Also see comments above MMemoryBarrier, below.
+
 class MLoadTypedArrayElement
   : public MBinaryInstruction
 {
     Scalar::Type arrayType_;
+    bool requiresBarrier_;
 
     MLoadTypedArrayElement(MDefinition *elements, MDefinition *index,
-                           Scalar::Type arrayType)
-      : MBinaryInstruction(elements, index), arrayType_(arrayType)
+                           Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
+      : MBinaryInstruction(elements, index),
+        arrayType_(arrayType),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier)
     {
         setResultType(MIRType_Value);
-        setMovable();
+        if (requiresBarrier_)
+            setGuard();             // Not removable or movable
+        else
+            setMovable();
         MOZ_ASSERT(elements->type() == MIRType_Elements);
         MOZ_ASSERT(index->type() == MIRType_Int32);
         MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
@@ -8057,9 +8074,10 @@ class MLoadTypedArrayElement
     INSTRUCTION_HEADER(LoadTypedArrayElement)
 
     static MLoadTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
-                                       Scalar::Type arrayType)
+                                       Scalar::Type arrayType,
+                                       MemoryBarrierRequirement requiresBarrier=DoesNotRequireMemoryBarrier)
     {
-        return new(alloc) MLoadTypedArrayElement(elements, index, arrayType);
+        return new(alloc) MLoadTypedArrayElement(elements, index, arrayType, requiresBarrier);
     }
 
     Scalar::Type arrayType() const {
@@ -8069,6 +8087,9 @@ class MLoadTypedArrayElement
         // Bailout if the result does not fit in an int32.
         return arrayType_ == Scalar::Uint32 && type() == MIRType_Int32;
     }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
     MDefinition *elements() const {
         return getOperand(0);
     }
@@ -8076,10 +8097,16 @@ class MLoadTypedArrayElement
         return getOperand(1);
     }
     AliasSet getAliasSet() const {
+        // When a barrier is needed make the instruction effectful by
+        // giving it a "store" effect.
+        if (requiresBarrier_)
+            return AliasSet::Store(AliasSet::TypedArrayElement);
         return AliasSet::Load(AliasSet::TypedArrayElement);
     }
 
     bool congruentTo(const MDefinition *ins) const {
+        if (requiresBarrier_)
+            return false;
         if (!ins->isLoadTypedArrayElement())
             return false;
         const MLoadTypedArrayElement *other = ins->toLoadTypedArrayElement();
@@ -8214,15 +8241,22 @@ class MStoreTypedArrayElement
     public StoreTypedArrayPolicy::Data
 {
     Scalar::Type arrayType_;
+    bool requiresBarrier_;
 
     // See note in MStoreElementCommon.
     bool racy_;
 
     MStoreTypedArrayElement(MDefinition *elements, MDefinition *index, MDefinition *value,
-                            Scalar::Type arrayType)
-      : MTernaryInstruction(elements, index, value), arrayType_(arrayType), racy_(false)
+                            Scalar::Type arrayType, MemoryBarrierRequirement requiresBarrier)
+      : MTernaryInstruction(elements, index, value),
+        arrayType_(arrayType),
+        requiresBarrier_(requiresBarrier == DoesRequireMemoryBarrier),
+        racy_(false)
     {
-        setMovable();
+        if (requiresBarrier_)
+            setGuard();             // Not removable or movable
+        else
+            setMovable();
         MOZ_ASSERT(elements->type() == MIRType_Elements);
         MOZ_ASSERT(index->type() == MIRType_Int32);
         MOZ_ASSERT(arrayType >= 0 && arrayType < Scalar::TypeMax);
@@ -8232,9 +8266,11 @@ class MStoreTypedArrayElement
     INSTRUCTION_HEADER(StoreTypedArrayElement)
 
     static MStoreTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements, MDefinition *index,
-                                        MDefinition *value, Scalar::Type arrayType)
+                                        MDefinition *value, Scalar::Type arrayType,
+                                        MemoryBarrierRequirement requiresBarrier = DoesNotRequireMemoryBarrier)
     {
-        return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType);
+        return new(alloc) MStoreTypedArrayElement(elements, index, value, arrayType,
+                                                  requiresBarrier);
     }
 
     Scalar::Type arrayType() const {
@@ -8261,6 +8297,9 @@ class MStoreTypedArrayElement
     AliasSet getAliasSet() const {
         return AliasSet::Store(AliasSet::TypedArrayElement);
     }
+    bool requiresMemoryBarrier() const {
+        return requiresBarrier_;
+    }
     bool racy() const {
         return racy_;
     }
@@ -11452,6 +11491,159 @@ class MRecompileCheck : public MNullaryInstruction
     }
 };
 
+// All barriered operations - MMemoryBarrier, MCompareExchangeTypedArrayElement,
+// and MAtomicTypedArrayElementBinop, as well as MLoadTypedArrayElement and
+// MStoreTypedArrayElement when they are marked as requiring a memory barrier - have
+// the following attributes:
+//
+// - Not movable
+// - Not removable
+// - Not congruent with any other instruction
+// - Effectful (they alias every TypedArray store)
+//
+// The intended effect of those constraints is to prevent all loads
+// and stores preceding the barriered operation from being moved to
+// after the barriered operation, and vice versa, and to prevent the
+// barriered operation from being removed or hoisted.
+
+class MMemoryBarrier
+  : public MNullaryInstruction
+{
+    // The type is a combination of the memory barrier types in AtomicOp.h.
+    const int type_;
+
+    explicit MMemoryBarrier(int type)
+      : type_(type)
+    {
+        MOZ_ASSERT((type_ & ~MembarAllbits) == 0);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(MemoryBarrier);
+
+    static MMemoryBarrier *New(TempAllocator &alloc, int type=MembarFull) {
+        return new(alloc) MMemoryBarrier(type);
+    }
+    int type() const {
+        return type_;
+    }
+
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
+class MCompareExchangeTypedArrayElement
+  : public MAryInstruction<4>,
+    public MixPolicy< MixPolicy<ObjectPolicy<0>, IntPolicy<1> >, MixPolicy<IntPolicy<2>, IntPolicy<3> > >
+{
+    Scalar::Type arrayType_;
+
+    explicit MCompareExchangeTypedArrayElement(MDefinition *elements, MDefinition *index,
+                                               Scalar::Type arrayType, MDefinition *oldval,
+                                               MDefinition *newval)
+      : arrayType_(arrayType)
+    {
+        initOperand(0, elements);
+        initOperand(1, index);
+        initOperand(2, oldval);
+        initOperand(3, newval);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(CompareExchangeTypedArrayElement);
+
+    static MCompareExchangeTypedArrayElement *New(TempAllocator &alloc, MDefinition *elements,
+                                                  MDefinition *index, Scalar::Type arrayType,
+                                                  MDefinition *oldval, MDefinition *newval)
+    {
+        return new(alloc) MCompareExchangeTypedArrayElement(elements, index, arrayType, oldval, newval);
+    }
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8 ||
+                arrayType_ == Scalar::Uint8Clamped);
+    }
+    MDefinition *elements() {
+        return getOperand(0);
+    }
+    MDefinition *index() {
+        return getOperand(1);
+    }
+    MDefinition *oldval() {
+        return getOperand(2);
+    }
+    int oldvalOperand() {
+        return 2;
+    }
+    MDefinition *newval() {
+        return getOperand(3);
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
+class MAtomicTypedArrayElementBinop
+  : public MAryInstruction<3>,
+    public Mix3Policy< ObjectPolicy<0>, IntPolicy<1>, IntPolicy<2> >
+{
+  private:
+    AtomicOp op_;
+    Scalar::Type arrayType_;
+
+  protected:
+    explicit MAtomicTypedArrayElementBinop(AtomicOp op, MDefinition *elements, MDefinition *index,
+                                           Scalar::Type arrayType, MDefinition *value)
+      : op_(op),
+        arrayType_(arrayType)
+    {
+        initOperand(0, elements);
+        initOperand(1, index);
+        initOperand(2, value);
+        setGuard();             // Not removable
+    }
+
+  public:
+    INSTRUCTION_HEADER(AtomicTypedArrayElementBinop);
+
+    static MAtomicTypedArrayElementBinop *New(TempAllocator &alloc, AtomicOp op,
+                                              MDefinition *elements, MDefinition *index,
+                                              Scalar::Type arrayType, MDefinition *value)
+    {
+        return new(alloc) MAtomicTypedArrayElementBinop(op, elements, index, arrayType, value);
+    }
+
+    bool isByteArray() const {
+        return (arrayType_ == Scalar::Int8 ||
+                arrayType_ == Scalar::Uint8 ||
+                arrayType_ == Scalar::Uint8Clamped);
+    }
+    AtomicOp operation() const {
+        return op_;
+    }
+    Scalar::Type arrayType() const {
+        return arrayType_;
+    }
+    MDefinition *elements() {
+        return getOperand(0);
+    }
+    MDefinition *index() {
+        return getOperand(1);
+    }
+    MDefinition *value() {
+        return getOperand(2);
+    }
+    AliasSet getAliasSet() const {
+        return AliasSet::Store(AliasSet::TypedArrayElement);
+    }
+};
+
 class MAsmJSNeg : public MUnaryInstruction
 {
     MAsmJSNeg(MDefinition *op, MIRType type)
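A small point worth seeing in the New() signatures above: requiresBarrier defaults to DoesNotRequireMemoryBarrier, so every existing call site compiles and behaves exactly as before, and only the Atomics.load/Atomics.store inlining paths pass DoesRequireMemoryBarrier. The same pattern in miniature (hypothetical names, not from the patch):

    #include <cassert>

    enum MemoryBarrierRequirement {
        DoesNotRequireMemoryBarrier,
        DoesRequireMemoryBarrier
    };

    // Hypothetical stand-in for MLoadTypedArrayElement::New's default argument.
    struct Load {
        bool barriered;
        explicit Load(MemoryBarrierRequirement r = DoesNotRequireMemoryBarrier)
          : barriered(r == DoesRequireMemoryBarrier) {}
    };

    int main() {
        Load plain;                            // old call sites: unchanged behavior
        Load atomic(DoesRequireMemoryBarrier); // the new barriered path
        assert(!plain.barriered && atomic.barriered);
        return 0;
    }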
@@ -184,6 +184,8 @@ namespace jit {
     _(StoreTypedArrayElement) \
     _(StoreTypedArrayElementHole) \
     _(StoreTypedArrayElementStatic) \
+    _(CompareExchangeTypedArrayElement) \
+    _(AtomicTypedArrayElementBinop) \
     _(EffectiveAddress) \
     _(ClampToUint8) \
     _(LoadFixedSlot) \
@@ -251,6 +253,7 @@ namespace jit {
     _(GuardThreadExclusive) \
     _(InterruptCheckPar) \
     _(RecompileCheck) \
+    _(MemoryBarrier) \
     _(UnknownValue) \
     _(LexicalCheck) \
     _(ThrowUninitializedLexical)
@@ -348,11 +348,14 @@ class ParallelSafetyVisitor : public MDefinitionVisitor
     UNSAFE_OP(AsmJSParameter)
     UNSAFE_OP(AsmJSCall)
     DROP_OP(RecompileCheck)
+    UNSAFE_OP(CompareExchangeTypedArrayElement)
+    UNSAFE_OP(AtomicTypedArrayElementBinop)
+    UNSAFE_OP(MemoryBarrier)
     UNSAFE_OP(UnknownValue)
     UNSAFE_OP(LexicalCheck)
     UNSAFE_OP(ThrowUninitializedLexical)
 
-    // It looks like this could easily be made safe:
+    // It looks like these could easily be made safe:
     UNSAFE_OP(ConvertElementsToDoubles)
     UNSAFE_OP(MaybeCopyElementsForWrite)
 };
@@ -424,6 +424,7 @@ IntPolicy<Op>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def)
 template bool IntPolicy<0>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 template bool IntPolicy<1>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 template bool IntPolicy<2>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
+template bool IntPolicy<3>::staticAdjustInputs(TempAllocator &alloc, MInstruction *def);
 
 template <unsigned Op>
 bool
@@ -78,6 +78,8 @@ class LIRGeneratorNone : public LIRGeneratorShared
     bool visitAsmJSLoadFuncPtr(MAsmJSLoadFuncPtr *ins) { MOZ_CRASH(); }
     bool visitStoreTypedArrayElementStatic(MStoreTypedArrayElementStatic *ins) { MOZ_CRASH(); }
     bool visitForkJoinGetSlice(MForkJoinGetSlice *ins) { MOZ_CRASH(); }
+    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins) { MOZ_CRASH(); }
+    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins) { MOZ_CRASH(); }
 
     LTableSwitch *newLTableSwitch(LAllocation, LDefinition, MTableSwitch *) { MOZ_CRASH(); }
     LTableSwitchV *newLTableSwitchV(MTableSwitch *) { MOZ_CRASH(); }
@@ -296,6 +296,37 @@ class MacroAssemblerNone : public Assembler
 
     template <typename T> void computeEffectiveAddress(T, Register) { MOZ_CRASH(); }
 
+    template <typename T> void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T> void compareExchange32(const T &mem, Register oldval, Register newval, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAdd32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub8SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub8ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub16SignExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub16ZeroExtend(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchSub32(const T &value, const S &mem, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchAnd32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchOr32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor8SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor8ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor16SignExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor16ZeroExtend(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+    template <typename T, typename S> void atomicFetchXor32(const T &value, const S &mem, Register temp, Register output) { MOZ_CRASH(); }
+
     void clampIntToUint8(Register) { MOZ_CRASH(); }
 
     Register splitTagForTest(ValueOperand) { MOZ_CRASH(); }
@ -629,6 +629,9 @@ class AssemblerX86Shared : public AssemblerShared
        MOZ_CRASH("unexpected operand kind");
    }
}
void movsbl(Register src, Register dest) {
    masm.movsbl_rr(src.code(), dest.code());
}
void movsbl(const Operand &src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
@ -641,6 +644,21 @@ class AssemblerX86Shared : public AssemblerShared
        MOZ_CRASH("unexpected operand kind");
    }
}
void movb(const Operand &src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
        masm.movb_mr(src.disp(), src.base(), dest.code());
        break;
      case Operand::MEM_SCALE:
        masm.movb_mr(src.disp(), src.base(), src.index(), src.scale(), dest.code());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
}
void movb(Imm32 src, Register dest) {
    masm.movb_i8r(src.value & 255, dest.code());
}
void movb(Register src, const Operand &dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
@ -683,6 +701,14 @@ class AssemblerX86Shared : public AssemblerShared
void movzwl(Register src, Register dest) {
    masm.movzwl_rr(src.code(), dest.code());
}
void movw(const Operand &src, Register dest) {
    masm.prefix_16_for_32();
    movl(src, dest);
}
void movw(Imm32 src, Register dest) {
    masm.prefix_16_for_32();
    movl(src, dest);
}
void movw(Register src, const Operand &dest) {
    switch (dest.kind()) {
      case Operand::MEM_REG_DISP:
@ -707,6 +733,9 @@ class AssemblerX86Shared : public AssemblerShared
        MOZ_CRASH("unexpected operand kind");
    }
}
void movswl(Register src, Register dest) {
    masm.movswl_rr(src.code(), dest.code());
}
void movswl(const Operand &src, Register dest) {
    switch (src.kind()) {
      case Operand::MEM_REG_DISP:
@ -921,9 +950,7 @@ class AssemblerX86Shared : public AssemblerShared
    masm.int3();
}

-#ifdef DEBUG
static bool HasSSE2() { return CPUInfo::IsSSE2Present(); }
-#endif
static bool HasSSE3() { return CPUInfo::IsSSE3Present(); }
static bool HasSSE41() { return CPUInfo::IsSSE41Present(); }
static bool SupportsFloatingPoint() { return CPUInfo::IsSSE2Present(); }
@ -1060,6 +1087,12 @@ class AssemblerX86Shared : public AssemblerShared
        MOZ_CRASH("unexpected operand kind");
    }
}
// Note: lock_addl() is used as a memory barrier on non-SSE2 systems;
// a sketch of the idiom follows below. Do not optimize it away or
// replace it with XADDL or similar.
void lock_addl(Imm32 imm, const Operand &op) {
    masm.prefix_lock();
    addl(imm, op);
}
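The note above exists because lock_addl() doubles as the pre-SSE2 memory fence. A minimal standalone sketch of the two full-barrier idioms (GCC/Clang extended asm on x86/x86-64; illustrative only, not SpiderMonkey code, and it targets a dummy variable where the real code uses the stack slot at (%esp)):

    // SSE2 path: a dedicated full fence instruction.
    static inline void fullBarrierMfence() {
        asm volatile("mfence" ::: "memory");
    }

    // Pre-SSE2 fallback: any LOCK-prefixed read-modify-write acts as a
    // full barrier on x86; adding 0 to a dummy location changes no state.
    static inline void fullBarrierLockAdd() {
        static volatile int dummy;
        asm volatile("lock; addl $0, %0" : "+m"(dummy) : : "memory", "cc");
    }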
void subl(Imm32 imm, Register dest) {
    masm.subl_ir(imm.value, dest.code());
}
@ -1311,24 +1344,69 @@ class AssemblerX86Shared : public AssemblerShared
    decl(op);
}

-void lock_cmpxchg32(Register src, const Operand &op) {
+void lock_cmpxchg8(Register src, const Operand &mem) {
    masm.prefix_lock();
-    switch (op.kind()) {
+    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
-        masm.cmpxchg32(src.code(), op.disp(), op.base());
+        masm.cmpxchg8(src.code(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpxchg8(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
}
void lock_cmpxchg16(Register src, const Operand &mem) {
    masm.prefix_lock();
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.cmpxchg16(src.code(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpxchg16(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
}
void lock_cmpxchg32(Register src, const Operand &mem) {
    masm.prefix_lock();
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.cmpxchg32(src.code(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.cmpxchg32(src.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
}

-void xaddl(Register srcdest, const Operand &mem) {
+void lock_xaddb(Register srcdest, const Operand &mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
-        masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base());
+        masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
-        masm.xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
+        masm.lock_xaddb_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
    }
}
void lock_xaddw(Register srcdest, const Operand &mem) {
    masm.prefix_16_for_32();
    lock_xaddl(srcdest, mem);
}
void lock_xaddl(Register srcdest, const Operand &mem) {
    switch (mem.kind()) {
      case Operand::MEM_REG_DISP:
        masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base());
        break;
      case Operand::MEM_SCALE:
        masm.lock_xaddl_rm(srcdest.code(), mem.disp(), mem.base(), mem.index(), mem.scale());
        break;
      default:
        MOZ_CRASH("unexpected operand kind");
@ -216,16 +216,21 @@ public:

private:
    typedef enum {
        OP_ADD_EbGb = 0x00,
        OP_ADD_EvGv = 0x01,
        OP_ADD_GvEv = 0x03,
        OP_OR_EbGb = 0x08,
        OP_OR_EvGv = 0x09,
        OP_OR_GvEv = 0x0B,
        OP_2BYTE_ESCAPE = 0x0F,
        OP_AND_EbGb = 0x20,
        OP_AND_EvGv = 0x21,
        OP_AND_GvEv = 0x23,
        OP_SUB_EbGb = 0x28,
        OP_SUB_EvGv = 0x29,
        OP_SUB_GvEv = 0x2B,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_XOR_EbGb = 0x30,
        OP_XOR_EvGv = 0x31,
        OP_XOR_GvEv = 0x33,
        OP_CMP_EvGv = 0x39,
@ -255,6 +260,7 @@ private:
        OP_XCHG_EvGv = 0x87,
        OP_MOV_EbGv = 0x88,
        OP_MOV_EvGv = 0x89,
        OP_MOV_GvEb = 0x8A,
        OP_MOV_GvEv = 0x8B,
        OP_LEA = 0x8D,
        OP_GROUP1A_Ev = 0x8F,
@ -349,13 +355,16 @@ private:
        OP2_MOVDQ_WdqVdq = 0x7F,
        OP2_JCC_rel32 = 0x80,
        OP_SETCC = 0x90,
        OP_FENCE = 0xAE,
        OP2_IMUL_GvEv = 0xAF,
        OP2_CMPXCHG_GvEb = 0xB0,
        OP2_CMPXCHG_GvEw = 0xB1,
        OP2_BSR_GvEv = 0xBD,
        OP2_MOVSX_GvEb = 0xBE,
        OP2_MOVSX_GvEw = 0xBF,
        OP2_MOVZX_GvEb = 0xB6,
        OP2_MOVZX_GvEw = 0xB7,
        OP2_XADD_EbGb = 0xC0,
        OP2_XADD_EvGv = 0xC1,
        OP2_CMPPS_VpsWps = 0xC2,
        OP2_PEXTRW_GdUdIb = 0xC5,
@ -683,7 +692,24 @@ public:
    }
}

-void xaddl_rm(RegisterID srcdest, int offset, RegisterID base)
+void lock_xaddb_rm(RegisterID srcdest, int offset, RegisterID base)
{
    spew("lock xaddb %s, %s0x%x(%s)",
         nameIReg(1, srcdest), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
    m_formatter.oneByteOp(PRE_LOCK);
    m_formatter.twoByteOp(OP2_XADD_EbGb, srcdest, base, offset);
}

void lock_xaddb_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
{
    spew("lock xaddb %s, %s0x%x(%s,%s,%d)",
         nameIReg(1, srcdest), PRETTY_PRINT_OFFSET(offset),
         nameIReg(base), nameIReg(index), 1<<scale);
    m_formatter.oneByteOp(PRE_LOCK);
    m_formatter.twoByteOp(OP2_XADD_EbGb, srcdest, base, index, scale, offset);
}

void lock_xaddl_rm(RegisterID srcdest, int offset, RegisterID base)
{
    spew("lock xaddl %s, %s0x%x(%s)",
         nameIReg(4,srcdest), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
@ -691,7 +717,7 @@ public:
    m_formatter.twoByteOp(OP2_XADD_EvGv, srcdest, base, offset);
}

-void xaddl_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
+void lock_xaddl_rm(RegisterID srcdest, int offset, RegisterID base, RegisterID index, int scale)
{
    spew("lock xaddl %s, %s0x%x(%s,%s,%d)",
         nameIReg(4, srcdest), PRETTY_PRINT_OFFSET(offset),
@ -1427,6 +1453,11 @@ public:
    m_formatter.oneByteOp(PRE_LOCK);
}

void prefix_16_for_32()
{
    m_formatter.prefix(PRE_OPERAND_SIZE);
}

void incl_m32(int offset, RegisterID base)
{
    spew("incl %s0x%x(%s)", PRETTY_PRINT_OFFSET(offset), nameIReg(base));
@ -1439,15 +1470,49 @@ public:
    m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_DEC, base, offset);
}

// Note that CMPXCHG performs comparison against REG = %al/%ax/%eax.
// If %REG == [%base+offset], then %src -> [%base+offset].
// Otherwise, [%base+offset] -> %REG.
// For the 8-bit operations src must also be an 8-bit register.
// (A C++ sketch of these semantics follows the cmpxchg32 variants below.)

void cmpxchg8(RegisterID src, int offset, RegisterID base)
{
    spew("cmpxchg8 %s, %s0x%x(%s)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEb, src, base, offset);
}
void cmpxchg8(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
    spew("cmpxchg8 %s, %s0x%x(%s,%s,%d)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEb, src, base, index, scale, offset);
}
void cmpxchg16(RegisterID src, int offset, RegisterID base)
{
    spew("cmpxchg16 %s, %s0x%x(%s)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
    m_formatter.prefix(PRE_OPERAND_SIZE);
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, offset);
}
void cmpxchg16(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
    spew("cmpxchg16 %s, %s0x%x(%s,%s,%d)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
    m_formatter.prefix(PRE_OPERAND_SIZE);
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, index, scale, offset);
}
void cmpxchg32(RegisterID src, int offset, RegisterID base)
{
-    // Note that 32-bit CMPXCHG performs comparison against %eax.
-    // If %eax == [%base+offset], then %src -> [%base+offset].
-    // Otherwise, [%base+offset] -> %eax.
-    spew("cmpxchg %s, %s0x%x(%s)",
+    spew("cmpxchg32 %s, %s0x%x(%s)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base));
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, offset);
}
void cmpxchg32(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
    spew("cmpxchg32 %s, %s0x%x(%s,%s,%d)",
         nameIReg(src), PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(index), 1<<scale);
    m_formatter.twoByteOp(OP2_CMPXCHG_GvEw, src, base, index, scale, offset);
}
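The semantics spelled out in the comment above, rendered as C++ with std::atomic for the 32-bit case (a sketch for exposition, not SpiderMonkey code):

    #include <atomic>
    #include <cstdint>

    // LOCK CMPXCHG with %eax preloaded: compares eax with *mem; on a match
    // it stores src into *mem, otherwise it loads *mem into eax. Either
    // way, the value returned here is what CMPXCHG leaves in %eax.
    uint32_t cmpxchg32Semantics(std::atomic<uint32_t> *mem, uint32_t eax, uint32_t src)
    {
        mem->compare_exchange_strong(eax, src);  // updates 'eax' on failure
        return eax;
    }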
// Comparisons:
@ -1985,6 +2050,14 @@ public:
    m_formatter.immediate32(imm);
}

void movb_i8r(int imm, RegisterID reg)
{
    spew("movb $0x%x, %s",
         imm, nameIReg(1, reg));
    m_formatter.oneByteOp(OP_MOV_EbGv, reg);
    m_formatter.immediate8(imm);
}

void movb_i8m(int imm, int offset, RegisterID base)
{
    spew("movb $0x%x, %s0x%x(%s)",
@ -2275,6 +2348,20 @@ public:
    m_formatter.oneByteOp8(OP_MOV_EbGv, src, addr);
}

void movb_mr(int offset, RegisterID base, RegisterID dst)
{
    spew("movb %s0x%x(%s), %s",
         PRETTY_PRINT_OFFSET(offset), nameIReg(base), nameIReg(1, dst));
    m_formatter.oneByteOp(OP_MOV_GvEb, dst, base, offset);
}

void movb_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
    spew("movb %d(%s,%s,%d), %s",
         offset, nameIReg(base), nameIReg(index), 1<<scale, nameIReg(1, dst));
    m_formatter.oneByteOp(OP_MOV_GvEb, dst, base, index, scale, offset);
}

void movzbl_mr(int offset, RegisterID base, RegisterID dst)
{
    spew("movzbl %s0x%x(%s), %s",
@ -2303,6 +2390,13 @@ public:
    m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, addr);
}

void movsbl_rr(RegisterID src, RegisterID dst)
{
    spew("movsbl %s, %s",
         nameIReg(1,src), nameIReg(4,dst));
    m_formatter.twoByteOp8_movx(OP2_MOVSX_GvEb, dst, src);
}

void movsbl_mr(int offset, RegisterID base, RegisterID dst)
{
    spew("movsbl %s0x%x(%s), %s",
@ -2366,6 +2460,13 @@ public:
    m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, addr);
}

void movswl_rr(RegisterID src, RegisterID dst)
{
    spew("movswl %s, %s",
         nameIReg(2, src), nameIReg(4, dst));
    m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, src);
}

void movswl_mr(int offset, RegisterID base, RegisterID dst)
{
    spew("movswl %s0x%x(%s), %s",
@ -3903,6 +4004,11 @@ public:
    }
#endif

void mfence() {
    spew("mfence");
    m_formatter.twoByteOp(OP_FENCE, (int)6, (RegisterID)0);
}
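MFENCE is encoded as 0F AE /6, so the twoByteOp() call above should produce the canonical 0F AE F0 bytes, assuming the formatter's register form sets mod=11 (an assumption about the formatter, stated here rather than verified). A runnable check of the ModRM arithmetic:

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        // ModRM = mod (2 bits) : reg (3 bits, here the /6 opcode extension) : rm.
        uint8_t modrm = uint8_t(0xC0 | (6 << 3) | 0);
        std::printf("mfence = 0F AE %02X\n", modrm);  // prints: 0F AE F0
        return 0;
    }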
// Assembler admin methods:

JmpDst label()
@ -3081,5 +3081,13 @@ JitRuntime::generateForkJoinGetSliceStub(JSContext *cx)
    return code;
}

bool
CodeGeneratorX86Shared::visitMemoryBarrier(LMemoryBarrier *ins)
{
    // On x86's TSO memory model only store-load reordering is observable,
    // so of the barrier kinds only MembarStoreLoad requires any code.
    if (ins->type() & MembarStoreLoad)
        masm.storeLoadFence();
    return true;
}

} // namespace jit
} // namespace js
@ -197,6 +197,7 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
    virtual bool visitEffectiveAddress(LEffectiveAddress *ins);
    virtual bool visitUDivOrMod(LUDivOrMod *ins);
    virtual bool visitAsmJSPassStackArg(LAsmJSPassStackArg *ins);
    virtual bool visitMemoryBarrier(LMemoryBarrier *ins);

    bool visitOutOfLineLoadTypedArrayOutOfBounds(OutOfLineLoadTypedArrayOutOfBounds *ool);
@ -349,6 +349,145 @@ LIRGeneratorX86Shared::visitForkJoinGetSlice(MForkJoinGetSlice *ins)
    return defineFixed(lir, ins, LAllocation(AnyRegister(ForkJoinGetSliceReg_output)));
}

bool
LIRGeneratorX86Shared::visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // Register allocation:
    //
    // If the result lands in an integer register then that register
    // must be eax.
    //
    // If the result lands in a floating register then we need a temp
    // at the lower level; that temp must be eax.
    //
    // oldval must be in a register.
    //
    // newval must be in a register. If the source is a byte array then
    // newval must be a register addressable as a byte: ebx, ecx, or
    // edx, since eax is taken for the output in this case.
    //
    // Bug #1077036 describes some optimization opportunities.
    // (A C++ sketch of the int32 semantics follows this function.)

    bool fixedOutput = false;
    LDefinition tempDef = LDefinition::BogusTemp();
    LAllocation newval;
    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
        tempDef = tempFixed(eax);
        newval = useRegister(ins->newval());
    } else {
        fixedOutput = true;
        if (ins->isByteArray())
            newval = useFixed(ins->newval(), ebx);
        else
            newval = useRegister(ins->newval());
    }

    // A register allocator limitation precludes 'useRegisterAtStart()' here.
    const LAllocation oldval = useRegister(ins->oldval());

    LCompareExchangeTypedArrayElement *lir =
        new(alloc()) LCompareExchangeTypedArrayElement(elements, index, oldval, newval, tempDef);

    return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
}
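For the common int32 case, the code this lowering leads to computes the following (a C++ sketch with std::atomic; the function name is illustrative, not a SpiderMonkey API):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Atomics.compareExchange on an int32 element: returns the value the
    // element held before the operation, whether or not the swap happened.
    int32_t compareExchangeElement(std::atomic<int32_t> *elements, size_t index,
                                   int32_t oldval, int32_t newval)
    {
        int32_t expected = oldval;                                  // movl oldval, %eax
        elements[index].compare_exchange_strong(expected, newval);  // lock cmpxchg
        return expected;                                            // result is in %eax
    }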
bool
LIRGeneratorX86Shared::visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins)
{
    MOZ_ASSERT(ins->arrayType() != Scalar::Uint8Clamped);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float32);
    MOZ_ASSERT(ins->arrayType() != Scalar::Float64);

    MOZ_ASSERT(ins->elements()->type() == MIRType_Elements);
    MOZ_ASSERT(ins->index()->type() == MIRType_Int32);

    const LUse elements = useRegister(ins->elements());
    const LAllocation index = useRegisterOrConstant(ins->index());

    // Register allocation:
    //
    // For ADD and SUB we'll use XADD:
    //
    //    movl       src, output
    //    lock xaddl output, mem
    //
    // For the 8-bit variants XADD needs a byte register for the
    // output only.
    //
    // For AND/OR/XOR we need to use a CMPXCHG loop:
    //
    //    movl          *mem, eax
    // L: mov           eax, temp
    //    andl          src, temp
    //    lock cmpxchg  temp, mem  ; reads eax also
    //    jnz           L
    //    ; result in eax
    //
    // Note the placement of L: cmpxchg will update eax with *mem if
    // *mem does not have the expected value, so reloading it at the
    // top of the loop is redundant.
    //
    // (C++ sketches of the XADD path and the CMPXCHG loop follow this
    // function and the ATOMIC_BITOP_BODY macro, respectively.)
    //
    // If the array is not a uint32 array then:
    //  - eax should be the output (one result of the cmpxchg)
    //  - there is a temp, which must have a byte register if
    //    the array has 1-byte elements
    //
    // If the array is a uint32 array then:
    //  - eax is the first temp
    //  - we also need a second temp
    //
    // For simplicity we force the 'value' into a byte register if the
    // array has 1-byte elements, though that could be worked around.
    //
    // For simplicity we also choose fixed byte registers even when
    // any available byte register would have been OK.
    //
    // There are optimization opportunities:
    //  - when the result is unused, Bug #1077014.
    //  - better register allocation and instruction selection, Bug #1077036.

    bool bitOp = !(ins->operation() == AtomicFetchAddOp || ins->operation() == AtomicFetchSubOp);
    bool fixedOutput = true;
    LDefinition tempDef1 = LDefinition::BogusTemp();
    LDefinition tempDef2 = LDefinition::BogusTemp();
    LAllocation value;

    if (ins->arrayType() == Scalar::Uint32 && IsFloatingPointType(ins->type())) {
        value = useRegister(ins->value());
        fixedOutput = false;
        if (bitOp) {
            tempDef1 = tempFixed(eax);
            tempDef2 = temp();
        } else {
            tempDef1 = temp();
        }
    } else if (ins->isByteArray()) {
        value = useFixed(ins->value(), ebx);
        if (bitOp)
            tempDef1 = tempFixed(ecx);
    } else {
        value = useRegister(ins->value());
        if (bitOp)
            tempDef1 = temp();
    }

    LAtomicTypedArrayElementBinop *lir =
        new(alloc()) LAtomicTypedArrayElementBinop(elements, index, value, tempDef1, tempDef2);

    return fixedOutput ? defineFixed(lir, ins, LAllocation(AnyRegister(eax))) : define(lir, ins);
}
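A C++ sketch of the XADD fast path described above (illustrative only): ADD maps directly onto fetch_add, and SUB is ADD of the negated operand, matching the negl + lock xaddl sequence the macroassembler emits.

    #include <atomic>
    #include <cstdint>

    uint32_t atomicFetchAdd32(std::atomic<uint32_t> *mem, uint32_t v)
    {
        return mem->fetch_add(v);       // movl src, output; lock xaddl output, mem
    }

    uint32_t atomicFetchSub32(std::atomic<uint32_t> *mem, uint32_t v)
    {
        return mem->fetch_add(0u - v);  // negl output; lock xaddl output, mem
    }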
bool
LIRGeneratorX86Shared::visitSimdTernaryBitwise(MSimdTernaryBitwise *ins)
{
@ -55,6 +55,8 @@ class LIRGeneratorX86Shared : public LIRGeneratorShared
    bool visitSimdTernaryBitwise(MSimdTernaryBitwise *ins);
    bool visitSimdSplatX4(MSimdSplatX4 *ins);
    bool visitSimdValueX4(MSimdValueX4 *ins);
    bool visitCompareExchangeTypedArrayElement(MCompareExchangeTypedArrayElement *ins);
    bool visitAtomicTypedArrayElementBinop(MAtomicTypedArrayElementBinop *ins);
};

} // namespace jit
@ -193,10 +193,293 @@ class MacroAssemblerX86Shared : public Assembler
void atomic_dec32(const Operand &addr) {
    lock_decl(addr);
}
-void atomic_cmpxchg32(Register src, const Operand &addr, Register dest) {
+void atomic_cmpxchg8(Register newval, const Operand &addr, Register oldval_and_result) {
    // %eax must be explicitly provided for calling clarity.
-    MOZ_ASSERT(dest.code() == X86Registers::eax);
-    lock_cmpxchg32(src, addr);
+    MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
+    lock_cmpxchg8(newval, addr);
}
void atomic_cmpxchg16(Register newval, const Operand &addr, Register oldval_and_result) {
    // %eax must be explicitly provided for calling clarity.
    MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
    lock_cmpxchg16(newval, addr);
}
void atomic_cmpxchg32(Register newval, const Operand &addr, Register oldval_and_result) {
    // %eax must be explicitly provided for calling clarity.
    MOZ_ASSERT(oldval_and_result.code() == X86Registers::eax);
    lock_cmpxchg32(newval, addr);
}

template <typename T>
void atomicFetchAdd8SignExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    if (src != output)
        movl(src, output);
    lock_xaddb(output, Operand(mem));
    movsbl(output, output);
}

template <typename T>
void atomicFetchAdd8ZeroExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    lock_xaddb(output, Operand(mem));
    movzbl(output, output);
}

template <typename T>
void atomicFetchAdd8SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    movb(src, output);
    lock_xaddb(output, Operand(mem));
    movsbl(output, output);
}

template <typename T>
void atomicFetchAdd8ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    movb(src, output);
    lock_xaddb(output, Operand(mem));
    movzbl(output, output);
}
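The movsbl/movzbl tails in the byte variants above are needed because XADD leaves the fetched 8-bit value in a 32-bit register whose upper bits are stale; the result must be re-extended before it is a valid int32. The same pattern in C++ (a sketch, not SpiderMonkey code):

    #include <atomic>
    #include <cstdint>

    int32_t fetchAdd8SignExtend(std::atomic<int8_t> *mem, int8_t v)
    {
        int8_t old = mem->fetch_add(v);    // lock xaddb output, mem
        return old;                        // movsbl output, output
    }

    uint32_t fetchAdd8ZeroExtend(std::atomic<uint8_t> *mem, uint8_t v)
    {
        uint8_t old = mem->fetch_add(v);   // lock xaddb output, mem
        return old;                        // movzbl output, output
    }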
template <typename T>
void atomicFetchAdd16SignExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    lock_xaddw(output, Operand(mem));
    movswl(output, output);
}

template <typename T>
void atomicFetchAdd16ZeroExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    lock_xaddw(output, Operand(mem));
    movzwl(output, output);
}

template <typename T>
void atomicFetchAdd16SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    movl(src, output);
    lock_xaddw(output, Operand(mem));
    movswl(output, output);
}

template <typename T>
void atomicFetchAdd16ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    movl(src, output);
    lock_xaddw(output, Operand(mem));
    movzwl(output, output);
}

template <typename T>
void atomicFetchAdd32(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    lock_xaddl(output, Operand(mem));
}

template <typename T>
void atomicFetchAdd32(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    movl(src, output);
    lock_xaddl(output, Operand(mem));
}

template <typename T>
void atomicFetchSub8SignExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    negl(output);
    lock_xaddb(output, Operand(mem));
    movsbl(output, output);
}

template <typename T>
void atomicFetchSub8ZeroExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    negl(output);
    lock_xaddb(output, Operand(mem));
    movzbl(output, output);
}

template <typename T>
void atomicFetchSub8SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    movb(Imm32(-src.value), output);
    lock_xaddb(output, Operand(mem));
    movsbl(output, output);
}

template <typename T>
void atomicFetchSub8ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(temp == InvalidReg);
    movb(Imm32(-src.value), output);
    lock_xaddb(output, Operand(mem));
    movzbl(output, output);
}

template <typename T>
void atomicFetchSub16SignExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    negl(output);
    lock_xaddw(output, Operand(mem));
    movswl(output, output);
}

template <typename T>
void atomicFetchSub16ZeroExtend(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    negl(output);
    lock_xaddw(output, Operand(mem));
    movzwl(output, output);
}

template <typename T>
void atomicFetchSub16SignExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    movl(Imm32(-src.value), output);
    lock_xaddw(output, Operand(mem));
    movswl(output, output);
}

template <typename T>
void atomicFetchSub16ZeroExtend(Imm32 src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    movl(Imm32(-src.value), output);
    lock_xaddw(output, Operand(mem));
    movzwl(output, output);
}

template <typename T>
void atomicFetchSub32(Register src, const T &mem, Register temp, Register output) {
    MOZ_ASSERT(temp == InvalidReg);
    if (src != output)
        movl(src, output);
    negl(output);
    lock_xaddl(output, Operand(mem));
}

template <typename T>
void atomicFetchSub32(Imm32 src, const T &mem, Register temp, Register output) {
    movl(Imm32(-src.value), output);
    lock_xaddl(output, Operand(mem));
}

// requires output == eax
#define ATOMIC_BITOP_BODY(LOAD, OP, LOCK_CMPXCHG) \
    MOZ_ASSERT(output == eax);                    \
    LOAD(Operand(mem), eax);                      \
    Label again;                                  \
    bind(&again);                                 \
    movl(eax, temp);                              \
    OP(src, temp);                                \
    LOCK_CMPXCHG(temp, Operand(mem));             \
    j(NonZero, &again);
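The loop this macro expands to, written out in C++ for the 32-bit AND case (a sketch; compare_exchange_weak refreshes the expected value on failure exactly as CMPXCHG reloads %eax, which is why the loop does not re-read memory at the top):

    #include <atomic>
    #include <cstdint>

    uint32_t atomicFetchAnd32(std::atomic<uint32_t> *mem, uint32_t src)
    {
        uint32_t old = mem->load();                            // movl *mem, eax
        while (!mem->compare_exchange_weak(old, old & src)) {  // lock cmpxchg
            // 'old' was refreshed from *mem by the failed exchange; retry.
        }
        return old;                                            // result in eax
    }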
template <typename S, typename T>
void atomicFetchAnd8SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchg8)
    movsbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchAnd8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, andl, lock_cmpxchg8)
    movzbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchAnd16SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchg16)
    movswl(eax, eax);
}
template <typename S, typename T>
void atomicFetchAnd16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, andl, lock_cmpxchg16)
    movzwl(eax, eax);
}
template <typename S, typename T>
void atomicFetchAnd32(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movl, andl, lock_cmpxchg32)
}

template <typename S, typename T>
void atomicFetchOr8SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchg8)
    movsbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchOr8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, orl, lock_cmpxchg8)
    movzbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchOr16SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchg16)
    movswl(eax, eax);
}
template <typename S, typename T>
void atomicFetchOr16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, orl, lock_cmpxchg16)
    movzwl(eax, eax);
}
template <typename S, typename T>
void atomicFetchOr32(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movl, orl, lock_cmpxchg32)
}

template <typename S, typename T>
void atomicFetchXor8SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchg8)
    movsbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchXor8ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movb, xorl, lock_cmpxchg8)
    movzbl(eax, eax);
}
template <typename S, typename T>
void atomicFetchXor16SignExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchg16)
    movswl(eax, eax);
}
template <typename S, typename T>
void atomicFetchXor16ZeroExtend(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movw, xorl, lock_cmpxchg16)
    movzwl(eax, eax);
}
template <typename S, typename T>
void atomicFetchXor32(const S &src, const T &mem, Register temp, Register output) {
    ATOMIC_BITOP_BODY(movl, xorl, lock_cmpxchg32)
}

#undef ATOMIC_BITOP_BODY

void storeLoadFence() {
    // This implementation follows Linux.
    if (HasSSE2())
        masm.mfence();
    else
        lock_addl(Imm32(0), Operand(Address(esp, 0)));
}
void branch16(Condition cond, Register lhs, Register rhs, Label *label) {
@ -362,6 +645,24 @@ class MacroAssemblerX86Shared : public Assembler
void store8(const S &src, const T &dest) {
    movb(src, Operand(dest));
}
template <typename T>
void compareExchange8ZeroExtend(const T &mem, Register oldval, Register newval, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(newval == ebx || newval == ecx || newval == edx);
    if (oldval != output)
        movl(oldval, output);
    lock_cmpxchg8(newval, Operand(mem));
    movzbl(output, output);
}
template <typename T>
void compareExchange8SignExtend(const T &mem, Register oldval, Register newval, Register output) {
    MOZ_ASSERT(output == eax);
    MOZ_ASSERT(newval == ebx || newval == ecx || newval == edx);
    if (oldval != output)
        movl(oldval, output);
    lock_cmpxchg8(newval, Operand(mem));
    movsbl(output, output);
}
void load16ZeroExtend(const Address &src, Register dest) {
    movzwl(Operand(src), dest);
}
@ -372,6 +673,22 @@ class MacroAssemblerX86Shared : public Assembler
void store16(const S &src, const T &dest) {
    movw(src, Operand(dest));
}
template <typename T>
void compareExchange16ZeroExtend(const T &mem, Register oldval, Register newval, Register output) {
    MOZ_ASSERT(output == eax);
    if (oldval != output)
        movl(oldval, output);
    lock_cmpxchg16(newval, Operand(mem));
    movzwl(output, output);
}
template <typename T>
void compareExchange16SignExtend(const T &mem, Register oldval, Register newval, Register output) {
    MOZ_ASSERT(output == eax);
    if (oldval != output)
        movl(oldval, output);
    lock_cmpxchg16(newval, Operand(mem));
    movswl(output, output);
}
void load16SignExtend(const Address &src, Register dest) {
    movswl(Operand(src), dest);
}
@ -391,6 +708,13 @@ class MacroAssemblerX86Shared : public Assembler
void store32(const S &src, const T &dest) {
    movl(src, Operand(dest));
}
template <typename T>
void compareExchange32(const T &mem, Register oldval, Register newval, Register output) {
    MOZ_ASSERT(output == eax);
    if (oldval != output)
        movl(oldval, output);
    lock_cmpxchg32(newval, Operand(mem));
}
template <typename S, typename T>
void store32_NoSecondScratch(const S &src, const T &dest) {
    store32(src, dest);