Bug 1223355: Common out constant pool generation code on x86/x64; r=sunfish

--HG--
extra : rebase_source : 73f10b8e04bb4b45f8f4876bf03a543ef19c48e1
Benjamin Bouvier 2015-11-10 14:10:28 +01:00
parent f016efafb8
commit 06874d3f1e
6 changed files with 140 additions and 219 deletions
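
In outline, the patch replaces three nearly identical copies of constant-pool bookkeeping (doubles, floats and SIMD constants, each kept in a vector of entries plus a value-to-index hash map) with shared getDouble/getFloat/getSimdData helpers on MacroAssemblerX86Shared. The sketch below only illustrates that deduplicating lookup and is not SpiderMonkey code: std::vector and std::unordered_map stand in for js::Vector/js::HashMap, the fallible-allocation (enoughMemory_) handling is dropped, and the ConstantPool/PoolEntry names are invented for the example.

// Illustration only: a deduplicating constant pool in the spirit of the new
// shared getDouble()/getFloat()/getSimdData() helpers. The real entries also
// carry a per-platform "uses" label (see the headers further down), omitted here.
#include <cstddef>
#include <unordered_map>
#include <vector>

struct PoolEntry {
    double value;
    explicit PoolEntry(double v) : value(v) {}
};

class ConstantPool {
    std::vector<PoolEntry> entries_;
    std::unordered_map<double, std::size_t> map_;  // value -> index into entries_

  public:
    // Return the entry for d, appending a new one the first time d is seen.
    // The real helpers return nullptr when an allocation fails; this sketch
    // assumes allocation always succeeds.
    PoolEntry* getDouble(double d) {
        auto p = map_.find(d);
        std::size_t index;
        if (p != map_.end()) {
            index = p->second;
        } else {
            index = entries_.size();
            entries_.emplace_back(d);
            map_.emplace(d, index);
        }
        return &entries_[index];
    }
};

int main() {
    ConstantPool pool;
    PoolEntry* a = pool.getDouble(3.14);   // first use: appended
    PoolEntry* b = pool.getDouble(3.14);   // second use: found in the map
    return a == b ? 0 : 1;                 // same constant, same pool slot
}

Each platform then only supplies the instruction emission around the shared lookup, RIP-relative loads on x64 and absolute-address patching on x86, which is what the hunks below shrink to.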


@@ -23,34 +23,16 @@ MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
{
if (maybeInlineDouble(d, dest))
return;
if (!doubleMap_.initialized()) {
enoughMemory_ &= doubleMap_.init();
if (!enoughMemory_)
return;
}
size_t doubleIndex;
if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
doubleIndex = p->value();
} else {
doubleIndex = doubles_.length();
enoughMemory_ &= doubles_.append(Double(d));
if (!enoughMemory_)
return;
enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
if (!enoughMemory_)
return;
}
Double& dbl = doubles_[doubleIndex];
MOZ_ASSERT(!dbl.uses.bound());
Double* dbl = getDouble(d);
if (!dbl)
return;
// The constants will be stored in a pool appended to the text (see
// finish()), so they will always be a fixed distance from the
// instructions which reference them. This allows the instructions to use
// PC-relative addressing. Use "jump" label support code, because we need
// the same PC-relative address patching that jumps use.
JmpSrc j = masm.vmovsd_ripr(dest.encoding());
JmpSrc prev = JmpSrc(dbl.uses.use(j.offset()));
JmpSrc prev = JmpSrc(dbl->uses.use(j.offset()));
masm.setNextJump(j, prev);
}
@@ -59,71 +41,25 @@ MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
{
if (maybeInlineFloat(f, dest))
return;
if (!floatMap_.initialized()) {
enoughMemory_ &= floatMap_.init();
if (!enoughMemory_)
return;
}
size_t floatIndex;
if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
floatIndex = p->value();
} else {
floatIndex = floats_.length();
enoughMemory_ &= floats_.append(Float(f));
if (!enoughMemory_)
return;
enoughMemory_ &= floatMap_.add(p, f, floatIndex);
if (!enoughMemory_)
return;
}
Float& flt = floats_[floatIndex];
MOZ_ASSERT(!flt.uses.bound());
Float* flt = getFloat(f);
if (!flt)
return;
// See comment in loadConstantDouble
JmpSrc j = masm.vmovss_ripr(dest.encoding());
JmpSrc prev = JmpSrc(flt.uses.use(j.offset()));
JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
masm.setNextJump(j, prev);
}
MacroAssemblerX64::SimdData*
MacroAssemblerX64::getSimdData(const SimdConstant& v)
{
if (!simdMap_.initialized()) {
enoughMemory_ &= simdMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t index;
if (SimdMap::AddPtr p = simdMap_.lookupForAdd(v)) {
index = p->value();
} else {
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData(v));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
return &simds_[index];
}
void
MacroAssemblerX64::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
{
MOZ_ASSERT(v.type() == SimdConstant::Int32x4);
if (maybeInlineInt32x4(v, dest))
return;
SimdData* val = getSimdData(v);
if (!val)
return;
MOZ_ASSERT(!val->uses.bound());
MOZ_ASSERT(val->type() == SimdConstant::Int32x4);
JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
@@ -135,14 +71,10 @@ MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant& v, FloatRegister des
MOZ_ASSERT(v.type() == SimdConstant::Float32x4);
if (maybeInlineFloat32x4(v, dest))
return;
SimdData* val = getSimdData(v);
if (!val)
return;
MOZ_ASSERT(!val->uses.bound());
MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
JmpSrc j = masm.vmovaps_ripr(dest.encoding());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
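
The comment in loadConstantDouble above is why the jump-label machinery is reused here: a vmovsd_ripr leaves a 4-byte displacement that cannot be filled until the constant pool is appended after the text, so the uses label threads every pending reference into a chain that finish() later walks and patches. Below is a loose, self-contained model of such a chain; Buffer, PoolEntry and emitLoad are invented names, and plain integers stand in for the assembler's JmpSrc offsets.

// Hypothetical model of the use chain, not jitted code.
#include <cstdint>
#include <iostream>
#include <vector>

struct PoolEntry {
    double value;
    int32_t lastUse;   // head of the use chain; -1 means no uses yet
};

struct Buffer {
    std::vector<int32_t> disp;   // stand-ins for the 4-byte displacement slots

    // Emit a load of `c`: the displacement slot temporarily stores the offset
    // of the previous use (or -1), and this use becomes the new chain head.
    void emitLoad(PoolEntry& c) {
        int32_t offset = static_cast<int32_t>(disp.size());
        disp.push_back(c.lastUse);
        c.lastUse = offset;
    }

    // Once the "text" is complete and the constant's final position is known,
    // walk the chain and replace each link with the real relative distance.
    void finish(PoolEntry& c, int32_t constantOffset) {
        int32_t use = c.lastUse;
        while (use != -1) {
            int32_t prev = disp[use];
            disp[use] = constantOffset - use;
            use = prev;
        }
    }
};

int main() {
    PoolEntry pi{3.14, -1};
    Buffer buf;
    buf.emitLoad(pi);        // first reference
    buf.emitLoad(pi);        // second reference, links back to the first
    buf.finish(pi, 100);     // pretend the pooled constant lands at offset 100
    for (int32_t d : buf.disp)
        std::cout << d << "\n";   // prints 100 and 99: the patched distances
    return 0;
}

Linking each new use to the previous one is roughly the role setNextJump(j, prev) plays in the real code, with the chain head kept in the entry's uses label.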


@@ -32,6 +32,9 @@ struct ImmTag : public Imm32
{ }
};
struct MacroAssemblerX86Shared::PlatformSpecificLabel : public NonAssertingLabel
{};
class MacroAssemblerX64 : public MacroAssemblerX86Shared
{
private:
@@ -39,47 +42,16 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
private:
// These use SystemAllocPolicy since asm.js releases memory after each
// function is compiled, and these need to live until after all functions
// are compiled.
struct Double {
double value;
NonAssertingLabel uses;
explicit Double(double value) : value(value) {}
};
Vector<Double, 0, SystemAllocPolicy> doubles_;
typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
DoubleMap doubleMap_;
struct Float {
float value;
NonAssertingLabel uses;
explicit Float(float value) : value(value) {}
};
Vector<Float, 0, SystemAllocPolicy> floats_;
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
struct SimdData {
SimdConstant value;
NonAssertingLabel uses;
explicit SimdData(const SimdConstant& v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
};
Vector<SimdData, 0, SystemAllocPolicy> simds_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
public:
using MacroAssemblerX86Shared::branch32;
using MacroAssemblerX86Shared::branchTest32;
using MacroAssemblerX86Shared::load32;
using MacroAssemblerX86Shared::store32;
typedef MacroAssemblerX86Shared::Double<> Double;
typedef MacroAssemblerX86Shared::Float<> Float;
typedef MacroAssemblerX86Shared::SimdData<> SimdData;
MacroAssemblerX64()
{
}
@@ -1274,9 +1246,6 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
void loadConstantDouble(double d, FloatRegister dest);
void loadConstantFloat32(float f, FloatRegister dest);
private:
SimdData* getSimdData(const SimdConstant& v);
public:
void loadConstantInt32x4(const SimdConstant& v, FloatRegister dest);
void loadConstantFloat32x4(const SimdConstant& v, FloatRegister dest);


@@ -232,6 +232,80 @@ template void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, AnyRegister output);
MacroAssemblerX86Shared::Float<>*
MacroAssemblerX86Shared::getFloat(float f)
{
if (!floatMap_.initialized()) {
enoughMemory_ &= floatMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t floatIndex;
if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f)) {
floatIndex = p->value();
} else {
floatIndex = floats_.length();
enoughMemory_ &= floats_.append(Float<>(f));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= floatMap_.add(p, f, floatIndex);
if (!enoughMemory_)
return nullptr;
}
Float<>& flt = floats_[floatIndex];
MOZ_ASSERT(!flt.uses.bound());
return &flt;
}
MacroAssemblerX86Shared::Double<>*
MacroAssemblerX86Shared::getDouble(double d)
{
if (!doubleMap_.initialized()) {
enoughMemory_ &= doubleMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t doubleIndex;
if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d)) {
doubleIndex = p->value();
} else {
doubleIndex = doubles_.length();
enoughMemory_ &= doubles_.append(Double<>(d));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
if (!enoughMemory_)
return nullptr;
}
Double<>& dbl = doubles_[doubleIndex];
MOZ_ASSERT(!dbl.uses.bound());
return &dbl;
}
MacroAssemblerX86Shared::SimdData<>*
MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
{
if (!simdMap_.initialized()) {
enoughMemory_ &= simdMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t index;
if (SimdMap::AddPtr p = simdMap_.lookupForAdd(v)) {
index = p->value();
} else {
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData<>(v));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
SimdData<>& simd = simds_[index];
MOZ_ASSERT(!simd.uses.bound());
return &simd;
}
//{{{ check_macroassembler_style
// ===============================================================


@@ -45,6 +45,50 @@ class MacroAssemblerX86Shared : public Assembler
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
protected:
struct PlatformSpecificLabel;
template<class LabelType = PlatformSpecificLabel>
struct Double {
double value;
LabelType uses;
explicit Double(double value) : value(value) {}
};
// These use SystemAllocPolicy since asm.js releases memory after each
// function is compiled, and these need to live until after all functions
// are compiled.
Vector<Double<PlatformSpecificLabel>, 0, SystemAllocPolicy> doubles_;
typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
DoubleMap doubleMap_;
template<class LabelType = PlatformSpecificLabel>
struct Float {
float value;
LabelType uses;
explicit Float(float value) : value(value) {}
};
Vector<Float<PlatformSpecificLabel>, 0, SystemAllocPolicy> floats_;
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
template<class LabelType = PlatformSpecificLabel>
struct SimdData {
SimdConstant value;
LabelType uses;
explicit SimdData(const SimdConstant& v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
};
Vector<SimdData<PlatformSpecificLabel>, 0, SystemAllocPolicy> simds_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
Float<>* getFloat(float f);
Double<>* getDouble(double d);
SimdData<>* getSimdData(const SimdConstant& v);
public:
using Assembler::call;
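
The header above makes the pool entries generic over their label type: MacroAssemblerX86Shared only forward-declares PlatformSpecificLabel and uses it as the default template argument, and each platform header then defines that nested struct to inherit from its own label class (NonAssertingLabel on x64, AbsoluteLabel on x86, as the two small header hunks show). A minimal sketch of just that selection trick follows, with empty placeholder label types and access widened to public for brevity; it is an illustration, not the real headers.

// Placeholder label types standing in for the real AbsoluteLabel (x86) and
// NonAssertingLabel (x64); the real classes carry the use-chain state.
struct AbsoluteLabel {};
struct NonAssertingLabel {};

class MacroAssemblerX86Shared {
  public:  // protected in the real header; public here to keep the demo short
    struct PlatformSpecificLabel;  // defined by each platform header

    template <class LabelType = PlatformSpecificLabel>
    struct Double {
        double value;
        LabelType uses;
        explicit Double(double v) : value(v) {}
    };
};

// In the tree this definition sits in the per-platform header, next to that
// platform's MacroAssembler class; the x64 flavour is picked here.
struct MacroAssemblerX86Shared::PlatformSpecificLabel : public NonAssertingLabel {};

class MacroAssemblerX64 : public MacroAssemblerX86Shared {
  public:
    // Double<> now means Double<PlatformSpecificLabel>, i.e. the x64 label.
    typedef MacroAssemblerX86Shared::Double<> Double;
};

int main() {
    MacroAssemblerX64::Double d(1.0);  // its uses member is a NonAssertingLabel
    return d.value == 1.0 ? 0 : 1;
}

The Double<>, Float<> and SimdData<> typedefs in the platform headers are what keep the rest of each platform's code compiling unchanged.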


@@ -90,30 +90,6 @@ MacroAssemblerX86::convertUInt64ToDouble(Register64 src, Register temp, FloatReg
vhaddpd(dest128, dest128);
}
MacroAssemblerX86::Double*
MacroAssemblerX86::getDouble(double d)
{
if (!doubleMap_.initialized()) {
enoughMemory_ &= doubleMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t doubleIndex;
DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d);
if (p) {
doubleIndex = p->value();
} else {
doubleIndex = doubles_.length();
enoughMemory_ &= doubles_.append(Double(d));
enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
if (!enoughMemory_)
return nullptr;
}
Double& dbl = doubles_[doubleIndex];
MOZ_ASSERT(!dbl.uses.bound());
return &dbl;
}
void
MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
{
@@ -136,30 +112,6 @@ MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
dbl->uses.setPrev(masm.size());
}
MacroAssemblerX86::Float*
MacroAssemblerX86::getFloat(float f)
{
if (!floatMap_.initialized()) {
enoughMemory_ &= floatMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t floatIndex;
FloatMap::AddPtr p = floatMap_.lookupForAdd(f);
if (p) {
floatIndex = p->value();
} else {
floatIndex = floats_.length();
enoughMemory_ &= floats_.append(Float(f));
enoughMemory_ &= floatMap_.add(p, f, floatIndex);
if (!enoughMemory_)
return nullptr;
}
Float& flt = floats_[floatIndex];
MOZ_ASSERT(!flt.uses.bound());
return &flt;
}
void
MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
{
@@ -182,30 +134,6 @@ MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
flt->uses.setPrev(masm.size());
}
MacroAssemblerX86::SimdData*
MacroAssemblerX86::getSimdData(const SimdConstant& v)
{
if (!simdMap_.initialized()) {
enoughMemory_ &= simdMap_.init();
if (!enoughMemory_)
return nullptr;
}
size_t index;
SimdMap::AddPtr p = simdMap_.lookupForAdd(v);
if (p) {
index = p->value();
} else {
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData(v));
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
SimdData& simd = simds_[index];
MOZ_ASSERT(!simd.uses.bound());
return &simd;
}
void
MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest)
{


@@ -16,6 +16,9 @@
namespace js {
namespace jit {
struct MacroAssemblerX86Shared::PlatformSpecificLabel : public AbsoluteLabel
{};
class MacroAssemblerX86 : public MacroAssemblerX86Shared
{
private:
@@ -23,37 +26,9 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
private:
struct Double {
double value;
AbsoluteLabel uses;
Double(double value) : value(value) {}
};
Vector<Double, 0, SystemAllocPolicy> doubles_;
struct Float {
float value;
AbsoluteLabel uses;
Float(float value) : value(value) {}
};
Vector<Float, 0, SystemAllocPolicy> floats_;
struct SimdData {
SimdConstant value;
AbsoluteLabel uses;
SimdData(const SimdConstant& v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
};
Vector<SimdData, 0, SystemAllocPolicy> simds_;
typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
DoubleMap doubleMap_;
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
Double* getDouble(double d);
Float* getFloat(float f);
SimdData* getSimdData(const SimdConstant& v);
typedef MacroAssemblerX86Shared::Double<> Double;
typedef MacroAssemblerX86Shared::Float<> Float;
typedef MacroAssemblerX86Shared::SimdData<> SimdData;
protected:
MoveResolver moveResolver_;
@@ -64,8 +39,7 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
// first push.
if (address.base == StackPointer)
return Operand(address.base, address.offset + 4);
else
return payloadOf(address);
return payloadOf(address);
}
Operand payloadOf(const Address& address) {
return Operand(address.base, address.offset);