Bug 1181612: Merge macro assemblers; r=luke

--HG--
extra : rebase_source : 30c7bb060c694d2378131e38da70c82f4b9269cb
Benjamin Bouvier 2015-11-24 21:26:07 +01:00
parent 175cc535b7
commit fda1f19d95
35 changed files with 433 additions and 262 deletions

View File

@ -341,22 +341,12 @@ AsmJSModule::finish(ExclusiveContext* cx, TokenStream& tokenStream, MacroAssembl
// CodeLabels are used for switch cases and for loads of floating-point /
// SIMD values from the constant pool.
for (size_t i = 0; i < masm.numCodeLabels(); i++) {
CodeLabel src = masm.codeLabel(i);
int32_t labelOffset = src.dest()->offset();
int32_t targetOffset = src.src()->offset();
// The patched uses of a label embed a linked list where the
// to-be-patched immediate is the offset of the next to-be-patched
// instruction.
while (labelOffset != LabelBase::INVALID_OFFSET) {
size_t patchAtOffset = masm.labelOffsetToPatchOffset(labelOffset);
RelativeLink link(RelativeLink::CodeLabel);
link.patchAtOffset = patchAtOffset;
link.targetOffset = targetOffset;
if (!staticLinkData_.relativeLinks.append(link))
return false;
labelOffset = Assembler::ExtractCodeLabelOffset(code_ + patchAtOffset);
}
CodeLabel cl = masm.codeLabel(i);
RelativeLink link(RelativeLink::CodeLabel);
link.patchAtOffset = masm.labelToPatchOffset(*cl.patchAt());
link.targetOffset = cl.target()->offset();
if (!staticLinkData_.relativeLinks.append(link))
return false;
}
#if defined(JS_CODEGEN_X86)
@ -366,7 +356,7 @@ AsmJSModule::finish(ExclusiveContext* cx, TokenStream& tokenStream, MacroAssembl
for (size_t i = 0; i < masm.numAsmJSGlobalAccesses(); i++) {
AsmJSGlobalAccess a = masm.asmJSGlobalAccess(i);
RelativeLink link(RelativeLink::InstructionImmediate);
link.patchAtOffset = masm.labelOffsetToPatchOffset(a.patchAt.offset());
link.patchAtOffset = masm.labelToPatchOffset(a.patchAt);
link.targetOffset = offsetOfGlobalData() + a.globalDataOffset;
if (!staticLinkData_.relativeLinks.append(link))
return false;
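At static-link time, each recorded RelativeLink is resolved by writing the absolute address of the target into the patch site. A minimal sketch of that resolution step, assuming the two-field shape of RelativeLink visible above (the real application happens later, during AsmJSModule's static linking):

#include <cstdint>
#include <cstring>

// Assumed two-field shape, mirroring patchAtOffset/targetOffset above.
struct RelativeLinkSketch {
    uint32_t patchAtOffset;  // where the absolute address must be written
    uint32_t targetOffset;   // what it must point at, relative to 'code'
};

static void ApplyLink(uint8_t* code, const RelativeLinkSketch& link) {
    uintptr_t target = uintptr_t(code) + link.targetOffset;
    memcpy(code + link.patchAtOffset, &target, sizeof(target));
}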

View File

@ -891,7 +891,7 @@ BaselineCompiler::emitProfilerEnterFrame()
masm.bind(&noInstrument);
// Store the start offset in the appropriate location.
MOZ_ASSERT(profilerEnterFrameToggleOffset_.offset() == 0);
MOZ_ASSERT(!profilerEnterFrameToggleOffset_.used());
profilerEnterFrameToggleOffset_ = toggleOffset;
}
@ -906,7 +906,7 @@ BaselineCompiler::emitProfilerExitFrame()
masm.bind(&noInstrument);
// Store the start offset in the appropriate location.
MOZ_ASSERT(profilerExitFrameToggleOffset_.offset() == 0);
MOZ_ASSERT(!profilerExitFrameToggleOffset_.used());
profilerExitFrameToggleOffset_ = toggleOffset;
}

View File

@ -7805,6 +7805,7 @@ CodeGenerator::generateAsmJS(AsmJSFunctionLabels* labels)
// is nothing else to do after this point since the LifoAlloc memory
// holding the MIR graph is about to be popped and reused. In particular,
// every step in CodeGenerator::link must be a nop, as asserted here:
MOZ_ASSERT(!masm.failureLabel()->used());
MOZ_ASSERT(snapshots_.listSize() == 0);
MOZ_ASSERT(snapshots_.RVATableSize() == 0);
MOZ_ASSERT(recovers_.size() == 0);

View File

@ -37,6 +37,13 @@ struct LabelBase
MOZ_ASSERT(bound() || used());
return offset_;
}
void offsetBy(int32_t delta) {
MOZ_ASSERT(bound() || used());
MOZ_ASSERT(offset() + delta >= offset(), "no overflow");
mozilla::DebugOnly<int32_t> oldOffset(offset());
offset_ += delta;
MOZ_ASSERT(offset_ == delta + oldOffset, "new offset fits in 31 bits");
}
// Returns whether the label is not bound, but has incoming uses.
bool used() const {
return !bound() && offset_ > INVALID_OFFSET;
@ -46,7 +53,7 @@ struct LabelBase
MOZ_ASSERT(!bound());
offset_ = offset;
bound_ = true;
MOZ_ASSERT(offset_ == offset);
MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
}
// Marks the label as neither bound nor used.
void reset() {
@ -60,7 +67,7 @@ struct LabelBase
int32_t old = offset_;
offset_ = offset;
MOZ_ASSERT(offset_ == offset);
MOZ_ASSERT(offset_ == offset, "offset fits in 31 bits");
return old;
}

View File

@ -310,7 +310,7 @@ MacroAssembler::leaveExitFrame(size_t extraFrame)
bool
MacroAssembler::hasSelfReference() const
{
return selfReferencePatch_.offset() != 0;
return selfReferencePatch_.used();
}
//}}} check_macroassembler_style

View File

@ -2002,6 +2002,12 @@ MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister
}
}
bool
MacroAssembler::asmMergeWith(const MacroAssembler& other)
{
return MacroAssemblerSpecific::asmMergeWith(other);
}
void
MacroAssembler::finish()
{

View File

@ -1336,6 +1336,7 @@ class MacroAssembler : public MacroAssemblerSpecific
return &failureLabel_;
}
bool asmMergeWith(const MacroAssembler& masm);
void finish();
void link(JitCode* code);

View File

@ -633,6 +633,15 @@ Assembler::finish()
isFinished = true;
}
bool
Assembler::asmMergeWith(const Assembler& other)
{
flush();
if (!AssemblerShared::asmMergeWith(size(), other))
return false;
return m_buffer.appendBuffer(other.m_buffer);
}
void
Assembler::executableCopy(uint8_t* buffer)
{
@ -937,28 +946,19 @@ Assembler::processCodeLabels(uint8_t* rawCode)
{
for (size_t i = 0; i < codeLabels_.length(); i++) {
CodeLabel label = codeLabels_[i];
Bind(rawCode, label.dest(), rawCode + label.src()->offset());
Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
}
}
void
Assembler::writeCodePointer(AbsoluteLabel* absoluteLabel) {
MOZ_ASSERT(!absoluteLabel->bound());
Assembler::writeCodePointer(CodeOffsetLabel* label) {
BufferOffset off = writeInst(LabelBase::INVALID_OFFSET);
// The x86/x64 makes general use of AbsoluteLabel and weaves a linked list
// of uses of an AbsoluteLabel through the assembly. ARM only uses labels
// for the case statements of switch jump tables. Thus, for simplicity, we
// simply treat the AbsoluteLabel as a label and bind it to the offset of
// the jump table entry that needs to be patched.
LabelBase* label = absoluteLabel;
label->bind(off.getOffset());
label->use(off.getOffset());
}
void
Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
Assembler::Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address)
{
// See writeCodePointer comment.
*reinterpret_cast<const void**>(rawCode + label->offset()) = address;
}
@ -2876,6 +2876,40 @@ Assembler::retarget(Label* label, Label* target)
}
void
Assembler::retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target)
{
if (!label->used())
return;
MOZ_ASSERT(!target->bound());
bool more;
BufferOffset labelBranchOffset(label->offset() + baseOffset);
do {
BufferOffset next;
more = nextLink(labelBranchOffset, &next);
Instruction branch = *editSrc(labelBranchOffset);
Condition c = branch.extractCond();
int32_t prev = target->use(labelBranchOffset.getOffset());
MOZ_RELEASE_ASSERT(prev == Label::INVALID_OFFSET || unsigned(prev) < size());
BOffImm newOffset;
if (prev != Label::INVALID_OFFSET)
newOffset = BOffImm(prev);
if (branch.is<InstBImm>())
as_b(newOffset, c, labelBranchOffset);
else if (branch.is<InstBLImm>())
as_bl(newOffset, c, labelBranchOffset);
else
MOZ_CRASH("crazy fixup!");
labelBranchOffset = BufferOffset(next.getOffset() + baseOffset);
} while (more);
}
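The loop above walks the linked list that unbound labels thread through the instruction stream: every unpatched branch carries, in its immediate field, the offset of the next unpatched branch, and each link must be rebased by baseOffset because the list was recorded in the pre-merge buffer. A toy model of that walk, under assumed types (a flat vector stands in for the instruction buffer, -1 for Label::INVALID_OFFSET):

#include <cstdint>
#include <vector>

static std::vector<int32_t> CollectUses(const std::vector<int32_t>& imms,
                                        int32_t head, int32_t base) {
    // 'imms' models the merged buffer; the links stored in it are still
    // relative to the original buffer, hence the '+ base' rebasing.
    std::vector<int32_t> uses;
    int32_t off = head;
    while (off != -1) {
        int32_t at = off + base;   // position of this use in the merged buffer
        uses.push_back(at);        // each visited use is a branch to re-emit
        off = imms[at];            // next use, still in original-buffer space
    }
    return uses;
}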
static int stopBKPT = -1;
void
Assembler::as_bkpt()

View File

@ -1397,6 +1397,7 @@ class Assembler : public AssemblerShared
bool isFinished;
public:
void finish();
bool asmMergeWith(const Assembler& other);
void executableCopy(void* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
@ -1429,7 +1430,7 @@ class Assembler : public AssemblerShared
static void WriteInstStatic(uint32_t x, uint32_t* dest);
public:
void writeCodePointer(AbsoluteLabel* label);
void writeCodePointer(CodeOffsetLabel* label);
void haltingAlign(int alignment);
void nopAlign(int alignment);
@ -1690,15 +1691,16 @@ class Assembler : public AssemblerShared
uint32_t currentOffset() {
return nextOffset().getOffset();
}
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target);
void retarget(Label* label, Label* target);
// I'm going to pretend this doesn't exist for now.
void retarget(Label* label, void* target, Relocation::Kind reloc);
void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address);
void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address);
// See Bind
size_t labelOffsetToPatchOffset(size_t offset) {
return offset;
size_t labelToPatchOffset(CodeOffsetLabel label) {
return label.offset();
}
void as_bkpt();
@ -1910,9 +1912,6 @@ class Assembler : public AssemblerShared
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
void processCodeLabels(uint8_t* rawCode);
static int32_t ExtractCodeLabelOffset(uint8_t* code) {
return *(uintptr_t*)code;
}
bool bailed() {
return m_buffer.bail();

View File

@ -1031,7 +1031,7 @@ CodeGeneratorARM::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
// The entries of the jump table need to be absolute addresses and thus
// must be patched after codegen is finished.
CodeLabel cl = ool->codeLabel(i);
cl.src()->bind(caseoffset);
cl.target()->use(caseoffset);
masm.addCodeLabel(cl);
}
}
@ -1085,7 +1085,7 @@ CodeGeneratorARM::emitTableSwitchDispatch(MTableSwitch* mir, Register index, Reg
OutOfLineTableSwitch* ool = new(alloc()) OutOfLineTableSwitch(alloc(), mir);
for (int32_t i = 0; i < cases; i++) {
CodeLabel cl;
masm.writeCodePointer(cl.dest());
masm.writeCodePointer(cl.patchAt());
ool->addCodeLabel(cl);
}
addOutOfLineCode(ool, mir);

View File

@ -631,12 +631,6 @@ Assembler::FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader
cx->runtime()->gc.storeBuffer.putWholeCell(code);
}
int32_t
Assembler::ExtractCodeLabelOffset(uint8_t* code)
{
return *(int32_t*)code;
}
void
Assembler::PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm)
{

View File

@ -177,6 +177,9 @@ class Assembler : public vixl::Assembler
typedef vixl::Condition Condition;
void finish();
bool asmMergeWith(const Assembler& other) {
MOZ_CRASH("NYI");
}
void trace(JSTracer* trc);
// Emit the jump table, returning the BufferOffset to the first entry in the table.
@ -236,15 +239,18 @@ class Assembler : public vixl::Assembler
void processCodeLabels(uint8_t* rawCode) {
for (size_t i = 0; i < codeLabels_.length(); i++) {
CodeLabel label = codeLabels_[i];
Bind(rawCode, label.dest(), rawCode + label.src()->offset());
Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
}
}
void Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address) {
void Bind(uint8_t* rawCode, CodeOffsetLabel* label, const void* address) {
*reinterpret_cast<const void**>(rawCode + label->offset()) = address;
}
void retarget(Label* cur, Label* next);
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
MOZ_CRASH("NYI");
}
// The buffer is about to be linked. Ensure any constant pools or
// excess bookkeeping has been flushed to the instruction stream.
@ -256,7 +262,9 @@ class Assembler : public vixl::Assembler
ARMBuffer::PoolEntry pe(curOffset);
return armbuffer_.poolEntryOffset(pe);
}
size_t labelOffsetToPatchOffset(size_t labelOff) { return labelOff; }
size_t labelToPatchOffset(CodeOffsetLabel label) {
return label.offset();
}
static uint8_t* PatchableJumpAddress(JitCode* code, uint32_t index) {
return code->raw() + index;
}
@ -325,7 +333,6 @@ class Assembler : public vixl::Assembler
static void TraceJumpRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
static void TraceDataRelocations(JSTracer* trc, JitCode* code, CompactBufferReader& reader);
static int32_t ExtractCodeLabelOffset(uint8_t* code);
static void PatchInstructionImmediate(uint8_t* code, PatchedImmPtr imm);
static void FixupNurseryObjects(JSContext* cx, JitCode* code, CompactBufferReader& reader,

View File

@ -797,6 +797,9 @@ class AssemblerMIPSShared : public AssemblerShared
bool isFinished;
public:
void finish();
bool asmMergeWith(const AssemblerMIPSShared& other) {
MOZ_CRASH("NYI");
}
void executableCopy(void* buffer);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
@ -1036,9 +1039,12 @@ class AssemblerMIPSShared : public AssemblerShared
return nextOffset().getOffset();
}
void retarget(Label* label, Label* target);
void retargetWithOffset(size_t offset, const LabelBase* label, Label* target) {
MOZ_CRASH("NYI");
}
// See Bind
size_t labelOffsetToPatchOffset(size_t offset) { return offset; }
size_t labelToPatchOffset(CodeOffsetLabel label) { return label.offset(); }
void call(Label* label);
void call(void* target);

View File

@ -239,12 +239,6 @@ Assembler::trace(JSTracer* trc)
}
}
int32_t
Assembler::ExtractCodeLabelOffset(uint8_t* code) {
InstImm* inst = (InstImm*)code;
return Assembler::ExtractLuiOriValue(inst, inst->next());
}
void
Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
{

View File

@ -153,7 +153,6 @@ class Assembler : public AssemblerMIPSShared
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void UpdateBoundsCheck(uint32_t logHeapSize, Instruction* inst);
static int32_t ExtractCodeLabelOffset(uint8_t* code);
}; // Assembler
static const uint32_t NumIntArgRegs = 4;

View File

@ -233,13 +233,6 @@ Assembler::trace(JSTracer* trc)
}
}
int64_t
Assembler::ExtractCodeLabelOffset(uint8_t* code)
{
Instruction* inst = (Instruction*)code;
return Assembler::ExtractLoad64Value(inst);
}
void
Assembler::Bind(uint8_t* rawCode, AbsoluteLabel* label, const void* address)
{

View File

@ -155,7 +155,6 @@ class Assembler : public AssemblerMIPSShared
static void ToggleCall(CodeLocationLabel inst_, bool enabled);
static void UpdateBoundsCheck(uint64_t logHeapSize, Instruction* inst);
static int64_t ExtractCodeLabelOffset(uint8_t* code);
}; // Assembler
static const uint32_t NumIntArgRegs = 8;

View File

@ -124,7 +124,6 @@ class Assembler : public AssemblerShared
static void PatchWrite_NearCall(CodeLocationLabel, CodeLocationLabel) { MOZ_CRASH(); }
static uint32_t PatchWrite_NearCallSize() { MOZ_CRASH(); }
static void PatchInstructionImmediate(uint8_t*, PatchedImmPtr) { MOZ_CRASH(); }
static int32_t ExtractCodeLabelOffset(uint8_t*) { MOZ_CRASH(); }
static void ToggleToJmp(CodeLocationLabel) { MOZ_CRASH(); }
static void ToggleToCmp(CodeLocationLabel) { MOZ_CRASH(); }
@ -189,7 +188,7 @@ class MacroAssemblerNone : public Assembler
void nopAlign(size_t) { MOZ_CRASH(); }
void checkStackAlignment() { MOZ_CRASH(); }
uint32_t currentOffset() { MOZ_CRASH(); }
uint32_t labelOffsetToPatchOffset(uint32_t) { MOZ_CRASH(); }
uint32_t labelToPatchOffset(CodeOffsetLabel) { MOZ_CRASH(); }
CodeOffsetLabel labelForPatch() { MOZ_CRASH(); }
void nop() { MOZ_CRASH(); }

View File

@ -422,6 +422,37 @@ struct AbsoluteLabel : public LabelBase
}
};
class CodeOffsetLabel
{
size_t offset_;
static const size_t NOT_USED = size_t(-1);
public:
explicit CodeOffsetLabel(size_t offset) : offset_(offset) {}
CodeOffsetLabel() : offset_(NOT_USED) {}
size_t offset() const {
MOZ_ASSERT(used());
return offset_;
}
void use(size_t offset) {
MOZ_ASSERT(!used());
offset_ = offset;
MOZ_ASSERT(used());
}
bool used() const {
return offset_ != NOT_USED;
}
void offsetBy(size_t delta) {
MOZ_ASSERT(used());
MOZ_ASSERT(offset_ + delta >= offset_, "no overflow");
offset_ += delta;
}
};
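Illustrative only, using the class defined just above: a CodeOffsetLabel records exactly one patchable position, and offsetBy is what keeps it valid when asmMergeWith concatenates buffers. The names emittedAt and delta are stand-ins for real assembler state (the buffer offset at emission time and the shift applied by the merge):

static size_t PatchOffsetAfterMerge(size_t emittedAt, size_t delta) {
    CodeOffsetLabel patchAt;     // default-constructed: not used yet
    patchAt.use(emittedAt);      // record the single patchable position
    patchAt.offsetBy(delta);     // keep it valid after buffers are merged
    return patchAt.offset();     // == emittedAt + delta
}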
// A code label contains an absolute reference to a point in the code. Thus, it
// cannot be patched until after linking.
// When the source label is resolved into a memory address, this address is
@ -430,23 +461,31 @@ class CodeLabel
{
// The destination position, where the absolute reference gets patched in.
AbsoluteLabel dest_;
CodeOffsetLabel patchAt_;
// The position in the code that the patched reference should point to.
Label src_;
CodeOffsetLabel target_;
public:
CodeLabel()
{ }
explicit CodeLabel(const AbsoluteLabel& dest)
: dest_(dest)
explicit CodeLabel(const CodeOffsetLabel& patchAt)
: patchAt_(patchAt)
{ }
AbsoluteLabel* dest() {
return &dest_;
CodeLabel(const CodeOffsetLabel& patchAt, const CodeOffsetLabel& target)
: patchAt_(patchAt),
target_(target)
{ }
CodeOffsetLabel* patchAt() {
return &patchAt_;
}
Label* src() {
return &src_;
CodeOffsetLabel* target() {
return &target_;
}
void offsetBy(size_t delta) {
patchAt_.offsetBy(delta);
target_.offsetBy(delta);
}
};
@ -484,19 +523,6 @@ class CodeOffsetJump
void fixup(MacroAssembler* masm);
};
class CodeOffsetLabel
{
size_t offset_;
public:
explicit CodeOffsetLabel(size_t offset) : offset_(offset) {}
CodeOffsetLabel() : offset_(0) {}
size_t offset() const {
return offset_;
}
};
// Absolute location of a jump or a label in some generated JitCode block.
// Can also encode a CodeOffset{Jump,Label}, such that the offset is initially
// set and the absolute location later filled in after the final JitCode is
@ -683,6 +709,7 @@ class CallSite : public CallSiteDesc
{ }
void setReturnAddressOffset(uint32_t r) { returnAddressOffset_ = r; }
void offsetReturnAddressBy(int32_t o) { returnAddressOffset_ += o; }
uint32_t returnAddressOffset() const { return returnAddressOffset_; }
// The stackDepth measures the amount of stack space pushed since the
@ -809,6 +836,7 @@ class AsmJSHeapAccess
uint32_t insnOffset() const { return insnOffset_; }
void setInsnOffset(uint32_t insnOffset) { insnOffset_ = insnOffset; }
void offsetInsnOffsetBy(uint32_t offset) { insnOffset_ += offset; }
#if defined(JS_CODEGEN_X86)
void* patchHeapPtrImmAt(uint8_t* code) const { return code + (insnOffset_ + opLength_); }
#endif
@ -991,7 +1019,7 @@ class AssemblerShared
CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
enoughMemory_ &= callsites_.append(CallSiteAndTarget(callsite, targetIndex));
}
const CallSiteAndTargetVector& callSites() const { return callsites_; }
CallSiteAndTargetVector& callSites() { return callsites_; }
void append(AsmJSHeapAccess access) { enoughMemory_ &= asmJSHeapAccesses_.append(access); }
AsmJSHeapAccessVector&& extractAsmJSHeapAccesses() { return Move(asmJSHeapAccesses_); }
@ -1015,6 +1043,37 @@ class AssemblerShared
CodeLabel codeLabel(size_t i) {
return codeLabels_[i];
}
// Merge the other assembler into this one by shifting all of its recorded
// offsets by a delta; the other assembler is invalidated afterwards.
bool asmMergeWith(size_t delta, const AssemblerShared& other) {
size_t i = callsites_.length();
enoughMemory_ &= callsites_.appendAll(other.callsites_);
for (; i < callsites_.length(); i++)
callsites_[i].offsetReturnAddressBy(delta);
i = asmJSHeapAccesses_.length();
enoughMemory_ &= asmJSHeapAccesses_.appendAll(other.asmJSHeapAccesses_);
for (; i < asmJSHeapAccesses_.length(); i++)
asmJSHeapAccesses_[i].offsetInsnOffsetBy(delta);
i = asmJSGlobalAccesses_.length();
enoughMemory_ &= asmJSGlobalAccesses_.appendAll(other.asmJSGlobalAccesses_);
for (; i < asmJSGlobalAccesses_.length(); i++)
asmJSGlobalAccesses_[i].patchAt.offsetBy(delta);
i = asmJSAbsoluteLinks_.length();
enoughMemory_ &= asmJSAbsoluteLinks_.appendAll(other.asmJSAbsoluteLinks_);
for (; i < asmJSAbsoluteLinks_.length(); i++)
asmJSAbsoluteLinks_[i].patchAt.offsetBy(delta);
i = codeLabels_.length();
enoughMemory_ &= codeLabels_.appendAll(other.codeLabels_);
for (; i < codeLabels_.length(); i++)
codeLabels_[i].offsetBy(delta);
return !oom();
}
};
} // namespace jit
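The invariant behind every loop in asmMergeWith is the same: an offset recorded as X in the other assembler lives at X + delta in the merged buffer, where delta is this buffer's size before the append. A toy reduction of that shift, with plain size_t vectors standing in for the real callsite/access/label vectors:

#include <cstddef>
#include <vector>

// Assumption, not the real API: one shift, applied uniformly to every
// kind of recorded offset taken from the merged-in assembler.
static void MergeRecordedOffsets(std::vector<size_t>& mine,
                                 const std::vector<size_t>& theirs,
                                 size_t delta) {
    for (size_t off : theirs)
        mine.push_back(off + delta);  // offset in the merged buffer
}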

View File

@ -1099,6 +1099,19 @@ struct AssemblerBufferWithConstantPools : public AssemblerBuffer<SliceSize, Inst
}
}
bool appendBuffer(const AssemblerBufferWithConstantPools& other) {
if (this->oom())
return false;
// The pools should all have been flushed by now; check that this holds.
MOZ_ASSERT(pool_.numEntries() == 0);
for (Slice* cur = other.getHead(); cur != nullptr; cur = cur->getNext()) {
this->putBytes(cur->length(), &cur->instructions[0]);
if (this->oom())
return false;
}
return true;
}
public:
size_t poolEntryOffset(PoolEntry pe) const {
MOZ_ASSERT(pe.index() < poolEntryCount - pool_.numEntries(),

View File

@ -612,12 +612,9 @@ class Assembler : public AssemblerX86Shared
void mov(Register src, Register dest) {
movq(src, dest);
}
void mov(AbsoluteLabel* label, Register dest) {
MOZ_ASSERT(!label->bound());
// Thread the patch list through the unpatched address word in the
// instruction stream.
masm.movq_i64r(label->prev(), dest.encoding());
label->setPrev(masm.size());
void mov(CodeOffsetLabel* label, Register dest) {
masm.movq_i64r(/* placeholder */ 0, dest.encoding());
label->use(masm.size());
}
void xchg(Register src, Register dest) {
xchgq(src, dest);

View File

@ -32,8 +32,7 @@ MacroAssemblerX64::loadConstantDouble(double d, FloatRegister dest)
// PC-relative addressing. Use "jump" label support code, because we need
// the same PC-relative address patching that jumps use.
JmpSrc j = masm.vmovsd_ripr(dest.encoding());
JmpSrc prev = JmpSrc(dbl->uses.use(j.offset()));
masm.setNextJump(j, prev);
dbl->uses.append(CodeOffsetLabel(j.offset()));
}
void
@ -46,8 +45,7 @@ MacroAssemblerX64::loadConstantFloat32(float f, FloatRegister dest)
return;
// See comment in loadConstantDouble
JmpSrc j = masm.vmovss_ripr(dest.encoding());
JmpSrc prev = JmpSrc(flt->uses.use(j.offset()));
masm.setNextJump(j, prev);
flt->uses.append(CodeOffsetLabel(j.offset()));
}
void
@ -61,8 +59,7 @@ MacroAssemblerX64::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest
return;
MOZ_ASSERT(val->type() == SimdConstant::Int32x4);
JmpSrc j = masm.vmovdqa_ripr(dest.encoding());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
val->uses.append(CodeOffsetLabel(j.offset()));
}
void
@ -76,8 +73,19 @@ MacroAssemblerX64::loadConstantFloat32x4(const SimdConstant&v, FloatRegister des
return;
MOZ_ASSERT(val->type() == SimdConstant::Float32x4);
JmpSrc j = masm.vmovaps_ripr(dest.encoding());
JmpSrc prev = JmpSrc(val->uses.use(j.offset()));
masm.setNextJump(j, prev);
val->uses.append(CodeOffsetLabel(j.offset()));
}
void
MacroAssemblerX64::bindOffsets(const MacroAssemblerX86Shared::UsesVector& uses)
{
for (CodeOffsetLabel use : uses) {
JmpDst dst(currentOffset());
JmpSrc src(use.offset());
// Using linkJump here is safe, as explained in the comment in
// loadConstantDouble.
masm.linkJump(src, dst);
}
}
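What linkJump accomplishes for these loads can be modeled directly: the recorded CodeOffsetLabel marks the end of a RIP-relative instruction, whose trailing 32-bit displacement must equal the distance from that point to the constant. A hypothetical sketch of the patch, not the actual encoder API:

#include <cstdint>
#include <cstring>

// Assumed layout: 'useEnd' is the offset just past the instruction, so the
// rel32 displacement occupies the four bytes ending there, and x86 computes
// the target address relative to the end of the instruction.
static void PatchRipRelative(uint8_t* code, size_t useEnd, size_t constOffset) {
    int32_t disp = int32_t(constOffset - useEnd);  // rel32, from next insn
    memcpy(code + useEnd - sizeof(int32_t), &disp, sizeof(disp));
}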
void
@ -85,26 +93,23 @@ MacroAssemblerX64::finish()
{
if (!doubles_.empty())
masm.haltingAlign(sizeof(double));
for (size_t i = 0; i < doubles_.length(); i++) {
Double& dbl = doubles_[i];
bind(&dbl.uses);
masm.doubleConstant(dbl.value);
for (const Double& d : doubles_) {
bindOffsets(d.uses);
masm.doubleConstant(d.value);
}
if (!floats_.empty())
masm.haltingAlign(sizeof(float));
for (size_t i = 0; i < floats_.length(); i++) {
Float& flt = floats_[i];
bind(&flt.uses);
masm.floatConstant(flt.value);
for (const Float& f : floats_) {
bindOffsets(f.uses);
masm.floatConstant(f.value);
}
// SIMD memory values must be suitably aligned.
if (!simds_.empty())
masm.haltingAlign(SimdMemoryAlignment);
for (size_t i = 0; i < simds_.length(); i++) {
SimdData& v = simds_[i];
bind(&v.uses);
for (const SimdData& v : simds_) {
bindOffsets(v.uses);
switch (v.type()) {
case SimdConstant::Int32x4: masm.int32x4Constant(v.value.asInt32x4()); break;
case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;

View File

@ -32,9 +32,6 @@ struct ImmTag : public Imm32
{ }
};
struct MacroAssemblerX86Shared::PlatformSpecificLabel : public NonAssertingLabel
{};
class MacroAssemblerX64 : public MacroAssemblerX86Shared
{
private:
@ -42,16 +39,14 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
void bindOffsets(const MacroAssemblerX86Shared::UsesVector&);
public:
using MacroAssemblerX86Shared::branch32;
using MacroAssemblerX86Shared::branchTest32;
using MacroAssemblerX86Shared::load32;
using MacroAssemblerX86Shared::store32;
typedef MacroAssemblerX86Shared::Double<> Double;
typedef MacroAssemblerX86Shared::Float<> Float;
typedef MacroAssemblerX86Shared::SimdData<> SimdData;
MacroAssemblerX64()
{
}

View File

@ -162,6 +162,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.push(r14);
CodeLabel returnLabel;
CodeLabel oomReturnLabel;
if (type == EnterJitBaseline) {
// Handle OSR.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
@ -182,7 +183,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.movq(numStackValuesAddr, numStackValues);
// Push return address
masm.mov(returnLabel.dest(), scratch);
masm.mov(returnLabel.patchAt(), scratch);
masm.push(scratch);
// Push previous frame pointer.
@ -270,7 +271,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.mov(framePtr, rsp);
masm.addPtr(Imm32(2 * sizeof(uintptr_t)), rsp);
masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
masm.mov(returnLabel.dest(), scratch);
masm.mov(oomReturnLabel.patchAt(), scratch);
masm.jump(scratch);
masm.bind(&notOsr);
@ -286,8 +287,10 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
if (type == EnterJitBaseline) {
// Baseline OSR will return here.
masm.bind(returnLabel.src());
masm.use(returnLabel.target());
masm.addCodeLabel(returnLabel);
masm.use(oomReturnLabel.target());
masm.addCodeLabel(oomReturnLabel);
}
// Pop arguments and padding from stack.

View File

@ -109,7 +109,7 @@ AssemblerX86Shared::processCodeLabels(uint8_t* rawCode)
{
for (size_t i = 0; i < codeLabels_.length(); i++) {
CodeLabel label = codeLabels_[i];
Bind(rawCode, label.dest(), rawCode + label.src()->offset());
Bind(rawCode, label.patchAt(), rawCode + label.target()->offset());
}
}

View File

@ -398,10 +398,13 @@ class AssemblerX86Shared : public AssemblerShared
}
void executableCopy(void* buffer);
void processCodeLabels(uint8_t* rawCode);
static int32_t ExtractCodeLabelOffset(uint8_t* code) {
return *(uintptr_t*)code;
bool asmMergeWith(const AssemblerX86Shared& other) {
MOZ_ASSERT(other.jumps_.length() == 0);
if (!AssemblerShared::asmMergeWith(masm.size(), other))
return false;
return masm.appendBuffer(other.masm);
}
void processCodeLabels(uint8_t* rawCode);
void copyJumpRelocationTable(uint8_t* dest);
void copyDataRelocationTable(uint8_t* dest);
void copyPreBarrierTable(uint8_t* dest);
@ -435,28 +438,10 @@ class AssemblerX86Shared : public AssemblerShared
void nopAlign(int alignment) {
masm.nopAlign(alignment);
}
void writeCodePointer(AbsoluteLabel* label) {
MOZ_ASSERT(!label->bound());
// Thread the patch list through the unpatched address word in the
// instruction stream.
masm.jumpTablePointer(label->prev());
label->setPrev(masm.size());
}
void writeDoubleConstant(double d, Label* label) {
label->bind(masm.size());
masm.doubleConstant(d);
}
void writeFloatConstant(float f, Label* label) {
label->bind(masm.size());
masm.floatConstant(f);
}
void writeInt32x4Constant(const SimdConstant& v, Label* label) {
label->bind(masm.size());
masm.int32x4Constant(v.asInt32x4());
}
void writeFloat32x4Constant(const SimdConstant& v, Label* label) {
label->bind(masm.size());
masm.float32x4Constant(v.asFloat32x4());
void writeCodePointer(CodeOffsetLabel* label) {
// A CodeOffsetLabel only has one use, so bake in the "end of list" value.
masm.jumpTablePointer(LabelBase::INVALID_OFFSET);
label->use(masm.size());
}
void movl(Imm32 imm32, Register dest) {
masm.movl_i32r(imm32.value, dest.encoding());
@ -938,49 +923,48 @@ class AssemblerX86Shared : public AssemblerShared
}
label->bind(dst.offset());
}
void use(CodeOffsetLabel* label) {
label->use(currentOffset());
}
uint32_t currentOffset() {
return masm.label().offset();
}
// Re-routes pending jumps to a new label.
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
if (!label->used())
return;
bool more;
JmpSrc jmp(label->offset() + baseOffset);
do {
JmpSrc next;
more = masm.nextJump(jmp, &next);
if (target->bound()) {
// The jump can be immediately patched to the correct destination.
masm.linkJump(jmp, JmpDst(target->offset()));
} else {
// Thread the jump list through the unpatched jump targets.
JmpSrc prev(target->use(jmp.offset()));
masm.setNextJump(jmp, prev);
}
jmp = JmpSrc(next.offset() + baseOffset);
} while (more);
}
void retarget(Label* label, Label* target) {
if (label->used()) {
bool more;
JmpSrc jmp(label->offset());
do {
JmpSrc next;
more = masm.nextJump(jmp, &next);
if (target->bound()) {
// The jump can be immediately patched to the correct destination.
masm.linkJump(jmp, JmpDst(target->offset()));
} else {
// Thread the jump list through the unpatched jump targets.
JmpSrc prev = JmpSrc(target->use(jmp.offset()));
masm.setNextJump(jmp, prev);
}
jmp = next;
} while (more);
}
retargetWithOffset(0, label, target);
label->reset();
}
static void Bind(uint8_t* raw, AbsoluteLabel* label, const void* address) {
static void Bind(uint8_t* raw, CodeOffsetLabel* label, const void* address) {
if (label->used()) {
intptr_t src = label->offset();
do {
intptr_t next = reinterpret_cast<intptr_t>(X86Encoding::GetPointer(raw + src));
X86Encoding::SetPointer(raw + src, address);
src = next;
} while (src != AbsoluteLabel::INVALID_OFFSET);
intptr_t offset = label->offset();
X86Encoding::SetPointer(raw + offset, address);
}
label->bind();
}
// See Bind and X86Encoding::setPointer.
size_t labelOffsetToPatchOffset(size_t offset) {
return offset - sizeof(void*);
size_t labelToPatchOffset(CodeOffsetLabel label) {
return label.offset() - sizeof(void*);
}
void ret() {

View File

@ -80,6 +80,15 @@ namespace jit {
oomDetected();
}
bool growByUninitialized(size_t space)
{
if (MOZ_UNLIKELY(!m_buffer.growByUninitialized(space))) {
oomDetected();
return false;
}
return true;
}
bool isAligned(size_t alignment) const
{
return !(m_buffer.length() & (alignment - 1));

View File

@ -3412,6 +3412,15 @@ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, off
{
memcpy(buffer, m_formatter.buffer(), size());
}
bool appendBuffer(const BaseAssembler& other)
{
size_t otherSize = other.size();
size_t formerSize = size();
if (!m_formatter.growByUninitialized(otherSize))
return false;
memcpy((char*)m_formatter.buffer() + formerSize, other.m_formatter.buffer(), otherSize);
return true;
}
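appendBuffer above is a raw byte concatenation: grow the destination without initializing, then copy the source's instruction bytes after the existing ones. A minimal stand-in with std::vector in place of the real buffer type:

#include <cstring>
#include <vector>

static bool AppendBytes(std::vector<unsigned char>& dst,
                        const std::vector<unsigned char>& src) {
    size_t former = dst.size();
    dst.resize(former + src.size());   // growByUninitialized analogue
    if (!src.empty())
        memcpy(dst.data() + former, src.data(), src.size());
    return true;
}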
protected:
static bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(int8_t)value; }
@ -4662,6 +4671,7 @@ threeByteOpImmSimd("vblendps", VEX_PD, OP3_BLENDPS_VpsWpsIb, ESCAPE_3A, imm, off
// Administrative methods:
size_t size() const { return m_buffer.size(); }
bool growByUninitialized(size_t size) { return m_buffer.growByUninitialized(size); }
const unsigned char* buffer() const { return m_buffer.buffer(); }
bool oom() const { return m_buffer.oom(); }
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }

View File

@ -1619,7 +1619,7 @@ CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
MTableSwitch* mir = ool->mir();
masm.haltingAlign(sizeof(void*));
masm.bind(ool->jumpLabel()->src());
masm.use(ool->jumpLabel()->target());
masm.addCodeLabel(*ool->jumpLabel());
for (size_t i = 0; i < mir->numCases(); i++) {
@ -1630,8 +1630,8 @@ CodeGeneratorX86Shared::visitOutOfLineTableSwitch(OutOfLineTableSwitch* ool)
// The entries of the jump table need to be absolute addresses and thus
// must be patched after codegen is finished.
CodeLabel cl;
masm.writeCodePointer(cl.dest());
cl.src()->bind(caseoffset);
masm.writeCodePointer(cl.patchAt());
cl.target()->use(caseoffset);
masm.addCodeLabel(cl);
}
}
@ -1657,7 +1657,7 @@ CodeGeneratorX86Shared::emitTableSwitchDispatch(MTableSwitch* mir, Register inde
addOutOfLineCode(ool, mir);
// Compute the position where the pointer to the right case is stored.
masm.mov(ool->jumpLabel()->dest(), base);
masm.mov(ool->jumpLabel()->patchAt(), base);
Operand pointer = Operand(base, index, ScalePointer);
// Jump to the right case

View File

@ -232,7 +232,7 @@ template void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const BaseIndex& mem,
Register value, Register temp, AnyRegister output);
MacroAssemblerX86Shared::Float<>*
MacroAssemblerX86Shared::Float*
MacroAssemblerX86Shared::getFloat(float f)
{
if (!floatMap_.initialized()) {
@ -245,19 +245,17 @@ MacroAssemblerX86Shared::getFloat(float f)
floatIndex = p->value();
} else {
floatIndex = floats_.length();
enoughMemory_ &= floats_.append(Float<>(f));
enoughMemory_ &= floats_.append(Float(f));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= floatMap_.add(p, f, floatIndex);
if (!enoughMemory_)
return nullptr;
}
Float<>& flt = floats_[floatIndex];
MOZ_ASSERT(!flt.uses.bound());
return &flt;
return &floats_[floatIndex];
}
MacroAssemblerX86Shared::Double<>*
MacroAssemblerX86Shared::Double*
MacroAssemblerX86Shared::getDouble(double d)
{
if (!doubleMap_.initialized()) {
@ -270,19 +268,17 @@ MacroAssemblerX86Shared::getDouble(double d)
doubleIndex = p->value();
} else {
doubleIndex = doubles_.length();
enoughMemory_ &= doubles_.append(Double<>(d));
enoughMemory_ &= doubles_.append(Double(d));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= doubleMap_.add(p, d, doubleIndex);
if (!enoughMemory_)
return nullptr;
}
Double<>& dbl = doubles_[doubleIndex];
MOZ_ASSERT(!dbl.uses.bound());
return &dbl;
return &doubles_[doubleIndex];
}
MacroAssemblerX86Shared::SimdData<>*
MacroAssemblerX86Shared::SimdData*
MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
{
if (!simdMap_.initialized()) {
@ -295,16 +291,83 @@ MacroAssemblerX86Shared::getSimdData(const SimdConstant& v)
index = p->value();
} else {
index = simds_.length();
enoughMemory_ &= simds_.append(SimdData<>(v));
enoughMemory_ &= simds_.append(SimdData(v));
if (!enoughMemory_)
return nullptr;
enoughMemory_ &= simdMap_.add(p, v, index);
if (!enoughMemory_)
return nullptr;
}
SimdData<>& simd = simds_[index];
MOZ_ASSERT(!simd.uses.bound());
return &simd;
return &simds_[index];
}
static bool
AppendShiftedUses(const MacroAssemblerX86Shared::UsesVector& old, size_t delta,
MacroAssemblerX86Shared::UsesVector* vec)
{
for (CodeOffsetLabel use : old) {
use.offsetBy(delta);
if (!vec->append(use))
return false;
}
return true;
}
bool
MacroAssemblerX86Shared::asmMergeWith(const MacroAssemblerX86Shared& other)
{
size_t sizeBefore = masm.size();
if (!Assembler::asmMergeWith(other))
return false;
if (!doubleMap_.initialized() && !doubleMap_.init())
return false;
if (!floatMap_.initialized() && !floatMap_.init())
return false;
if (!simdMap_.initialized() && !simdMap_.init())
return false;
for (const Double& d : other.doubles_) {
size_t index;
if (DoubleMap::AddPtr p = doubleMap_.lookupForAdd(d.value)) {
index = p->value();
} else {
index = doubles_.length();
if (!doubles_.append(Double(d.value)) || !doubleMap_.add(p, d.value, index))
return false;
}
if (!AppendShiftedUses(d.uses, sizeBefore, &doubles_[index].uses))
return false;
}
for (const Float& f : other.floats_) {
size_t index;
if (FloatMap::AddPtr p = floatMap_.lookupForAdd(f.value)) {
index = p->value();
} else {
index = floats_.length();
if (!floats_.append(Float(f.value)) || !floatMap_.add(p, f.value, index))
return false;
}
if (!AppendShiftedUses(f.uses, sizeBefore, &floats_[index].uses))
return false;
}
for (const SimdData& s : other.simds_) {
size_t index;
if (SimdMap::AddPtr p = simdMap_.lookupForAdd(s.value)) {
index = p->value();
} else {
index = simds_.length();
if (!simds_.append(SimdData(s.value)) || !simdMap_.add(p, s.value, index))
return false;
}
if (!AppendShiftedUses(s.uses, sizeBefore, &simds_[index].uses))
return false;
}
return true;
}
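The merge above deduplicates constants across the two assemblers (through the hash maps) while shifting every recorded use by the pre-merge code size (the role AppendShiftedUses plays). A simplified, self-contained model of that combination, using hypothetical container types in place of the real pools:

#include <cstddef>
#include <unordered_map>
#include <vector>

struct PoolSketch {
    std::vector<double> values;
    std::vector<std::vector<size_t>> uses;    // uses[i] patch values[i]
    std::unordered_map<double, size_t> index;

    void mergeFrom(const PoolSketch& other, size_t delta) {
        for (size_t i = 0; i < other.values.size(); i++) {
            // Reuse an existing entry for an equal constant, else add one.
            auto [it, fresh] = index.try_emplace(other.values[i], values.size());
            if (fresh) {
                values.push_back(other.values[i]);
                uses.emplace_back();
            }
            for (size_t use : other.uses[i])
                uses[it->second].push_back(use + delta);  // shifted use site
        }
    }
};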
//{{{ check_macroassembler_style
@ -560,9 +623,9 @@ MacroAssembler::pushFakeReturnAddress(Register scratch)
{
CodeLabel cl;
mov(cl.dest(), scratch);
mov(cl.patchAt(), scratch);
Push(scratch);
bind(cl.src());
use(cl.target());
uint32_t retAddr = currentOffset();
addCodeLabel(cl);

View File

@ -45,49 +45,57 @@ class MacroAssemblerX86Shared : public Assembler
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
protected:
struct PlatformSpecificLabel;
public:
typedef Vector<CodeOffsetLabel, 0, SystemAllocPolicy> UsesVector;
template<class LabelType = PlatformSpecificLabel>
protected:
// For Double, Float and SimdData, define the move ctors explicitly and
// delete the copy ctors, so that MSVC moves these data structures instead
// of copying them.
struct Double {
double value;
LabelType uses;
UsesVector uses;
explicit Double(double value) : value(value) {}
Double(Double&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
explicit Double(const Double&) = delete;
};
// These use SystemAllocPolicy since asm.js releases memory after each
// function is compiled, and these need to live until after all functions
// are compiled.
Vector<Double<PlatformSpecificLabel>, 0, SystemAllocPolicy> doubles_;
Vector<Double, 0, SystemAllocPolicy> doubles_;
typedef HashMap<double, size_t, DefaultHasher<double>, SystemAllocPolicy> DoubleMap;
DoubleMap doubleMap_;
template<class LabelType = PlatformSpecificLabel>
struct Float {
float value;
LabelType uses;
UsesVector uses;
explicit Float(float value) : value(value) {}
Float(Float&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
explicit Float(const Float&) = delete;
};
Vector<Float<PlatformSpecificLabel>, 0, SystemAllocPolicy> floats_;
Vector<Float, 0, SystemAllocPolicy> floats_;
typedef HashMap<float, size_t, DefaultHasher<float>, SystemAllocPolicy> FloatMap;
FloatMap floatMap_;
template<class LabelType = PlatformSpecificLabel>
struct SimdData {
SimdConstant value;
LabelType uses;
UsesVector uses;
explicit SimdData(const SimdConstant& v) : value(v) {}
SimdConstant::Type type() { return value.type(); }
SimdData(SimdData&& other) : value(other.value), uses(mozilla::Move(other.uses)) {}
explicit SimdData(const SimdData&) = delete;
SimdConstant::Type type() const { return value.type(); }
};
Vector<SimdData<PlatformSpecificLabel>, 0, SystemAllocPolicy> simds_;
Vector<SimdData, 0, SystemAllocPolicy> simds_;
typedef HashMap<SimdConstant, size_t, SimdConstant, SystemAllocPolicy> SimdMap;
SimdMap simdMap_;
Float<>* getFloat(float f);
Double<>* getDouble(double d);
SimdData<>* getSimdData(const SimdConstant& v);
Float* getFloat(float f);
Double* getDouble(double d);
SimdData* getSimdData(const SimdConstant& v);
bool asmMergeWith(const MacroAssemblerX86Shared& other);
public:
using Assembler::call;

View File

@ -299,12 +299,10 @@ class Assembler : public AssemblerX86Shared
void mov(Imm32 imm, const Operand& dest) {
movl(imm, dest);
}
void mov(AbsoluteLabel* label, Register dest) {
MOZ_ASSERT(!label->bound());
// Thread the patch list through the unpatched address word in the
// instruction stream.
masm.movl_i32r(label->prev(), dest.encoding());
label->setPrev(masm.size());
void mov(CodeOffsetLabel* label, Register dest) {
// Put a placeholder value in the instruction stream.
masm.movl_i32r(0, dest.encoding());
label->use(masm.size());
}
void mov(Register src, Register dest) {
movl(src, dest);

View File

@ -98,8 +98,8 @@ MacroAssemblerX86::loadConstantDouble(double d, FloatRegister dest)
Double* dbl = getDouble(d);
if (!dbl)
return;
masm.vmovsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.encoding());
dbl->uses.setPrev(masm.size());
masm.vmovsd_mr(nullptr, dest.encoding());
dbl->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -108,8 +108,8 @@ MacroAssemblerX86::addConstantDouble(double d, FloatRegister dest)
Double* dbl = getDouble(d);
if (!dbl)
return;
masm.vaddsd_mr(reinterpret_cast<const void*>(dbl->uses.prev()), dest.encoding(), dest.encoding());
dbl->uses.setPrev(masm.size());
masm.vaddsd_mr(nullptr, dest.encoding(), dest.encoding());
dbl->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -120,8 +120,8 @@ MacroAssemblerX86::loadConstantFloat32(float f, FloatRegister dest)
Float* flt = getFloat(f);
if (!flt)
return;
masm.vmovss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.encoding());
flt->uses.setPrev(masm.size());
masm.vmovss_mr(nullptr, dest.encoding());
flt->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -130,8 +130,8 @@ MacroAssemblerX86::addConstantFloat32(float f, FloatRegister dest)
Float* flt = getFloat(f);
if (!flt)
return;
masm.vaddss_mr(reinterpret_cast<const void*>(flt->uses.prev()), dest.encoding(), dest.encoding());
flt->uses.setPrev(masm.size());
masm.vaddss_mr(nullptr, dest.encoding(), dest.encoding());
flt->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -144,8 +144,8 @@ MacroAssemblerX86::loadConstantInt32x4(const SimdConstant& v, FloatRegister dest
if (!i4)
return;
MOZ_ASSERT(i4->type() == SimdConstant::Int32x4);
masm.vmovdqa_mr(reinterpret_cast<const void*>(i4->uses.prev()), dest.encoding());
i4->uses.setPrev(masm.size());
masm.vmovdqa_mr(nullptr, dest.encoding());
i4->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -158,8 +158,8 @@ MacroAssemblerX86::loadConstantFloat32x4(const SimdConstant& v, FloatRegister de
if (!f4)
return;
MOZ_ASSERT(f4->type() == SimdConstant::Float32x4);
masm.vmovaps_mr(reinterpret_cast<const void*>(f4->uses.prev()), dest.encoding());
f4->uses.setPrev(masm.size());
masm.vmovaps_mr(nullptr, dest.encoding());
f4->uses.append(CodeOffsetLabel(masm.size()));
}
void
@ -167,20 +167,22 @@ MacroAssemblerX86::finish()
{
if (!doubles_.empty())
masm.haltingAlign(sizeof(double));
for (size_t i = 0; i < doubles_.length(); i++) {
CodeLabel cl(doubles_[i].uses);
writeDoubleConstant(doubles_[i].value, cl.src());
addCodeLabel(cl);
for (const Double& d : doubles_) {
CodeOffsetLabel cst(masm.currentOffset());
for (CodeOffsetLabel use : d.uses)
addCodeLabel(CodeLabel(use, cst));
masm.doubleConstant(d.value);
if (!enoughMemory_)
return;
}
if (!floats_.empty())
masm.haltingAlign(sizeof(float));
for (size_t i = 0; i < floats_.length(); i++) {
CodeLabel cl(floats_[i].uses);
writeFloatConstant(floats_[i].value, cl.src());
addCodeLabel(cl);
for (const Float& f : floats_) {
CodeOffsetLabel cst(masm.currentOffset());
for (CodeOffsetLabel use : f.uses)
addCodeLabel(CodeLabel(use, cst));
masm.floatConstant(f.value);
if (!enoughMemory_)
return;
}
@ -188,15 +190,15 @@ MacroAssemblerX86::finish()
// SIMD memory values must be suitably aligned.
if (!simds_.empty())
masm.haltingAlign(SimdMemoryAlignment);
for (size_t i = 0; i < simds_.length(); i++) {
CodeLabel cl(simds_[i].uses);
SimdData& v = simds_[i];
for (const SimdData& v : simds_) {
CodeOffsetLabel cst(masm.currentOffset());
for (CodeOffsetLabel use : v.uses)
addCodeLabel(CodeLabel(use, cst));
switch (v.type()) {
case SimdConstant::Int32x4: writeInt32x4Constant(v.value, cl.src()); break;
case SimdConstant::Float32x4: writeFloat32x4Constant(v.value, cl.src()); break;
case SimdConstant::Int32x4: masm.int32x4Constant(v.value.asInt32x4()); break;
case SimdConstant::Float32x4: masm.float32x4Constant(v.value.asFloat32x4()); break;
default: MOZ_CRASH("unexpected SimdConstant type");
}
addCodeLabel(cl);
if (!enoughMemory_)
return;
}

View File

@ -16,9 +16,6 @@
namespace js {
namespace jit {
struct MacroAssemblerX86Shared::PlatformSpecificLabel : public AbsoluteLabel
{};
class MacroAssemblerX86 : public MacroAssemblerX86Shared
{
private:
@ -26,10 +23,6 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
MacroAssembler& asMasm();
const MacroAssembler& asMasm() const;
typedef MacroAssemblerX86Shared::Double<> Double;
typedef MacroAssemblerX86Shared::Float<> Float;
typedef MacroAssemblerX86Shared::SimdData<> SimdData;
protected:
MoveResolver moveResolver_;

View File

@ -156,6 +156,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.push(esi);
CodeLabel returnLabel;
CodeLabel oomReturnLabel;
if (type == EnterJitBaseline) {
// Handle OSR.
AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
@ -176,7 +177,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.loadPtr(Address(ebp, ARG_JITCODE), jitcode);
// Push return address.
masm.mov(returnLabel.dest(), scratch);
masm.mov(returnLabel.patchAt(), scratch);
masm.push(scratch);
// Push previous frame pointer.
@ -261,7 +262,7 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
masm.mov(framePtr, esp);
masm.addPtr(Imm32(2 * sizeof(uintptr_t)), esp);
masm.moveValue(MagicValue(JS_ION_ERROR), JSReturnOperand);
masm.mov(returnLabel.dest(), scratch);
masm.mov(oomReturnLabel.patchAt(), scratch);
masm.jump(scratch);
masm.bind(&notOsr);
@ -280,8 +281,10 @@ JitRuntime::generateEnterJIT(JSContext* cx, EnterJitType type)
if (type == EnterJitBaseline) {
// Baseline OSR will return here.
masm.bind(returnLabel.src());
masm.use(returnLabel.target());
masm.addCodeLabel(returnLabel);
masm.use(oomReturnLabel.target());
masm.addCodeLabel(oomReturnLabel);
}
// Pop arguments off the stack.