Bug 1240583 - Odin: replace retargetWithOffset (r=bbouvier)

--HG--
extra : commitid : 3AAtDhxBHQl
Luke Wagner 2016-02-08 20:59:04 -06:00
parent e1e92d91ec
commit 0526fa3f73
27 changed files with 272 additions and 243 deletions

View File

@@ -639,20 +639,18 @@ ModuleGenerator::defineInlineStub(Offsets offsets)
return module_->codeRanges.emplaceBack(CodeRange::Inline, offsets);
}
bool
ModuleGenerator::defineInterruptStub(Offsets offsets)
void
ModuleGenerator::defineInterruptExit(uint32_t offset)
{
MOZ_ASSERT(finishedFuncs_);
link_->pod.interruptOffset = offsets.begin;
return module_->codeRanges.emplaceBack(CodeRange::Inline, offsets);
link_->pod.interruptOffset = offset;
}
bool
ModuleGenerator::defineOutOfBoundsStub(Offsets offsets)
void
ModuleGenerator::defineOutOfBoundsExit(uint32_t offset)
{
MOZ_ASSERT(finishedFuncs_);
link_->pod.outOfBoundsOffset = offsets.begin;
return module_->codeRanges.emplaceBack(CodeRange::Inline, offsets);
link_->pod.outOfBoundsOffset = offset;
}
bool
@@ -667,7 +665,7 @@ ModuleGenerator::finish(CacheableCharsVector&& prettyFuncNames,
module_->prettyFuncNames = Move(prettyFuncNames);
if (!GenerateStubs(*this, UsesHeap(module_->heapUsage)))
if (!GenerateStubs(*this))
return false;
masm_.finish();

View File

@@ -219,8 +219,8 @@ class MOZ_STACK_CLASS ModuleGenerator
// Stubs:
bool defineInlineStub(Offsets offsets);
bool defineInterruptStub(Offsets offsets);
bool defineOutOfBoundsStub(Offsets offsets);
void defineInterruptExit(uint32_t offset);
void defineOutOfBoundsExit(uint32_t offset);
// Return a ModuleData object which may be used to construct a Module, the
// StaticLinkData required to call Module::staticallyLink, and the list of

View File

@@ -95,7 +95,7 @@ static const unsigned FramePushedForEntrySP = FramePushedAfterSave + sizeof(void
// function has an ABI derived from its specific signature, so this function
// must map from the ABI of CodePtr to the export's signature's ABI.
static bool
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, bool usesHeap)
GenerateEntry(ModuleGenerator& mg, unsigned exportIndex)
{
MacroAssembler& masm = mg.masm();
const Sig& sig = mg.exportSig(exportIndex);
@@ -131,7 +131,7 @@ GenerateEntry(ModuleGenerator& mg, unsigned exportIndex, bool usesHeap)
// ARM, MIPS/MIPS64 and x64 have a globally-pinned HeapReg (x86 uses immediates in
// effective addresses). Loading the heap register depends on the global
// register already having been loaded.
if (usesHeap)
if (mg.usesHeap())
masm.loadAsmJSHeapRegisterFromGlobalData();
// Put the 'argv' argument into a non-argument/return register so that we
@@ -334,8 +334,7 @@ FillArgumentArray(MacroAssembler& masm, const ValTypeVector& args, unsigned argO
// signature of the import and calls into an appropriate InvokeImport C++
// function, having boxed all the ABI arguments into a homogeneous Value array.
static bool
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLabel,
ProfilingOffsets* offsets)
GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, ProfilingOffsets* offsets)
{
MacroAssembler& masm = mg.masm();
const Sig& sig = *mg.import(importIndex).sig;
@@ -398,11 +397,11 @@ GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLa
switch (sig.ret()) {
case ExprType::Void:
masm.call(SymbolicAddress::InvokeImport_Void);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
break;
case ExprType::I32:
masm.call(SymbolicAddress::InvokeImport_I32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
masm.unboxInt32(argv, ReturnReg);
break;
case ExprType::I64:
@@ -411,7 +410,7 @@ GenerateInterpExitStub(ModuleGenerator& mg, unsigned importIndex, Label* throwLa
MOZ_CRASH("Float32 shouldn't be returned from a FFI");
case ExprType::F64:
masm.call(SymbolicAddress::InvokeImport_F64);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
masm.loadDouble(argv, ReturnDoubleReg);
break;
case ExprType::I32x4:
@@ -441,8 +440,7 @@ static const unsigned MaybeSavedGlobalReg = 0;
// signature of the import and calls into a compatible JIT function,
// having boxed all the ABI arguments into the JIT stack frame layout.
static bool
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
Label* throwLabel, ProfilingOffsets* offsets)
GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, ProfilingOffsets* offsets)
{
MacroAssembler& masm = mg.masm();
const Sig& sig = *mg.import(importIndex).sig;
@@ -647,7 +645,7 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
unsigned nativeFramePushed = masm.framePushed();
AssertStackAlignment(masm, ABIStackAlignment);
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, throwLabel);
masm.branchTestMagic(Assembler::Equal, JSReturnOperand, JumpTarget::Throw);
Label oolConvert;
switch (sig.ret()) {
@@ -677,7 +675,7 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
// Ion code does not respect system callee-saved register conventions so
// reload the heap register.
if (usesHeap)
if (mg.usesHeap())
masm.loadAsmJSHeapRegisterFromGlobalData();
GenerateExitEpilogue(masm, masm.framePushed(), ExitReason::ImportJit, offsets);
@@ -714,12 +712,12 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
switch (sig.ret()) {
case ExprType::I32:
masm.call(SymbolicAddress::CoerceInPlace_ToInt32);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
masm.unboxInt32(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnReg);
break;
case ExprType::F64:
masm.call(SymbolicAddress::CoerceInPlace_ToNumber);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, JumpTarget::Throw);
masm.loadDouble(Address(masm.getStackPointer(), offsetToCoerceArgv), ReturnDoubleReg);
break;
default:
@@ -739,18 +737,32 @@ GenerateJitExitStub(ModuleGenerator& mg, unsigned importIndex, bool usesHeap,
return true;
}
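// Patch every pending jump recorded for |target| so that it lands at the
// assembler's current offset, i.e. on the stub emitted immediately below.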
static void
BindJumps(MacroAssembler& masm, JumpTarget target)
{
for (uint32_t offset : masm.jumpSites()[target]) {
RepatchLabel label;
label.use(offset);
masm.bind(&label);
}
}
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
static bool
GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
GenerateStackOverflowStub(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
if (masm.jumpSites()[JumpTarget::StackOverflow].empty())
return true;
BindJumps(masm, JumpTarget::StackOverflow);
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmStackOverflowLabel());
// If we reach here via the non-profiling prologue, WasmActivation::fp has
// not been updated. To enable stack unwinding from C++, store to it now. If
@@ -769,7 +781,7 @@ GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
// No need to restore the stack; the throw stub pops everything.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::ReportOverRecursed);
masm.jump(throwLabel);
masm.jump(JumpTarget::Throw);
if (masm.oom())
return false;
@@ -782,14 +794,18 @@ GenerateStackOverflowStub(ModuleGenerator& mg, Label* throwLabel)
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel)
GenerateConversionErrorStub(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
if (masm.jumpSites()[JumpTarget::ConversionError].empty())
return true;
BindJumps(masm, JumpTarget::ConversionError);
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnConversionErrorLabel());
// sp can be anything at this point, so ensure it is aligned when calling
// into C++. We unconditionally jump to throw so don't worry about restoring sp.
@@ -798,7 +814,7 @@ GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel)
// OnImpreciseConversion always throws.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnImpreciseConversion);
masm.jump(throwLabel);
masm.jump(JumpTarget::Throw);
if (masm.oom())
return false;
@@ -811,14 +827,19 @@ GenerateConversionErrorStub(ModuleGenerator& mg, Label* throwLabel)
// there are throwing semantics. This stub calls a C++ function to report an
// error and then jumps to the throw stub to pop the activation.
static bool
GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel)
GenerateOutOfBoundsStub(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
// Generate the out-of-bounds stub unconditionally since it may always be
// used by the signal handler.
mg.defineOutOfBoundsExit(masm.currentOffset());
BindJumps(masm, JumpTarget::OutOfBounds);
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(masm.asmOnOutOfBoundsLabel());
// sp can be anything at this point, so ensure it is aligned when calling
// into C++. We unconditionally jump to throw so don't worry about restoring sp.
@@ -827,13 +848,13 @@ GenerateOutOfBoundsStub(ModuleGenerator& mg, Label* throwLabel)
// OnOutOfBounds always throws.
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::OnOutOfBounds);
masm.jump(throwLabel);
masm.jump(JumpTarget::Throw);
if (masm.oom())
return false;
offsets.end = masm.currentOffset();
return mg.defineOutOfBoundsStub(offsets);
return mg.defineInlineStub(offsets);
}
static const LiveRegisterSet AllRegsExceptSP(
@@ -850,11 +871,15 @@ static const LiveRegisterSet AllRegsExceptSP(
// after restoring all registers. To hack around this, push the resumePC on the
// stack so that it can be popped directly into PC.
static bool
GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
GenerateInterruptStub(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
// Generate the interrupt stub unconditionally since it may always be used
// by the signal handler.
mg.defineInterruptExit(masm.currentOffset());
Offsets offsets;
offsets.begin = masm.currentOffset();
@@ -884,7 +909,7 @@ GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
masm.branchIfFalseBool(ReturnReg, JumpTarget::Throw);
// Restore the StackPointer to its position before the call.
masm.moveToStackPtr(ABIArgGenerator::NonVolatileReg);
@@ -922,7 +947,7 @@ GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
masm.addToStackPtr(Imm32(4 * sizeof(intptr_t)));
masm.branchIfFalseBool(ReturnReg, throwLabel);
masm.branchIfFalseBool(ReturnReg, JumpTarget::Throw);
// This will restore stack to the address before the call.
masm.moveToStackPtr(s0);
@@ -965,7 +990,7 @@ GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
masm.assertStackAlignment(ABIStackAlignment);
masm.call(SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, throwLabel);
masm.branchIfFalseBool(ReturnReg, JumpTarget::Throw);
// Restore the machine state to before the interrupt. This will set the pc!
@@ -1005,7 +1030,7 @@ GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
return false;
offsets.end = masm.currentOffset();
return mg.defineInterruptStub(offsets);
return mg.defineInlineStub(offsets);
}
// If an exception is thrown, simply pop all frames (since asm.js does not
@@ -1014,14 +1039,18 @@ GenerateInterruptStub(ModuleGenerator& mg, Label* throwLabel)
// 2. PopRegsInMask to restore the caller's non-volatile registers.
// 3. Return (to CallAsmJS).
static bool
GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
GenerateThrowStub(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
masm.haltingAlign(CodeAlignment);
if (masm.jumpSites()[JumpTarget::Throw].empty())
return true;
BindJumps(masm, JumpTarget::Throw);
Offsets offsets;
offsets.begin = masm.currentOffset();
masm.bind(throwLabel);
// We are about to pop all frames in this WasmActivation. Set fp to null to
// maintain the invariant that fp is either null or pointing to a valid
@@ -1047,50 +1076,39 @@ GenerateThrowStub(ModuleGenerator& mg, Label* throwLabel)
}
bool
wasm::GenerateStubs(ModuleGenerator& mg, bool usesHeap)
wasm::GenerateStubs(ModuleGenerator& mg)
{
MacroAssembler& masm = mg.masm();
for (unsigned i = 0; i < mg.numExports(); i++) {
if (!GenerateEntry(mg, i, usesHeap))
if (!GenerateEntry(mg, i))
return false;
}
for (size_t i = 0; i < mg.numImports(); i++) {
ProfilingOffsets interp;
if (!GenerateInterpExitStub(mg, i, masm.asmThrowLabel(), &interp))
if (!GenerateInterpExitStub(mg, i, &interp))
return false;
ProfilingOffsets jit;
if (!GenerateJitExitStub(mg, i, usesHeap, masm.asmThrowLabel(), &jit))
if (!GenerateJitExitStub(mg, i, &jit))
return false;
if (!mg.defineImport(i, interp, jit))
return false;
}
if (masm.asmStackOverflowLabel()->used()) {
if (!GenerateStackOverflowStub(mg, masm.asmThrowLabel()))
if (!GenerateStackOverflowStub(mg))
return false;
}
if (masm.asmOnConversionErrorLabel()->used()) {
if (!GenerateConversionErrorStub(mg, masm.asmThrowLabel()))
return false;
}
// Generate unconditionally: the out-of-bounds exit may be used later even
// if signal handling isn't used for out-of-bounds at the moment.
if (!GenerateOutOfBoundsStub(mg, masm.asmThrowLabel()))
return false;
// Generate unconditionally: the async interrupt may be taken at any time.
if (!GenerateInterruptStub(mg, masm.asmThrowLabel()))
return false;
if (!GenerateThrowStub(mg, masm.asmThrowLabel()))
return false;
return true;
if (!GenerateConversionErrorStub(mg))
return false;
if (!GenerateOutOfBoundsStub(mg))
return false;
if (!GenerateInterruptStub(mg))
return false;
// The throw stub must go last since the other stubs use it.
return GenerateThrowStub(mg);
}

View File

@@ -25,7 +25,7 @@ namespace js {
namespace wasm {
bool
GenerateStubs(ModuleGenerator& mg, bool usesHeap);
GenerateStubs(ModuleGenerator& mg);
} // namespace wasm
} // namespace js

View File

@@ -20,6 +20,7 @@
#define wasm_types_h
#include "mozilla/DebugOnly.h"
#include "mozilla/EnumeratedArray.h"
#include "mozilla/HashFunctions.h"
#include "mozilla/Move.h"
@@ -556,6 +557,22 @@ enum class SymbolicAddress
void*
AddressOf(SymbolicAddress imm, ExclusiveContext* cx);
// A wasm::JumpTarget represents one of a special set of stubs that can be
// jumped to from any function. Because wasm modules can be larger than the
// range of a plain jump, these potentially out-of-range jumps must be recorded
// and patched specially by the MacroAssembler and ModuleGenerator.
enum class JumpTarget
{
StackOverflow,
OutOfBounds,
ConversionError,
Throw,
Limit
};
typedef mozilla::EnumeratedArray<JumpTarget, JumpTarget::Limit, Uint32Vector> JumpSiteArray;
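// To make the record-then-patch flow concrete, here is a minimal standalone
// sketch (not SpiderMonkey code): std::array stands in for
// mozilla::EnumeratedArray, and MiniAssembler, emitJump and bindAll are
// illustrative names. Jumps to a not-yet-emitted stub record their site per
// target; once the stub's offset is known, every recorded site is patched.
#include <array>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

enum class JumpTarget { StackOverflow, OutOfBounds, ConversionError, Throw, Limit };

struct MiniAssembler {
    std::vector<uint8_t> code;                                    // instruction stream
    std::array<std::vector<uint32_t>, size_t(JumpTarget::Limit)> jumpSites;

    // Emit a jump whose destination is not known yet: record the site and
    // leave a 4-byte placeholder displacement in the stream.
    void emitJump(JumpTarget target) {
        jumpSites[size_t(target)].push_back(uint32_t(code.size()));
        code.insert(code.end(), 4, 0);
    }

    // Called once the stub for |target| has been placed at |stubOffset|:
    // patch each recorded site with the now-known pc-relative displacement.
    void bindAll(JumpTarget target, uint32_t stubOffset) {
        for (uint32_t site : jumpSites[size_t(target)]) {
            int32_t disp = int32_t(stubOffset) - int32_t(site + 4);
            std::memcpy(&code[site], &disp, sizeof(disp));
        }
        jumpSites[size_t(target)].clear();
    }
};

int main() {
    MiniAssembler masm;
    masm.emitJump(JumpTarget::Throw);        // e.g. from one function body
    masm.emitJump(JumpTarget::OutOfBounds);  // e.g. from a bounds check
    masm.bindAll(JumpTarget::OutOfBounds, uint32_t(masm.code.size()));
    // ... out-of-bounds stub bytes would be emitted here ...
    masm.bindAll(JumpTarget::Throw, uint32_t(masm.code.size()));
    // ... throw stub bytes would be emitted here ...
    return 0;
}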
// The CompileArgs struct captures global parameters that affect all wasm code
// generation. It also currently is the single source of truth for whether or
// not to use signal handlers for different purposes.

View File

@@ -8288,12 +8288,10 @@ CodeGenerator::generateAsmJS(wasm::FuncOffsets* offsets)
// pushing framePushed to catch cases with really large frames.
Label onOverflow;
if (!omitOverRecursedCheck()) {
// See comment below.
Label* target = frameSize() > 0 ? &onOverflow : masm.asmStackOverflowLabel();
masm.branchPtr(Assembler::AboveOrEqual,
wasm::SymbolicAddress::StackLimit,
masm.getStackPointer(),
target);
&onOverflow);
}
if (!generateBody())
@@ -8302,13 +8300,17 @@ CodeGenerator::generateAsmJS(wasm::FuncOffsets* offsets)
masm.bind(&returnLabel_);
wasm::GenerateFunctionEpilogue(masm, frameSize(), offsets);
if (onOverflow.used()) {
// The stack overflow stub assumes that only sizeof(AsmJSFrame) bytes have
// been pushed. The overflow check occurs after incrementing by
if (!omitOverRecursedCheck()) {
// The stack overflow stub assumes that only sizeof(AsmJSFrame) bytes
// have been pushed. The overflow check occurs after incrementing by
// framePushed, so pop that before jumping to the overflow exit.
if (frameSize() > 0) {
masm.bind(&onOverflow);
masm.addToStackPtr(Imm32(frameSize()));
masm.jump(masm.asmStackOverflowLabel());
masm.jump(wasm::JumpTarget::StackOverflow);
} else {
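// No frame bytes to pop, so the pending overflow jumps can be forwarded
// straight to the shared StackOverflow stub.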
masm.bindLater(&onOverflow, wasm::JumpTarget::StackOverflow);
}
}
#if defined(JS_ION_PERF)
@@ -10613,7 +10615,7 @@ CodeGenerator::visitAsmJSInterruptCheck(LAsmJSInterruptCheck* lir)
MOZ_ASSERT((sizeof(AsmJSFrame) + masm.framePushed()) % ABIStackAlignment == 0);
masm.call(wasm::SymbolicAddress::HandleExecutionInterrupt);
masm.branchIfFalseBool(ReturnReg, masm.asmThrowLabel());
masm.branchIfFalseBool(ReturnReg, wasm::JumpTarget::Throw);
masm.bind(&rejoin);
}

View File

@@ -2005,21 +2005,6 @@ MacroAssembler::convertTypedOrValueToInt(TypedOrValueRegister src, FloatRegister
}
}
bool
MacroAssembler::asmMergeWith(MacroAssembler& other)
{
size_t sizeBeforeMerge = size();
if (!MacroAssemblerSpecific::asmMergeWith(other))
return false;
retargetWithOffset(sizeBeforeMerge, other.asmStackOverflowLabel(), asmStackOverflowLabel());
retargetWithOffset(sizeBeforeMerge, other.asmOnOutOfBoundsLabel(), asmOnOutOfBoundsLabel());
retargetWithOffset(sizeBeforeMerge, other.asmOnConversionErrorLabel(), asmOnConversionErrorLabel());
retargetWithOffset(sizeBeforeMerge, other.asmThrowLabel(), asmThrowLabel());
return true;
}
void
MacroAssembler::finish()
{

View File

@@ -339,12 +339,6 @@ class MacroAssembler : public MacroAssemblerSpecific
// Labels for handling exceptions and failures.
NonAssertingLabel failureLabel_;
// Asm failure labels
NonAssertingLabel asmStackOverflowLabel_;
NonAssertingLabel asmOnConversionErrorLabel_;
NonAssertingLabel asmOnOutOfBoundsLabel_;
NonAssertingLabel asmThrowLabel_;
public:
MacroAssembler()
: framePushed_(0),
@@ -870,7 +864,8 @@ class MacroAssembler : public MacroAssemblerSpecific
}
// Branches to |label| if |reg| is false. |reg| should be a C++ bool.
void branchIfFalseBool(Register reg, Label* label) {
template <class L>
void branchIfFalseBool(Register reg, L label) {
// Note that C++ bool is only 1 byte, so ignore the higher-order bits.
branchTest32(Assembler::Zero, reg, Imm32(0xFF), label);
}
@@ -1409,32 +1404,6 @@ class MacroAssembler : public MacroAssemblerSpecific
return &failureLabel_;
}
Label* asmStackOverflowLabel() {
return &asmStackOverflowLabel_;
}
const Label* asmStackOverflowLabel() const {
return &asmStackOverflowLabel_;
}
Label* asmOnOutOfBoundsLabel() {
return &asmOnOutOfBoundsLabel_;
}
const Label* asmOnOutOfBoundsLabel() const {
return &asmOnOutOfBoundsLabel_;
}
Label* asmOnConversionErrorLabel() {
return &asmOnConversionErrorLabel_;
}
const Label* asmOnConversionErrorLabel() const {
return &asmOnConversionErrorLabel_;
}
Label* asmThrowLabel() {
return &asmThrowLabel_;
}
const Label* asmThrowLabel() const {
return &asmThrowLabel_;
}
bool asmMergeWith(MacroAssembler& masm);
void finish();
void link(JitCode* code);

View File

@@ -2367,6 +2367,15 @@ Assembler::as_b(Label* l, Condition c)
return ret;
}
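// Branch to a JumpTarget: emit the branch against a temporary label, then
// hand its uses to bindLater for the stub generator to patch.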
BufferOffset
Assembler::as_b(wasm::JumpTarget target, Condition c)
{
Label l;
BufferOffset ret = as_b(&l, c);
bindLater(&l, target);
return ret;
}
BufferOffset
Assembler::as_b(BOffImm off, Condition c, BufferOffset inst)
{
@@ -2786,6 +2795,18 @@ Assembler::bind(Label* label, BufferOffset boff)
label->bind(nextOffset().getOffset());
}
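// Rather than binding |label| here, record each of its pending uses as a
// jump site for |target|; BindJumps patches them when the stub is emitted.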
void
Assembler::bindLater(Label* label, wasm::JumpTarget target)
{
if (label->used()) {
BufferOffset b(label);
do {
append(target, b.getOffset());
} while (nextLink(b, &b));
}
label->reset();
}
void
Assembler::bind(RepatchLabel* label)
{
@@ -2853,40 +2874,6 @@ Assembler::retarget(Label* label, Label* target)
}
void
Assembler::retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target)
{
if (!label->used())
return;
MOZ_ASSERT(!target->bound());
bool more;
BufferOffset labelBranchOffset(label->offset() + baseOffset);
do {
BufferOffset next;
more = nextLink(labelBranchOffset, &next);
Instruction branch = *editSrc(labelBranchOffset);
Condition c = branch.extractCond();
int32_t prev = target->use(labelBranchOffset.getOffset());
MOZ_RELEASE_ASSERT(prev == Label::INVALID_OFFSET || unsigned(prev) < size());
BOffImm newOffset;
if (prev != Label::INVALID_OFFSET)
newOffset = BOffImm(prev);
if (branch.is<InstBImm>())
as_b(newOffset, c, labelBranchOffset);
else if (branch.is<InstBLImm>())
as_bl(newOffset, c, labelBranchOffset);
else
MOZ_CRASH("crazy fixup!");
labelBranchOffset = BufferOffset(next.getOffset() + baseOffset);
} while (more);
}
static int stopBKPT = -1;
void
Assembler::as_bkpt()

View File

@@ -1597,6 +1597,7 @@ class Assembler : public AssemblerShared
BufferOffset as_b(BOffImm off, Condition c, Label* documentation = nullptr);
BufferOffset as_b(Label* l, Condition c = Always);
BufferOffset as_b(wasm::JumpTarget target, Condition c = Always);
BufferOffset as_b(BOffImm off, Condition c, BufferOffset inst);
// blx can go to either an immediate or a register. When blx'ing to a
@@ -1704,10 +1705,10 @@ class Assembler : public AssemblerShared
bool nextLink(BufferOffset b, BufferOffset* next);
void bind(Label* label, BufferOffset boff = BufferOffset());
void bind(RepatchLabel* label);
void bindLater(Label* label, wasm::JumpTarget target);
uint32_t currentOffset() {
return nextOffset().getOffset();
}
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target);
void retarget(Label* label, Label* target);
// I'm going to pretend this doesn't exist for now.
void retarget(Label* label, void* target, Relocation::Kind reloc);

View File

@@ -2181,7 +2181,7 @@ CodeGeneratorARM::visitAsmJSLoadHeap(LAsmJSLoadHeap* ins)
} else {
Register d = ToRegister(ins->output());
if (mir->isAtomicAccess())
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
else
masm.ma_mov(Imm32(0), d, Assembler::AboveOrEqual);
masm.ma_dataTransferN(IsLoad, size, isSigned, HeapReg, ptrReg, d, Offset, Assembler::Below);
@@ -2255,7 +2255,7 @@ CodeGeneratorARM::visitAsmJSStoreHeap(LAsmJSStoreHeap* ins)
masm.ma_vstr(vd, HeapReg, ptrReg, 0, 0, Assembler::Below);
} else {
if (mir->isAtomicAccess())
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
masm.ma_dataTransferN(IsStore, size, isSigned, HeapReg, ptrReg,
ToRegister(ins->value()), Offset, Assembler::Below);
}
@@ -2280,7 +2280,7 @@ CodeGeneratorARM::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
}
masm.compareExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
srcAddr, oldval, newval, InvalidReg,
@@ -2326,7 +2326,7 @@ CodeGeneratorARM::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
}
masm.atomicExchangeToTypedIntArray(vt == Scalar::Uint32 ? Scalar::Int32 : vt,
@@ -2377,7 +2377,7 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
}
if (value->isConstant())
@@ -2412,7 +2412,7 @@ CodeGeneratorARM::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
if (mir->needsBoundsCheck()) {
BufferOffset bo = masm.ma_BoundsCheck(ptrReg);
maybeCmpOffset = bo.getOffset();
masm.ma_b(masm.asmOnOutOfBoundsLabel(), Assembler::AboveOrEqual);
masm.ma_b(wasm::JumpTarget::OutOfBounds, Assembler::AboveOrEqual);
}
if (value->isConstant())

View File

@@ -1404,6 +1404,12 @@ MacroAssemblerARM::ma_b(Label* dest, Assembler::Condition c)
return as_b(dest, c);
}
BufferOffset
MacroAssemblerARM::ma_b(wasm::JumpTarget target, Assembler::Condition c)
{
return as_b(target, c);
}
void
MacroAssemblerARM::ma_bx(Register dest, Assembler::Condition c)
{

View File

@@ -323,6 +323,7 @@ class MacroAssemblerARM : public Assembler
// Branches when done from within arm-specific code.
BufferOffset ma_b(Label* dest, Condition c = Always);
BufferOffset ma_b(wasm::JumpTarget target, Condition c = Always);
void ma_b(void* target, Condition c = Always);
void ma_bx(Register dest, Condition c = Always);
@@ -608,6 +609,9 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_ldr(addr, scratch);
ma_bx(scratch);
}
void jump(wasm::JumpTarget target) {
as_b(target);
}
void negl(Register reg) {
ma_neg(reg, reg, SetCC);
@@ -865,8 +869,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
cond = testNumber(cond, t);
ma_b(label, cond);
}
template <typename T>
void branchTestMagic(Condition cond, const T& t, Label* label) {
template <typename T, class L>
void branchTestMagic(Condition cond, const T& t, L label) {
cond = testMagic(cond, t);
ma_b(label, cond);
}
@@ -891,7 +895,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
Condition c = testStringTruthy(truthy, value);
ma_b(label, c);
}
void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Register rhs, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
// x86 likes test foo, foo rather than cmp foo, #0.
// Convert the former into the latter.
@@ -901,7 +906,8 @@ class MacroAssemblerARMCompat : public MacroAssemblerARM
ma_tst(lhs, rhs);
ma_b(label, cond);
}
void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Imm32 imm, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
ma_tst(lhs, imm);
ma_b(label, cond);

View File

@@ -213,6 +213,9 @@ class Assembler : public vixl::Assembler
void bind(Label* label) { bind(label, nextOffset()); }
void bind(Label* label, BufferOffset boff);
void bind(RepatchLabel* label);
void bindLater(Label* label, wasm::JumpTarget target) {
MOZ_CRASH("NYI");
}
bool oom() const {
return AssemblerShared::oom() ||
@@ -263,9 +266,6 @@
}
void retarget(Label* cur, Label* next);
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
MOZ_CRASH("NYI");
}
// The buffer is about to be linked. Ensure any constant pools or
// excess bookkeeping has been flushed to the instruction stream.

View File

@@ -556,6 +556,14 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
Fcvt(ARMFPRegister(dest, 32), ARMFPRegister(src, 64));
}
using vixl::MacroAssembler::B;
void B(wasm::JumpTarget) {
MOZ_CRASH("NYI");
}
void B(wasm::JumpTarget, Condition cond) {
MOZ_CRASH("NYI");
}
void branchTruncateDouble(FloatRegister src, Register dest, Label* fail) {
vixl::UseScratchRegisterScope temps(this);
const ARMRegister scratch64 = temps.AcquireX();
@@ -756,6 +764,9 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
loadPtr(addr, ip0);
Br(vixl::ip0);
}
void jump(wasm::JumpTarget target) {
MOZ_CRASH("NYI");
}
void align(int alignment) {
armbuffer_.align(alignment);
@@ -1410,7 +1421,8 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
branch32(cond, scratch32.asUnsized(), rhs, label);
}
void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Register rhs, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
// x86 prefers |test foo, foo| to |cmp foo, #0|.
// Convert the former to the latter for ARM.
@@ -1420,7 +1432,8 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
test32(lhs, rhs);
B(label, cond);
}
void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Imm32 imm, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
test32(lhs, imm);
B(label, cond);
@@ -1754,8 +1767,8 @@ class MacroAssemblerCompat : public vixl::MacroAssembler
Condition c = testPrimitive(cond, t);
B(label, c);
}
template <typename T>
void branchTestMagic(Condition cond, const T& t, Label* label) {
template <typename T, typename L>
void branchTestMagic(Condition cond, const T& t, L label) {
Condition c = testMagic(cond, t);
B(label, c);
}

View File

@@ -1326,6 +1326,12 @@ AssemblerMIPSShared::bind(Label* label, BufferOffset boff)
label->bind(dest.getOffset());
}
void
AssemblerMIPSShared::bindLater(Label* label, wasm::JumpTarget target)
{
MOZ_CRASH("NYI");
}
void
AssemblerMIPSShared::retarget(Label* label, Label* target)
{
@@ -1362,28 +1368,6 @@ AssemblerMIPSShared::retarget(Label* label, Label* target)
label->reset();
}
void
AssemblerMIPSShared::retargetWithOffset(size_t baseOffset, const LabelBase* label, Label* target)
{
if (!label->used())
return;
MOZ_ASSERT(!target->bound());
int32_t next;
BufferOffset labelBranchOffset(label->offset() + baseOffset);
do {
Instruction* inst = editSrc(labelBranchOffset);
int32_t prev = target->use(labelBranchOffset.getOffset());
MOZ_RELEASE_ASSERT(prev == Label::INVALID_OFFSET || unsigned(prev) < size());
next = inst[1].encode();
inst[1].setData(prev);
labelBranchOffset = BufferOffset(next + baseOffset);
} while (next != LabelBase::INVALID_OFFSET);
}
void dbg_break() {}
void
AssemblerMIPSShared::as_break(uint32_t code)

View File

@@ -1054,6 +1054,7 @@ class AssemblerMIPSShared : public AssemblerShared
// label operations
void bind(Label* label, BufferOffset boff = BufferOffset());
void bindLater(Label* label, wasm::JumpTarget target);
virtual void bind(InstImm* inst, uintptr_t branch, uintptr_t target) = 0;
virtual void Bind(uint8_t* rawCode, CodeOffset* label, const void* address) = 0;
void bind(CodeOffset* label) {
@@ -1063,7 +1064,6 @@ class AssemblerMIPSShared : public AssemblerShared
return nextOffset().getOffset();
}
void retarget(Label* label, Label* target);
void retargetWithOffset(size_t baseOffset, const LabelBase* label, Label* target);
// See Bind
size_t labelToPatchOffset(CodeOffset label) { return label.offset(); }

View File

@@ -173,7 +173,6 @@ class MacroAssemblerNone : public Assembler
size_t numCodeLabels() const { MOZ_CRASH(); }
CodeLabel codeLabel(size_t) { MOZ_CRASH(); }
void retargetWithOffset(size_t, const LabelBase*, LabelBase*) { MOZ_CRASH(); }
bool asmMergeWith(const MacroAssemblerNone&) { MOZ_CRASH(); }
void trace(JSTracer*) { MOZ_CRASH(); }
@@ -192,6 +191,7 @@ class MacroAssemblerNone : public Assembler
void flushBuffer() { MOZ_CRASH(); }
template <typename T> void bind(T) { MOZ_CRASH(); }
void bindLater(Label*, wasm::JumpTarget) { MOZ_CRASH(); }
template <typename T> void j(Condition, T) { MOZ_CRASH(); }
template <typename T> void jump(T) { MOZ_CRASH(); }
void haltingAlign(size_t) { MOZ_CRASH(); }
@@ -245,7 +245,7 @@ class MacroAssemblerNone : public Assembler
template <typename T, typename S> void cmp32Set(Condition, T, S, Register) { MOZ_CRASH(); }
template <typename T, typename S> void branch32(Condition, T, S, Label*) { MOZ_CRASH(); }
template <typename T, typename S> void branchTest32(Condition, T, S, Label*) { MOZ_CRASH(); }
template <typename T, typename S, typename L> void branchTest32(Condition, T, S, L) { MOZ_CRASH(); }
template <typename T, typename S> void branchAdd32(Condition, T, S, Label*) { MOZ_CRASH(); }
template <typename T, typename S> void branchSub32(Condition, T, S, Label*) { MOZ_CRASH(); }
template <typename T, typename S> void branchPtr(Condition, T, S, Label*) { MOZ_CRASH(); }
@@ -369,7 +369,7 @@ class MacroAssemblerNone : public Assembler
template <typename T> void branchTestNumber(Condition, T, Label*) { MOZ_CRASH(); }
template <typename T> void branchTestGCThing(Condition, T, Label*) { MOZ_CRASH(); }
template <typename T> void branchTestPrimitive(Condition, T, Label*) { MOZ_CRASH(); }
template <typename T> void branchTestMagic(Condition, T, Label*) { MOZ_CRASH(); }
template <typename T, typename L> void branchTestMagic(Condition, T, L) { MOZ_CRASH(); }
template <typename T> void branchTestMagicValue(Condition, T, JSWhyMagic, Label*) { MOZ_CRASH(); }
void boxDouble(FloatRegister, ValueOperand) { MOZ_CRASH(); }
void boxNonDouble(JSValueType, Register, ValueOperand) { MOZ_CRASH(); }

View File

@@ -708,6 +708,7 @@ struct AsmJSInternalCallee
class AssemblerShared
{
wasm::CallSiteAndTargetVector callsites_;
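// Offsets of pending jumps to each wasm::JumpTarget, merged across
// assemblers and patched once the target stubs are emitted.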
wasm::JumpSiteArray jumpsites_;
wasm::HeapAccessVector heapAccesses_;
Vector<AsmJSGlobalAccess, 0, SystemAllocPolicy> asmJSGlobalAccesses_;
Vector<AsmJSAbsoluteAddress, 0, SystemAllocPolicy> asmJSAbsoluteAddresses_;
@@ -740,16 +741,21 @@ class AssemblerShared
return embedsNurseryPointers_;
}
void append(const wasm::CallSiteDesc& desc, CodeOffset label, size_t framePushed,
void append(const wasm::CallSiteDesc& desc, CodeOffset retAddr, size_t framePushed,
uint32_t targetIndex = wasm::CallSiteAndTarget::NOT_INTERNAL)
{
// framePushed does not include sizeof(AsmJSFrame), so add it in here (see
// CallSite::stackDepth).
wasm::CallSite callsite(desc, label.offset(), framePushed + sizeof(AsmJSFrame));
wasm::CallSite callsite(desc, retAddr.offset(), framePushed + sizeof(AsmJSFrame));
enoughMemory_ &= callsites_.append(wasm::CallSiteAndTarget(callsite, targetIndex));
}
wasm::CallSiteAndTargetVector& callSites() { return callsites_; }
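// Record one jump site for |target|; sites accumulate until the matching
// stub is generated and binds them all.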
void append(wasm::JumpTarget target, uint32_t offset) {
enoughMemory_ &= jumpsites_[target].append(offset);
}
const wasm::JumpSiteArray& jumpSites() const { return jumpsites_; }
void append(wasm::HeapAccess access) { enoughMemory_ &= heapAccesses_.append(access); }
wasm::HeapAccessVector&& extractHeapAccesses() { return Move(heapAccesses_); }
@@ -781,6 +787,14 @@ class AssemblerShared
for (; i < callsites_.length(); i++)
callsites_[i].offsetReturnAddressBy(delta);
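// Merge the other assembler's jump sites, rebasing every recorded offset
// by |delta|, the amount the appended code is shifted.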
for (wasm::JumpTarget target : mozilla::MakeEnumeratedRange(wasm::JumpTarget::Limit)) {
wasm::Uint32Vector& offsets = jumpsites_[target];
i = offsets.length();
enoughMemory_ &= offsets.appendAll(other.jumpsites_[target]);
for (; i < offsets.length(); i++)
offsets[i] += delta;
}
i = heapAccesses_.length();
enoughMemory_ &= heapAccesses_.appendAll(other.heapAccesses_);
for (; i < heapAccesses_.length(); i++)

View File

@@ -575,7 +575,7 @@ CodeGeneratorX64::visitAsmJSCompareExchangeHeap(LAsmJSCompareExchangeHeap* ins)
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
masm.compareExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
@@ -609,7 +609,7 @@ CodeGeneratorX64::visitAsmJSAtomicExchangeHeap(LAsmJSAtomicExchangeHeap* ins)
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ToRegister(ptr), Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
masm.atomicExchangeToTypedIntArray(accessType == Scalar::Uint32 ? Scalar::Int32 : accessType,
@@ -643,7 +643,7 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeap(LAsmJSAtomicBinopHeap* ins)
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();
if (value->isConstant()) {
@@ -686,7 +686,7 @@ CodeGeneratorX64::visitAsmJSAtomicBinopHeapForEffect(LAsmJSAtomicBinopHeapForEff
uint32_t maybeCmpOffset = wasm::HeapAccess::NoLengthCheck;
if (mir->needsBoundsCheck()) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-mir->endOffset())).offset();
masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
uint32_t before = masm.size();

View File

@@ -977,8 +977,8 @@ class MacroAssemblerX64 : public MacroAssemblerX86Shared
cond = testPrimitive(cond, t);
j(cond, label);
}
template <typename T>
void branchTestMagic(Condition cond, const T& t, Label* label) {
template <typename T, class L>
void branchTestMagic(Condition cond, const T& t, L label) {
cond = testMagic(cond, t);
j(cond, label);
}

View File

@@ -885,6 +885,17 @@ class AssemblerX86Shared : public AssemblerShared
void j(Condition cond, RepatchLabel* label) { jSrc(cond, label); }
void jmp(RepatchLabel* label) { jmpSrc(label); }
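// JumpTarget variants: emit the jump against a temporary label, then let
// bindLater transfer its uses onto the target's jump-site list.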
void j(Condition cond, wasm::JumpTarget target) {
Label l;
j(cond, &l);
bindLater(&l, target);
}
void jmp(wasm::JumpTarget target) {
Label l;
jmp(&l);
bindLater(&l, target);
}
void jmp(const Operand& op) {
switch (op.kind()) {
case Operand::MEM_REG_DISP:
@@ -915,6 +926,15 @@
}
label->bind(dst.offset());
}
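// Instead of binding now, record every pending use of |label| as a jump
// site for |target|, leaving the actual patching to the stub generator.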
void bindLater(Label* label, wasm::JumpTarget target) {
if (label->used()) {
JmpSrc jmp(label->offset());
do {
append(target, jmp.offset());
} while (masm.nextJump(jmp, &jmp));
}
label->reset();
}
void bind(RepatchLabel* label) {
JmpDst dst(masm.label());
if (label->used()) {
@@ -931,11 +951,11 @@
}
// Re-routes pending jumps to a new label.
void retargetWithOffset(size_t baseOffset, const LabelBase* label, LabelBase* target) {
void retarget(Label* label, Label* target) {
if (!label->used())
return;
bool more;
JmpSrc jmp(label->offset() + baseOffset);
JmpSrc jmp(label->offset());
do {
JmpSrc next;
more = masm.nextJump(jmp, &next);
@@ -947,11 +967,8 @@
JmpSrc prev(target->use(jmp.offset()));
masm.setNextJump(jmp, prev);
}
jmp = JmpSrc(next.offset() + baseOffset);
jmp = JmpSrc(next.offset());
} while (more);
}
void retarget(Label* label, Label* target) {
retargetWithOffset(0, label, target);
label->reset();
}

View File

@@ -338,7 +338,10 @@ CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck)
MOZ_ASSERT(oolCheck->offset() != 0,
"An access without a constant offset doesn't need a separate OffsetBoundsCheck");
masm.cmp32(oolCheck->ptrReg(), Imm32(-uint32_t(oolCheck->offset())));
masm.j(Assembler::Below, oolCheck->outOfBounds());
if (oolCheck->maybeOutOfBounds())
masm.j(Assembler::Below, oolCheck->maybeOutOfBounds());
else
masm.j(Assembler::Below, wasm::JumpTarget::OutOfBounds);
#ifdef JS_CODEGEN_X64
// In order to get the offset to wrap properly, we must sign-extend the
@@ -353,7 +356,7 @@ CodeGeneratorX86Shared::visitOffsetBoundsCheck(OffsetBoundsCheck* oolCheck)
uint32_t
CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* access,
const MInstruction* mir,
Register ptr, Label* fail)
Register ptr, Label* maybeFail)
{
// Emit a bounds-checking branch for |access|.
@@ -366,8 +369,8 @@ CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* acces
// this case, we need a second branch, which we emit out of line since it's
// unlikely to be needed in normal programs.
if (access->offset() != 0) {
OffsetBoundsCheck* oolCheck = new(alloc()) OffsetBoundsCheck(fail, ptr, access->offset());
fail = oolCheck->entry();
auto oolCheck = new(alloc()) OffsetBoundsCheck(maybeFail, ptr, access->offset());
maybeFail = oolCheck->entry();
pass = oolCheck->rejoin();
addOutOfLineCode(oolCheck, mir);
}
@@ -378,7 +381,10 @@ CodeGeneratorX86Shared::emitAsmJSBoundsCheckBranch(const MAsmJSHeapAccess* acces
// (heapLength - access->endOffset()), allowing us to test whether the end
// of the access is beyond the end of the heap.
uint32_t cmpOffset = masm.cmp32WithPatch(ptr, Imm32(-access->endOffset())).offset();
masm.j(Assembler::Above, fail);
if (maybeFail)
masm.j(Assembler::Above, maybeFail);
else
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
if (pass)
masm.bind(pass);
@@ -394,7 +400,7 @@ CodeGeneratorX86Shared::maybeEmitThrowingAsmJSBoundsCheck(const MAsmJSHeapAccess
if (!gen->needsAsmJSBoundsCheckBranch(access))
return wasm::HeapAccess::NoLengthCheck;
return emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), masm.asmOnOutOfBoundsLabel());
return emitAsmJSBoundsCheckBranch(access, mir, ToRegister(ptr), nullptr);
}
uint32_t
@@ -2327,8 +2333,7 @@ CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIn
static const SimdConstant Int32MaxX4 = SimdConstant::SplatX4(2147483647.f);
static const SimdConstant Int32MinX4 = SimdConstant::SplatX4(-2147483648.f);
Label bail;
Label* onConversionError = gen->compilingAsmJS() ? masm.asmOnConversionErrorLabel() : &bail;
Label onConversionError;
FloatRegister input = ool->input();
Register temp = ool->temp();
@@ -2338,18 +2343,20 @@ CodeGeneratorX86Shared::visitOutOfLineSimdFloatToIntCheck(OutOfLineSimdFloatToIn
masm.vcmpleps(Operand(input), scratch, scratch);
masm.vmovmskps(scratch, temp);
masm.cmp32(temp, Imm32(15));
masm.j(Assembler::NotEqual, onConversionError);
masm.j(Assembler::NotEqual, &onConversionError);
masm.loadConstantFloat32x4(Int32MaxX4, scratch);
masm.vcmpleps(Operand(input), scratch, scratch);
masm.vmovmskps(scratch, temp);
masm.cmp32(temp, Imm32(0));
masm.j(Assembler::NotEqual, onConversionError);
masm.j(Assembler::NotEqual, &onConversionError);
masm.jump(ool->rejoin());
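// When compiling asm.js, forward the local conversion-error label to the
// shared ConversionError stub; otherwise bind it here and bail out to Ion.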
if (bail.used()) {
masm.bind(&bail);
if (gen->compilingAsmJS()) {
masm.bindLater(&onConversionError, wasm::JumpTarget::ConversionError);
} else {
masm.bind(&onConversionError);
bailout(ool->ins()->snapshot());
}
}

View File

@@ -53,15 +53,15 @@ class CodeGeneratorX86Shared : public CodeGeneratorShared
// Additional bounds checking for heap accesses with constant offsets.
class OffsetBoundsCheck : public OutOfLineCodeBase<CodeGeneratorX86Shared>
{
Label* outOfBounds_;
Label* maybeOutOfBounds_;
Register ptrReg_;
int32_t offset_;
public:
OffsetBoundsCheck(Label* outOfBounds, Register ptrReg, int32_t offset)
: outOfBounds_(outOfBounds), ptrReg_(ptrReg), offset_(offset)
OffsetBoundsCheck(Label* maybeOutOfBounds, Register ptrReg, int32_t offset)
: maybeOutOfBounds_(maybeOutOfBounds), ptrReg_(ptrReg), offset_(offset)
{}
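// A null maybeOutOfBounds_ means the failure path branches directly to
// wasm::JumpTarget::OutOfBounds instead of a caller-supplied Label.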
Label* outOfBounds() const { return outOfBounds_; }
Label* maybeOutOfBounds() const { return maybeOutOfBounds_; }
Register ptrReg() const { return ptrReg_; }
int32_t offset() const { return offset_; }
void accept(CodeGeneratorX86Shared* codegen) {

View File

@@ -95,14 +95,14 @@ class MacroAssemblerX86Shared : public Assembler
Double* getDouble(double d);
SimdData* getSimdData(const SimdConstant& v);
bool asmMergeWith(const MacroAssemblerX86Shared& other);
public:
using Assembler::call;
MacroAssemblerX86Shared()
{ }
bool asmMergeWith(const MacroAssemblerX86Shared& other);
void compareDouble(DoubleCondition cond, FloatRegister lhs, FloatRegister rhs) {
if (cond & DoubleConditionBitInvert)
vucomisd(lhs, rhs);
@@ -604,12 +604,14 @@ class MacroAssemblerX86Shared : public Assembler
testw(rhs, lhs);
j(cond, label);
}
void branchTest32(Condition cond, Register lhs, Register rhs, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Register rhs, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
test32(lhs, rhs);
j(cond, label);
}
void branchTest32(Condition cond, Register lhs, Imm32 imm, Label* label) {
template <class L>
void branchTest32(Condition cond, Register lhs, Imm32 imm, L label) {
MOZ_ASSERT(cond == Zero || cond == NonZero || cond == Signed || cond == NotSigned);
test32(lhs, imm);
j(cond, label);
@@ -640,6 +642,9 @@ class MacroAssemblerX86Shared : public Assembler
void jump(const Address& addr) {
jmp(Operand(addr));
}
void jump(wasm::JumpTarget target) {
jmp(target);
}
void convertInt32ToDouble(Register src, FloatRegister dest) {
// vcvtsi2sd and friends write only part of their output register, which

View File

@@ -668,7 +668,7 @@ CodeGeneratorX86::asmJSAtomicComputeAddress(Register addrTemp, Register ptrReg,
if (boundsCheck) {
maybeCmpOffset = masm.cmp32WithPatch(ptrReg, Imm32(-endOffset)).offset();
masm.j(Assembler::Above, masm.asmOnOutOfBoundsLabel());
masm.j(Assembler::Above, wasm::JumpTarget::OutOfBounds);
}
// Add in the actual heap pointer explicitly, to avoid opening up

View File

@@ -797,8 +797,8 @@ class MacroAssemblerX86 : public MacroAssemblerX86Shared
cond = testPrimitive(cond, t);
j(cond, label);
}
template <typename T>
void branchTestMagic(Condition cond, const T& t, Label* label) {
template <typename T, class L>
void branchTestMagic(Condition cond, const T& t, L label) {
cond = testMagic(cond, t);
j(cond, label);
}