Bug 1001346 - IonMonkey MIPS: Adding MIPS OdinMonkey code part 1. r=luke

Branislav Rankov 2014-05-29 14:08:16 +02:00
parent 8311e3a117
commit b74b3c6b14
7 changed files with 165 additions and 23 deletions

View File

@@ -151,6 +151,8 @@ class Registers
(1 << Registers::t6) |
(1 << Registers::t7);
// We use this constant to save registers when entering functions. This
// is why $ra is added here even though it is not "Non Volatile".
static const uint32_t NonVolatileMask =
(1 << Registers::s0) |
(1 << Registers::s1) |
@@ -159,7 +161,8 @@ class Registers
(1 << Registers::s4) |
(1 << Registers::s5) |
(1 << Registers::s6) |
(1 << Registers::s7);
(1 << Registers::s7) |
(1 << Registers::ra);
static const uint32_t WrapperMask =
VolatileMask | // = arguments

View File

@@ -30,8 +30,39 @@ ABIArgGenerator::ABIArgGenerator()
ABIArg
ABIArgGenerator::next(MIRType type)
{
MOZ_ASSUME_UNREACHABLE("NYI");
return ABIArg();
switch (type) {
case MIRType_Int32:
case MIRType_Pointer:
Register destReg;
if (GetIntArgReg(usedArgSlots_, &destReg))
current_ = ABIArg(destReg);
else
current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
usedArgSlots_++;
break;
case MIRType_Float32:
case MIRType_Double:
if (!usedArgSlots_) {
current_ = ABIArg(f12);
usedArgSlots_ += 2;
firstArgFloat = true;
} else if (usedArgSlots_ <= 2) {
// NOTE: We always use f14 here. This is not compatible with the
// system ABI, so we will have to introduce some infrastructure
// changes if we ever need to follow the system ABI here.
current_ = ABIArg(f14);
usedArgSlots_ = 4;
} else {
usedArgSlots_ += usedArgSlots_ % 2;
current_ = ABIArg(usedArgSlots_ * sizeof(intptr_t));
usedArgSlots_ += 2;
}
break;
default:
MOZ_ASSUME_UNREACHABLE("Unexpected argument type");
}
return current_;
}
const Register ABIArgGenerator::NonArgReturnVolatileReg0 = t0;
const Register ABIArgGenerator::NonArgReturnVolatileReg1 = t1;
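To make the slot bookkeeping in the new ABIArgGenerator::next() concrete, here is a minimal host-side C++ sketch that mirrors its logic for the example signature (double, int32, double). The types and names are illustrative only, not SpiderMonkey's; it assumes GetIntArgReg maps slots 0..3 to a0..a3 and that pointers are 4 bytes, as on MIPS32.

#include <cstdio>

enum ArgKind { Int, Dbl };
static const unsigned kPtrSize = 4;       // assumed MIPS32 pointer size

int main() {
    ArgKind sig[] = { Dbl, Int, Dbl };    // example signature (double, int32, double)
    unsigned usedArgSlots = 0;
    for (ArgKind kind : sig) {
        if (kind == Int) {
            if (usedArgSlots < 4)          // assumed GetIntArgReg: slots 0..3 -> a0..a3
                std::printf("int arg    -> a%u\n", usedArgSlots);
            else
                std::printf("int arg    -> stack +%u\n", usedArgSlots * kPtrSize);
            usedArgSlots++;
        } else if (usedArgSlots == 0) {
            std::printf("double arg -> f12\n");   // firstArgFloat would be set here
            usedArgSlots = 2;
        } else if (usedArgSlots <= 2) {
            std::printf("double arg -> f14\n");   // always f14, see the NOTE above
            usedArgSlots = 4;
        } else {
            usedArgSlots += usedArgSlots % 2;     // keep doubles 8-byte aligned
            std::printf("double arg -> stack +%u\n", usedArgSlots * kPtrSize);
            usedArgSlots += 2;
        }
    }
    return 0;
}

For (double, int32, double) this prints f12, a2 and stack +16, matching the slot counts the generator tracks.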
@@ -1077,6 +1108,12 @@ Assembler::as_absd(FloatRegister fd, FloatRegister fs)
return writeInst(InstReg(op_cop1, rs_d, zero, fs, fd, ff_abs_fmt).encode());
}
BufferOffset
Assembler::as_negs(FloatRegister fd, FloatRegister fs)
{
return writeInst(InstReg(op_cop1, rs_s, zero, fs, fd, ff_neg_fmt).encode());
}
BufferOffset
Assembler::as_negd(FloatRegister fd, FloatRegister fs)
{
@@ -1215,6 +1252,17 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
inst[1].makeNop();
return;
}
// Generate the long jump for calls because the return address has to be
// the address right after the reserved block.
if (inst[0].encode() == inst_bgezal.encode()) {
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
// There is 1 nop after this.
return;
}
if (BOffImm16::isInRange(offset)) {
bool conditional = (inst[0].encode() != inst_bgezal.encode() &&
inst[0].encode() != inst_beq.encode());
@@ -1230,13 +1278,7 @@ Assembler::bind(InstImm *inst, uint32_t branch, uint32_t target)
return;
}
if (inst[0].encode() == inst_bgezal.encode()) {
// Handle long call.
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
inst[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr).encode();
// There is 1 nop after this.
} else if (inst[0].encode() == inst_beq.encode()) {
if (inst[0].encode() == inst_beq.encode()) {
// Handle long unconditional jump.
addLongJump(BufferOffset(branch));
writeLuiOriInstructions(inst, &inst[1], ScratchRegister, target);
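The reason the call case is now handled before the short-branch path is visible from the shape of the patched block: lui/ori load the target into the scratch register, jalr makes the call, and a nop fills the delay slot. Since MIPS jalr links to the instruction after its delay slot, $ra lands exactly at the end of the reserved block. A small standalone sketch of that arithmetic, using made-up addresses rather than the assembler's real buffer offsets:

#include <cstdio>

int main() {
    const unsigned kInstrSize = 4;
    unsigned blockStart = 0x1000;   // assumed offset of the reserved 4-instruction block
    // Patched block: [0] lui at, hi(target)   [1] ori at, at, lo(target)
    //                [2] jalr at               [3] nop (delay slot)
    unsigned jalrAddr = blockStart + 2 * kInstrSize;
    unsigned linkAddr = jalrAddr + 2 * kInstrSize;   // jalr links past its delay slot
    std::printf("ra = 0x%x, end of block = 0x%x\n",
                linkAddr, blockStart + 4 * kInstrSize);
    return 0;   // both values are 0x1010, which is why long calls are patched this way
}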
@@ -1525,5 +1567,9 @@ Assembler::ToggleCall(CodeLocationLabel inst_, bool enabled)
void Assembler::updateBoundsCheck(uint32_t heapSize, Instruction *inst)
{
MOZ_ASSUME_UNREACHABLE("NYI");
InstImm *i0 = (InstImm *) inst;
InstImm *i1 = (InstImm *) i0->next();
// Replace with new value
Assembler::updateLuiOriValue(i0, i1, heapSize);
}
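updateBoundsCheck now patches the lui/ori pair that materializes the heap size. The following minimal sketch uses hypothetical helpers standing in for updateLuiOriValue, only to illustrate how the 32-bit immediate is split across the two instructions:

#include <cstdio>
#include <cstdint>

static unsigned luiImm(uint32_t value) { return (value >> 16) & 0xffff; }
static unsigned oriImm(uint32_t value) { return value & 0xffff; }

int main() {
    uint32_t heapSize = 0x01000000;   // example: a 16 MiB asm.js heap
    std::printf("lui imm = 0x%04x, ori imm = 0x%04x\n",
                luiImm(heapSize), oriImm(heapSize));
    // After patching, the register reloads (luiImm << 16) | oriImm == heapSize.
    return 0;
}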

View File

@@ -114,6 +114,21 @@ static MOZ_CONSTEXPR_VAR FloatRegister SecondScratchFloatReg = { FloatRegisters:
static MOZ_CONSTEXPR_VAR FloatRegister NANReg = { FloatRegisters::f30 };
// Registers used in the GenerateFFIIonExit Enable Activation block.
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegCallee = t0;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE0 = a0;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE1 = a1;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE2 = a2;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegE3 = a3;
// Registers used in the GenerateFFIIonExit Disable Activation block.
// None of these may be the second scratch register (t8).
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnData = JSReturnReg_Data;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegReturnType = JSReturnReg_Type;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD0 = a0;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD1 = a1;
static MOZ_CONSTEXPR_VAR Register AsmJSIonExitRegD2 = a2;
static MOZ_CONSTEXPR_VAR FloatRegister f0 = {FloatRegisters::f0};
static MOZ_CONSTEXPR_VAR FloatRegister f2 = {FloatRegisters::f2};
static MOZ_CONSTEXPR_VAR FloatRegister f4 = {FloatRegisters::f4};
@@ -925,6 +940,7 @@ class Assembler : public AssemblerShared
BufferOffset as_abss(FloatRegister fd, FloatRegister fs);
BufferOffset as_absd(FloatRegister fd, FloatRegister fs);
BufferOffset as_negs(FloatRegister fd, FloatRegister fs);
BufferOffset as_negd(FloatRegister fd, FloatRegister fs);
BufferOffset as_muls(FloatRegister fd, FloatRegister fs, FloatRegister ft);

View File

@@ -40,16 +40,33 @@ CodeGeneratorMIPS::CodeGeneratorMIPS(MIRGenerator *gen, LIRGraph *graph, MacroAs
bool
CodeGeneratorMIPS::generatePrologue()
{
if (gen->compilingAsmJS()) {
masm.Push(ra);
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameDepth_);
} else {
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();
MOZ_ASSERT(!gen->compilingAsmJS());
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameSize());
masm.checkStackAlignment();
return true;
}
bool
CodeGeneratorMIPS::generateAsmJSPrologue(Label *stackOverflowLabel)
{
JS_ASSERT(gen->compilingAsmJS());
masm.Push(ra);
// The asm.js over-recursed handler wants to be able to assume that SP
// points to the return address, so perform the check after pushing ra but
// before pushing frameDepth.
if (!omitOverRecursedCheck()) {
masm.branchPtr(Assembler::AboveOrEqual,
AsmJSAbsoluteAddress(AsmJSImm_StackLimit),
StackPointer,
stackOverflowLabel);
}
// Note that this automatically sets MacroAssembler::framePushed().
masm.reserveStack(frameDepth_);
masm.checkStackAlignment();
return true;
}
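The ordering in generateAsmJSPrologue() is what makes the over-recursed handler work: $ra is pushed first, the stack-limit branch runs while sp still points at that saved return address, and only then is the frame reserved. A host-side sketch of the resulting sp values (addresses and frame depth are made up for illustration):

#include <cstdio>

int main() {
    const unsigned kWordSize = 4;        // assumed MIPS32 word size
    unsigned sp = 0x7fff0000;            // hypothetical incoming stack pointer
    unsigned frameDepth = 32;            // hypothetical frameDepth_

    sp -= kWordSize;                     // masm.Push(ra): [sp] now holds the return address
    std::printf("after Push(ra):     sp = 0x%x (points at saved ra)\n", sp);

    // The branchPtr stack-limit check happens here, before the frame is
    // reserved, so the over-recursed handler can read the return address at [sp].

    sp -= frameDepth;                    // masm.reserveStack(frameDepth_)
    std::printf("after reserveStack: sp = 0x%x (framePushed = %u)\n", sp, frameDepth);
    return 0;
}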
@@ -1994,7 +2011,7 @@ CodeGeneratorMIPS::visitAsmJSLoadHeap(LAsmJSLoadHeap *ins)
}
masm.bind(&done);
return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
bool
@@ -2070,7 +2087,7 @@ CodeGeneratorMIPS::visitAsmJSStoreHeap(LAsmJSStoreHeap *ins)
}
masm.bind(&rejoin);
return gen->noteHeapAccess(AsmJSHeapAccess(bo.getOffset()));
return masm.append(AsmJSHeapAccess(bo.getOffset()));
}
bool

View File

@@ -111,6 +111,7 @@ class CodeGeneratorMIPS : public CodeGeneratorShared
protected:
bool generatePrologue();
bool generateAsmJSPrologue(Label *stackOverflowLabel);
bool generateEpilogue();
bool generateOutOfLineCode();

View File

@@ -1774,7 +1774,9 @@ MacroAssemblerMIPSCompat::movePtr(ImmPtr imm, Register dest)
void
MacroAssemblerMIPSCompat::movePtr(AsmJSImmPtr imm, Register dest)
{
MOZ_ASSUME_UNREACHABLE("NYI");
enoughMemory_ &= append(AsmJSAbsoluteLink(CodeOffsetLabel(nextOffset().getOffset()),
imm.kind()));
ma_liPatchable(dest, Imm32(-1));
}
void
@@ -2907,6 +2909,14 @@ MacroAssemblerMIPSCompat::storeTypeTag(ImmTag tag, Register base, Register index
as_sw(ScratchRegister, SecondScratchReg, TAG_OFFSET);
}
void
MacroAssemblerMIPS::ma_callIonNoPush(const Register r)
{
// This is a MIPS hack to push the return address during the jalr delay slot.
as_jalr(r);
as_sw(ra, StackPointer, 0);
}
// This macro instruction calls the Ion code and pushes the return address to
// the stack when the stack is aligned.
void
@@ -2929,6 +2939,21 @@ MacroAssemblerMIPS::ma_callIonHalfPush(const Register r)
as_sw(ra, StackPointer, 0);
}
void
MacroAssemblerMIPS::ma_callAndStoreRet(const Register r, uint32_t stackArgBytes)
{
// Note: this function stores the return address to sp[16]. The caller
// must anticipate this by reserving additional space on the stack.
// The ABI does not provide space for a return address, so this function
// stores 'ra' just above the outgoing ABI argument slots.
// This function may only be called if there are 4 or fewer arguments.
JS_ASSERT(stackArgBytes == 4 * sizeof(uintptr_t));
// This is a MIPS hack to push the return address during the jalr delay slot.
as_jalr(r);
as_sw(ra, StackPointer, 4 * sizeof(uintptr_t));
}
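ma_callIonNoPush and ma_callAndStoreRet both exploit the same property: jalr updates $ra before its delay slot executes, so the sw placed in the delay slot stores the freshly computed return address. For ma_callAndStoreRet that store lands just above the four ABI argument words, which is why the caller must reserve exactly 4 * sizeof(uintptr_t) bytes plus the extra slot. A tiny sketch of that layout, assuming MIPS32 sizes:

#include <cstdio>

int main() {
    const unsigned kPtrSize = 4;                 // sizeof(uintptr_t) on MIPS32
    unsigned stackArgBytes = 4 * kPtrSize;       // the only size the assert accepts
    std::printf("ABI argument words: sp[0..%u)\n", stackArgBytes);
    std::printf("return address:     sp[%u]\n", stackArgBytes);
    return 0;   // i.e. arguments at sp[0..16) and ra at sp[16]
}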
void
MacroAssemblerMIPS::ma_call(ImmPtr dest)
{

View File

@@ -301,6 +301,9 @@ class MacroAssemblerMIPS : public Assembler
// calls an ion function, assuming that the stack is currently not 8 byte aligned
void ma_callIonHalfPush(const Register reg);
// calls reg, storing the return address into sp[stackArgBytes]
void ma_callAndStoreRet(const Register reg, uint32_t stackArgBytes);
void ma_call(ImmPtr dest);
void ma_jump(ImmPtr dest);
@@ -396,7 +399,6 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
}
void call(Label *label) {
// for now, assume that it'll be nearby?
ma_bal(label);
}
@@ -418,6 +420,38 @@ class MacroAssemblerMIPSCompat : public MacroAssemblerMIPS
ma_liPatchable(ScratchRegister, Imm32((uint32_t)c->raw()));
ma_callIonHalfPush(ScratchRegister);
}
void appendCallSite(const CallSiteDesc &desc) {
enoughMemory_ &= append(CallSite(desc, currentOffset(), framePushed_));
}
void call(const CallSiteDesc &desc, const Register reg) {
call(reg);
appendCallSite(desc);
}
void call(const CallSiteDesc &desc, Label *label) {
call(label);
appendCallSite(desc);
}
void call(const CallSiteDesc &desc, AsmJSImmPtr imm) {
call(imm);
appendCallSite(desc);
}
void callExit(AsmJSImmPtr imm, uint32_t stackArgBytes) {
movePtr(imm, CallReg);
ma_callAndStoreRet(CallReg, stackArgBytes);
appendCallSite(CallSiteDesc::Exit());
}
void callIonFromAsmJS(const Register reg) {
ma_callIonNoPush(reg);
appendCallSite(CallSiteDesc::Exit());
// The Ion ABI has the callee pop the return address off the stack.
// The asm.js caller assumes that the call leaves sp unchanged, so bump
// the stack.
subPtr(Imm32(sizeof(void*)), StackPointer);
}
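callIonFromAsmJS works around the Ion ABI quirk noted in its comment: ma_callIonNoPush stores $ra at [sp] without moving sp, the Ion epilogue pops that word, and the final subPtr re-extends the stack so the asm.js caller sees sp unchanged. A minimal sketch of that bookkeeping with made-up addresses:

#include <cstdio>

int main() {
    const unsigned kPtrSize = 4;     // assumed sizeof(void*) on MIPS32
    unsigned spBefore = 0x7ffe0000;  // hypothetical sp in the asm.js caller

    unsigned sp = spBefore;          // ma_callIonNoPush: ra stored at [sp], sp untouched
    sp += kPtrSize;                  // Ion epilogue pops the return address
    sp -= kPtrSize;                  // subPtr(Imm32(sizeof(void*)), StackPointer)
    std::printf("sp restored to caller's value: %s\n", sp == spBefore ? "yes" : "no");
    return 0;
}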
void branch(JitCode *c) {
BufferOffset bo = m_buffer.nextOffset();
addPendingJump(bo, ImmPtr(c->raw()), Relocation::JITCODE);