Merge pull request #17799 from unknownbrackets/irjit-lsu
Add ll/sc to IR and x86jit
commit 180bda6f6b
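For background, ll (load linked) and sc (store conditional) are MIPS's atomic read-modify-write pair: ll loads a word and takes a reservation, sc stores only if the reservation is still held and writes a success flag back into rt. The PSP is single-core, so a single flag models the reservation, as the interpreter hunks below confirm. A minimal sketch of the semantics being added, using stand-in names (GuestCPU, ExecLL, ExecSC are illustrative, not PPSSPP types):

    #include <cstdint>

    // Stand-in for MIPSState; only the fields the sketch needs.
    struct GuestCPU {
        uint32_t r[32];   // GPRs
        uint32_t llBit;   // reservation flag: set by ll, cleared by syscalls etc.
        uint32_t *mem;    // flat guest memory, word-indexed for brevity
    };

    // ll rt, imm(rs): load the word and take the reservation.
    static void ExecLL(GuestCPU &cpu, int rt, int rs, int16_t imm) {
        cpu.r[rt] = cpu.mem[(cpu.r[rs] + imm) >> 2];
        cpu.llBit = 1;
    }

    // sc rt, imm(rs): store only if the reservation survived; rt reports 1/0.
    static void ExecSC(GuestCPU &cpu, int rt, int rs, int16_t imm) {
        if (cpu.llBit) {
            cpu.mem[(cpu.r[rs] + imm) >> 2] = cpu.r[rt];
            cpu.r[rt] = 1;
        } else {
            cpu.r[rt] = 0;  // guest code is expected to loop and retry
        }
    }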
@@ -540,6 +540,9 @@ void HLEReturnFromMipsCall() {
 const static u32 deadbeefRegs[12] = {0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF, 0xDEADBEEF};
 inline static void SetDeadbeefRegs()
 {
+    // Not exactly the same, but any time a syscall happens, it should clear ll.
+    currentMIPS->llBit = 0;
+
     if (g_Config.bSkipDeadbeefFilling)
         return;
 
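This hunk is what makes the single-flag model safe: any HLE syscall drops a pending reservation, so sc cannot succeed across a syscall boundary. In terms of the sketch above (illustrative names again):

    // Hypothetical interleaving: a syscall between ll and sc forces sc to fail,
    // so the guest's retry loop reloads and tries again.
    // ExecLL(cpu, 9, 8, 0);    // reservation taken, llBit = 1
    // ...HLE syscall runs, SetDeadbeefRegs() sets llBit = 0...
    // ExecSC(cpu, 9, 8, 0);    // store skipped, r[9] == 0 -> guest retries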
@@ -385,6 +385,12 @@ namespace MIPSComp
         }
     }
 
+    void ArmJit::Comp_StoreSync(MIPSOpcode op) {
+        CONDITIONAL_DISABLE(LSU);
+
+        DISABLE;
+    }
+
     void ArmJit::Comp_Cache(MIPSOpcode op) {
         CONDITIONAL_DISABLE(LSU);
 
@@ -61,6 +61,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override;
+    void Comp_StoreSync(MIPSOpcode op) override;
     void Comp_Cache(MIPSOpcode op) override;
 
     void Comp_RelBranch(MIPSOpcode op) override;
@@ -434,6 +434,12 @@ namespace MIPSComp {
         }
     }
 
+    void Arm64Jit::Comp_StoreSync(MIPSOpcode op) {
+        CONDITIONAL_DISABLE(LSU);
+
+        DISABLE;
+    }
+
     void Arm64Jit::Comp_Cache(MIPSOpcode op) {
         CONDITIONAL_DISABLE(LSU);
 
@@ -62,6 +62,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override;
+    void Comp_StoreSync(MIPSOpcode op) override;
     void Comp_Cache(MIPSOpcode op) override;
 
     void Comp_RelBranch(MIPSOpcode op) override;
@@ -105,6 +105,30 @@ namespace MIPSComp {
         }
     }
 
+    void IRFrontend::Comp_StoreSync(MIPSOpcode op) {
+        CONDITIONAL_DISABLE(LSU);
+
+        int offset = _IMM16;
+        MIPSGPReg rt = _RT;
+        MIPSGPReg rs = _RS;
+        // Note: still does something even if loading to zero.
+
+        CheckMemoryBreakpoint(rs, offset);
+
+        switch (op >> 26) {
+        case 48: // ll
+            ir.Write(IROp::Load32Linked, rt, rs, ir.AddConstant(offset));
+            break;
+
+        case 56: // sc
+            ir.Write(IROp::Store32Conditional, rt, rs, ir.AddConstant(offset));
+            break;
+
+        default:
+            INVALIDOP;
+        }
+    }
+
     void IRFrontend::Comp_Cache(MIPSOpcode op) {
         CONDITIONAL_DISABLE(LSU);
 
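In the switch above, `op >> 26` extracts the 6-bit major opcode of the I-type encoding: 48 (0x30) is ll and 56 (0x38) is sc. A sketch of the field layout, with hypothetical helpers rather than PPSSPP's actual _RS/_RT/_IMM16 macros:

    // I-type MIPS word: [31:26] opcode | [25:21] rs | [20:16] rt | [15:0] imm16
    static inline uint32_t MajorOp(uint32_t op) { return op >> 26; }          // 48 = ll, 56 = sc
    static inline uint32_t Rs(uint32_t op)      { return (op >> 21) & 0x1F; }
    static inline uint32_t Rt(uint32_t op)      { return (op >> 16) & 0x1F; }
    static inline int32_t  Imm16(uint32_t op)   { return (int16_t)(op & 0xFFFF); }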
@@ -18,6 +18,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override;
+    void Comp_StoreSync(MIPSOpcode op) override;
     void Comp_Cache(MIPSOpcode op) override;
 
     void Comp_RelBranch(MIPSOpcode op) override;
@@ -73,6 +73,7 @@ static const IRMeta irMeta[] = {
     { IROp::Load32, "Load32", "GGC" },
     { IROp::Load32Left, "Load32Left", "GGC", IRFLAG_SRC3DST },
     { IROp::Load32Right, "Load32Right", "GGC", IRFLAG_SRC3DST },
+    { IROp::Load32Linked, "Load32Linked", "GGC" },
     { IROp::LoadFloat, "LoadFloat", "FGC" },
     { IROp::LoadVec4, "LoadVec4", "VGC" },
     { IROp::Store8, "Store8", "GGC", IRFLAG_SRC3 },
@@ -80,6 +81,7 @@ static const IRMeta irMeta[] = {
     { IROp::Store32, "Store32", "GGC", IRFLAG_SRC3 },
     { IROp::Store32Left, "Store32Left", "GGC", IRFLAG_SRC3 },
     { IROp::Store32Right, "Store32Right", "GGC", IRFLAG_SRC3 },
+    { IROp::Store32Conditional, "Store32Conditional", "GGC", IRFLAG_SRC3DST },
     { IROp::StoreFloat, "StoreFloat", "FGC", IRFLAG_SRC3 },
     { IROp::StoreVec4, "StoreVec4", "VGC", IRFLAG_SRC3 },
     { IROp::FAdd, "FAdd", "FFF" },
@@ -92,6 +92,7 @@ enum class IROp : u8 {
     Load32,
     Load32Left,
     Load32Right,
+    Load32Linked,
     LoadFloat,
     LoadVec4,
 
@@ -100,6 +101,7 @@ enum class IROp : u8 {
     Store32,
     Store32Left,
     Store32Right,
+    Store32Conditional,
     StoreFloat,
     StoreVec4,
 
@@ -218,6 +218,11 @@ u32 IRInterpret(MIPSState *mips, const IRInst *inst, int count) {
             mips->r[inst->dest] = (mips->r[inst->dest] & destMask) | (mem >> shift);
             break;
         }
+        case IROp::Load32Linked:
+            if (inst->dest != MIPS_REG_ZERO)
+                mips->r[inst->dest] = Memory::ReadUnchecked_U32(mips->r[inst->src1] + inst->constant);
+            mips->llBit = 1;
+            break;
         case IROp::LoadFloat:
             mips->f[inst->dest] = Memory::ReadUnchecked_Float(mips->r[inst->src1] + inst->constant);
             break;
@@ -251,6 +256,16 @@ u32 IRInterpret(MIPSState *mips, const IRInst *inst, int count) {
             Memory::WriteUnchecked_U32(result, addr & 0xfffffffc);
             break;
         }
+        case IROp::Store32Conditional:
+            if (mips->llBit) {
+                Memory::WriteUnchecked_U32(mips->r[inst->src3], mips->r[inst->src1] + inst->constant);
+                if (inst->dest != MIPS_REG_ZERO) {
+                    mips->r[inst->dest] = 1;
+                }
+            } else if (inst->dest != MIPS_REG_ZERO) {
+                mips->r[inst->dest] = 0;
+            }
+            break;
         case IROp::StoreFloat:
             Memory::WriteUnchecked_Float(mips->f[inst->src3], mips->r[inst->src1] + inst->constant);
             break;
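Together, Load32Linked and Store32Conditional give guest code the classic retry loop. Driving the sketch from the top of this page (hypothetical helper, not PR code), an atomic increment behaves like:

    // t0 (r8) holds the address, t1 (r9) the working value / success flag.
    void AtomicIncrement(GuestCPU &cpu, uint32_t addr) {
        do {
            cpu.r[8] = addr;
            ExecLL(cpu, 9, 8, 0);   // t1 = ll 0(t0), llBit = 1
            cpu.r[9] += 1;          // t1++
            ExecSC(cpu, 9, 8, 0);   // sc t1, 0(t0); t1 = 1 on success, 0 on failure
        } while (cpu.r[9] == 0);    // lost the reservation: reload and retry
    }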
@@ -228,7 +228,7 @@ bool RemoveLoadStoreLeftRight(const IRWriter &in, IRWriter &out, const IROptions
     };
 
     auto combineOpposite = [&](IROp matchOp, int matchOff, IROp replaceOp, int replaceOff) {
-        if (!opts.unalignedLoadStore || i + 1 >= n)
+        if (i + 1 >= n)
             return false;
         const IRInst &next = nextOp();
         if (next.op != matchOp || next.dest != inst.dest || next.src1 != inst.src1)
@@ -236,8 +236,40 @@ bool RemoveLoadStoreLeftRight(const IRWriter &in, IRWriter &out, const IROptions
         if (inst.constant + matchOff != next.constant)
             return false;
 
-        // Write out one unaligned op.
-        out.Write(replaceOp, inst.dest, inst.src1, out.AddConstant(inst.constant + replaceOff));
+        if (opts.unalignedLoadStore) {
+            // Write out one unaligned op.
+            out.Write(replaceOp, inst.dest, inst.src1, out.AddConstant(inst.constant + replaceOff));
+        } else if (replaceOp == IROp::Load32) {
+            // We can still combine to a simpler set of two loads.
+            // We start by isolating the address and shift amount.
+
+            // IRTEMP_LR_ADDR = rs + imm
+            out.Write(IROp::AddConst, IRTEMP_LR_ADDR, inst.src1, out.AddConstant(inst.constant + replaceOff));
+            // IRTEMP_LR_SHIFT = (addr & 3) * 8
+            out.Write(IROp::AndConst, IRTEMP_LR_SHIFT, IRTEMP_LR_ADDR, out.AddConstant(3));
+            out.Write(IROp::ShlImm, IRTEMP_LR_SHIFT, IRTEMP_LR_SHIFT, 3);
+            // IRTEMP_LR_ADDR = addr & 0xfffffffc
+            out.Write(IROp::AndConst, IRTEMP_LR_ADDR, IRTEMP_LR_ADDR, out.AddConstant(0xFFFFFFFC));
+            // IRTEMP_LR_VALUE = low_word, dest = high_word
+            out.Write(IROp::Load32, inst.dest, IRTEMP_LR_ADDR, out.AddConstant(0));
+            out.Write(IROp::Load32, IRTEMP_LR_VALUE, IRTEMP_LR_ADDR, out.AddConstant(4));
+
+            // Now we just need to adjust and combine dest and IRTEMP_LR_VALUE.
+            // inst.dest >>= shift (putting its bits in the right spot.)
+            out.Write(IROp::Shr, inst.dest, inst.dest, IRTEMP_LR_SHIFT);
+            // We can't shift by 32, so we compromise by shifting twice.
+            out.Write(IROp::ShlImm, IRTEMP_LR_VALUE, IRTEMP_LR_VALUE, 8);
+            // IRTEMP_LR_SHIFT = 24 - shift
+            out.Write(IROp::Neg, IRTEMP_LR_SHIFT, IRTEMP_LR_SHIFT);
+            out.Write(IROp::AddConst, IRTEMP_LR_SHIFT, IRTEMP_LR_SHIFT, out.AddConstant(24));
+            // IRTEMP_LR_VALUE <<= (24 - shift)
+            out.Write(IROp::Shl, IRTEMP_LR_VALUE, IRTEMP_LR_VALUE, IRTEMP_LR_SHIFT);
+
+            // At this point the values are aligned, and we just merge.
+            out.Write(IROp::Or, inst.dest, inst.dest, IRTEMP_LR_VALUE);
+        } else {
+            return false;
+        }
         // Skip the next one, replaced.
         i++;
         return true;
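When unaligned loads aren't available, the branch above rebuilds the unaligned word from two aligned loads plus shifts. A standalone sketch of the same arithmetic (little-endian, like the PSP; hypothetical function, not PR code), which also shows why the high word is shifted by 8 and then by 24 - shift instead of a single, possibly-undefined shift by 32 - shift:

    #include <cstdint>
    #include <cstring>

    // Read a 32-bit value at an arbitrary address using only aligned loads.
    uint32_t LoadUnaligned32(const uint8_t *base, uint32_t addr) {
        uint32_t shift = (addr & 3) * 8;        // IRTEMP_LR_SHIFT
        uint32_t aligned = addr & 0xFFFFFFFC;   // IRTEMP_LR_ADDR
        uint32_t lo, hi;
        memcpy(&lo, base + aligned, 4);         // dest: word holding the low bytes
        memcpy(&hi, base + aligned + 4, 4);     // IRTEMP_LR_VALUE: the next word
        lo >>= shift;                           // low bytes into position
        hi <<= 8;                               // two shifts totalling 32 - shift;
        hi <<= 24 - shift;                      // each stays below 32 (no UB), and
                                                // shift == 0 correctly zeroes hi
        return lo | hi;
    }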
@@ -572,6 +604,7 @@ bool PropagateConstants(const IRWriter &in, IRWriter &out, const IROptions &opts
         case IROp::Store32:
         case IROp::Store32Left:
         case IROp::Store32Right:
+        case IROp::Store32Conditional:
             if (gpr.IsImm(inst.src1) && inst.src1 != inst.dest) {
                 gpr.MapIn(inst.dest);
                 out.Write(inst.op, inst.dest, 0, out.AddConstant(gpr.GetImm(inst.src1) + inst.constant));
@@ -595,6 +628,7 @@ bool PropagateConstants(const IRWriter &in, IRWriter &out, const IROptions &opts
         case IROp::Load16:
         case IROp::Load16Ext:
         case IROp::Load32:
+        case IROp::Load32Linked:
             if (gpr.IsImm(inst.src1) && inst.src1 != inst.dest) {
                 gpr.MapDirty(inst.dest);
                 out.Write(inst.op, inst.dest, 0, out.AddConstant(gpr.GetImm(inst.src1) + inst.constant));
@@ -1477,10 +1511,12 @@ bool ApplyMemoryValidation(const IRWriter &in, IRWriter &out, const IROptions &o
             break;
 
         case IROp::Load32:
+        case IROp::Load32Linked:
         case IROp::LoadFloat:
         case IROp::Store32:
+        case IROp::Store32Conditional:
         case IROp::StoreFloat:
-            addValidate(IROp::ValidateAddress32, inst, inst.op == IROp::Store32 || inst.op == IROp::StoreFloat);
+            addValidate(IROp::ValidateAddress32, inst, inst.op == IROp::Store32 || inst.op == IROp::Store32Conditional || inst.op == IROp::StoreFloat);
             break;
 
         case IROp::LoadVec4:
@@ -54,6 +54,7 @@ namespace MIPSComp {
     virtual void Comp_RunBlock(MIPSOpcode op) = 0;
     virtual void Comp_ReplacementFunc(MIPSOpcode op) = 0;
     virtual void Comp_ITypeMem(MIPSOpcode op) = 0;
+    virtual void Comp_StoreSync(MIPSOpcode op) = 0;
     virtual void Comp_Cache(MIPSOpcode op) = 0;
     virtual void Comp_RelBranch(MIPSOpcode op) = 0;
     virtual void Comp_RelBranchRI(MIPSOpcode op) = 0;
@@ -62,6 +62,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override {}
+    void Comp_StoreSync(MIPSOpcode op) override {}
     void Comp_Cache(MIPSOpcode op) override {}
 
     void Comp_RelBranch(MIPSOpcode op) override {}
@@ -150,7 +150,7 @@ static const MIPSInstruction tableImmediate[64] = // xxxxxx ..... ..... ........
     INSTR("swr", JITFUNC(Comp_ITypeMem), Dis_ITypeMem, Int_ITypeMem, IN_IMM16|IN_RS_ADDR|IN_RT|OUT_MEM|MEMTYPE_WORD),
     INSTR("cache", JITFUNC(Comp_Cache), Dis_Cache, Int_Cache, IN_MEM|IN_IMM16|IN_RS_ADDR),
     //48
-    INSTR("ll", JITFUNC(Comp_Generic), Dis_Generic, Int_StoreSync, IN_MEM|IN_IMM16|IN_RS_ADDR|OUT_RT|OUT_OTHER|MEMTYPE_WORD),
+    INSTR("ll", JITFUNC(Comp_StoreSync), Dis_ITypeMem, Int_StoreSync, IN_MEM|IN_IMM16|IN_RS_ADDR|OUT_RT|OUT_OTHER|MEMTYPE_WORD),
     INSTR("lwc1", JITFUNC(Comp_FPULS), Dis_FPULS, Int_FPULS, IN_MEM|IN_IMM16|IN_RS_ADDR|OUT_FT|MEMTYPE_FLOAT|IS_FPU),
     INSTR("lv.s", JITFUNC(Comp_SV), Dis_SV, Int_SV, IN_MEM|IN_IMM16|IN_RS_ADDR|OUT_OTHER|IS_VFPU|VFPU_NO_PREFIX|MEMTYPE_FLOAT),
     INVALID,
@@ -159,7 +159,7 @@ static const MIPSInstruction tableImmediate[64] = // xxxxxx ..... ..... ........
     INSTR("lv.q", JITFUNC(Comp_SVQ), Dis_SVQ, Int_SVQ, IN_MEM|IN_IMM16|IN_RS_ADDR|OUT_OTHER|IS_VFPU|VFPU_NO_PREFIX|MEMTYPE_VQUAD), //copU
     ENCODING(VFPU5),
     //56
-    INSTR("sc", JITFUNC(Comp_Generic), Dis_Generic, Int_StoreSync, IN_IMM16|IN_RS_ADDR|IN_OTHER|IN_RT|OUT_RT|OUT_MEM|MEMTYPE_WORD),
+    INSTR("sc", JITFUNC(Comp_StoreSync), Dis_ITypeMem, Int_StoreSync, IN_IMM16|IN_RS_ADDR|IN_OTHER|IN_RT|OUT_RT|OUT_MEM|MEMTYPE_WORD),
     INSTR("swc1", JITFUNC(Comp_FPULS), Dis_FPULS, Int_FPULS, IN_IMM16|IN_RS_ADDR|IN_FT|OUT_MEM|MEMTYPE_FLOAT|IS_FPU), //copU
     INSTR("sv.s", JITFUNC(Comp_SV), Dis_SV, Int_SV, IN_IMM16|IN_RS_ADDR|IN_OTHER|OUT_MEM|IS_VFPU|VFPU_NO_PREFIX|MEMTYPE_FLOAT),
     INVALID,
@@ -112,6 +112,10 @@ void RiscVJit::CompIR_Load(IRInst inst) {
         LW(gpr.R(inst.dest), addrReg, imm);
         break;
 
+    case IROp::Load32Linked:
+        CompIR_Generic(inst);
+        break;
+
     default:
         INVALIDOP;
         break;
@@ -249,6 +253,14 @@ void RiscVJit::CompIR_Store(IRInst inst) {
     }
 }
 
+void RiscVJit::CompIR_CondStore(IRInst inst) {
+    CONDITIONAL_DISABLE;
+    if (inst.op != IROp::Store32Conditional)
+        INVALIDOP;
+
+    CompIR_Generic(inst);
+}
+
 void RiscVJit::CompIR_StoreShift(IRInst inst) {
     CONDITIONAL_DISABLE;
 
@@ -201,6 +201,7 @@ void RiscVJit::CompileIRInst(IRInst inst) {
     case IROp::Load16:
     case IROp::Load16Ext:
     case IROp::Load32:
+    case IROp::Load32Linked:
         CompIR_Load(inst);
         break;
 
@@ -223,6 +224,10 @@ void RiscVJit::CompileIRInst(IRInst inst) {
         CompIR_Store(inst);
         break;
 
+    case IROp::Store32Conditional:
+        CompIR_CondStore(inst);
+        break;
+
     case IROp::Store32Left:
     case IROp::Store32Right:
         CompIR_StoreShift(inst);
@@ -71,6 +71,7 @@ private:
     void CompIR_Breakpoint(IRInst inst);
     void CompIR_Compare(IRInst inst);
    void CompIR_CondAssign(IRInst inst);
+    void CompIR_CondStore(IRInst inst);
     void CompIR_Div(IRInst inst);
     void CompIR_Exit(IRInst inst);
     void CompIR_ExitIf(IRInst inst);
@@ -60,6 +60,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override {}
+    void Comp_StoreSync(MIPSOpcode op) override {}
     void Comp_Cache(MIPSOpcode op) override {}
 
     void Comp_RelBranch(MIPSOpcode op) override {}
@@ -44,6 +44,7 @@
 // #define CONDITIONAL_DISABLE(flag) { Comp_Generic(op); return; }
 #define CONDITIONAL_DISABLE(flag) if (jo.Disabled(JitDisable::flag)) { Comp_Generic(op); return; }
 #define DISABLE { Comp_Generic(op); return; }
+#define INVALIDOP { Comp_Generic(op); return; }
 
 namespace MIPSComp {
     using namespace Gen;
@@ -405,6 +406,42 @@ namespace MIPSComp {
 
     }
 
+    void Jit::Comp_StoreSync(MIPSOpcode op) {
+        CONDITIONAL_DISABLE(LSU);
+
+        int offset = _IMM16;
+        MIPSGPReg rt = _RT;
+        MIPSGPReg rs = _RS;
+        // Note: still does something even if loading to zero.
+
+        CheckMemoryBreakpoint(0, rs, offset);
+
+        FixupBranch skipStore;
+        FixupBranch finish;
+        switch (op >> 26) {
+        case 48: // ll
+            CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
+            MOV(8, MDisp(X64JitConstants::CTXREG, -128 + offsetof(MIPSState, llBit)), Imm8(1));
+            break;
+
+        case 56: // sc
+            CMP(8, MDisp(X64JitConstants::CTXREG, -128 + offsetof(MIPSState, llBit)), Imm8(1));
+            skipStore = J_CC(CC_NE);
+
+            CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
+            MOV(32, gpr.R(rt), Imm32(1));
+            finish = J();
+
+            SetJumpTarget(skipStore);
+            MOV(32, gpr.R(rt), Imm32(0));
+            SetJumpTarget(finish);
+            break;
+
+        default:
+            INVALIDOP;
+        }
+    }
+
     void Jit::Comp_Cache(MIPSOpcode op) {
         CONDITIONAL_DISABLE(LSU);
 
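The sc path above emits a small diamond: test llBit, skip the store if the reservation is gone, and leave 1 or 0 in rt either way. The -128 bias in MDisp suggests CTXREG points 128 bytes into MIPSState so common fields fit a one-byte displacement. Roughly the shape of the generated code (illustrative pseudo-assembly, not exact emitter output):

    //   cmp  byte [ctx + llBit], 1   ; reservation still held?
    //   jne  skipStore
    //   ...safe 32-bit write...      ; CompITypeMemWrite(op, 32, ...)
    //   mov  rt, 1                   ; success
    //   jmp  finish
    // skipStore:
    //   mov  rt, 0                   ; failure; the guest's loop retries
    // finish: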
@@ -69,6 +69,7 @@ public:
 
     // Ops
     void Comp_ITypeMem(MIPSOpcode op) override;
+    void Comp_StoreSync(MIPSOpcode op) override;
     void Comp_Cache(MIPSOpcode op) override;
 
     void Comp_RelBranch(MIPSOpcode op) override;