Mirror of https://github.com/hrydgard/ppsspp.git (synced 2024-11-27 07:20:49 +00:00)
x86jit: Simplify memcheck handling.
Now it's mostly the same as the other jits.
commit 0f5859510e
parent d26700820c
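For orientation before the diff: the x86 jit previously ran memchecks in two phases, ExecMemCheckJitBefore while the block executed and ExecMemCheckJitCleanup on block exit, both of which this commit removes. It now does what the other jits already do: the compiled code calls one helper that runs the check through CBreakPoints::ExecOpMemCheck and reports whether the core should stop. The following is a minimal sketch of that flow, condensed from the Jit.cpp hunk further down; the CoreState/CBreakPoints/currentMIPS declarations here are stand-ins for the real PPSSPP ones, not the actual code.

    // Sketch only - models the simplified flow this commit moves to.
    #include <cstdint>

    enum CoreState { CORE_RUNNING, CORE_NEXTFRAME, CORE_STEPPING };
    static CoreState coreState = CORE_RUNNING;
    static struct { uint32_t pc; } mipsState;      // stand-in for MIPSState
    static auto *currentMIPS = &mipsState;

    namespace CBreakPoints {
        // Stub: the real version returns the "skip first" resume address.
        uint32_t CheckSkipFirst() { return 0; }
        // Stub: the real version evaluates the memcheck, logs, and may pause the core.
        void ExecOpMemCheck(uint32_t /*addr*/, uint32_t /*pc*/) {}
    }

    // Called from jitted code before a checked load/store.
    // Returns 0 to keep running the block, 1 to bail out to the dispatcher.
    static uint32_t JitMemCheck(uint32_t addr, uint32_t pc) {
        if (CBreakPoints::CheckSkipFirst() == currentMIPS->pc)
            return 0;  // Resuming past a check we already reported.
        if (coreState != CORE_RUNNING && coreState != CORE_NEXTFRAME)
            return 1;  // Something else already stopped the core.

        // pc may point at a delay slot; ExecOpMemCheck logs and pauses as needed.
        CBreakPoints::ExecOpMemCheck(addr, pc);
        return (coreState == CORE_RUNNING || coreState == CORE_NEXTFRAME) ? 0 : 1;
    }

The new Jit::CheckMemoryBreakpoint in the Jit.cpp hunk below decides whether to emit a call to this helper at all, so blocks compiled while no memchecks are set pay nothing.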
@@ -40,7 +40,6 @@ u32 CBreakPoints::breakSkipFirstAt_ = 0;
u64 CBreakPoints::breakSkipFirstTicks_ = 0;

static std::mutex memCheckMutex_;
std::vector<MemCheck> CBreakPoints::memChecks_;
std::vector<MemCheck *> CBreakPoints::cleanupMemChecks_;
std::vector<MemCheck> CBreakPoints::memCheckRangesRead_;
std::vector<MemCheck> CBreakPoints::memCheckRangesWrite_;
@@ -81,55 +80,6 @@ BreakAction MemCheck::Action(u32 addr, bool write, int size, u32 pc, const char
    return BREAK_ACTION_IGNORE;
}

void MemCheck::JitBeforeApply(u32 addr, bool write, int size, u32 pc) {
    int mask = MEMCHECK_WRITE | MEMCHECK_WRITE_ONCHANGE;
    if (write && (cond & mask) == mask) {
        lastAddr = addr;
        lastPC = pc;
        lastSize = size;
    } else {
        lastAddr = 0;
        Apply(addr, write, size, pc);
    }
}

void MemCheck::JitBeforeAction(u32 addr, bool write, int size, u32 pc) {
    if (lastAddr) {
        // We have to break to find out if it changed.
        Core_EnableStepping(true, "memory.breakpoint.check", start);
    } else {
        Action(addr, write, size, pc, "CPU");
    }
}

bool MemCheck::JitApplyChanged() {
    if (lastAddr == 0 || lastPC == 0)
        return false;

    // Here's the tricky part: would this have changed memory?
    // Note that it did not actually get written.
    bool changed = MIPSAnalyst::OpWouldChangeMemory(lastPC, lastAddr, lastSize);
    if (changed)
        ++numHits;
    return changed;
}

void MemCheck::JitCleanup(bool changed)
{
    if (lastAddr == 0 || lastPC == 0)
        return;

    if (changed)
        Log(lastAddr, true, lastSize, lastPC, "CPU");

    // Resume if it should not have gone to stepping, or if it did not change.
    if ((!(result & BREAK_ACTION_PAUSE) || !changed) && coreState == CORE_STEPPING)
    {
        CBreakPoints::SetSkipFirst(lastPC);
        Core_EnableStepping(false);
    }
}

// Note: must lock while calling this.
size_t CBreakPoints::FindBreakpoint(u32 addr, bool matchTemp, bool temp)
{
@@ -392,8 +342,6 @@ BreakAction CBreakPoints::ExecBreakPoint(u32 addr) {
void CBreakPoints::AddMemCheck(u32 start, u32 end, MemCheckCondition cond, BreakAction result)
{
    std::unique_lock<std::mutex> guard(memCheckMutex_);
    // This will ruin any pending memchecks.
    cleanupMemChecks_.clear();

    size_t mc = FindMemCheck(start, end);
    if (mc == INVALID_MEMCHECK)
@@ -426,8 +374,6 @@ void CBreakPoints::AddMemCheck(u32 start, u32 end, MemCheckCondition cond, Break
void CBreakPoints::RemoveMemCheck(u32 start, u32 end)
{
    std::unique_lock<std::mutex> guard(memCheckMutex_);
    // This will ruin any pending memchecks.
    cleanupMemChecks_.clear();

    size_t mc = FindMemCheck(start, end);
    if (mc != INVALID_MEMCHECK)
@@ -457,8 +403,6 @@ void CBreakPoints::ChangeMemCheck(u32 start, u32 end, MemCheckCondition cond, Br
void CBreakPoints::ClearAllMemChecks()
{
    std::unique_lock<std::mutex> guard(memCheckMutex_);
    // This will ruin any pending memchecks.
    cleanupMemChecks_.clear();

    if (!memChecks_.empty())
    {
@@ -576,34 +520,6 @@ BreakAction CBreakPoints::ExecOpMemCheck(u32 address, u32 pc)
    return BREAK_ACTION_IGNORE;
}

void CBreakPoints::ExecMemCheckJitBefore(u32 address, bool write, int size, u32 pc)
{
    std::unique_lock<std::mutex> guard(memCheckMutex_);
    auto check = GetMemCheckLocked(address, size);
    if (check) {
        check->JitBeforeApply(address, write, size, pc);
        auto copy = *check;
        guard.unlock();
        copy.JitBeforeAction(address, write, size, pc);
        guard.lock();
        cleanupMemChecks_.push_back(check);
    }
}

void CBreakPoints::ExecMemCheckJitCleanup()
{
    std::unique_lock<std::mutex> guard(memCheckMutex_);
    for (auto it = cleanupMemChecks_.begin(), end = cleanupMemChecks_.end(); it != end; ++it) {
        auto check = *it;
        bool changed = check->JitApplyChanged();
        auto copy = *check;
        guard.unlock();
        copy.JitCleanup(changed);
        guard.lock();
    }
    cleanupMemChecks_.clear();
}

void CBreakPoints::SetSkipFirst(u32 pc)
{
    breakSkipFirstAt_ = pc;
@@ -97,10 +97,6 @@ struct MemCheck {
    BreakAction Apply(u32 addr, bool write, int size, u32 pc);
    // Called on a copy.
    BreakAction Action(u32 addr, bool write, int size, u32 pc, const char *reason);
    void JitBeforeApply(u32 addr, bool write, int size, u32 pc);
    void JitBeforeAction(u32 addr, bool write, int size, u32 pc);
    bool JitApplyChanged();
    void JitCleanup(bool changed);

    void Log(u32 addr, bool write, int size, u32 pc, const char *reason);

@@ -154,10 +150,6 @@ public:
    static BreakAction ExecMemCheck(u32 address, bool write, int size, u32 pc, const char *reason);
    static BreakAction ExecOpMemCheck(u32 address, u32 pc);

    // Executes memchecks but used by the jit. Cleanup finalizes after jit is done.
    static void ExecMemCheckJitBefore(u32 address, bool write, int size, u32 pc);
    static void ExecMemCheckJitCleanup();

    static void SetSkipFirst(u32 pc);
    static u32 CheckSkipFirst();

@@ -187,7 +179,6 @@ private:
    static u64 breakSkipFirstTicks_;

    static std::vector<MemCheck> memChecks_;
    static std::vector<MemCheck *> cleanupMemChecks_;
    static std::vector<MemCheck> memCheckRangesRead_;
    static std::vector<MemCheck> memCheckRangesWrite_;
};
@@ -787,7 +787,7 @@ bool ArmJit::CheckMemoryBreakpoint(int instructionOffset) {
        MOVI2R(R0, GetCompilerPC());
        MovToPC(R0);
        if (off != 0)
            ADDI2R(R0, R0, off, SCRATCHREG2);
            ADDI2R(R0, R0, off * 4, SCRATCHREG2);
        QuickCallFunction(SCRATCHREG2, &JitMemCheck);

        // If 0, the breakpoint wasn't tripped.
@@ -53,8 +53,6 @@ namespace MIPSComp {
    {
        AFTER_NONE = 0x00,
        AFTER_CORE_STATE = 0x01,
        AFTER_REWIND_PC_BAD_STATE = 0x02,
        AFTER_MEMCHECK_CLEANUP = 0x04,
    };

    u32 compilerPC;

@@ -676,7 +676,6 @@ namespace MIPSAnalyst {
            return memcmp(rd, Memory::GetPointerRange(addr, 16), sizeof(float) * 4) != 0;
        }

        // TODO: Technically, the break might be for 1 byte in the middle of a sw.
        return writeVal != prevVal;
    }
@@ -127,6 +127,8 @@ void Jit::Comp_FPULS(MIPSOpcode op) {
    int ft = _FT;
    MIPSGPReg rs = _RS;

    CheckMemoryBreakpoint(0, rs, offset);

    switch (op >> 26) {
    case 49: //FI(ft) = Memory::Read_U32(addr); break; //lwc1
        {
@@ -286,6 +286,7 @@ namespace MIPSComp {
    {
        CONDITIONAL_DISABLE(LSU);
        int offset = _IMM16;
        MIPSGPReg rs = _RS;
        MIPSGPReg rt = _RT;
        int o = op>>26;
        if (((op >> 29) & 1) == 0 && rt == MIPS_REG_ZERO) {
@@ -293,6 +294,8 @@ namespace MIPSComp {
            return;
        }

        CheckMemoryBreakpoint(0, rs, offset);

        switch (o)
        {
        case 37: //R(rt) = ReadMem16(addr); break; //lhu
@@ -334,6 +337,7 @@ namespace MIPSComp {
            u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
            if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
            {
                CheckMemoryBreakpoint(1, rs, offset - 3);
                EatInstruction(nextOp);
                // nextOp has the correct address.
                CompITypeMemRead(nextOp, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
@@ -350,6 +354,7 @@ namespace MIPSComp {
            u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
            if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
            {
                CheckMemoryBreakpoint(1, rs, offset + 3);
                EatInstruction(nextOp);
                // op has the correct address.
                CompITypeMemRead(op, 32, &XEmitter::MOVZX, safeMemFuncs.readU32);
@@ -366,6 +371,7 @@ namespace MIPSComp {
            u32 desiredOp = ((op & 0xFFFF0000) + (4 << 26)) + (offset - 3);
            if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
            {
                CheckMemoryBreakpoint(1, rs, offset - 3);
                EatInstruction(nextOp);
                // nextOp has the correct address.
                CompITypeMemWrite(nextOp, 32, safeMemFuncs.writeU32);
@@ -382,6 +388,7 @@ namespace MIPSComp {
            u32 desiredOp = ((op & 0xFFFF0000) - (4 << 26)) + (offset + 3);
            if (!js.inDelaySlot && nextOp == desiredOp && !jo.Disabled(JitDisable::LSU_UNALIGNED))
            {
                CheckMemoryBreakpoint(1, rs, offset + 3);
                EatInstruction(nextOp);
                // op has the correct address.
                CompITypeMemWrite(op, 32, safeMemFuncs.writeU32);
@@ -245,6 +245,8 @@ void Jit::Comp_SV(MIPSOpcode op) {
    int vt = ((op >> 16) & 0x1f) | ((op & 3) << 5);
    MIPSGPReg rs = _RS;

    CheckMemoryBreakpoint(0, rs, imm);

    switch (op >> 26) {
    case 50: //lv.s // VI(vt) = Memory::Read_U32(addr);
        {
@@ -300,6 +302,8 @@ void Jit::Comp_SVQ(MIPSOpcode op) {
    int vt = (((op >> 16) & 0x1f)) | ((op&1) << 5);
    MIPSGPReg rs = _RS;

    CheckMemoryBreakpoint(0, rs, imm);

    switch (op >> 26) {
    case 53: //lvl.q/lvr.q
        {
@@ -91,7 +91,19 @@ u32 JitBreakpoint(uint32_t addr)
    return 1;
}

extern void JitMemCheckCleanup();
static u32 JitMemCheck(u32 addr, u32 pc) {
    // Should we skip this breakpoint?
    if (CBreakPoints::CheckSkipFirst() == currentMIPS->pc)
        return 0;

    // Did we already hit one?
    if (coreState != CORE_RUNNING && coreState != CORE_NEXTFRAME)
        return 1;

    // Note: pc may be the delay slot.
    CBreakPoints::ExecOpMemCheck(addr, pc);
    return coreState == CORE_RUNNING || coreState == CORE_NEXTFRAME ? 0 : 1;
}

static void JitLogMiss(MIPSOpcode op)
{
@@ -370,11 +382,6 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b) {
        MIPSCompileOp(inst, this);

        if (js.afterOp & JitState::AFTER_CORE_STATE) {
            // TODO: Save/restore?
            FlushAll();

            // If we're rewinding, CORE_NEXTFRAME should not cause a rewind.
            // It doesn't really matter either way if we're not rewinding.
            // CORE_RUNNING is <= CORE_NEXTFRAME.
            if (RipAccessible((const void *)&coreState)) {
                CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME)); // rip accessible
@@ -382,19 +389,18 @@ const u8 *Jit::DoJit(u32 em_address, JitBlock *b) {
                MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
                CMP(32, MatR(RAX), Imm32(CORE_NEXTFRAME));
            }
            FixupBranch skipCheck = J_CC(CC_LE);
            if (js.afterOp & JitState::AFTER_REWIND_PC_BAD_STATE)
                MOV(32, MIPSSTATE_VAR(pc), Imm32(GetCompilerPC()));
            else
                MOV(32, MIPSSTATE_VAR(pc), Imm32(GetCompilerPC() + 4));
            FixupBranch skipCheck = J_CC(CC_LE, true);
            MOV(32, MIPSSTATE_VAR(pc), Imm32(GetCompilerPC() + 4));
            RegCacheState state;
            GetStateAndFlushAll(state);
            WriteSyscallExit();

            SetJumpTarget(skipCheck);
            // If we didn't jump, we can keep our regs as they were.
            RestoreState(state);

            js.afterOp = JitState::AFTER_NONE;
        }
        if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
            js.afterOp &= ~JitState::AFTER_MEMCHECK_CLEANUP;
        }

        js.compilerPC += 4;
        js.numInstructions++;
@@ -681,8 +687,8 @@ void Jit::WriteExit(u32 destination, int exit_num) {
        ABI_CallFunctionC(&HitInvalidBranch, destination);
        js.afterOp |= JitState::AFTER_CORE_STATE;
    }
    // If we need to verify coreState and rewind, we may not jump yet.
    if (js.afterOp & (JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE)) {
    // If we need to verify coreState, we may not jump yet.
    if (js.afterOp & JitState::AFTER_CORE_STATE) {
        // CORE_RUNNING is <= CORE_NEXTFRAME.
        if (RipAccessible((const void *)&coreState)) {
            CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME)); // rip accessible
@@ -736,8 +742,8 @@ static void HitInvalidJumpReg(uint32_t source) {
}

void Jit::WriteExitDestInReg(X64Reg reg) {
    // If we need to verify coreState and rewind, we may not jump yet.
    if (js.afterOp & (JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE)) {
    // If we need to verify coreState, we may not jump yet.
    if (js.afterOp & JitState::AFTER_CORE_STATE) {
        // CORE_RUNNING is <= CORE_NEXTFRAME.
        if (RipAccessible((const void *)&coreState)) {
            CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME)); // rip accessible
@@ -791,11 +797,6 @@ void Jit::WriteExitDestInReg(X64Reg reg) {

void Jit::WriteSyscallExit() {
    WriteDowncount();
    if (js.afterOp & JitState::AFTER_MEMCHECK_CLEANUP) {
        RestoreRoundingMode();
        ABI_CallFunction(&JitMemCheckCleanup);
        ApplyRoundingMode();
    }
    JMP(dispatcherCheckCoreState, true);
}
@@ -825,6 +826,94 @@ bool Jit::CheckJitBreakpoint(u32 addr, int downcountOffset) {
    return false;
}

void Jit::CheckMemoryBreakpoint(int instructionOffset, MIPSGPReg rs, int offset) {
    if (!CBreakPoints::HasMemChecks())
        return;

    int totalInstructionOffset = instructionOffset + (js.inDelaySlot ? 1 : 0);
    uint32_t checkedPC = GetCompilerPC() + totalInstructionOffset * 4;
    int size = MIPSAnalyst::OpMemoryAccessSize(checkedPC);
    bool isWrite = MIPSAnalyst::IsOpMemoryWrite(checkedPC);

    // 0 because we normally execute before increasing.
    int downcountOffset = js.inDelaySlot ? -2 : -1;
    // TODO: In likely branches, downcount will be incorrect. This might make resume fail.
    if (js.downcountAmount + downcountOffset < 0) {
        downcountOffset = 0;
    }

    if (gpr.IsImm(rs)) {
        uint32_t iaddr = gpr.GetImm(rs) + offset;
        MemCheck check;
        if (CBreakPoints::GetMemCheckInRange(iaddr, size, &check)) {
            if (!(check.cond & MEMCHECK_READ) && !isWrite)
                return;
            if (!(check.cond & MEMCHECK_WRITE) && isWrite)
                return;

            // We need to flush, or conditions and log expressions will see old register values.
            FlushAll();

            MOV(32, MIPSSTATE_VAR(pc), Imm32(GetCompilerPC()));
            CallProtectedFunction(&JitMemCheck, iaddr, checkedPC);

            CMP(32, R(RAX), Imm32(0));
            FixupBranch skipCheck = J_CC(CC_E);
            WriteDowncount(downcountOffset);
            JMP(dispatcherCheckCoreState, true);

            SetJumpTarget(skipCheck);
        }
    } else {
        const auto memchecks = CBreakPoints::GetMemCheckRanges(isWrite);
        bool possible = !memchecks.empty();
        if (!possible)
            return;

        gpr.Lock(rs);
        gpr.MapReg(rs, true, false);
        LEA(32, RAX, MDisp(gpr.RX(rs), offset));
        gpr.UnlockAll();

        // We need to flush, or conditions and log expressions will see old register values.
        FlushAll();

        std::vector<FixupBranch> hitChecks;
        for (auto it = memchecks.begin(), end = memchecks.end(); it != end; ++it) {
            if (it->end != 0) {
                CMP(32, R(RAX), Imm32(it->start - size));
                FixupBranch skipNext = J_CC(CC_BE);

                CMP(32, R(RAX), Imm32(it->end));
                hitChecks.push_back(J_CC(CC_B, true));

                SetJumpTarget(skipNext);
            } else {
                CMP(32, R(RAX), Imm32(it->start));
                hitChecks.push_back(J_CC(CC_E, true));
            }
        }

        FixupBranch noHits = J(true);

        // Okay, now land any hit here.
        for (auto &fixup : hitChecks)
            SetJumpTarget(fixup);
        hitChecks.clear();

        MOV(32, MIPSSTATE_VAR(pc), Imm32(GetCompilerPC()));
        CallProtectedFunction(&JitMemCheck, R(RAX), checkedPC);

        CMP(32, R(RAX), Imm32(0));
        FixupBranch skipCheck = J_CC(CC_E);
        WriteDowncount(downcountOffset);
        JMP(dispatcherCheckCoreState, true);

        SetJumpTarget(skipCheck);
        SetJumpTarget(noHits);
    }
}

void Jit::CallProtectedFunction(const void *func, const OpArg &arg1) {
    // We don't regcache RCX, so the below is safe (and also faster, maybe branch prediction?)
    ABI_CallFunctionA(thunks.ProtectFunction(func, 1), arg1);
@@ -835,18 +924,14 @@ void Jit::CallProtectedFunction(const void *func, const OpArg &arg1, const OpArg
    ABI_CallFunctionAA(thunks.ProtectFunction(func, 2), arg1, arg2);
}

void Jit::CallProtectedFunction(const void *func, const u32 arg1, const u32 arg2, const u32 arg3) {
    // On x64, we need to save R8, which is caller saved.
    thunks.Enter(this);
    ABI_CallFunctionCCC(func, arg1, arg2, arg3);
    thunks.Leave(this);
void Jit::CallProtectedFunction(const void *func, const u32 arg1, const u32 arg2) {
    // We don't regcache RCX/RDX, so the below is safe (and also faster, maybe branch prediction?)
    ABI_CallFunctionCC(thunks.ProtectFunction(func, 2), arg1, arg2);
}

void Jit::CallProtectedFunction(const void *func, const OpArg &arg1, const u32 arg2, const u32 arg3) {
    // On x64, we need to save R8, which is caller saved.
    thunks.Enter(this);
    ABI_CallFunctionACC(func, arg1, arg2, arg3);
    thunks.Leave(this);
void Jit::CallProtectedFunction(const void *func, const OpArg &arg1, const u32 arg2) {
    // We don't regcache RCX/RDX, so the below is safe (and also faster, maybe branch prediction?)
    ABI_CallFunctionAC(thunks.ProtectFunction(func, 2), arg1, arg2);
}

void Jit::Comp_DoNothing(MIPSOpcode op) { }
@@ -204,6 +204,7 @@ private:
    // void WriteRfiExitDestInEAX();
    void WriteSyscallExit();
    bool CheckJitBreakpoint(u32 addr, int downcountOffset);
    void CheckMemoryBreakpoint(int instructionOffset, MIPSGPReg rs, int offset);

    // Utility compilation functions
    void BranchFPFlag(MIPSOpcode op, Gen::CCFlags cc, bool likely);
@@ -242,8 +243,8 @@ private:

    void CallProtectedFunction(const void *func, const Gen::OpArg &arg1);
    void CallProtectedFunction(const void *func, const Gen::OpArg &arg1, const Gen::OpArg &arg2);
    void CallProtectedFunction(const void *func, const u32 arg1, const u32 arg2, const u32 arg3);
    void CallProtectedFunction(const void *func, const Gen::OpArg &arg1, const u32 arg2, const u32 arg3);
    void CallProtectedFunction(const void *func, const Gen::OpArg &arg1, const u32 arg2);
    void CallProtectedFunction(const void *func, const u32 arg1, const u32 arg2);

    template <typename Tr, typename T1>
    void CallProtectedFunction(Tr (*func)(T1), const Gen::OpArg &arg1) {
@@ -255,14 +256,14 @@ private:
        CallProtectedFunction((const void *)func, arg1, arg2);
    }

    template <typename Tr, typename T1, typename T2, typename T3>
    void CallProtectedFunction(Tr (*func)(T1, T2, T3), const u32 arg1, const u32 arg2, const u32 arg3) {
        CallProtectedFunction((const void *)func, arg1, arg2, arg3);
    template <typename Tr, typename T1, typename T2>
    void CallProtectedFunction(Tr(*func)(T1, T2), const Gen::OpArg &arg1, const u32 arg2) {
        CallProtectedFunction((const void *)func, arg1, arg2);
    }

    template <typename Tr, typename T1, typename T2, typename T3>
    void CallProtectedFunction(Tr (*func)(T1, T2, T3), const Gen::OpArg &arg1, const u32 arg2, const u32 arg3) {
        CallProtectedFunction((const void *)func, arg1, arg2, arg3);
    template <typename Tr, typename T1, typename T2>
    void CallProtectedFunction(Tr(*func)(T1, T2), const u32 arg1, const u32 arg2) {
        CallProtectedFunction((const void *)func, arg1, arg2);
    }

    bool PredictTakeBranch(u32 targetAddr, bool likely);
@@ -33,24 +33,6 @@ namespace MIPSComp
using namespace Gen;
using namespace X64JitConstants;

void JitMemCheck(u32 addr, int size, int isWrite)
{
    // Should we skip this breakpoint?
    if (CBreakPoints::CheckSkipFirst() == currentMIPS->pc)
        return;

    // Did we already hit one?
    if (coreState != CORE_RUNNING && coreState != CORE_NEXTFRAME)
        return;

    CBreakPoints::ExecMemCheckJitBefore(addr, isWrite == 1, size, currentMIPS->pc);
}

void JitMemCheckCleanup()
{
    CBreakPoints::ExecMemCheckJitCleanup();
}

JitSafeMem::JitSafeMem(Jit *jit, MIPSGPReg raddr, s32 offset, u32 alignMask)
    : jit_(jit), raddr_(raddr), offset_(offset), needsCheck_(false), needsSkip_(false), alignMask_(alignMask)
{
@@ -77,7 +59,6 @@ bool JitSafeMem::PrepareWrite(OpArg &dest, int size)
{
    if (ImmValid())
    {
        MemCheckImm(MEM_WRITE);
        u32 addr = (iaddr_ & alignMask_);
#ifdef MASKED_PSP_MEMORY
        addr &= Memory::MEMVIEW32_MASK;
@@ -106,7 +87,6 @@ bool JitSafeMem::PrepareRead(OpArg &src, int size)
{
    if (ImmValid())
    {
        MemCheckImm(MEM_READ);
        u32 addr = (iaddr_ & alignMask_);
#ifdef MASKED_PSP_MEMORY
        addr &= Memory::MEMVIEW32_MASK;
@@ -174,8 +154,6 @@ OpArg JitSafeMem::PrepareMemoryOpArg(MemoryOpType type)
        xaddr_ = EAX;
    }

    MemCheckAsm(type);

    if (!fast_)
    {
        // Is it in physical ram?
@@ -377,85 +355,6 @@ void JitSafeMem::Finish()
        jit_->SetJumpTarget(*it);
}

void JitSafeMem::MemCheckImm(MemoryOpType type) {
    MemCheck check;
    if (CBreakPoints::GetMemCheckInRange(iaddr_, size_, &check)) {
        if (!(check.cond & MEMCHECK_READ) && type == MEM_READ)
            return;
        if (!(check.cond & MEMCHECK_WRITE) && type == MEM_WRITE)
            return;

        jit_->MOV(32, MIPSSTATE_VAR(pc), Imm32(jit_->GetCompilerPC()));
        jit_->CallProtectedFunction(&JitMemCheck, iaddr_, size_, type == MEM_WRITE ? 1 : 0);

        // CORE_RUNNING is <= CORE_NEXTFRAME.
        if (jit_->RipAccessible((const void *)&coreState)) {
            jit_->CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME)); // rip accessible
        } else {
            // We can't safely overwrite any register, so push. This is only while debugging.
            jit_->PUSH(RAX);
            jit_->MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
            jit_->CMP(32, MatR(RAX), Imm32(CORE_NEXTFRAME));
            jit_->POP(RAX);
        }
        skipChecks_.push_back(jit_->J_CC(CC_G, true));
        jit_->js.afterOp |= JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE | JitState::AFTER_MEMCHECK_CLEANUP;
    }
}

void JitSafeMem::MemCheckAsm(MemoryOpType type) {
    const auto memchecks = CBreakPoints::GetMemCheckRanges(type == MEM_WRITE);
    bool possible = !memchecks.empty();
    std::vector<FixupBranch> hitChecks;
    for (auto it = memchecks.begin(), end = memchecks.end(); it != end; ++it) {
        if (it->end != 0) {
            jit_->CMP(32, R(xaddr_), Imm32(it->start - offset_ - size_));
            FixupBranch skipNext = jit_->J_CC(CC_BE);

            jit_->CMP(32, R(xaddr_), Imm32(it->end - offset_));
            hitChecks.push_back(jit_->J_CC(CC_B, true));

            jit_->SetJumpTarget(skipNext);
        } else {
            jit_->CMP(32, R(xaddr_), Imm32(it->start - offset_));
            hitChecks.push_back(jit_->J_CC(CC_E, true));
        }
    }

    if (possible) {
        FixupBranch noHits = jit_->J(true);

        // Okay, now land any hit here.
        for (auto &fixup : hitChecks)
            jit_->SetJumpTarget(fixup);
        hitChecks.clear();

        jit_->PUSH(xaddr_);
        // Keep the stack 16-byte aligned.
        jit_->SUB(PTRBITS, R(SP), Imm32(16 - PTRBITS / 8));
        jit_->MOV(32, MIPSSTATE_VAR(pc), Imm32(jit_->GetCompilerPC()));
        jit_->ADD(32, R(xaddr_), Imm32(offset_));
        jit_->CallProtectedFunction(&JitMemCheck, R(xaddr_), size_, type == MEM_WRITE ? 1 : 0);
        jit_->ADD(PTRBITS, R(SP), Imm32(16 - PTRBITS / 8));
        jit_->POP(xaddr_);

        // CORE_RUNNING is <= CORE_NEXTFRAME.
        if (jit_->RipAccessible((const void *)&coreState)) {
            jit_->CMP(32, M(&coreState), Imm32(CORE_NEXTFRAME)); // rip accessible
        } else {
            // We can't safely overwrite any register, so push. This is only while debugging.
            jit_->PUSH(RAX);
            jit_->MOV(PTRBITS, R(RAX), ImmPtr((const void *)&coreState));
            jit_->CMP(32, MatR(RAX), Imm32(CORE_NEXTFRAME));
            jit_->POP(RAX);
        }
        skipChecks_.push_back(jit_->J_CC(CC_G, true));
        jit_->js.afterOp |= JitState::AFTER_CORE_STATE | JitState::AFTER_REWIND_PC_BAD_STATE | JitState::AFTER_MEMCHECK_CLEANUP;

        jit_->SetJumpTarget(noHits);
    }
}

static const int FUNCS_ARENA_SIZE = 512 * 1024;

void JitSafeMemFuncs::Init(ThunkManager *thunks) {
@@ -69,8 +69,6 @@ private:

    Gen::OpArg PrepareMemoryOpArg(MemoryOpType type);
    void PrepareSlowAccess();
    void MemCheckImm(MemoryOpType type);
    void MemCheckAsm(MemoryOpType type);
    bool ImmValid();
    void IndirectCALL(const void *safeFunc);