Jit64: Remove Jitx86Base class

MerryMage 2018-12-28 09:15:26 +00:00
parent bfb9b1aca5
commit 77e9aa48bc
21 changed files with 214 additions and 242 deletions

View File

@@ -255,7 +255,6 @@ if(_M_X86)
PowerPC/Jit64Common/EmuCodeBlock.cpp
PowerPC/Jit64Common/FarCodeCache.cpp
PowerPC/Jit64Common/Jit64AsmCommon.cpp
PowerPC/Jit64Common/Jit64Base.cpp
PowerPC/Jit64Common/TrampolineCache.cpp
)
elseif(_M_ARM_64)

View File

@@ -279,7 +279,6 @@
<ClCompile Include="PowerPC\Jit64Common\EmuCodeBlock.cpp" />
<ClCompile Include="PowerPC\Jit64Common\FarCodeCache.cpp" />
<ClCompile Include="PowerPC\Jit64Common\Jit64AsmCommon.cpp" />
<ClCompile Include="PowerPC\Jit64Common\Jit64Base.cpp" />
<ClCompile Include="PowerPC\Jit64Common\TrampolineCache.cpp" />
<ClCompile Include="PowerPC\JitCommon\JitAsmCommon.cpp" />
<ClCompile Include="PowerPC\JitCommon\JitBase.cpp" />
@@ -523,7 +522,7 @@
<ClInclude Include="PowerPC\Jit64Common\EmuCodeBlock.h" />
<ClInclude Include="PowerPC\Jit64Common\FarCodeCache.h" />
<ClInclude Include="PowerPC\Jit64Common\Jit64AsmCommon.h" />
<ClInclude Include="PowerPC\Jit64Common\Jit64Base.h" />
<ClInclude Include="PowerPC\Jit64Common\Jit64Constants.h" />
<ClInclude Include="PowerPC\Jit64Common\Jit64PowerPCState.h" />
<ClInclude Include="PowerPC\Jit64Common\TrampolineCache.h" />
<ClInclude Include="PowerPC\Jit64Common\TrampolineInfo.h" />

View File

@@ -699,9 +699,6 @@
<ClCompile Include="PowerPC\Jit64Common\Jit64AsmCommon.cpp">
<Filter>PowerPC\Jit64Common</Filter>
</ClCompile>
<ClCompile Include="PowerPC\Jit64Common\Jit64Base.cpp">
<Filter>PowerPC\Jit64Common</Filter>
</ClCompile>
<ClCompile Include="PowerPC\Jit64Common\TrampolineCache.cpp">
<Filter>PowerPC\Jit64Common</Filter>
</ClCompile>
@@ -1393,7 +1390,7 @@
<ClInclude Include="PowerPC\Jit64Common\Jit64AsmCommon.h">
<Filter>PowerPC\Jit64Common</Filter>
</ClInclude>
<ClInclude Include="PowerPC\Jit64Common\Jit64Base.h">
<ClInclude Include="PowerPC\Jit64Common\Jit64Constants.h">
<Filter>PowerPC\Jit64Common</Filter>
</ClInclude>
<ClInclude Include="PowerPC\Jit64Common\Jit64PowerPCState.h">

View File

@@ -11,7 +11,7 @@
#include "Core/HLE/HLE.h"
#include "Core/HW/CPU.h"
#include "Core/PowerPC/Gekko.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/PPCAnalyst.h"
#include "Core/PowerPC/PowerPC.h"

View File

@@ -4,7 +4,9 @@
#include "Core/PowerPC/Jit64/Jit.h"
#include <disasm.h>
#include <map>
#include <sstream>
#include <string>
// for the PROFILER stuff
@@ -14,21 +16,26 @@
#include "Common/CommonTypes.h"
#include "Common/File.h"
#include "Common/GekkoDisassembler.h"
#include "Common/Logging/Log.h"
#include "Common/MemoryUtil.h"
#include "Common/PerformanceCounter.h"
#include "Common/StringUtil.h"
#include "Common/Swap.h"
#include "Common/x64ABI.h"
#include "Core/Core.h"
#include "Core/CoreTiming.h"
#include "Core/HLE/HLE.h"
#include "Core/HW/CPU.h"
#include "Core/HW/GPFifo.h"
#include "Core/HW/Memmap.h"
#include "Core/HW/ProcessorInterface.h"
#include "Core/MachineContext.h"
#include "Core/PatchEngine.h"
#include "Core/PowerPC/Jit64/JitAsm.h"
#include "Core/PowerPC/Jit64/RegCache/JitRegCache.h"
#include "Core/PowerPC/Jit64Common/FarCodeCache.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
#include "Core/PowerPC/Jit64Common/TrampolineCache.h"
#include "Core/PowerPC/JitInterface.h"
@@ -146,7 +153,9 @@ enum
GUARD_OFFSET = STACK_SIZE - SAFE_STACK_SIZE - GUARD_SIZE,
};
Jit64::Jit64() = default;
Jit64::Jit64() : QuantizedMemoryRoutines(*this)
{
}
Jit64::~Jit64() = default;
@@ -211,7 +220,110 @@ bool Jit64::HandleFault(uintptr_t access_address, SContext* ctx)
if (m_enable_blr_optimization && diff >= GUARD_OFFSET && diff < GUARD_OFFSET + GUARD_SIZE)
return HandleStackFault();
return Jitx86Base::HandleFault(access_address, ctx);
// This generates some fairly heavy trampolines, but it doesn't really hurt.
// Only instructions that access I/O will get these, and there won't be that
// many of them in a typical program/game.
// TODO: do we properly handle off-the-end?
const auto base_ptr = reinterpret_cast<uintptr_t>(Memory::physical_base);
if (access_address >= base_ptr && access_address < base_ptr + 0x100010000)
return BackPatch(static_cast<u32>(access_address - base_ptr), ctx);
const auto logical_base_ptr = reinterpret_cast<uintptr_t>(Memory::logical_base);
if (access_address >= logical_base_ptr && access_address < logical_base_ptr + 0x100010000)
return BackPatch(static_cast<u32>(access_address - logical_base_ptr), ctx);
return false;
}
bool Jit64::BackPatch(u32 emAddress, SContext* ctx)
{
u8* codePtr = reinterpret_cast<u8*>(ctx->CTX_PC);
if (!IsInSpace(codePtr))
return false; // this will become a regular crash real soon after this
auto it = m_back_patch_info.find(codePtr);
if (it == m_back_patch_info.end())
{
PanicAlert("BackPatch: no register use entry for address %p", codePtr);
return false;
}
TrampolineInfo& info = it->second;
u8* exceptionHandler = nullptr;
if (jo.memcheck)
{
auto it2 = m_exception_handler_at_loc.find(codePtr);
if (it2 != m_exception_handler_at_loc.end())
exceptionHandler = it2->second;
}
// In the trampoline code, we jump back into the block at the beginning
// of the next instruction. The next instruction comes immediately
// after the backpatched operation, or BACKPATCH_SIZE bytes after the start
// of the backpatched operation, whichever comes last. (The JIT inserts NOPs
// into the original code if necessary to ensure there is enough space
// to insert the backpatch jump.)
js.generatingTrampoline = true;
js.trampolineExceptionHandler = exceptionHandler;
js.compilerPC = info.pc;
// Generate the trampoline.
const u8* trampoline = trampolines.GenerateTrampoline(info);
js.generatingTrampoline = false;
js.trampolineExceptionHandler = nullptr;
u8* start = info.start;
// Patch the original memory operation.
XEmitter emitter(start);
emitter.JMP(trampoline, true);
// NOPs become dead code
const u8* end = info.start + info.len;
for (const u8* i = emitter.GetCodePtr(); i < end; ++i)
emitter.INT3();
// Rewind time to just before the start of the write block. If we swapped memory
// before faulting (eg: the store+swap was not an atomic op like MOVBE), let's
// swap it back so that the swap can happen again (this double swap isn't ideal but
// only happens the first time we fault).
if (info.nonAtomicSwapStoreSrc != Gen::INVALID_REG)
{
u64* ptr = ContextRN(ctx, info.nonAtomicSwapStoreSrc);
switch (info.accessSize << 3)
{
case 8:
// No need to swap a byte
break;
case 16:
*ptr = Common::swap16(static_cast<u16>(*ptr));
break;
case 32:
*ptr = Common::swap32(static_cast<u32>(*ptr));
break;
case 64:
*ptr = Common::swap64(static_cast<u64>(*ptr));
break;
default:
DEBUG_ASSERT(0);
break;
}
}
// This is special code to undo the LEA in SafeLoadToReg if it clobbered the address
// register in the case where reg_value shared the same location as opAddress.
if (info.offsetAddedToAddress)
{
u64* ptr = ContextRN(ctx, info.op_arg.GetSimpleReg());
*ptr -= static_cast<u32>(info.offset);
}
ctx->CTX_PC = reinterpret_cast<u64>(trampoline);
return true;
}
void Jit64::Init()
@@ -1044,3 +1156,40 @@ bool Jit64::HandleFunctionHooking(u32 address)
return true;
});
}
void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
const JitBlock* b)
{
for (size_t i = 0; i < size; i++)
{
const PPCAnalyst::CodeOp& op = code_buffer[i];
const std::string disasm = Common::GekkoDisassembler::Disassemble(op.inst.hex, op.address);
DEBUG_LOG(DYNA_REC, "IR_X86 PPC: %08x %s\n", op.address, disasm.c_str());
}
disassembler x64disasm;
x64disasm.set_syntax_intel();
u64 disasmPtr = reinterpret_cast<u64>(normalEntry);
const u8* end = normalEntry + b->codeSize;
while (reinterpret_cast<u8*>(disasmPtr) < end)
{
char sptr[1000] = "";
disasmPtr += x64disasm.disasm64(disasmPtr, disasmPtr, reinterpret_cast<u8*>(disasmPtr), sptr);
DEBUG_LOG(DYNA_REC, "IR_X86 x86: %s", sptr);
}
if (b->codeSize <= 250)
{
std::stringstream ss;
ss << std::hex;
for (u8 i = 0; i <= b->codeSize; i++)
{
ss.width(2);
ss.fill('0');
ss << static_cast<u32>(*(normalEntry + i));
}
DEBUG_LOG(DYNA_REC, "IR_X86 bin: %s\n\n\n", ss.str().c_str());
}
}
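A note on the jump-back rule described in the BackPatch comment above: the trampoline resumes either immediately after the patched operation or BACKPATCH_SIZE bytes after its start, whichever comes last. A minimal standalone sketch of that rule (hypothetical helper, not part of this commit; BACKPATCH_SIZE = 5 matches TrampolineCache.h later in this diff):

#include <cstdint>

// Length of the 32-bit-relative JMP that overwrites the faulting operation.
constexpr int BACKPATCH_SIZE = 5;

// Where the trampoline jumps back to. Operations shorter than the JMP were
// padded with NOPs by the JIT, so the resume point is never inside the JMP.
const std::uint8_t* ResumePoint(const std::uint8_t* op_start, int op_len)
{
  return op_start + (op_len > BACKPATCH_SIZE ? op_len : BACKPATCH_SIZE);
}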

View File

@@ -25,16 +25,19 @@
#include "Core/PowerPC/Jit64/RegCache/FPURegCache.h"
#include "Core/PowerPC/Jit64/RegCache/GPRRegCache.h"
#include "Core/PowerPC/Jit64/RegCache/JitRegCache.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64Common/BlockCache.h"
#include "Core/PowerPC/Jit64Common/Jit64AsmCommon.h"
#include "Core/PowerPC/Jit64Common/TrampolineCache.h"
#include "Core/PowerPC/JitCommon/JitBase.h"
#include "Core/PowerPC/JitCommon/JitCache.h"
namespace PPCAnalyst
{
struct CodeBlock;
struct CodeOp;
}
} // namespace PPCAnalyst
class Jit64 : public Jitx86Base
class Jit64 : public JitBase, public QuantizedMemoryRoutines
{
public:
Jit64();
@@ -45,6 +48,7 @@ public:
bool HandleFault(uintptr_t access_address, SContext* ctx) override;
bool HandleStackFault() override;
bool BackPatch(u32 emAddress, SContext* ctx);
void EnableOptimization();
void EnableBlockLink();
@@ -239,6 +243,9 @@ private:
void AllocStack();
void FreeStack();
JitBlockCache blocks{*this};
TrampolineCache trampolines{*this};
GPRRegCache gpr{*this};
FPURegCache fpr{*this};
@@ -248,3 +255,6 @@ private:
bool m_cleanup_after_stackfault;
u8* m_stack;
};
void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
const JitBlock* b);
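With Jitx86Base gone, Jit64 now passes itself to its QuantizedMemoryRoutines base (see the Jit64 constructor earlier in this commit), which stores it as a Jit64& via EmuCodeBlock. A minimal sketch of that pattern with hypothetical names — well-defined because the base constructor only stores the reference and never calls through it while the derived object is still being constructed:

class JitLike;  // stand-in for Jit64

class CodeBlockLike  // stand-in for EmuCodeBlock
{
public:
  explicit CodeBlockLike(JitLike& jit) : m_jit{jit} {}  // stores, never uses yet

protected:
  JitLike& m_jit;
};

class JitLike : public CodeBlockLike
{
public:
  // Passing *this to a base is fine here: only the reference is taken.
  JitLike() : CodeBlockLike(*this) {}
};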

View File

@@ -18,7 +18,7 @@
using namespace Gen;
Jit64AsmRoutineManager::Jit64AsmRoutineManager(Jitx86Base& jit) : m_jit{jit}, CommonAsmRoutines(jit)
Jit64AsmRoutineManager::Jit64AsmRoutineManager(Jit64& jit) : CommonAsmRoutines(jit), m_jit{jit}
{
}

View File

@@ -35,7 +35,7 @@ public:
// want to ensure this number is big enough.
static constexpr size_t CODE_SIZE = 16384;
explicit Jit64AsmRoutineManager(Jitx86Base& jit);
explicit Jit64AsmRoutineManager(Jit64& jit);
void Init(u8* stack_top);

View File

@@ -8,6 +8,7 @@
#include "Common/x64Emitter.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64/RegCache/JitRegCache.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
using namespace Gen;

View File

@@ -4,8 +4,8 @@
#include "Core/PowerPC/Jit64/RegCache/FPURegCache.h"
#include "Common/x64Reg.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
using namespace Gen;

View File

@@ -4,8 +4,8 @@
#include "Core/PowerPC/Jit64/RegCache/GPRRegCache.h"
#include "Common/x64Reg.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
using namespace Gen;

View File

@@ -15,7 +15,8 @@
#include "Core/HW/MMIO.h"
#include "Core/HW/Memmap.h"
#include "Core/PowerPC/Gekko.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
#include "Core/PowerPC/MMU.h"
#include "Core/PowerPC/PowerPC.h"
@@ -182,7 +183,7 @@ bool EmuCodeBlock::UnsafeLoadToReg(X64Reg reg_value, OpArg opAddress, int access
{
// This method can potentially clobber the address if it shares a register
// with the load target. In this case we can just subtract offset from the
// register (see Jit64Base for this implementation).
// register (see Jit64 for this implementation).
offsetAddedToAddress = (reg_value == opAddress.GetSimpleReg());
LEA(32, reg_value, MDisp(opAddress.GetSimpleReg(), offset));

View File

@@ -19,13 +19,13 @@ namespace MMIO
class Mapping;
}
class Jitx86Base;
class Jit64;
// Like XCodeBlock but has some utilities for memory access.
class EmuCodeBlock : public Gen::X64CodeBlock
{
public:
explicit EmuCodeBlock(Jitx86Base& jit) : m_jit{jit} {}
explicit EmuCodeBlock(Jit64& jit) : m_jit{jit} {}
void MemoryExceptionCheck();
// Simple functions to switch between near and far code emitting
@@ -128,7 +128,7 @@ public:
void Clear();
protected:
Jitx86Base& m_jit;
Jit64& m_jit;
ConstantPool m_const_pool;
FarCodeCache m_far_code;
u8* m_near_code; // Backed up when we switch to far code.

View File

@@ -13,7 +13,8 @@
#include "Common/x64ABI.h"
#include "Common/x64Emitter.h"
#include "Core/PowerPC/Gekko.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
#include "Core/PowerPC/PowerPC.h"

View File

@@ -13,7 +13,7 @@ enum EQuantizeType : u32;
class QuantizedMemoryRoutines : public EmuCodeBlock
{
public:
explicit QuantizedMemoryRoutines(Jitx86Base& jit) : EmuCodeBlock(jit) {}
explicit QuantizedMemoryRoutines(Jit64& jit) : EmuCodeBlock(jit) {}
void GenQuantizedLoad(bool single, EQuantizeType type, int quantize);
void GenQuantizedStore(bool single, EQuantizeType type, int quantize);
@@ -25,7 +25,7 @@
class CommonAsmRoutines : public CommonAsmRoutinesBase, public QuantizedMemoryRoutines
{
public:
explicit CommonAsmRoutines(Jitx86Base& jit) : QuantizedMemoryRoutines(jit) {}
explicit CommonAsmRoutines(Jit64& jit) : QuantizedMemoryRoutines(jit) {}
void GenFrsqrte();
void GenFres();
void GenMfcr();

View File

@@ -1,165 +0,0 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include <disasm.h>
#include <sstream>
#include <string>
#include "Common/Assert.h"
#include "Common/CommonTypes.h"
#include "Common/GekkoDisassembler.h"
#include "Common/Logging/Log.h"
#include "Common/MsgHandler.h"
#include "Common/StringUtil.h"
#include "Common/Swap.h"
#include "Common/x64Reg.h"
#include "Core/HW/Memmap.h"
#include "Core/MachineContext.h"
#include "Core/PowerPC/PPCAnalyst.h"
// This generates some fairly heavy trampolines, but it doesn't really hurt.
// Only instructions that access I/O will get these, and there won't be that
// many of them in a typical program/game.
bool Jitx86Base::HandleFault(uintptr_t access_address, SContext* ctx)
{
// TODO: do we properly handle off-the-end?
const auto base_ptr = reinterpret_cast<uintptr_t>(Memory::physical_base);
if (access_address >= base_ptr && access_address < base_ptr + 0x100010000)
return BackPatch(static_cast<u32>(access_address - base_ptr), ctx);
const auto logical_base_ptr = reinterpret_cast<uintptr_t>(Memory::logical_base);
if (access_address >= logical_base_ptr && access_address < logical_base_ptr + 0x100010000)
return BackPatch(static_cast<u32>(access_address - logical_base_ptr), ctx);
return false;
}
bool Jitx86Base::BackPatch(u32 emAddress, SContext* ctx)
{
u8* codePtr = reinterpret_cast<u8*>(ctx->CTX_PC);
if (!IsInSpace(codePtr))
return false; // this will become a regular crash real soon after this
auto it = m_back_patch_info.find(codePtr);
if (it == m_back_patch_info.end())
{
PanicAlert("BackPatch: no register use entry for address %p", codePtr);
return false;
}
TrampolineInfo& info = it->second;
u8* exceptionHandler = nullptr;
if (jo.memcheck)
{
auto it2 = m_exception_handler_at_loc.find(codePtr);
if (it2 != m_exception_handler_at_loc.end())
exceptionHandler = it2->second;
}
// In the trampoline code, we jump back into the block at the beginning
// of the next instruction. The next instruction comes immediately
// after the backpatched operation, or BACKPATCH_SIZE bytes after the start
// of the backpatched operation, whichever comes last. (The JIT inserts NOPs
// into the original code if necessary to ensure there is enough space
// to insert the backpatch jump.)
js.generatingTrampoline = true;
js.trampolineExceptionHandler = exceptionHandler;
js.compilerPC = info.pc;
// Generate the trampoline.
const u8* trampoline = trampolines.GenerateTrampoline(info);
js.generatingTrampoline = false;
js.trampolineExceptionHandler = nullptr;
u8* start = info.start;
// Patch the original memory operation.
XEmitter emitter(start);
emitter.JMP(trampoline, true);
// NOPs become dead code
const u8* end = info.start + info.len;
for (const u8* i = emitter.GetCodePtr(); i < end; ++i)
emitter.INT3();
// Rewind time to just before the start of the write block. If we swapped memory
// before faulting (eg: the store+swap was not an atomic op like MOVBE), let's
// swap it back so that the swap can happen again (this double swap isn't ideal but
// only happens the first time we fault).
if (info.nonAtomicSwapStoreSrc != Gen::INVALID_REG)
{
u64* ptr = ContextRN(ctx, info.nonAtomicSwapStoreSrc);
switch (info.accessSize << 3)
{
case 8:
// No need to swap a byte
break;
case 16:
*ptr = Common::swap16(static_cast<u16>(*ptr));
break;
case 32:
*ptr = Common::swap32(static_cast<u32>(*ptr));
break;
case 64:
*ptr = Common::swap64(static_cast<u64>(*ptr));
break;
default:
DEBUG_ASSERT(0);
break;
}
}
// This is special code to undo the LEA in SafeLoadToReg if it clobbered the address
// register in the case where reg_value shared the same location as opAddress.
if (info.offsetAddedToAddress)
{
u64* ptr = ContextRN(ctx, info.op_arg.GetSimpleReg());
*ptr -= static_cast<u32>(info.offset);
}
ctx->CTX_PC = reinterpret_cast<u64>(trampoline);
return true;
}
void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
const JitBlock* b)
{
for (size_t i = 0; i < size; i++)
{
const PPCAnalyst::CodeOp& op = code_buffer[i];
const std::string disasm = Common::GekkoDisassembler::Disassemble(op.inst.hex, op.address);
DEBUG_LOG(DYNA_REC, "IR_X86 PPC: %08x %s\n", op.address, disasm.c_str());
}
disassembler x64disasm;
x64disasm.set_syntax_intel();
u64 disasmPtr = reinterpret_cast<u64>(normalEntry);
const u8* end = normalEntry + b->codeSize;
while (reinterpret_cast<u8*>(disasmPtr) < end)
{
char sptr[1000] = "";
disasmPtr += x64disasm.disasm64(disasmPtr, disasmPtr, reinterpret_cast<u8*>(disasmPtr), sptr);
DEBUG_LOG(DYNA_REC, "IR_X86 x86: %s", sptr);
}
if (b->codeSize <= 250)
{
std::stringstream ss;
ss << std::hex;
for (u8 i = 0; i <= b->codeSize; i++)
{
ss.width(2);
ss.fill('0');
ss << static_cast<u32>(*(normalEntry + i));
}
DEBUG_LOG(DYNA_REC, "IR_X86 bin: %s\n\n\n", ss.str().c_str());
}
}

View File

@@ -1,47 +0,0 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include <cstdint>
#include "Common/CommonTypes.h"
#include "Common/x64Reg.h"
#include "Core/PowerPC/Jit64Common/BlockCache.h"
#include "Core/PowerPC/Jit64Common/Jit64AsmCommon.h"
#include "Core/PowerPC/Jit64Common/TrampolineCache.h"
#include "Core/PowerPC/JitCommon/JitBase.h"
#include "Core/PowerPC/PPCAnalyst.h"
// RSCRATCH and RSCRATCH2 are always scratch registers and can be used without
// limitation.
constexpr Gen::X64Reg RSCRATCH = Gen::RAX;
constexpr Gen::X64Reg RSCRATCH2 = Gen::RDX;
// RSCRATCH_EXTRA may be in the allocation order, so it has to be flushed
// before use.
constexpr Gen::X64Reg RSCRATCH_EXTRA = Gen::RCX;
// RMEM points to the start of emulated memory.
constexpr Gen::X64Reg RMEM = Gen::RBX;
// RPPCSTATE points to ppcState + 0x80. It's offset because we want to be able
// to address as much as possible in a one-byte offset form.
constexpr Gen::X64Reg RPPCSTATE = Gen::RBP;
constexpr size_t CODE_SIZE = 1024 * 1024 * 32;
class Jitx86Base : public JitBase, public QuantizedMemoryRoutines
{
public:
Jitx86Base() : QuantizedMemoryRoutines(*this) {}
JitBlockCache* GetBlockCache() override { return &blocks; }
bool HandleFault(uintptr_t access_address, SContext* ctx) override;
protected:
bool BackPatch(u32 emAddress, SContext* ctx);
JitBlockCache blocks{*this};
TrampolineCache trampolines{*this};
};
void LogGeneratedX86(size_t size, const PPCAnalyst::CodeBuffer& code_buffer, const u8* normalEntry,
const JitBlock* b);

View File

@@ -0,0 +1,24 @@
// Copyright 2016 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.
#pragma once
#include <cstddef>
#include "Common/x64Reg.h"
// RSCRATCH and RSCRATCH2 are always scratch registers and can be used without
// limitation.
constexpr Gen::X64Reg RSCRATCH = Gen::RAX;
constexpr Gen::X64Reg RSCRATCH2 = Gen::RDX;
// RSCRATCH_EXTRA may be in the allocation order, so it has to be flushed
// before use.
constexpr Gen::X64Reg RSCRATCH_EXTRA = Gen::RCX;
// RMEM points to the start of emulated memory.
constexpr Gen::X64Reg RMEM = Gen::RBX;
// RPPCSTATE points to ppcState + 0x80. It's offset because we want to be able
// to address as much as possible in a one-byte offset form.
constexpr Gen::X64Reg RPPCSTATE = Gen::RBP;
constexpr size_t CODE_SIZE = 1024 * 1024 * 32;
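A worked illustration of the RPPCSTATE bias mentioned above (hypothetical struct, not Dolphin's real PowerPCState): x86-64 one-byte displacements cover [-0x80, 0x7f], so pointing the register at ppcState + 0x80 makes the first 0x100 bytes of the state reachable in the short disp8 encoding.

#include <cstddef>
#include <cstdint>

struct State  // hypothetical stand-in for ppcState
{
  std::uint32_t gpr[32];  // bytes 0x00..0x7f
  std::uint32_t pc;       // byte 0x80
};

// Displacement encoded relative to the biased base register.
constexpr int Disp(std::size_t field_offset)
{
  return static_cast<int>(field_offset) - 0x80;
}

// Byte 0x00 encodes as disp8 -0x80; byte 0x80 encodes as disp8 0.
static_assert(Disp(offsetof(State, gpr)) == -0x80, "first byte of state");
static_assert(Disp(offsetof(State, pc)) == 0x00, "byte 0x80 of state");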

View File

@@ -5,6 +5,7 @@
#pragma once
#include "Common/CommonTypes.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/PowerPC.h"
// We offset by 0x80 because the range of one byte memory offsets is

View File

@@ -11,7 +11,8 @@
#include "Common/JitRegister.h"
#include "Common/MsgHandler.h"
#include "Common/x64Emitter.h"
#include "Core/PowerPC/Jit64Common/Jit64Base.h"
#include "Core/PowerPC/Jit64/Jit.h"
#include "Core/PowerPC/Jit64Common/Jit64Constants.h"
#include "Core/PowerPC/Jit64Common/Jit64PowerPCState.h"
#include "Core/PowerPC/Jit64Common/TrampolineInfo.h"
#include "Core/PowerPC/PowerPC.h"

View File

@@ -20,11 +20,12 @@ constexpr int BACKPATCH_SIZE = 5;
class TrampolineCache : public EmuCodeBlock
{
const u8* GenerateReadTrampoline(const TrampolineInfo& info);
const u8* GenerateWriteTrampoline(const TrampolineInfo& info);
public:
explicit TrampolineCache(Jitx86Base& jit) : EmuCodeBlock(jit) {}
explicit TrampolineCache(Jit64& jit) : EmuCodeBlock(jit) {}
const u8* GenerateTrampoline(const TrampolineInfo& info);
void ClearCodeSpace();
private:
const u8* GenerateReadTrampoline(const TrampolineInfo& info);
const u8* GenerateWriteTrampoline(const TrampolineInfo& info);
};