x64Emitter: Check end of allocated space when emitting code.

Author: Admiral H. Curtiss
Date:   2020-05-02 00:42:29 +02:00
parent c36ae84b96
commit 5b52b3e9cb
10 changed files with 125 additions and 17 deletions

@@ -310,7 +310,7 @@ void ARM64XEmitter::SetCodePtrUnsafe(u8* ptr)
   m_code = ptr;
 }
 
-void ARM64XEmitter::SetCodePtr(u8* ptr)
+void ARM64XEmitter::SetCodePtr(u8* ptr, u8* end, bool write_failed)
 {
   SetCodePtrUnsafe(ptr);
   m_lastCacheFlushEnd = ptr;

@@ -540,7 +540,11 @@ public:
   }
 
   virtual ~ARM64XEmitter() {}
-  void SetCodePtr(u8* ptr);
+
+  // 'end' and 'write_failed' are unused in the ARM code emitter at the moment.
+  // They're just here for interface compatibility with the x64 code emitter.
+  void SetCodePtr(u8* ptr, u8* end, bool write_failed = false);
+
   void SetCodePtrUnsafe(u8* ptr);
   void ReserveCodeSpace(u32 bytes);
   u8* AlignCode16();

@@ -55,7 +55,7 @@ public:
     region_size = size;
     total_region_size = size;
     region = static_cast<u8*>(Common::AllocateExecutableMemory(total_region_size));
-    T::SetCodePtr(region);
+    T::SetCodePtr(region, region + size);
   }
 
   // Always clear code space with breakpoints, so that if someone accidentally executes
@@ -86,7 +86,7 @@ public:
   // Cannot currently be undone. Will write protect the entire code region.
   // Start over if you need to change the code (call FreeCodeSpace(), AllocCodeSpace()).
   void WriteProtect() { Common::WriteProtectMemory(region, region_size, true); }
-  void ResetCodePtr() { T::SetCodePtr(region); }
+  void ResetCodePtr() { T::SetCodePtr(region, region + region_size); }
   size_t GetSpaceLeft() const
   {
     ASSERT(static_cast<size_t>(T::GetCodePtr() - region) < region_size);
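
Taken together, the two CodeBlock changes above mean that an emitter handed out by AllocCodeSpace() or ResetCodePtr() always knows where its region ends. A minimal sketch of the resulting pattern, assuming Gen::X64CodeBlock from Common/x64Emitter.h; EmitSomething is a hypothetical placeholder:

#include "Common/x64Emitter.h"

void EmitSomething(Gen::XEmitter& emitter);  // hypothetical block generator

// Sketch: the code space now carries its bound through AllocCodeSpace() and
// ResetCodePtr(), so running out of room is detected instead of overrunning.
void FillCodeSpace(Gen::X64CodeBlock& block)
{
  block.AllocCodeSpace(4096);  // internally: SetCodePtr(region, region + 4096)
  EmitSomething(block);
  if (block.HasWriteFailed())  // emission would have passed region + 4096
    block.ResetCodePtr();      // re-arms the cursor, the end pointer, and the flag
}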

@@ -101,9 +101,11 @@ enum class FloatOp
   Invalid = -1,
 };
 
-void XEmitter::SetCodePtr(u8* ptr)
+void XEmitter::SetCodePtr(u8* ptr, u8* end, bool write_failed)
 {
   code = ptr;
+  m_code_end = end;
+  m_write_failed = write_failed;
 }
 
 const u8* XEmitter::GetCodePtr() const
@@ -116,31 +118,76 @@ u8* XEmitter::GetWritableCodePtr()
   return code;
 }
 
+const u8* XEmitter::GetCodeEnd() const
+{
+  return m_code_end;
+}
+
+u8* XEmitter::GetWritableCodeEnd()
+{
+  return m_code_end;
+}
+
 void XEmitter::Write8(u8 value)
 {
+  if (code >= m_code_end)
+  {
+    code = m_code_end;
+    m_write_failed = true;
+    return;
+  }
+
   *code++ = value;
 }
 
 void XEmitter::Write16(u16 value)
 {
+  if (code + sizeof(u16) > m_code_end)
+  {
+    code = m_code_end;
+    m_write_failed = true;
+    return;
+  }
+
   std::memcpy(code, &value, sizeof(u16));
   code += sizeof(u16);
 }
 
 void XEmitter::Write32(u32 value)
 {
+  if (code + sizeof(u32) > m_code_end)
+  {
+    code = m_code_end;
+    m_write_failed = true;
+    return;
+  }
+
   std::memcpy(code, &value, sizeof(u32));
   code += sizeof(u32);
 }
 
 void XEmitter::Write64(u64 value)
 {
+  if (code + sizeof(u64) > m_code_end)
+  {
+    code = m_code_end;
+    m_write_failed = true;
+    return;
+  }
+
   std::memcpy(code, &value, sizeof(u64));
   code += sizeof(u64);
 }
 
 void XEmitter::ReserveCodeSpace(int bytes)
 {
+  if (code + bytes > m_code_end)
+  {
+    code = m_code_end;
+    m_write_failed = true;
+    return;
+  }
+
   for (int i = 0; i < bytes; i++)
     *code++ = 0xCC;
 }
@@ -454,6 +501,13 @@ FixupBranch XEmitter::CALL()
   branch.ptr = code + 5;
   Write8(0xE8);
   Write32(0);
+
+  // If we couldn't write the full call instruction, indicate that in the returned FixupBranch by
+  // setting the branch's address to null. This will prevent a later SetJumpTarget() from writing to
+  // invalid memory.
+  if (HasWriteFailed())
+    branch.ptr = nullptr;
+
   return branch;
 }
@@ -473,6 +527,13 @@ FixupBranch XEmitter::J(bool force5bytes)
     Write8(0xE9);
     Write32(0);
   }
+
+  // If we couldn't write the full jump instruction, indicate that in the returned FixupBranch by
+  // setting the branch's address to null. This will prevent a later SetJumpTarget() from writing to
+  // invalid memory.
+  if (HasWriteFailed())
+    branch.ptr = nullptr;
+
   return branch;
 }
@@ -493,6 +554,13 @@ FixupBranch XEmitter::J_CC(CCFlags conditionCode, bool force5bytes)
     Write8(0x80 + conditionCode);
     Write32(0);
   }
+
+  // If we couldn't write the full jump instruction, indicate that in the returned FixupBranch by
+  // setting the branch's address to null. This will prevent a later SetJumpTarget() from writing to
+  // invalid memory.
+  if (HasWriteFailed())
+    branch.ptr = nullptr;
+
   return branch;
 }
@@ -518,6 +586,9 @@ void XEmitter::J_CC(CCFlags conditionCode, const u8* addr)
 
 void XEmitter::SetJumpTarget(const FixupBranch& branch)
 {
+  if (!branch.ptr)
+    return;
+
   if (branch.type == FixupBranch::Type::Branch8Bit)
   {
     s64 distance = (s64)(code - branch.ptr);
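
Together, the nulled branch.ptr and the early return in SetJumpTarget() keep the usual emit-then-fixup idiom safe when the buffer runs out mid-branch. A sketch using Dolphin's Gen API (the clamp-to-zero body is an arbitrary example, not code from this commit):

#include "Common/x64Emitter.h"

// If J_CC() runs out of space it returns a FixupBranch with ptr == nullptr,
// and SetJumpTarget() below becomes a no-op instead of a stray write.
void EmitClampToZero(Gen::XEmitter& emitter, Gen::X64Reg reg)
{
  emitter.TEST(32, Gen::R(reg), Gen::R(reg));
  Gen::FixupBranch positive = emitter.J_CC(Gen::CC_NS);  // skip if non-negative
  emitter.XOR(32, Gen::R(reg), Gen::R(reg));             // reg = 0
  emitter.SetJumpTarget(positive);
}

The caller still has to consult HasWriteFailed() before running the block; the null branch only protects the fixup itself.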

@@ -329,9 +329,19 @@ class XEmitter
 {
   friend struct OpArg;  // for Write8 etc
 private:
+  // Pointer to memory where code will be emitted to.
   u8* code = nullptr;
+
+  // Pointer past the end of the memory region we're allowed to emit to.
+  // Writes that would reach this memory are refused and will set the m_write_failed flag instead.
+  u8* m_code_end = nullptr;
+
   bool flags_locked = false;
 
+  // Set to true when a write request happens that would write past m_code_end.
+  // Must be cleared with SetCodePtr() afterwards.
+  bool m_write_failed = false;
+
   void CheckFlags();
 
   void Rex(int w, int r, int x, int b);
@@ -378,9 +388,9 @@ protected:
 
 public:
   XEmitter() = default;
-  explicit XEmitter(u8* code_ptr) : code{code_ptr} {}
+  explicit XEmitter(u8* code_ptr, u8* code_end) : code(code_ptr), m_code_end(code_end) {}
   virtual ~XEmitter() = default;
-  void SetCodePtr(u8* ptr);
+  void SetCodePtr(u8* ptr, u8* end, bool write_failed = false);
   void ReserveCodeSpace(int bytes);
   u8* AlignCodeTo(size_t alignment);
   u8* AlignCode4();
@@ -388,9 +398,16 @@ public:
   u8* AlignCodePage();
   const u8* GetCodePtr() const;
   u8* GetWritableCodePtr();
+  const u8* GetCodeEnd() const;
+  u8* GetWritableCodeEnd();
+
   void LockFlags() { flags_locked = true; }
   void UnlockFlags() { flags_locked = false; }
 
+  // Should be checked after a block of code has been generated to see if the code has been
+  // successfully written to memory. Do not call the generated code when this returns true!
+  bool HasWriteFailed() const { return m_write_failed; }
+
   // Looking for one of these? It's BANNED!! Some instructions are slow on modern CPU
   // INC, DEC, LOOP, LOOPNE, LOOPE, ENTER, LEAVE, XCHG, XLAT, REP MOVSB/MOVSD, REP SCASD + other
   // string instr.,
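
The comment on HasWriteFailed() implies a caller-side discipline: generate, check, and never execute a truncated block. Sketched below under that assumption; CompileBlockBody and OnOutOfCodeSpace are hypothetical stand-ins for the JIT's real logic:

#include "Common/x64Emitter.h"

void CompileBlockBody(Gen::XEmitter& emitter);  // hypothetical
void OnOutOfCodeSpace();                        // hypothetical, e.g. clear the cache

const u8* EmitBlockChecked(Gen::XEmitter& emitter)
{
  const u8* start = emitter.GetCodePtr();
  CompileBlockBody(emitter);

  // A truncated block must never be executed; its tail and any pending
  // fixups were silently dropped when the first write was refused.
  if (emitter.HasWriteFailed())
  {
    OnOutOfCodeSpace();
    return nullptr;
  }
  return start;
}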

@@ -281,7 +281,7 @@ bool Jit64::BackPatch(u32 emAddress, SContext* ctx)
   u8* start = info.start;
 
   // Patch the original memory operation.
-  XEmitter emitter(start);
+  XEmitter emitter(start, start + info.len);
   emitter.JMP(trampoline, true);
   // NOPs become dead code
   const u8* end = info.start + info.len;
@@ -351,6 +351,7 @@ void Jit64::Init()
   AddChildCodeSpace(&trampolines, trampolines_size);
   AddChildCodeSpace(&m_far_code, farcode_size);
   m_const_pool.Init(AllocChildCodeSpace(constpool_size), constpool_size);
+  ResetCodePtr();
 
   // BLR optimization has the same consequences as block linking, as well as
   // depending on the fault handler to be safe in the event of excessive BL.

@@ -21,9 +21,9 @@ void JitBlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const JitBlock* dest)
   u8* location = source.exitPtrs;
   const u8* address = dest ? dest->checkedEntry : dispatcher;
-  Gen::XEmitter emit(location);
 
   if (source.call)
   {
+    Gen::XEmitter emit(location, location + 5);
     emit.CALL(address);
   }
   else
@@ -31,19 +31,25 @@ void JitBlockCache::WriteLinkBlock(const JitBlock::LinkData& source, const JitBlock* dest)
     // If we're going to link with the next block, there is no need
     // to emit JMP. So just NOP out the gap to the next block.
     // Support up to 3 additional bytes because of alignment.
-    s64 offset = address - emit.GetCodePtr();
+    s64 offset = address - location;
     if (offset > 0 && offset <= 5 + 3)
+    {
+      Gen::XEmitter emit(location, location + offset);
       emit.NOP(offset);
+    }
     else
+    {
+      Gen::XEmitter emit(location, location + 5);
       emit.JMP(address, true);
+    }
   }
 }
 
 void JitBlockCache::WriteDestroyBlock(const JitBlock& block)
 {
   // Only clear the entry points as we might still be within this block.
-  Gen::XEmitter emit(block.checkedEntry);
+  Gen::XEmitter emit(block.checkedEntry, block.checkedEntry + 1);
   emit.INT3();
-  Gen::XEmitter emit2(block.normalEntry);
+  Gen::XEmitter emit2(block.normalEntry, block.normalEntry + 1);
   emit2.INT3();
 }
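
Each patch-site emitter above is constructed with exactly the bytes it may touch: five for a CALL/JMP rel32, one for an INT3. A sketch of the payoff, with hypothetical names; if a future change emitted even one byte too many at such a site, the write would be refused and flagged instead of silently corrupting the neighboring block:

#include "Common/Assert.h"
#include "Common/x64Emitter.h"

// Sketch (names hypothetical): re-linking a block exit through a bounded
// emitter that may only touch the five bytes of the original JMP slot.
void RelinkExit(u8* location, const u8* target)
{
  Gen::XEmitter emit(location, location + 5);
  emit.JMP(target, true);  // force the 5-byte E9 rel32 encoding
  ASSERT(!emit.HasWriteFailed());
}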

@@ -80,13 +80,16 @@ void EmuCodeBlock::MemoryExceptionCheck()
 void EmuCodeBlock::SwitchToFarCode()
 {
   m_near_code = GetWritableCodePtr();
-  SetCodePtr(m_far_code.GetWritableCodePtr());
+  m_near_code_end = GetWritableCodeEnd();
+  m_near_code_write_failed = HasWriteFailed();
+  SetCodePtr(m_far_code.GetWritableCodePtr(), m_far_code.GetWritableCodeEnd(),
+             m_far_code.HasWriteFailed());
 }
 
 void EmuCodeBlock::SwitchToNearCode()
 {
-  m_far_code.SetCodePtr(GetWritableCodePtr());
-  SetCodePtr(m_near_code);
+  m_far_code.SetCodePtr(GetWritableCodePtr(), GetWritableCodeEnd(), HasWriteFailed());
+  SetCodePtr(m_near_code, m_near_code_end, m_near_code_write_failed);
 }
 
 FixupBranch EmuCodeBlock::CheckIfSafeAddress(const OpArg& reg_value, X64Reg reg_addr,
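
SwitchToFarCode() and SwitchToNearCode() now shuttle three pieces of state per region: the cursor, the end pointer, and the failure flag. Roughly how a caller exercises the round trip (a sketch only; the emitted bodies are elided, EmitWithSlowPath is a hypothetical helper, and public access to the switch methods is assumed):

// A write failure that occurs while in the far region is handed back to
// m_far_code's own flag on return, and the near region's saved flag
// (m_near_code_write_failed) is reinstated.
void EmitWithSlowPath(EmuCodeBlock& block)
{
  // ... emit the fast path into near code ...
  block.SwitchToFarCode();
  // ... emit the unlikely path into far code ...
  block.SwitchToNearCode();
  // From here, HasWriteFailed() reflects the near region again.
}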

@@ -131,7 +131,11 @@ protected:
   Jit64& m_jit;
   ConstantPool m_const_pool;
   FarCodeCache m_far_code;
-  u8* m_near_code;  // Backed up when we switch to far code.
+
+  // Backed up when we switch to far code.
+  u8* m_near_code;
+  u8* m_near_code_end;
+  bool m_near_code_write_failed;
 
   std::unordered_map<u8*, TrampolineInfo> m_back_patch_info;
   std::unordered_map<u8*, u8*> m_exception_handler_at_loc;

@@ -93,6 +93,7 @@ protected:
     emitter.reset(new X64CodeBlock());
     emitter->AllocCodeSpace(4096);
     code_buffer = emitter->GetWritableCodePtr();
+    code_buffer_end = emitter->GetWritableCodeEnd();
 
     disasm.reset(new disassembler);
     disasm->set_syntax_intel();
@@ -158,12 +159,13 @@ protected:
     EXPECT_EQ(expected_norm, disasmed_norm);
 
     // Reset code buffer afterwards.
-    emitter->SetCodePtr(code_buffer);
+    emitter->SetCodePtr(code_buffer, code_buffer_end);
   }
 
   std::unique_ptr<X64CodeBlock> emitter;
   std::unique_ptr<disassembler> disasm;
   u8* code_buffer;
+  u8* code_buffer_end;
 };
 
 #define TEST_INSTR_NO_OPERANDS(Name, ExpectedDisasm) \
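
A natural companion test for the new bounds checking, sketched against the same fixture members (hypothetical, not part of this commit; assumes the fixture class is named x64EmitterTest):

// Running off the end of a deliberately small window must set
// HasWriteFailed() instead of writing out of bounds.
TEST_F(x64EmitterTest, WritePastEndSetsFailureFlag)
{
  emitter->SetCodePtr(code_buffer, code_buffer + 4);
  emitter->NOP(4);  // fills the 4-byte window exactly
  EXPECT_FALSE(emitter->HasWriteFailed());

  emitter->INT3();  // one byte too many: refused and flagged
  EXPECT_TRUE(emitter->HasWriteFailed());

  // Restore the full buffer (and clear the flag) for subsequent tests.
  emitter->SetCodePtr(code_buffer, code_buffer_end);
}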