Some renaming, remove a gross hack
parent 8a535ed5d4, commit 9d36a08ec4
@@ -89,7 +89,7 @@ bool Arm64JitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bo

// Don't worry, the codespace isn't large enough to overflow offsets.
const u8 *blockStart = GetCodePointer();
-block->SetTargetOffset((int)GetOffset(blockStart));
+block->SetNativeOffset((int)GetOffset(blockStart));
compilingBlockNum_ = block_num;
lastConstPC_ = 0;
@@ -122,7 +122,7 @@ bool Arm64JitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bo
B(hooks_.crashHandler);
}

-int len = (int)GetOffset(GetCodePointer()) - block->GetTargetOffset();
+int len = (int)GetOffset(GetCodePointer()) - block->GetNativeOffset();
if (len < MIN_BLOCK_NORMAL_LEN) {
// We need at least 10 bytes to invalidate blocks with.
ReserveCodeSpace(MIN_BLOCK_NORMAL_LEN - len);
@@ -324,7 +324,7 @@ void Arm64JitBackend::ClearAllBlocks() {

void Arm64JitBackend::InvalidateBlock(IRBlockCache *irBlockCache, int block_num) {
IRBlock *block = irBlockCache->GetBlock(block_num);
-int offset = block->GetTargetOffset();
+int offset = block->GetNativeOffset();
u8 *writable = GetWritablePtrFromCodePtr(GetBasePtr()) + offset;

// Overwrite the block with a jump to compile it again.
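The invalidation path above is also why CompileBlock pads short blocks to MIN_BLOCK_NORMAL_LEN: the start of a dead block is overwritten in place with a branch that sends execution back to the compiler, so every block must have room for that patch. A rough sketch of the idea, using names from the diff plus a hypothetical EmitBranch/recompileStub_ stand-in for the per-arch emitter call:

```cpp
// Illustrative sketch only. GetBlock(), GetNativeOffset(),
// GetWritablePtrFromCodePtr() and GetBasePtr() appear in the diff;
// EmitBranch() and recompileStub_ are hypothetical stand-ins.
void InvalidateBlockSketch(IRBlockCache *irBlockCache, int block_num) {
    IRBlock *block = irBlockCache->GetBlock(block_num);
    int offset = block->GetNativeOffset();
    u8 *writable = GetWritablePtrFromCodePtr(GetBasePtr()) + offset;
    // Overwrite the start of the block with a jump back to the compile stub.
    // This is why blocks are padded to MIN_BLOCK_NORMAL_LEN: the patch must
    // always fit, even for trivially short blocks.
    EmitBranch(writable, recompileStub_);
}
```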
@@ -44,10 +44,12 @@

namespace MIPSComp {

-IRJit::IRJit(MIPSState *mipsState, bool actualJit) : frontend_(mipsState->HasDefaultPrefix()), mips_(mipsState) {
+IRJit::IRJit(MIPSState *mipsState, bool actualJit) : frontend_(mipsState->HasDefaultPrefix()), mips_(mipsState), blocks_(actualJit) {
// u32 size = 128 * 1024;
InitIR();

+compileToNative_ = actualJit;
+
// If this IRJit instance will be used to drive a "JIT using IR", don't optimize for interpretation.
jo.optimizeForInterpreter = !actualJit;
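The actualJit flag now flows into both the block cache and compileToNative_. A brief illustrative sketch of what the two modes mean; the construction shown here is an example, not the emulator's actual call sites:

```cpp
// Illustrative only: the same IRJit serves two roles, selected by actualJit.
// false: pure IR interpreter, so the IR is optimized for interpretation.
// true:  this IRJit feeds a native (IR -> machine code) backend, so the IR
//        is left in a form friendlier to code generation.
IRJit irInterpreter(mipsState, /*actualJit=*/false);
IRJit nativeDriver(mipsState, /*actualJit=*/true);
```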
@@ -91,7 +93,8 @@ void IRJit::InvalidateCacheAt(u32 em_address, int length) {
std::vector<int> numbers = blocks_.FindInvalidatedBlockNumbers(em_address, length);
for (int block_num : numbers) {
auto block = blocks_.GetBlock(block_num);
-int cookie = block->GetTargetOffset() < 0 ? block->GetInstructionOffset() : block->GetTargetOffset();
+// If we're a native JIT (IR->JIT, not just IR interpreter), we write native offsets into the blocks.
+int cookie = compileToNative_ ? block->GetNativeOffset() : block->GetIRArenaOffset();
block->Destroy(cookie);
}
}
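With the flag in place, the "cookie" handed to Destroy()/Finalize() is either the block's native code offset or its IR arena offset. A hypothetical helper summarizing the pattern that now repeats at each call site (not part of the actual change):

```cpp
// Sketch of the selection logic used at every Destroy()/Finalize() call site.
static int BlockCookie(bool compileToNative, const IRBlock &b) {
    // Native JIT: the cookie is the offset of the emitted machine code.
    // IR interpreter: the cookie is the block's offset into the IR arena.
    return compileToNative ? b.GetNativeOffset() : (int)b.GetIRArenaOffset();
}
```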
@@ -105,7 +108,7 @@ void IRJit::Compile(u32 em_address) {
if (block_num != -1) {
IRBlock *block = blocks_.GetBlock(block_num);
// Okay, let's link and finalize the block now.
-int cookie = block->GetTargetOffset() < 0 ? block->GetInstructionOffset() : block->GetTargetOffset();
+int cookie = compileToNative_ ? block->GetNativeOffset() : block->GetIRArenaOffset();
block->Finalize(cookie);
if (block->IsValid()) {
// Success, we're done.
@@ -272,7 +275,7 @@ void IRJit::RunLoopUntil(u64 globalticks) {
#endif
// Note: this will "jump to zero" on a badly constructed block missing exits.
if (!Memory::IsValid4AlignedAddress(mips->pc)) {
-int blockNum = blocks_.GetBlockNumFromOffset(offset);
+int blockNum = blocks_.GetBlockNumFromIRArenaOffset(offset);
IRBlock *block = blocks_.GetBlockUnchecked(blockNum);
Core_ExecException(mips->pc, block->GetOriginalStart(), ExecExceptionType::JUMP);
break;
@@ -303,7 +306,7 @@ void IRJit::UnlinkBlock(u8 *checkedEntry, u32 originalAddress) {

void IRBlockCache::Clear() {
for (int i = 0; i < (int)blocks_.size(); ++i) {
-int cookie = blocks_[i].GetTargetOffset() < 0 ? blocks_[i].GetInstructionOffset() : blocks_[i].GetTargetOffset();
+int cookie = compileToNative_ ? blocks_[i].GetNativeOffset() : blocks_[i].GetIRArenaOffset();
blocks_[i].Destroy(cookie);
}
blocks_.clear();
@@ -312,7 +315,7 @@ void IRBlockCache::Clear() {
arena_.shrink_to_fit();
}

-IRBlockCache::IRBlockCache() {
+IRBlockCache::IRBlockCache(bool compileToNative) : compileToNative_(compileToNative) {
// For whatever reason, this makes things go slower?? Probably just a CPU cache alignment fluke.
// arena_.reserve(1024 * 1024 * 2);
}
@@ -332,14 +335,14 @@ int IRBlockCache::AllocateBlock(int emAddr, u32 origSize, const std::vector<IRIn
return (int)blocks_.size() - 1;
}

-int IRBlockCache::GetBlockNumFromOffset(int offset) const {
+int IRBlockCache::GetBlockNumFromIRArenaOffset(int offset) const {
// Block offsets are always in rising order (we don't go back and replace them when invalidated). So we can binary search.
int low = 0;
int high = (int)blocks_.size() - 1;
int found = -1;
while (low <= high) {
int mid = low + (high - low) / 2;
-const int blockOffset = blocks_[mid].GetInstructionOffset();
+const int blockOffset = blocks_[mid].GetIRArenaOffset();
if (blockOffset == offset) {
found = mid;
break;
@@ -357,7 +360,7 @@ int IRBlockCache::GetBlockNumFromOffset(int offset) const {
#else
// TODO: Optimize if we need to call this often.
for (int i = 0; i < (int)blocks_.size(); i++) {
-if (blocks_[i].GetInstructionOffset() == offset) {
+if (blocks_[i].GetIRArenaOffset() == offset) {
_dbg_assert_(i == found);
return i;
}
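The comment in the hunk above states the invariant that makes this lookup work: blocks are appended to the IR arena in rising offset order and never relocated, so the offset can be binary searched. A standalone sketch of the same lookup, assuming only that invariant and the accessors visible in this diff:

```cpp
#include <algorithm>
#include <vector>

// Sketch of GetBlockNumFromIRArenaOffset(): blocks is ordered by ascending
// IR-arena offset, so std::lower_bound finds the block starting at `offset`.
int FindBlockByArenaOffset(const std::vector<IRBlock> &blocks, int offset) {
    auto it = std::lower_bound(blocks.begin(), blocks.end(), offset,
        [](const IRBlock &b, int off) { return (int)b.GetIRArenaOffset() < off; });
    if (it != blocks.end() && (int)it->GetIRArenaOffset() == offset)
        return (int)(it - blocks.begin());
    return -1;  // offset doesn't start a block
}
```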
@@ -391,7 +394,7 @@ std::vector<int> IRBlockCache::FindInvalidatedBlockNumbers(u32 address, u32 leng

void IRBlockCache::FinalizeBlock(int i, bool preload) {
if (!preload) {
-int cookie = blocks_[i].GetTargetOffset() < 0 ? blocks_[i].GetInstructionOffset() : blocks_[i].GetTargetOffset();
+int cookie = compileToNative_ ? blocks_[i].GetNativeOffset() : blocks_[i].GetIRArenaOffset();
blocks_[i].Finalize(cookie);
}
@@ -434,13 +437,13 @@ int IRBlockCache::FindByCookie(int cookie) {
return -1;

-// TODO: Maybe a flag to determine target offset mode?
-if (blocks_[0].GetTargetOffset() < 0)
-return GetBlockNumFromOffset(cookie);
+if (!compileToNative_) {
+return GetBlockNumFromIRArenaOffset(cookie);
+}

// TODO: Now that we are using offsets in pure IR mode too, we can probably unify
// the two paradigms. Or actually no, we still need two offsets..
// TODO: This could also use a binary search.
for (int i = 0; i < GetNumBlocks(); ++i) {
-int offset = blocks_[i].GetTargetOffset();
+int offset = blocks_[i].GetNativeOffset();
if (offset == cookie)
return i;
}
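The remaining TODO notes that the native-offset path could also use a binary search. A hedged sketch of what that might look like, assuming native offsets are assigned in ascending block order; this is a hypothetical follow-up, not part of the commit:

```cpp
#include <algorithm>
#include <vector>

// Hypothetical follow-up to the TODO above: if blocks is also ordered by
// ascending native offset, the linear scan in FindByCookie() could use the
// same lower_bound pattern as the IR-arena path.
int FindBlockByNativeOffset(const std::vector<IRBlock> &blocks, int cookie) {
    auto it = std::lower_bound(blocks.begin(), blocks.end(), cookie,
        [](const IRBlock &b, int off) { return b.GetNativeOffset() < off; });
    if (it != blocks.end() && it->GetNativeOffset() == cookie)
        return (int)(it - blocks.begin());
    return -1;
}
```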
@@ -453,7 +456,7 @@ std::vector<u32> IRBlockCache::SaveAndClearEmuHackOps() {

for (int number = 0; number < (int)blocks_.size(); ++number) {
IRBlock &b = blocks_[number];
-int cookie = b.GetTargetOffset() < 0 ? b.GetInstructionOffset() : b.GetTargetOffset();
+int cookie = compileToNative_ ? b.GetNativeOffset() : b.GetIRArenaOffset();
if (b.IsValid() && b.RestoreOriginalFirstOp(cookie)) {
result[number] = number;
} else {
@@ -474,7 +477,7 @@ void IRBlockCache::RestoreSavedEmuHackOps(const std::vector<u32> &saved) {
IRBlock &b = blocks_[number];
// Only if we restored it, write it back.
if (b.IsValid() && saved[number] != 0 && b.HasOriginalFirstOp()) {
-int cookie = b.GetTargetOffset() < 0 ? b.GetInstructionOffset() : b.GetTargetOffset();
+int cookie = compileToNative_ ? b.GetNativeOffset() : b.GetIRArenaOffset();
b.Finalize(cookie);
}
}
@@ -50,31 +50,31 @@ class IRBlock {
public:
IRBlock() {}
IRBlock(u32 emAddr, u32 origSize, int instOffset, u16 numInstructions)
-: origAddr_(emAddr), origSize_(origSize), instOffset_(instOffset), numInstructions_(numInstructions) {}
+: origAddr_(emAddr), origSize_(origSize), arenaOffset_(instOffset), numInstructions_(numInstructions) {}
IRBlock(IRBlock &&b) {
-instOffset_ = b.instOffset_;
+arenaOffset_ = b.arenaOffset_;
hash_ = b.hash_;
origAddr_ = b.origAddr_;
origSize_ = b.origSize_;
origFirstOpcode_ = b.origFirstOpcode_;
-targetOffset_ = b.targetOffset_;
+nativeOffset_ = b.nativeOffset_;
numInstructions_ = b.numInstructions_;
-b.instOffset_ = 0xFFFFFFFF;
+b.arenaOffset_ = 0xFFFFFFFF;
}

~IRBlock() {}

-u32 GetInstructionOffset() const { return instOffset_; }
+u32 GetIRArenaOffset() const { return arenaOffset_; }
int GetNumInstructions() const { return numInstructions_; }
MIPSOpcode GetOriginalFirstOp() const { return origFirstOpcode_; }
bool HasOriginalFirstOp() const;
bool RestoreOriginalFirstOp(int number);
bool IsValid() const { return origAddr_ != 0 && origFirstOpcode_.encoding != 0x68FFFFFF; }
-void SetTargetOffset(int offset) {
-targetOffset_ = offset;
+void SetNativeOffset(int offset) {
+nativeOffset_ = offset;
}
-int GetTargetOffset() const {
-return targetOffset_;
+int GetNativeOffset() const {
+return nativeOffset_;
}
void UpdateHash() {
hash_ = CalculateHash();
@@ -103,19 +103,20 @@ private:
u64 CalculateHash() const;

// Offset into the block cache's Arena
// TODO: These should maybe be stored in a separate array.
-u32 instOffset_ = 0;
+u32 arenaOffset_ = 0;
+// Offset into the native code buffer.
+int nativeOffset_ = -1;
u64 hash_ = 0;
u32 origAddr_ = 0;
u32 origSize_ = 0;
MIPSOpcode origFirstOpcode_ = MIPSOpcode(0x68FFFFFF);
-int targetOffset_ = -1;
u16 numInstructions_ = 0;
};

class IRBlockCache : public JitBlockCacheDebugInterface {
public:
-IRBlockCache();
+IRBlockCache(bool compileToNative);

void Clear();
std::vector<int> FindInvalidatedBlockNumbers(u32 address, u32 length);
void FinalizeBlock(int blockNum, bool preload = false);
@@ -128,12 +129,12 @@ public:
return nullptr;
}
}
-int GetBlockNumFromOffset(int offset) const;
+int GetBlockNumFromIRArenaOffset(int offset) const;
const IRInst *GetBlockInstructionPtr(const IRBlock &block) const {
-return arena_.data() + block.GetInstructionOffset();
+return arena_.data() + block.GetIRArenaOffset();
}
const IRInst *GetBlockInstructionPtr(int blockNum) const {
-return arena_.data() + blocks_[blockNum].GetInstructionOffset();
+return arena_.data() + blocks_[blockNum].GetIRArenaOffset();
}
const IRInst *GetArenaPtr() const {
return arena_.data();
@@ -153,6 +154,8 @@ public:
}

int FindPreloadBlock(u32 em_address);
+
+// "Cookie" means the 24 bits we inject into the first instruction of each block.
int FindByCookie(int cookie);

std::vector<u32> SaveAndClearEmuHackOps();
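The new comment defines the cookie as the 24 bits injected into a block's first instruction. A sketch of how such a packing could look, inferring the 0x68 "emuhack" prefix from the 0x68FFFFFF sentinel used for origFirstOpcode_ above; the exact encoding lives elsewhere in the emulator and isn't part of this diff:

```cpp
// Assumptions: the replacement opcode uses a 0x68 prefix (suggested by the
// 0x68FFFFFF sentinel) and its low 24 bits carry the cookie. Illustrative only.
constexpr u32 kEmuHackPrefix = 0x68000000;
constexpr u32 kCookieMask = 0x00FFFFFF;

inline u32 PackCookieOp(int cookie) {
    return kEmuHackPrefix | ((u32)cookie & kCookieMask);
}
inline int UnpackCookie(u32 emuhackOp) {
    return (int)(emuhackOp & kCookieMask);
}
```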
@@ -187,7 +190,7 @@ public:

private:
u32 AddressToPage(u32 addr) const;

+bool compileToNative_;
std::vector<IRBlock> blocks_;
std::vector<IRInst> arena_;
std::unordered_map<u32, std::vector<int>> byPage_;
@@ -235,6 +238,8 @@ protected:
virtual bool CompileTargetBlock(IRBlockCache *irBlockCache, int block_num, bool preload) { return true; }
virtual void FinalizeTargetBlock(IRBlockCache *irBlockCache, int block_num) {}

+bool compileToNative_;
+
JitOptions jo;

IRFrontend frontend_;
@@ -534,7 +534,7 @@ void IRNativeJit::InvalidateCacheAt(u32 em_address, int length) {
for (int block_num : numbers) {
auto block = blocks_.GetBlock(block_num);
backend_->InvalidateBlock(&blocks_, block_num);
-block->Destroy(block->GetTargetOffset());
+block->Destroy(block->GetNativeOffset());
}
}
@@ -550,7 +550,7 @@ bool IRNativeJit::DescribeCodePtr(const u8 *ptr, std::string &name) {
int block_offset = INT_MAX;
for (int i = 0; i < blocks_.GetNumBlocks(); ++i) {
const auto &b = blocks_.GetBlock(i);
-int b_start = b->GetTargetOffset();
+int b_start = b->GetNativeOffset();
if (b_start > offset)
continue;
@@ -737,7 +737,7 @@ JitBlockProfileStats IRNativeBlockCacheDebugInterface::GetBlockProfileStats(int
}

void IRNativeBlockCacheDebugInterface::GetBlockCodeRange(int blockNum, int *startOffset, int *size) const {
-int blockOffset = irBlocks_.GetBlock(blockNum)->GetTargetOffset();
+int blockOffset = irBlocks_.GetBlock(blockNum)->GetNativeOffset();
int endOffset = backend_->GetNativeBlock(blockNum)->checkedOffset;

// If endOffset is before, the checked entry is before the block start.
@@ -747,7 +747,7 @@ void IRNativeBlockCacheDebugInterface::GetBlockCodeRange(int blockNum, int *star
// Last block, get from current code pointer.
endOffset = (int)codeBlock_->GetOffset(codeBlock_->GetCodePtr());
} else {
-endOffset = irBlocks_.GetBlock(blockNum + 1)->GetTargetOffset();
+endOffset = irBlocks_.GetBlock(blockNum + 1)->GetNativeOffset();
_assert_msg_(endOffset >= blockOffset, "Next block not sequential, block=%d/%08x, next=%d/%08x", blockNum, blockOffset, blockNum + 1, endOffset);
}
}
@@ -79,7 +79,7 @@ bool RiscVJitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bo

// Don't worry, the codespace isn't large enough to overflow offsets.
const u8 *blockStart = GetCodePointer();
-block->SetTargetOffset((int)GetOffset(blockStart));
+block->SetNativeOffset((int)GetOffset(blockStart));
compilingBlockNum_ = block_num;

regs_.Start(irBlockCache, block_num);
@@ -110,7 +110,7 @@ bool RiscVJitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bo
QuickJ(R_RA, hooks_.crashHandler);
}

-int len = (int)GetOffset(GetCodePointer()) - block->GetTargetOffset();
+int len = (int)GetOffset(GetCodePointer()) - block->GetNativeOffset();
if (len < MIN_BLOCK_NORMAL_LEN) {
// We need at least 16 bytes to invalidate blocks with, but larger doesn't need to align.
ReserveCodeSpace(MIN_BLOCK_NORMAL_LEN - len);
@@ -300,7 +300,7 @@ void RiscVJitBackend::ClearAllBlocks() {

void RiscVJitBackend::InvalidateBlock(IRBlockCache *irBlockCache, int block_num) {
IRBlock *block = irBlockCache->GetBlock(block_num);
-int offset = block->GetTargetOffset();
+int offset = block->GetNativeOffset();
u8 *writable = GetWritablePtrFromCodePtr(GetBasePtr()) + offset;

// Overwrite the block with a jump to compile it again.
@@ -82,7 +82,7 @@ bool X64JitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bool

// Don't worry, the codespace isn't large enough to overflow offsets.
const u8 *blockStart = GetCodePointer();
-block->SetTargetOffset((int)GetOffset(blockStart));
+block->SetNativeOffset((int)GetOffset(blockStart));
compilingBlockNum_ = block_num;
lastConstPC_ = 0;
@@ -115,7 +115,7 @@ bool X64JitBackend::CompileBlock(IRBlockCache *irBlockCache, int block_num, bool
JMP(hooks_.crashHandler, true);
}

-int len = (int)GetOffset(GetCodePointer()) - block->GetTargetOffset();
+int len = (int)GetOffset(GetCodePointer()) - block->GetNativeOffset();
if (len < MIN_BLOCK_NORMAL_LEN) {
// We need at least 10 bytes to invalidate blocks with.
ReserveCodeSpace(MIN_BLOCK_NORMAL_LEN - len);
@@ -321,7 +321,7 @@ void X64JitBackend::ClearAllBlocks() {

void X64JitBackend::InvalidateBlock(IRBlockCache *irBlockCache, int block_num) {
IRBlock *block = irBlockCache->GetBlock(block_num);
-int offset = block->GetTargetOffset();
+int offset = block->GetNativeOffset();
u8 *writable = GetWritablePtrFromCodePtr(GetBasePtr()) + offset;

// Overwrite the block with a jump to compile it again.