riscv: Centralize reg allocation.

Unknown W. Brackets 2023-08-17 18:50:33 -07:00
parent b30daa5760
commit ebab0e1591
6 changed files with 421 additions and 435 deletions

Core/MIPS/IR/IRRegCache.cpp View File

@@ -21,6 +21,8 @@
#include <cstring> #include <cstring>
#include "Common/Log.h" #include "Common/Log.h"
#include "Common/LogReporting.h"
#include "Core/MIPS/IR/IRAnalysis.h"
#include "Core/MIPS/IR/IRRegCache.h" #include "Core/MIPS/IR/IRRegCache.h"
#include "Core/MIPS/IR/IRInst.h" #include "Core/MIPS/IR/IRInst.h"
#include "Core/MIPS/IR/IRJit.h" #include "Core/MIPS/IR/IRJit.h"
@@ -90,10 +92,10 @@ void IRImmRegCache::MapDirtyInIn(IRReg rd, IRReg rs, IRReg rt) {
Flush(rt); Flush(rt);
} }
IRNativeRegCache::IRNativeRegCache(MIPSComp::JitOptions *jo) IRNativeRegCacheBase::IRNativeRegCacheBase(MIPSComp::JitOptions *jo)
: jo_(jo) {} : jo_(jo) {}
void IRNativeRegCache::Start(MIPSComp::IRBlock *irBlock) { void IRNativeRegCacheBase::Start(MIPSComp::IRBlock *irBlock) {
if (!initialReady_) { if (!initialReady_) {
SetupInitialRegs(); SetupInitialRegs();
initialReady_ = true; initialReady_ = true;
@@ -109,7 +111,7 @@ void IRNativeRegCache::Start(MIPSComp::IRBlock *irBlock) {
nr[statics[i].nr].mipsReg = statics[i].mr; nr[statics[i].nr].mipsReg = statics[i].mr;
nr[statics[i].nr].pointerified = statics[i].pointerified && jo_->enablePointerify; nr[statics[i].nr].pointerified = statics[i].pointerified && jo_->enablePointerify;
nr[statics[i].nr].normalized32 = statics[i].normalized32; nr[statics[i].nr].normalized32 = statics[i].normalized32;
mr[statics[i].mr].loc = MIPSLoc::REG; mr[statics[i].mr].loc = statics[i].loc;
mr[statics[i].mr].nReg = statics[i].nr; mr[statics[i].mr].nReg = statics[i].nr;
mr[statics[i].mr].isStatic = true; mr[statics[i].mr].isStatic = true;
// Lock it until the very end. // Lock it until the very end.
@@ -120,10 +122,242 @@ void IRNativeRegCache::Start(MIPSComp::IRBlock *irBlock) {
irIndex_ = 0; irIndex_ = 0;
} }
void IRNativeRegCache::SetupInitialRegs() { void IRNativeRegCacheBase::SetupInitialRegs() {
_assert_msg_(totalNativeRegs_ > 0, "totalNativeRegs_ was never set by backend"); _assert_msg_(totalNativeRegs_ > 0, "totalNativeRegs_ was never set by backend");
// Everything else is initialized in the struct. // Everything else is initialized in the struct.
mrInitial_[MIPS_REG_ZERO].loc = MIPSLoc::IMM; mrInitial_[MIPS_REG_ZERO].loc = MIPSLoc::IMM;
mrInitial_[MIPS_REG_ZERO].imm = 0; mrInitial_[MIPS_REG_ZERO].imm = 0;
} }
IRNativeReg IRNativeRegCacheBase::AllocateReg(MIPSLoc type) {
_dbg_assert_(type == MIPSLoc::REG || type == MIPSLoc::FREG || type == MIPSLoc::VREG);
IRNativeReg nreg = FindFreeReg(type);
if (nreg != -1)
return nreg;
// Still nothing. Let's spill a reg and goto 10.
bool clobbered;
IRNativeReg bestToSpill = FindBestToSpill(type, true, &clobbered);
if (bestToSpill == -1) {
bestToSpill = FindBestToSpill(type, false, &clobbered);
}
if (bestToSpill != -1) {
if (clobbered) {
DiscardNativeReg(bestToSpill);
} else {
FlushNativeReg(bestToSpill);
}
// Now one must be free.
return FindFreeReg(type);
}
// Uh oh, we have all of them spill-locked...
ERROR_LOG_REPORT(JIT, "Out of spillable registers in block PC %08x, index %d", irBlock_->GetOriginalStart(), irIndex_);
_assert_(bestToSpill != -1);
return -1;
}
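For orientation, here is a minimal, self-contained sketch of the allocate-then-spill flow above. Everything in it (ToyCache, findFree, findBestToSpill, deadAfterHere) is an illustrative stand-in rather than a PPSSPP API; the two-pass spill search, unused-only first and then any spillable register, mirrors AllocateReg.

#include <cstdint>
#include <cstdio>
#include <vector>

using NativeReg = int8_t;
constexpr NativeReg NO_REG = -1;

struct ToyCache {
	std::vector<bool> inUse;         // stands in for nr[i].mipsReg != IRREG_INVALID
	std::vector<bool> deadAfterHere; // what the IR lookahead would report

	NativeReg findFree() const {
		for (size_t i = 0; i < inUse.size(); ++i)
			if (!inUse[i])
				return NativeReg(i);
		return NO_REG;
	}

	NativeReg findBestToSpill(bool unusedOnly, bool *clobbered) const {
		for (size_t i = 0; i < inUse.size(); ++i) {
			if (!inUse[i])
				continue;
			if (deadAfterHere[i]) {  // like IRUsage::CLOBBERED: safe to discard
				*clobbered = true;
				return NativeReg(i);
			}
			if (!unusedOnly)         // second pass: accept any spillable reg
				return NativeReg(i);
		}
		return NO_REG;
	}

	NativeReg allocate() {
		NativeReg r = findFree();
		if (r != NO_REG)
			return r;
		bool clobbered = false;
		NativeReg victim = findBestToSpill(true, &clobbered);
		if (victim == NO_REG)
			victim = findBestToSpill(false, &clobbered);
		if (victim == NO_REG)
			return NO_REG;  // everything spill-locked
		// The real cache discards a clobbered value and flushes a live one;
		// this toy just frees the slot either way.
		inUse[victim] = false;
		return findFree();
	}
};

int main() {
	ToyCache c{{true, true}, {false, true}};
	printf("allocated: %d\n", c.allocate());  // spills slot 1 (dead), returns 1
}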
IRNativeReg IRNativeRegCacheBase::FindFreeReg(MIPSLoc type) const {
int allocCount = 0, base = 0;
const int *allocOrder = GetAllocationOrder(type, allocCount, base);
for (int i = 0; i < allocCount; i++) {
IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);
if (nr[nreg].mipsReg == IRREG_INVALID && nr[nreg].tempLockIRIndex < irIndex_) {
return nreg;
}
}
return -1;
}
IRNativeReg IRNativeRegCacheBase::FindBestToSpill(MIPSLoc type, bool unusedOnly, bool *clobbered) const {
int allocCount = 0, base = 0;
const int *allocOrder = GetAllocationOrder(type, allocCount, base);
static const int UNUSED_LOOKAHEAD_OPS = 30;
IRSituation info;
info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
info.currentIndex = irIndex_;
info.instructions = irBlock_->GetInstructions();
info.numInstructions = irBlock_->GetNumInstructions();
auto getUsage = [type, &info](IRReg mipsReg) {
if (type == MIPSLoc::REG)
return IRNextGPRUsage(mipsReg, info);
else if (type == MIPSLoc::FREG || type == MIPSLoc::VREG)
return IRNextFPRUsage(mipsReg - 32, info);
_assert_msg_(false, "Unknown spill allocation type");
return IRUsage::UNKNOWN;
};
*clobbered = false;
for (int i = 0; i < allocCount; i++) {
IRNativeReg nreg = IRNativeReg(allocOrder[i] - base);
if (nr[nreg].mipsReg != IRREG_INVALID && mr[nr[nreg].mipsReg].spillLockIRIndex >= irIndex_)
continue;
if (nr[nreg].tempLockIRIndex >= irIndex_)
continue;
// As it's in alloc-order, we know it's not static so we don't need to check for that.
IRReg mipsReg = nr[nreg].mipsReg;
IRUsage usage = getUsage(mipsReg);
// Awesome, a clobbered reg. Let's use it?
if (usage == IRUsage::CLOBBERED) {
// If multiple mips regs use this native reg (i.e. vector, HI/LO), check each.
// Note: mipsReg points to the lowest numbered IRReg.
bool canClobber = true;
for (IRReg m = mipsReg + 1; mr[m].nReg == nreg && m < IRREG_INVALID && canClobber; ++m)
canClobber = getUsage(m) == IRUsage::CLOBBERED;
// Okay, if all can be clobbered, we're good to go.
if (canClobber) {
*clobbered = true;
return nreg;
}
}
// Not awesome. A used reg. Let's try to avoid spilling.
if (!unusedOnly || usage == IRUsage::UNUSED) {
// TODO: Use age or something to choose which register to spill?
// TODO: Spill dirty regs first? or opposite?
return nreg;
}
}
return -1;
}
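The heuristic above delegates to IRNextGPRUsage/IRNextFPRUsage (from IRAnalysis.h) to classify a register's next use inside the 30-op lookahead window. A rough, hypothetical model of that classification follows, with simplified names; the real analysis handles many more cases.

#include <cstddef>

enum class ToyUsage { UNUSED, READ, CLOBBERED };

struct ToyInst { int reads[2]; int write; };  // -1 means "no operand"

// Written before it is ever read within the window -> CLOBBERED (discardable);
// read first -> READ (must be spilled to keep the value); untouched -> UNUSED.
ToyUsage nextUsage(int reg, const ToyInst *insts, size_t count) {
	for (size_t i = 0; i < count; ++i) {
		for (int r : insts[i].reads)
			if (r == reg)
				return ToyUsage::READ;
		if (insts[i].write == reg)
			return ToyUsage::CLOBBERED;
	}
	return ToyUsage::UNUSED;
}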
void IRNativeRegCacheBase::DiscardNativeReg(IRNativeReg nreg) {
_assert_msg_(nreg >= 0 && nreg < totalNativeRegs_, "DiscardNativeReg on invalid register %d", nreg);
if (nr[nreg].mipsReg != IRREG_INVALID) {
_assert_(nr[nreg].mipsReg != MIPS_REG_ZERO);
int8_t lanes = 0;
for (IRReg m = nr[nreg].mipsReg; mr[m].nReg == nreg && m < IRREG_INVALID; ++m)
lanes++;
if (mr[nr[nreg].mipsReg].isStatic) {
int numStatics;
const StaticAllocation *statics = GetStaticAllocations(numStatics);
// If it's not currently marked as in a reg, throw it away.
for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {
_assert_msg_(mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);
for (int i = 0; i < numStatics; i++) {
if (m == statics[i].mr)
mr[m].loc = statics[i].loc;
}
}
} else {
for (IRReg m = nr[nreg].mipsReg; m < nr[nreg].mipsReg + lanes; ++m) {
mr[m].loc = MIPSLoc::MEM;
mr[m].nReg = -1;
mr[m].imm = 0;
_assert_msg_(!mr[m].isStatic, "Reg in lane %d mismatched static status", m - nr[nreg].mipsReg);
}
nr[nreg].mipsReg = IRREG_INVALID;
}
}
// Even for a static reg, we assume this means it's not pointerified anymore.
nr[nreg].pointerified = false;
nr[nreg].isDirty = false;
nr[nreg].normalized32 = false;
}
void IRNativeRegCacheBase::FlushNativeReg(IRNativeReg nreg) {
_assert_msg_(nreg >= 0 && nreg < totalNativeRegs_, "FlushNativeReg on invalid register %d", nreg);
if (nr[nreg].mipsReg == IRREG_INVALID || nr[nreg].mipsReg == MIPS_REG_ZERO) {
// Nothing to do, reg not mapped or mapped to fixed zero.
_dbg_assert_(!nr[nreg].isDirty);
return;
}
_dbg_assert_(!mr[nr[nreg].mipsReg].isStatic);
if (mr[nr[nreg].mipsReg].isStatic) {
ERROR_LOG(JIT, "Cannot FlushNativeReg a statically mapped register");
return;
}
// Multiple mipsRegs may match this if a vector or HI/LO, etc.
bool isDirty = nr[nreg].isDirty;
int8_t lanes = 0;
for (IRReg m = nr[nreg].mipsReg; mr[m].nReg == nreg && m < IRREG_INVALID; ++m) {
_dbg_assert_(!mr[m].isStatic);
lanes++;
}
if (isDirty) {
IRReg first = nr[nreg].mipsReg;
if (mr[first].loc == MIPSLoc::REG_AS_PTR) {
// We assume this can't be multiple lanes. Maybe some gather craziness?
_assert_(lanes == 1);
AdjustNativeRegAsPtr(nreg, false);
mr[first].loc = MIPSLoc::REG;
}
StoreNativeReg(nreg, first, lanes);
}
for (int8_t i = 0; i < lanes; ++i) {
auto &mreg = mr[nr[nreg].mipsReg + i];
mreg.nReg = -1;
// Note that it loses its imm status, because imms are always dirty.
mreg.loc = MIPSLoc::MEM;
mreg.imm = 0;
}
nr[nreg].mipsReg = IRREG_INVALID;
nr[nreg].isDirty = false;
nr[nreg].pointerified = false;
nr[nreg].normalized32 = false;
}
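Taken together, the two teardown paths differ only in whether the value survives: FlushNativeReg writes a dirty value back to MIPSState before unmapping, while DiscardNativeReg unmaps unconditionally (legal once the lookahead proves the value dead). A condensed sketch of that shared unmap step, using simplified stand-in types rather than the real structs:

#include <cstdint>

static uint32_t ctx[256];       // stands in for MIPSState storage
static uint32_t nativeVal[32];  // stands in for host register contents

struct ToyNative { int mipsReg = -1; bool isDirty = false; };
struct ToyMips   { int nReg = -1; bool inMem = false; };

void flushOrDiscard(ToyNative &n, ToyMips &m, int nreg, bool discard) {
	if (n.mipsReg != -1) {
		if (!discard && n.isDirty)
			ctx[n.mipsReg] = nativeVal[nreg];  // the StoreNativeReg step
		m.nReg = -1;
		m.inMem = true;  // MIPSLoc::MEM in the real cache
		n.mipsReg = -1;
	}
	n.isDirty = false;
}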
void IRNativeRegCacheBase::AdjustNativeRegAsPtr(IRNativeReg nreg, bool state) {
// This isn't necessary to implement if REG_AS_PTR is unsupported entirely.
_assert_msg_(false, "AdjustNativeRegAsPtr unimplemented");
}
bool IRNativeRegCacheBase::IsValidGPR(IRReg r) const {
// See MIPSState for these offsets.
// Don't allow FPU regs, VFPU regs, or VFPU temps here.
if (r >= 32 && IsValidFPR(r - 32))
return false;
// Don't allow nextPC, etc. since it's probably a mistake.
if (r > IRREG_FPCOND && r != IRREG_LLBIT)
return false;
// Don't allow PC either.
if (r == 241)
return false;
return true;
}
bool IRNativeRegCacheBase::IsValidGPRNoZero(IRReg r) const {
return IsValidGPR(r) && r != MIPS_REG_ZERO;
}
bool IRNativeRegCacheBase::IsValidFPR(IRReg r) const {
// FPR parameters are off by 32 within the MIPSState object.
if (r >= TOTAL_MAPPABLE_IRREGS - 32)
return false;
// See MIPSState for these offsets.
int index = r + 32;
// Allow FPU or VFPU regs here.
if (index >= 32 && index < 32 + 32 + 128)
return true;
// Also allow VFPU temps.
if (index >= 224 && index < 224 + 16)
return true;
// Nothing else is allowed for the FPU side.
return false;
}
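The two validity checks above carve up the shared mr index space; reconstructing the layout they imply (the ranges come straight from the checks, the names are informal):

// [0, 32)    GPRs (index 0 is the hard-wired zero register)
// [32, 64)   FPU registers (IR FPR n lives at index n + 32)
// [64, 192)  VFPU registers
// [224, 240) VFPU temporaries
// 241        PC, rejected by both checks
constexpr bool isFPRIndex(int index) {
	return (index >= 32 && index < 32 + 32 + 128) ||
	       (index >= 224 && index < 224 + 16);
}
static_assert(isFPRIndex(32) && isFPRIndex(191) && isFPRIndex(224), "FPU/VFPU side");
static_assert(!isFPRIndex(0) && !isFPRIndex(241), "GPRs and PC excluded");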

Core/MIPS/IR/IRRegCache.h View File

@@ -30,6 +30,8 @@ constexpr int TOTAL_MAPPABLE_IRREGS = 256;
// Arbitrary - increase if your backend has more. // Arbitrary - increase if your backend has more.
constexpr int TOTAL_POSSIBLE_NATIVEREGS = 128; constexpr int TOTAL_POSSIBLE_NATIVEREGS = 128;
typedef int8_t IRNativeReg;
constexpr IRReg IRREG_INVALID = 255; constexpr IRReg IRREG_INVALID = 255;
class IRWriter; class IRWriter;
@@ -75,7 +77,7 @@ private:
IRWriter *ir_; IRWriter *ir_;
}; };
class IRNativeRegCache { class IRNativeRegCacheBase {
protected: protected:
enum class MIPSLoc { enum class MIPSLoc {
// Known immediate value (only in regcache.) // Known immediate value (only in regcache.)
@@ -95,10 +97,10 @@ protected:
}; };
struct RegStatusMIPS { struct RegStatusMIPS {
// Where is this IR/MIPS register? // Where is this IR/MIPS register? Note: base reg if vector.
MIPSLoc loc = MIPSLoc::MEM; MIPSLoc loc = MIPSLoc::MEM;
// If in a register, what index (into nr array)? // If in a register, what index (into nr array)?
int8_t nReg = -1; IRNativeReg nReg = -1;
// If a known immediate value, what value? // If a known immediate value, what value?
uint64_t imm = 0; uint64_t imm = 0;
// Locked from spilling (i.e. used by current instruction) as of what IR instruction? // Locked from spilling (i.e. used by current instruction) as of what IR instruction?
@@ -125,7 +127,9 @@ protected:
struct StaticAllocation { struct StaticAllocation {
IRReg mr; IRReg mr;
int8_t nr; IRNativeReg nr;
// Register type.
MIPSLoc loc;
// Whether the reg should be marked pointerified by default. // Whether the reg should be marked pointerified by default.
bool pointerified = false; bool pointerified = false;
// Whether the reg should be considered always normalized at the start of a block. // Whether the reg should be considered always normalized at the start of a block.
@@ -133,8 +137,8 @@ protected:
}; };
public: public:
IRNativeRegCache(MIPSComp::JitOptions *jo); IRNativeRegCacheBase(MIPSComp::JitOptions *jo);
virtual ~IRNativeRegCache() {} virtual ~IRNativeRegCacheBase() {}
virtual void Start(MIPSComp::IRBlock *irBlock); virtual void Start(MIPSComp::IRBlock *irBlock);
void SetIRIndex(int index) { void SetIRIndex(int index) {
@@ -143,13 +147,26 @@ public:
protected: protected:
virtual void SetupInitialRegs(); virtual void SetupInitialRegs();
virtual const StaticAllocation *GetStaticAllocations(int &count) { virtual const int *GetAllocationOrder(MIPSLoc type, int &count, int &base) const = 0;
virtual const StaticAllocation *GetStaticAllocations(int &count) const {
count = 0; count = 0;
return nullptr; return nullptr;
} }
IRNativeReg AllocateReg(MIPSLoc type);
IRNativeReg FindFreeReg(MIPSLoc type) const;
IRNativeReg FindBestToSpill(MIPSLoc type, bool unusedOnly, bool *clobbered) const;
virtual void DiscardNativeReg(IRNativeReg nreg);
virtual void FlushNativeReg(IRNativeReg nreg);
virtual void AdjustNativeRegAsPtr(IRNativeReg nreg, bool state);
virtual void StoreNativeReg(IRNativeReg nreg, IRReg first, int lanes) = 0;
bool IsValidGPR(IRReg r) const;
bool IsValidGPRNoZero(IRReg r) const;
bool IsValidFPR(IRReg r) const;
MIPSComp::JitOptions *jo_; MIPSComp::JitOptions *jo_;
MIPSComp::IRBlock *irBlock_ = nullptr; const MIPSComp::IRBlock *irBlock_ = nullptr;
int irIndex_ = 0; int irIndex_ = 0;
int totalNativeRegs_ = 0; int totalNativeRegs_ = 0;

Core/MIPS/RISCV/RiscVRegCache.cpp View File

@@ -30,7 +30,7 @@ using namespace RiscVGen;
using namespace RiscVJitConstants; using namespace RiscVJitConstants;
RiscVRegCache::RiscVRegCache(MIPSComp::JitOptions *jo) RiscVRegCache::RiscVRegCache(MIPSComp::JitOptions *jo)
: IRNativeRegCache(jo) { : IRNativeRegCacheBase(jo) {
// TODO: Move to using for FPRs and VPRs too? // TODO: Move to using for FPRs and VPRs too?
totalNativeRegs_ = NUM_RVREG; totalNativeRegs_ = NUM_RVREG;
} }
@@ -40,7 +40,7 @@ void RiscVRegCache::Init(RiscVEmitter *emitter) {
} }
void RiscVRegCache::SetupInitialRegs() { void RiscVRegCache::SetupInitialRegs() {
IRNativeRegCache::SetupInitialRegs(); IRNativeRegCacheBase::SetupInitialRegs();
// Treat R_ZERO a bit specially, but it's basically static alloc too. // Treat R_ZERO a bit specially, but it's basically static alloc too.
nrInitial_[R_ZERO].mipsReg = MIPS_REG_ZERO; nrInitial_[R_ZERO].mipsReg = MIPS_REG_ZERO;
@@ -53,16 +53,18 @@ void RiscVRegCache::SetupInitialRegs() {
mrInitial_[MIPS_REG_ZERO].isStatic = true; mrInitial_[MIPS_REG_ZERO].isStatic = true;
} }
const RiscVReg *RiscVRegCache::GetMIPSAllocationOrder(int &count) { const int *RiscVRegCache::GetAllocationOrder(MIPSLoc type, int &count, int &base) const {
_assert_(type == MIPSLoc::REG);
// X8 and X9 are the most ideal for static alloc because they can be used with compression. // X8 and X9 are the most ideal for static alloc because they can be used with compression.
// Otherwise we stick to saved regs - might not be necessary. // Otherwise we stick to saved regs - might not be necessary.
static const RiscVReg allocationOrder[] = { static const int allocationOrder[] = {
X8, X9, X12, X13, X14, X15, X5, X6, X7, X16, X17, X18, X19, X20, X21, X22, X23, X28, X29, X30, X31, X8, X9, X12, X13, X14, X15, X5, X6, X7, X16, X17, X18, X19, X20, X21, X22, X23, X28, X29, X30, X31,
}; };
static const RiscVReg allocationOrderStaticAlloc[] = { static const int allocationOrderStaticAlloc[] = {
X12, X13, X14, X15, X5, X6, X7, X16, X17, X21, X22, X23, X28, X29, X30, X31, X12, X13, X14, X15, X5, X6, X7, X16, X17, X21, X22, X23, X28, X29, X30, X31,
}; };
base = X0;
if (jo_->useStaticAlloc) { if (jo_->useStaticAlloc) {
count = ARRAY_SIZE(allocationOrderStaticAlloc); count = ARRAY_SIZE(allocationOrderStaticAlloc);
return allocationOrderStaticAlloc; return allocationOrderStaticAlloc;
@@ -72,20 +74,20 @@ const RiscVReg *RiscVRegCache::GetMIPSAllocationOrder(int &count) {
} }
} }
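The new base out-parameter is what lets the shared FindFreeReg/FindBestToSpill translate these emitter enum values into nr[] indices via allocOrder[i] - base. A tiny illustration, assuming (as the subtraction implies) that X0..X31 are numbered consecutively from X0; the enum here is a stand-in, not the real RiscVGen one:

#include <cstdio>

enum ToyRVReg { TX0 = 0, TX5 = 5, TX8 = 8, TX9 = 9 };  // stand-ins for X0..X31

int main() {
	const int order[] = { TX8, TX9, TX5 };
	const int base = TX0;
	for (int v : order)
		printf("enum value %d -> nr[] index %d\n", v, v - base);
}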
const RiscVRegCache::StaticAllocation *RiscVRegCache::GetStaticAllocations(int &count) { const RiscVRegCache::StaticAllocation *RiscVRegCache::GetStaticAllocations(int &count) const {
static const StaticAllocation allocs[] = { static const StaticAllocation allocs[] = {
{ MIPS_REG_SP, X8, true }, { MIPS_REG_SP, X8, MIPSLoc::REG, true },
{ MIPS_REG_V0, X9 }, { MIPS_REG_V0, X9, MIPSLoc::REG },
{ MIPS_REG_V1, X18 }, { MIPS_REG_V1, X18, MIPSLoc::REG },
{ MIPS_REG_A0, X19 }, { MIPS_REG_A0, X19, MIPSLoc::REG },
{ MIPS_REG_RA, X20 }, { MIPS_REG_RA, X20, MIPSLoc::REG },
}; };
if (jo_->useStaticAlloc) { if (jo_->useStaticAlloc) {
count = ARRAY_SIZE(allocs); count = ARRAY_SIZE(allocs);
return allocs; return allocs;
} }
return IRNativeRegCache::GetStaticAllocations(count); return IRNativeRegCacheBase::GetStaticAllocations(count);
} }
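Under useStaticAlloc these five mappings are pinned for entire blocks; Start() in the base class (see the first file) seeds both tables from this array. A condensed sketch of that seeding, with toy types in place of the real ones:

enum class ToyLoc { MEM, REG };

struct ToyStaticAlloc { int mr, nr; ToyLoc loc; bool pointerified; };
struct ToyMips   { ToyLoc loc = ToyLoc::MEM; int nReg = -1; bool isStatic = false; };
struct ToyNative { int mipsReg = -1; bool pointerified = false; };

// Mirrors the loop in IRNativeRegCacheBase::Start(): each entry is marked
// mapped, static, and (when enabled) pointerified for the whole block.
void seedStatics(const ToyStaticAlloc *statics, int n, bool enablePointerify,
                 ToyMips *mr, ToyNative *nr) {
	for (int i = 0; i < n; ++i) {
		nr[statics[i].nr].mipsReg = statics[i].mr;
		nr[statics[i].nr].pointerified = statics[i].pointerified && enablePointerify;
		mr[statics[i].mr].loc = statics[i].loc;
		mr[statics[i].mr].nReg = statics[i].nr;
		mr[statics[i].mr].isStatic = true;
	}
}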
void RiscVRegCache::EmitLoadStaticRegisters() { void RiscVRegCache::EmitLoadStaticRegisters() {
@@ -115,28 +117,28 @@ void RiscVRegCache::EmitSaveStaticRegisters() {
void RiscVRegCache::FlushBeforeCall() { void RiscVRegCache::FlushBeforeCall() {
// These registers are not preserved by function calls. // These registers are not preserved by function calls.
for (int i = 5; i <= 7; ++i) { for (int i = 5; i <= 7; ++i) {
FlushRiscVReg(RiscVReg(X0 + i)); FlushNativeReg(i);
} }
for (int i = 10; i <= 17; ++i) { for (int i = 10; i <= 17; ++i) {
FlushRiscVReg(RiscVReg(X0 + i)); FlushNativeReg(i);
} }
for (int i = 28; i <= 31; ++i) { for (int i = 28; i <= 31; ++i) {
FlushRiscVReg(RiscVReg(X0 + i)); FlushNativeReg(i);
} }
} }
bool RiscVRegCache::IsInRAM(IRReg reg) { bool RiscVRegCache::IsInRAM(IRReg reg) {
_dbg_assert_(IsValidReg(reg)); _dbg_assert_(IsValidGPR(reg));
return mr[reg].loc == MIPSLoc::MEM; return mr[reg].loc == MIPSLoc::MEM;
} }
bool RiscVRegCache::IsMapped(IRReg mipsReg) { bool RiscVRegCache::IsMapped(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
return mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM; return mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM;
} }
bool RiscVRegCache::IsMappedAsPointer(IRReg mipsReg) { bool RiscVRegCache::IsMappedAsPointer(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
if (mr[mipsReg].loc == MIPSLoc::REG) { if (mr[mipsReg].loc == MIPSLoc::REG) {
return nr[mr[mipsReg].nReg].pointerified; return nr[mr[mipsReg].nReg].pointerified;
} else if (mr[mipsReg].loc == MIPSLoc::REG_IMM) { } else if (mr[mipsReg].loc == MIPSLoc::REG_IMM) {
@@ -157,7 +159,7 @@ bool RiscVRegCache::IsMappedAsStaticPointer(IRReg reg) {
} }
bool RiscVRegCache::IsNormalized32(IRReg mipsReg) { bool RiscVRegCache::IsNormalized32(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
if (XLEN == 32) if (XLEN == 32)
return true; return true;
if (mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM) { if (mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM) {
@@ -196,7 +198,7 @@ void RiscVRegCache::MarkPtrDirty(RiscVReg reg) {
} }
RiscVGen::RiscVReg RiscVRegCache::Normalize32(IRReg mipsReg, RiscVGen::RiscVReg destReg) { RiscVGen::RiscVReg RiscVRegCache::Normalize32(IRReg mipsReg, RiscVGen::RiscVReg destReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
_dbg_assert_(destReg == INVALID_REG || (destReg > X0 && destReg <= X31)); _dbg_assert_(destReg == INVALID_REG || (destReg > X0 && destReg <= X31));
RiscVReg reg = (RiscVReg)mr[mipsReg].nReg; RiscVReg reg = (RiscVReg)mr[mipsReg].nReg;
@@ -257,7 +259,7 @@ void RiscVRegCache::SetRegImm(RiscVReg reg, u64 imm) {
void RiscVRegCache::MapRegTo(RiscVReg reg, IRReg mipsReg, MIPSMap mapFlags) { void RiscVRegCache::MapRegTo(RiscVReg reg, IRReg mipsReg, MIPSMap mapFlags) {
_dbg_assert_(reg > X0 && reg <= X31); _dbg_assert_(reg > X0 && reg <= X31);
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
_dbg_assert_(!mr[mipsReg].isStatic); _dbg_assert_(!mr[mipsReg].isStatic);
if (mr[mipsReg].isStatic) { if (mr[mipsReg].isStatic) {
ERROR_LOG(JIT, "Cannot MapRegTo static register %d", mipsReg); ERROR_LOG(JIT, "Cannot MapRegTo static register %d", mipsReg);
@@ -316,88 +318,8 @@ void RiscVRegCache::MapRegTo(RiscVReg reg, IRReg mipsReg, MIPSMap mapFlags) {
mr[mipsReg].nReg = reg; mr[mipsReg].nReg = reg;
} }
RiscVReg RiscVRegCache::AllocateReg() {
int allocCount;
const RiscVReg *allocOrder = GetMIPSAllocationOrder(allocCount);
allocate:
for (int i = 0; i < allocCount; i++) {
RiscVReg reg = allocOrder[i];
if (nr[reg].mipsReg == IRREG_INVALID && nr[reg].tempLockIRIndex < irIndex_) {
return reg;
}
}
// Still nothing. Let's spill a reg and goto 10.
bool clobbered;
RiscVReg bestToSpill = FindBestToSpill(true, &clobbered);
if (bestToSpill == INVALID_REG) {
bestToSpill = FindBestToSpill(false, &clobbered);
}
if (bestToSpill != INVALID_REG) {
if (clobbered) {
DiscardR(nr[bestToSpill].mipsReg);
} else {
FlushRiscVReg(bestToSpill);
}
// Now one must be free.
goto allocate;
}
// Uh oh, we have all of them spilllocked....
ERROR_LOG_REPORT(JIT, "Out of spillable registers in block PC %08x, index %d", irBlock_->GetOriginalStart(), irIndex_);
_assert_(bestToSpill != INVALID_REG);
return INVALID_REG;
}
RiscVReg RiscVRegCache::FindBestToSpill(bool unusedOnly, bool *clobbered) {
int allocCount;
const RiscVReg *allocOrder = GetMIPSAllocationOrder(allocCount);
static const int UNUSED_LOOKAHEAD_OPS = 30;
IRSituation info;
info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
info.currentIndex = irIndex_;
info.instructions = irBlock_->GetInstructions();
info.numInstructions = irBlock_->GetNumInstructions();
*clobbered = false;
for (int i = 0; i < allocCount; i++) {
RiscVReg reg = allocOrder[i];
if (nr[reg].mipsReg != IRREG_INVALID && mr[nr[reg].mipsReg].spillLockIRIndex >= irIndex_)
continue;
if (nr[reg].tempLockIRIndex >= irIndex_)
continue;
// As it's in alloc-order, we know it's not static so we don't need to check for that.
IRUsage usage = IRNextGPRUsage(nr[reg].mipsReg, info);
// Awesome, a clobbered reg. Let's use it.
if (usage == IRUsage::CLOBBERED) {
// TODO: Check HI/LO clobber together if we combine.
bool canClobber = true;
if (canClobber) {
*clobbered = true;
return reg;
}
}
// Not awesome. A used reg. Let's try to avoid spilling.
if (!unusedOnly || usage == IRUsage::UNUSED) {
// TODO: Use age or something to choose which register to spill?
// TODO: Spill dirty regs first? or opposite?
return reg;
}
}
return INVALID_REG;
}
RiscVReg RiscVRegCache::TryMapTempImm(IRReg r) { RiscVReg RiscVRegCache::TryMapTempImm(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
// If already mapped, no need for a temporary. // If already mapped, no need for a temporary.
if (IsMapped(r)) { if (IsMapped(r)) {
return R(r); return R(r);
@@ -421,7 +343,7 @@ RiscVReg RiscVRegCache::TryMapTempImm(IRReg r) {
} }
RiscVReg RiscVRegCache::GetAndLockTempR() { RiscVReg RiscVRegCache::GetAndLockTempR() {
RiscVReg reg = AllocateReg(); RiscVReg reg = (RiscVReg)AllocateReg(MIPSLoc::REG);
if (reg != INVALID_REG) { if (reg != INVALID_REG) {
nr[reg].tempLockIRIndex = irIndex_; nr[reg].tempLockIRIndex = irIndex_;
} }
@@ -429,7 +351,7 @@ RiscVReg RiscVRegCache::GetAndLockTempR() {
} }
RiscVReg RiscVRegCache::MapReg(IRReg mipsReg, MIPSMap mapFlags) { RiscVReg RiscVRegCache::MapReg(IRReg mipsReg, MIPSMap mapFlags) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
// TODO: Optimization to force HI/LO to be combined? // TODO: Optimization to force HI/LO to be combined?
@@ -521,7 +443,7 @@ RiscVReg RiscVRegCache::MapReg(IRReg mipsReg, MIPSMap mapFlags) {
} }
// Okay, not mapped, so we need to allocate an RV register. // Okay, not mapped, so we need to allocate an RV register.
RiscVReg reg = AllocateReg(); RiscVReg reg = (RiscVReg)AllocateReg(MIPSLoc::REG);
if (reg != INVALID_REG) { if (reg != INVALID_REG) {
// Grab it, and load the value into it (if requested). // Grab it, and load the value into it (if requested).
MapRegTo(reg, mipsReg, mapFlags); MapRegTo(reg, mipsReg, mapFlags);
@@ -531,7 +453,7 @@ RiscVReg RiscVRegCache::MapReg(IRReg mipsReg, MIPSMap mapFlags) {
} }
RiscVReg RiscVRegCache::MapRegAsPointer(IRReg reg) { RiscVReg RiscVRegCache::MapRegAsPointer(IRReg reg) {
_dbg_assert_(IsValidRegNoZero(reg)); _dbg_assert_(IsValidGPRNoZero(reg));
// Already mapped. // Already mapped.
if (mr[reg].loc == MIPSLoc::REG_AS_PTR) { if (mr[reg].loc == MIPSLoc::REG_AS_PTR) {
@@ -639,83 +561,38 @@ void RiscVRegCache::MapDirtyDirtyInIn(IRReg rd1, IRReg rd2, IRReg rs, IRReg rt,
ReleaseSpillLock(rd1, rd2, rs, rt); ReleaseSpillLock(rd1, rd2, rs, rt);
} }
void RiscVRegCache::FlushRiscVReg(RiscVReg r) { void RiscVRegCache::AdjustNativeRegAsPtr(IRNativeReg nreg, bool state) {
_dbg_assert_(r > X0 && r <= X31); RiscVReg r = (RiscVReg)(X0 + nreg);
_dbg_assert_(nr[r].mipsReg != MIPS_REG_ZERO); if (state) {
if (r == INVALID_REG) { AddMemBase(r);
ERROR_LOG(JIT, "FlushRiscVReg called on invalid register %d", r);
return;
}
if (nr[r].mipsReg == IRREG_INVALID) {
// Nothing to do, reg not mapped.
_dbg_assert_(!nr[r].isDirty);
return;
}
_dbg_assert_(!mr[nr[r].mipsReg].isStatic);
if (mr[nr[r].mipsReg].isStatic) {
ERROR_LOG(JIT, "Cannot FlushRiscVReg a statically mapped register");
return;
}
auto &mreg = mr[nr[r].mipsReg];
if (mreg.loc == MIPSLoc::REG_IMM || nr[r].mipsReg == MIPS_REG_ZERO) {
// We know its immediate value, no need to STR now.
mreg.loc = MIPSLoc::IMM;
mreg.nReg = (int)INVALID_REG;
} else { } else {
if (mreg.loc == MIPSLoc::IMM || nr[r].isDirty) {
if (mreg.loc == MIPSLoc::REG_AS_PTR) {
// Unpointerify, in case dirty.
#ifdef MASKED_PSP_MEMORY #ifdef MASKED_PSP_MEMORY
_dbg_assert_(!nr[r].isDirty); _dbg_assert_(!nr[nreg].isDirty);
#endif #endif
emit_->SUB(r, r, MEMBASEREG); emit_->SUB(r, r, MEMBASEREG);
mreg.loc = MIPSLoc::REG; nr[nreg].normalized32 = false;
nr[r].normalized32 = false;
}
RiscVReg storeReg = RiscVRegForFlush(nr[r].mipsReg);
if (storeReg != INVALID_REG)
emit_->SW(storeReg, CTXREG, GetMipsRegOffset(nr[r].mipsReg));
}
mreg.loc = MIPSLoc::MEM;
mreg.nReg = (int)INVALID_REG;
mreg.imm = -1;
} }
nr[r].isDirty = false; }
nr[r].mipsReg = IRREG_INVALID;
nr[r].pointerified = false; void RiscVRegCache::StoreNativeReg(IRNativeReg nreg, IRReg first, int lanes) {
RiscVReg r = (RiscVReg)(X0 + nreg);
_dbg_assert_(r > X0 && r <= X31);
_dbg_assert_(first != MIPS_REG_ZERO);
// Multilane not yet supported.
_assert_(lanes == 1);
_assert_(mr[first].loc == MIPSLoc::REG || mr[first].loc == MIPSLoc::REG_IMM);
emit_->SW(r, CTXREG, GetMipsRegOffset(first));
} }
void RiscVRegCache::DiscardR(IRReg mipsReg) { void RiscVRegCache::DiscardR(IRReg mipsReg) {
_dbg_assert_(IsValidRegNoZero(mipsReg)); _dbg_assert_(IsValidGPRNoZero(mipsReg));
if (mr[mipsReg].isStatic) { if (mr[mipsReg].isStatic) {
// Simply do nothing unless it's an IMM/RVREG_IMM/RVREG_AS_PTR, in case we just switch it over to RVREG, losing the value. DiscardNativeReg(mr[mipsReg].nReg);
RiscVReg riscvReg = (RiscVReg)mr[mipsReg].nReg;
_dbg_assert_(riscvReg != INVALID_REG);
if (mipsReg == MIPS_REG_ZERO) {
// Shouldn't happen, but in case it does.
mr[mipsReg].loc = MIPSLoc::REG_IMM;
mr[mipsReg].nReg = R_ZERO;
mr[mipsReg].imm = 0;
} else if (mr[mipsReg].loc == MIPSLoc::REG_IMM || mr[mipsReg].loc == MIPSLoc::IMM || mr[mipsReg].loc == MIPSLoc::REG_AS_PTR) {
// Ignore the imm value, restore sanity
mr[mipsReg].loc = MIPSLoc::REG;
nr[riscvReg].pointerified = false;
nr[riscvReg].isDirty = false;
nr[riscvReg].normalized32 = false;
}
return; return;
} }
const MIPSLoc prevLoc = mr[mipsReg].loc; const MIPSLoc prevLoc = mr[mipsReg].loc;
if (prevLoc == MIPSLoc::REG || prevLoc == MIPSLoc::REG_IMM || prevLoc == MIPSLoc::REG_AS_PTR) { if (prevLoc == MIPSLoc::REG || prevLoc == MIPSLoc::REG_IMM || prevLoc == MIPSLoc::REG_AS_PTR) {
RiscVReg riscvReg = (RiscVReg)mr[mipsReg].nReg; DiscardNativeReg(mr[mipsReg].nReg);
_dbg_assert_(riscvReg != INVALID_REG);
nr[riscvReg].mipsReg = IRREG_INVALID;
nr[riscvReg].pointerified = false;
nr[riscvReg].isDirty = false;
nr[riscvReg].normalized32 = false;
mr[mipsReg].nReg = (int)INVALID_REG;
mr[mipsReg].loc = MIPSLoc::MEM;
mr[mipsReg].imm = -1;
} }
if (prevLoc == MIPSLoc::IMM && mipsReg != MIPS_REG_ZERO) { if (prevLoc == MIPSLoc::IMM && mipsReg != MIPS_REG_ZERO) {
mr[mipsReg].loc = MIPSLoc::MEM; mr[mipsReg].loc = MIPSLoc::MEM;
@@ -724,7 +601,7 @@ void RiscVRegCache::DiscardR(IRReg mipsReg) {
} }
RiscVReg RiscVRegCache::RiscVRegForFlush(IRReg r) { RiscVReg RiscVRegCache::RiscVRegForFlush(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
if (mr[r].isStatic) if (mr[r].isStatic)
return INVALID_REG; // No flushing needed return INVALID_REG; // No flushing needed
@@ -772,7 +649,7 @@ RiscVReg RiscVRegCache::RiscVRegForFlush(IRReg r) {
} }
void RiscVRegCache::FlushR(IRReg r) { void RiscVRegCache::FlushR(IRReg r) {
_dbg_assert_(IsValidRegNoZero(r)); _dbg_assert_(IsValidGPRNoZero(r));
if (mr[r].isStatic) { if (mr[r].isStatic) {
ERROR_LOG(JIT, "Cannot flush static reg %d", r); ERROR_LOG(JIT, "Cannot flush static reg %d", r);
return; return;
@@ -796,10 +673,7 @@ void RiscVRegCache::FlushR(IRReg r) {
case MIPSLoc::REG: case MIPSLoc::REG:
case MIPSLoc::REG_IMM: case MIPSLoc::REG_IMM:
if (nr[mr[r].nReg].isDirty) { if (nr[mr[r].nReg].isDirty) {
RiscVReg storeReg = RiscVRegForFlush(r); StoreNativeReg(mr[r].nReg, r, 1);
if (storeReg != INVALID_REG) {
emit_->SW(storeReg, CTXREG, GetMipsRegOffset(r));
}
nr[mr[r].nReg].isDirty = false; nr[mr[r].nReg].isDirty = false;
} }
nr[mr[r].nReg].mipsReg = IRREG_INVALID; nr[mr[r].nReg].mipsReg = IRREG_INVALID;
@@ -808,17 +682,10 @@ void RiscVRegCache::FlushR(IRReg r) {
case MIPSLoc::REG_AS_PTR: case MIPSLoc::REG_AS_PTR:
if (nr[mr[r].nReg].isDirty) { if (nr[mr[r].nReg].isDirty) {
#ifdef MASKED_PSP_MEMORY AdjustNativeRegAsPtr(mr[r].nReg, false);
// This is kinda bad, because we've cleared bits in it. // We set this so StoreNativeReg knows it's no longer a pointer.
_dbg_assert_(!nr[mr[r].nReg].isDirty);
#endif
emit_->SUB((RiscVReg)mr[r].nReg, (RiscVReg)mr[r].nReg, MEMBASEREG);
// We set this so RiscVRegForFlush knows it's no longer a pointer.
mr[r].loc = MIPSLoc::REG; mr[r].loc = MIPSLoc::REG;
RiscVReg storeReg = RiscVRegForFlush(r); StoreNativeReg(mr[r].nReg, r, 1);
if (storeReg != INVALID_REG) {
emit_->SW(storeReg, CTXREG, GetMipsRegOffset(r));
}
nr[mr[r].nReg].isDirty = false; nr[mr[r].nReg].isDirty = false;
} }
nr[mr[r].nReg].mipsReg = IRREG_INVALID; nr[mr[r].nReg].mipsReg = IRREG_INVALID;
@@ -845,11 +712,10 @@ void RiscVRegCache::FlushR(IRReg r) {
void RiscVRegCache::FlushAll() { void RiscVRegCache::FlushAll() {
// Note: make sure not to change the registers when flushing: // Note: make sure not to change the registers when flushing:
// Branching code expects the armreg to retain its value. // Branching code expects the native reg to retain its value.
// TODO: HI/LO optimization? // TODO: HI/LO optimization?
// Final pass to grab any that were left behind.
for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; i++) { for (int i = 1; i < TOTAL_MAPPABLE_IRREGS; i++) {
IRReg mipsReg = IRReg(i); IRReg mipsReg = IRReg(i);
if (mr[i].isStatic) { if (mr[i].isStatic) {
@@ -878,7 +744,7 @@ void RiscVRegCache::FlushAll() {
ERROR_LOG(JIT, "RV reg of static %i is invalid", i); ERROR_LOG(JIT, "RV reg of static %i is invalid", i);
continue; continue;
} }
} else if (IsValidRegNoZero(mipsReg)) { } else if (IsValidGPRNoZero(mipsReg)) {
FlushR(mipsReg); FlushR(mipsReg);
} }
} }
@@ -907,7 +773,7 @@ void RiscVRegCache::FlushAll() {
} }
void RiscVRegCache::SetImm(IRReg r, u64 immVal) { void RiscVRegCache::SetImm(IRReg r, u64 immVal) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
if (r == MIPS_REG_ZERO && immVal != 0) { if (r == MIPS_REG_ZERO && immVal != 0) {
ERROR_LOG_REPORT(JIT, "Trying to set immediate %08x to r0", (u32)immVal); ERROR_LOG_REPORT(JIT, "Trying to set immediate %08x to r0", (u32)immVal);
return; return;
@@ -943,7 +809,7 @@ void RiscVRegCache::SetImm(IRReg r, u64 immVal) {
} }
bool RiscVRegCache::IsImm(IRReg r) const { bool RiscVRegCache::IsImm(IRReg r) const {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
if (r == MIPS_REG_ZERO) if (r == MIPS_REG_ZERO)
return true; return true;
else else
@@ -951,7 +817,7 @@ bool RiscVRegCache::IsImm(IRReg r) const {
} }
u64 RiscVRegCache::GetImm(IRReg r) const { u64 RiscVRegCache::GetImm(IRReg r) const {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
if (r == MIPS_REG_ZERO) if (r == MIPS_REG_ZERO)
return 0; return 0;
if (mr[r].loc != MIPSLoc::IMM && mr[r].loc != MIPSLoc::REG_IMM) { if (mr[r].loc != MIPSLoc::IMM && mr[r].loc != MIPSLoc::REG_IMM) {
@@ -961,41 +827,15 @@ u64 RiscVRegCache::GetImm(IRReg r) const {
} }
int RiscVRegCache::GetMipsRegOffset(IRReg r) { int RiscVRegCache::GetMipsRegOffset(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidGPR(r));
return r * 4; return r * 4;
} }
bool RiscVRegCache::IsValidReg(IRReg r) const {
if (r < 0 || r >= TOTAL_MAPPABLE_IRREGS)
return false;
// See MIPSState for these offsets.
// Don't allow FPU or VFPU regs here.
if (r >= 32 && r < 32 + 32 + 128)
return false;
// Also disallow VFPU temps.
if (r >= 224 && r < 224 + 16)
return false;
// Don't allow nextPC, etc. since it's probably a mistake.
if (r > IRREG_FPCOND && r != IRREG_LLBIT)
return false;
// Don't allow PC either.
if (r == 241)
return false;
return true;
}
bool RiscVRegCache::IsValidRegNoZero(IRReg r) const {
return IsValidReg(r) && r != MIPS_REG_ZERO;
}
void RiscVRegCache::SpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) { void RiscVRegCache::SpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
_dbg_assert_(IsValidReg(r1)); _dbg_assert_(IsValidGPR(r1));
_dbg_assert_(r2 == IRREG_INVALID || IsValidReg(r2)); _dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));
_dbg_assert_(r3 == IRREG_INVALID || IsValidReg(r3)); _dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));
_dbg_assert_(r4 == IRREG_INVALID || IsValidReg(r4)); _dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));
mr[r1].spillLockIRIndex = irIndex_; mr[r1].spillLockIRIndex = irIndex_;
if (r2 != IRREG_INVALID) mr[r2].spillLockIRIndex = irIndex_; if (r2 != IRREG_INVALID) mr[r2].spillLockIRIndex = irIndex_;
if (r3 != IRREG_INVALID) mr[r3].spillLockIRIndex = irIndex_; if (r3 != IRREG_INVALID) mr[r3].spillLockIRIndex = irIndex_;
@@ -1003,10 +843,10 @@ void RiscVRegCache::SpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
} }
void RiscVRegCache::ReleaseSpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) { void RiscVRegCache::ReleaseSpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
_dbg_assert_(IsValidReg(r1)); _dbg_assert_(IsValidGPR(r1));
_dbg_assert_(r2 == IRREG_INVALID || IsValidReg(r2)); _dbg_assert_(r2 == IRREG_INVALID || IsValidGPR(r2));
_dbg_assert_(r3 == IRREG_INVALID || IsValidReg(r3)); _dbg_assert_(r3 == IRREG_INVALID || IsValidGPR(r3));
_dbg_assert_(r4 == IRREG_INVALID || IsValidReg(r4)); _dbg_assert_(r4 == IRREG_INVALID || IsValidGPR(r4));
if (!mr[r1].isStatic) if (!mr[r1].isStatic)
mr[r1].spillLockIRIndex = -1; mr[r1].spillLockIRIndex = -1;
if (r2 != IRREG_INVALID && !mr[r2].isStatic) if (r2 != IRREG_INVALID && !mr[r2].isStatic)
@@ -1018,7 +858,7 @@ void RiscVRegCache::ReleaseSpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
} }
RiscVReg RiscVRegCache::R(IRReg mipsReg) { RiscVReg RiscVRegCache::R(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
_dbg_assert_(mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM); _dbg_assert_(mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM);
if (mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM) { if (mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM) {
return (RiscVReg)mr[mipsReg].nReg; return (RiscVReg)mr[mipsReg].nReg;
@@ -1029,7 +869,7 @@ RiscVReg RiscVRegCache::R(IRReg mipsReg) {
} }
RiscVReg RiscVRegCache::RPtr(IRReg mipsReg) { RiscVReg RiscVRegCache::RPtr(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidGPR(mipsReg));
_dbg_assert_(mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM || mr[mipsReg].loc == MIPSLoc::REG_AS_PTR); _dbg_assert_(mr[mipsReg].loc == MIPSLoc::REG || mr[mipsReg].loc == MIPSLoc::REG_IMM || mr[mipsReg].loc == MIPSLoc::REG_AS_PTR);
if (mr[mipsReg].loc == MIPSLoc::REG_AS_PTR) { if (mr[mipsReg].loc == MIPSLoc::REG_AS_PTR) {
return (RiscVReg)mr[mipsReg].nReg; return (RiscVReg)mr[mipsReg].nReg;

Core/MIPS/RISCV/RiscVRegCache.h View File

@@ -57,7 +57,7 @@ enum class MapType {
} // namespace RiscVJitConstants } // namespace RiscVJitConstants
class RiscVRegCache : public IRNativeRegCache { class RiscVRegCache : public IRNativeRegCacheBase {
public: public:
RiscVRegCache(MIPSComp::JitOptions *jo); RiscVRegCache(MIPSComp::JitOptions *jo);
@@ -98,7 +98,6 @@ public:
void FlushBeforeCall(); void FlushBeforeCall();
void FlushAll(); void FlushAll();
void FlushR(IRReg r); void FlushR(IRReg r);
void FlushRiscVReg(RiscVGen::RiscVReg r);
void DiscardR(IRReg r); void DiscardR(IRReg r);
RiscVGen::RiscVReg GetAndLockTempR(); RiscVGen::RiscVReg GetAndLockTempR();
@@ -112,21 +111,18 @@ public:
protected: protected:
void SetupInitialRegs() override; void SetupInitialRegs() override;
const StaticAllocation *GetStaticAllocations(int &count) override; const StaticAllocation *GetStaticAllocations(int &count) const override;
const int *GetAllocationOrder(MIPSLoc type, int &count, int &base) const override;
void AdjustNativeRegAsPtr(IRNativeReg nreg, bool state) override;
void StoreNativeReg(IRNativeReg nreg, IRReg first, int lanes) override;
private: private:
const RiscVGen::RiscVReg *GetMIPSAllocationOrder(int &count);
void MapRegTo(RiscVGen::RiscVReg reg, IRReg mipsReg, RiscVJitConstants::MIPSMap mapFlags); void MapRegTo(RiscVGen::RiscVReg reg, IRReg mipsReg, RiscVJitConstants::MIPSMap mapFlags);
RiscVGen::RiscVReg AllocateReg();
RiscVGen::RiscVReg FindBestToSpill(bool unusedOnly, bool *clobbered);
RiscVGen::RiscVReg RiscVRegForFlush(IRReg r); RiscVGen::RiscVReg RiscVRegForFlush(IRReg r);
void SetRegImm(RiscVGen::RiscVReg reg, u64 imm); void SetRegImm(RiscVGen::RiscVReg reg, u64 imm);
void AddMemBase(RiscVGen::RiscVReg reg); void AddMemBase(RiscVGen::RiscVReg reg);
int GetMipsRegOffset(IRReg r); int GetMipsRegOffset(IRReg r);
bool IsValidReg(IRReg r) const;
bool IsValidRegNoZero(IRReg r) const;
RiscVGen::RiscVEmitter *emit_ = nullptr; RiscVGen::RiscVEmitter *emit_ = nullptr;
enum { enum {

Core/MIPS/RISCV/RiscVRegCacheFPU.cpp View File

@@ -30,7 +30,7 @@ using namespace RiscVGen;
using namespace RiscVJitConstants; using namespace RiscVJitConstants;
RiscVRegCacheFPU::RiscVRegCacheFPU(MIPSComp::JitOptions *jo) RiscVRegCacheFPU::RiscVRegCacheFPU(MIPSComp::JitOptions *jo)
: IRNativeRegCache(jo) { : IRNativeRegCacheBase(jo) {
totalNativeRegs_ = NUM_RVFPUREG; totalNativeRegs_ = NUM_RVFPUREG;
} }
@@ -38,137 +38,65 @@ void RiscVRegCacheFPU::Init(RiscVEmitter *emitter) {
emit_ = emitter; emit_ = emitter;
} }
const RiscVReg *RiscVRegCacheFPU::GetMIPSAllocationOrder(int &count) { const int *RiscVRegCacheFPU::GetAllocationOrder(MIPSLoc type, int &count, int &base) const {
_assert_(type == MIPSLoc::FREG);
// F8 through F15 are used for compression, so they are great. // F8 through F15 are used for compression, so they are great.
// TODO: Maybe we could remove some saved regs since we rarely need that many? Or maybe worth it? // TODO: Maybe we could remove some saved regs since we rarely need that many? Or maybe worth it?
static const RiscVReg allocationOrder[] = { static const int allocationOrder[] = {
F8, F9, F10, F11, F12, F13, F14, F15, F8, F9, F10, F11, F12, F13, F14, F15,
F0, F1, F2, F3, F4, F5, F6, F7, F0, F1, F2, F3, F4, F5, F6, F7,
F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30, F31, F16, F17, F18, F19, F20, F21, F22, F23, F24, F25, F26, F27, F28, F29, F30, F31,
}; };
count = ARRAY_SIZE(allocationOrder); count = ARRAY_SIZE(allocationOrder);
base = F0;
return allocationOrder; return allocationOrder;
} }
bool RiscVRegCacheFPU::IsInRAM(IRReg reg) { bool RiscVRegCacheFPU::IsInRAM(IRReg reg) {
_dbg_assert_(IsValidReg(reg)); _dbg_assert_(IsValidFPR(reg));
return mr[reg].loc == MIPSLoc::MEM; return mr[reg + 32].loc == MIPSLoc::MEM;
} }
bool RiscVRegCacheFPU::IsMapped(IRReg mipsReg) { bool RiscVRegCacheFPU::IsMapped(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidFPR(mipsReg));
return mr[mipsReg].loc == MIPSLoc::FREG; return mr[mipsReg + 32].loc == MIPSLoc::FREG;
} }
RiscVReg RiscVRegCacheFPU::MapReg(IRReg mipsReg, MIPSMap mapFlags) { RiscVReg RiscVRegCacheFPU::MapReg(IRReg mipsReg, MIPSMap mapFlags) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidFPR(mipsReg));
_dbg_assert_(mr[mipsReg].loc == MIPSLoc::MEM || mr[mipsReg].loc == MIPSLoc::FREG); _dbg_assert_(mr[mipsReg + 32].loc == MIPSLoc::MEM || mr[mipsReg + 32].loc == MIPSLoc::FREG);
pendingFlush_ = true; pendingFlush_ = true;
// Let's see if it's already mapped. If so we just need to update the dirty flag. // Let's see if it's already mapped. If so we just need to update the dirty flag.
// We don't need to check for NOINIT because we assume that anyone who maps // We don't need to check for NOINIT because we assume that anyone who maps
// with that flag immediately writes a "known" value to the register. // with that flag immediately writes a "known" value to the register.
if (mr[mipsReg].loc == MIPSLoc::FREG) { if (mr[mipsReg + 32].loc == MIPSLoc::FREG) {
_assert_msg_(nr[mr[mipsReg].nReg].mipsReg == mipsReg, "GPU mapping out of sync, IR=%i", mipsReg); _assert_msg_(nr[mr[mipsReg + 32].nReg].mipsReg == mipsReg + 32, "FPR mapping out of sync, IR=%i", mipsReg);
if ((mapFlags & MIPSMap::DIRTY) == MIPSMap::DIRTY) { if ((mapFlags & MIPSMap::DIRTY) == MIPSMap::DIRTY) {
nr[mr[mipsReg].nReg].isDirty = true; nr[mr[mipsReg + 32].nReg].isDirty = true;
} }
return (RiscVReg)(mr[mipsReg].nReg + F0); return (RiscVReg)(mr[mipsReg + 32].nReg + F0);
} }
// Okay, not mapped, so we need to allocate an RV register. // Okay, not mapped, so we need to allocate an RV register.
RiscVReg reg = AllocateReg(); IRNativeReg nreg = AllocateReg(MIPSLoc::FREG);
if (reg != INVALID_REG) { if (nreg != -1) {
// That means it's free. Grab it, and load the value into it (if requested). // That means it's free. Grab it, and load the value into it (if requested).
nr[reg - F0].isDirty = (mapFlags & MIPSMap::DIRTY) == MIPSMap::DIRTY; nr[nreg].isDirty = (mapFlags & MIPSMap::DIRTY) == MIPSMap::DIRTY;
if ((mapFlags & MIPSMap::NOINIT) != MIPSMap::NOINIT) { if ((mapFlags & MIPSMap::NOINIT) != MIPSMap::NOINIT) {
if (mr[mipsReg].loc == MIPSLoc::MEM) { if (mr[mipsReg + 32].loc == MIPSLoc::MEM) {
emit_->FL(32, reg, CTXREG, GetMipsRegOffset(mipsReg)); emit_->FL(32, (RiscVReg)(F0 + nreg), CTXREG, GetMipsRegOffset(mipsReg + 32));
} }
} }
nr[reg - F0].mipsReg = mipsReg; nr[nreg].mipsReg = mipsReg + 32;
mr[mipsReg].loc = MIPSLoc::FREG; mr[mipsReg + 32].loc = MIPSLoc::FREG;
mr[mipsReg].nReg = reg - F0; mr[mipsReg + 32].nReg = nreg;
return reg; return (RiscVReg)(F0 + nreg);
} }
return reg; return (RiscVReg)(F0 + nreg);
}
RiscVReg RiscVRegCacheFPU::AllocateReg() {
int allocCount = 0;
const RiscVReg *allocOrder = GetMIPSAllocationOrder(allocCount);
allocate:
for (int i = 0; i < allocCount; i++) {
RiscVReg reg = allocOrder[i];
if (nr[reg - F0].mipsReg == IRREG_INVALID) {
return reg;
}
}
// Still nothing. Let's spill a reg and goto 10.
bool clobbered;
RiscVReg bestToSpill = FindBestToSpill(true, &clobbered);
if (bestToSpill == INVALID_REG) {
bestToSpill = FindBestToSpill(false, &clobbered);
}
if (bestToSpill != INVALID_REG) {
if (clobbered) {
DiscardR(nr[bestToSpill - F0].mipsReg);
} else {
FlushRiscVReg(bestToSpill);
}
// Now one must be free.
goto allocate;
}
// Uh oh, we have all of them spilllocked....
ERROR_LOG_REPORT(JIT, "Out of spillable registers in block PC %08x, index %d", irBlock_->GetOriginalStart(), irIndex_);
_assert_(bestToSpill != INVALID_REG);
return INVALID_REG;
}
RiscVReg RiscVRegCacheFPU::FindBestToSpill(bool unusedOnly, bool *clobbered) {
int allocCount = 0;
const RiscVReg *allocOrder = GetMIPSAllocationOrder(allocCount);
static const int UNUSED_LOOKAHEAD_OPS = 30;
IRSituation info;
info.lookaheadCount = UNUSED_LOOKAHEAD_OPS;
info.currentIndex = irIndex_;
info.instructions = irBlock_->GetInstructions();
info.numInstructions = irBlock_->GetNumInstructions();
*clobbered = false;
for (int i = 0; i < allocCount; i++) {
RiscVReg reg = allocOrder[i];
if (nr[reg - F0].mipsReg != IRREG_INVALID && mr[nr[reg - F0].mipsReg].spillLockIRIndex >= irIndex_)
continue;
// As it's in alloc-order, we know it's not static so we don't need to check for that.
IRUsage usage = IRNextFPRUsage(nr[reg - F0].mipsReg, info);
// Awesome, a clobbered reg. Let's use it.
if (usage == IRUsage::CLOBBERED) {
*clobbered = true;
return reg;
}
// Not awesome. A used reg. Let's try to avoid spilling.
if (!unusedOnly || usage == IRUsage::UNUSED) {
// TODO: Use age or something to choose which register to spill?
// TODO: Spill dirty regs first? or opposite?
return reg;
}
}
return INVALID_REG;
} }
void RiscVRegCacheFPU::MapInIn(IRReg rd, IRReg rs) { void RiscVRegCacheFPU::MapInIn(IRReg rd, IRReg rs) {
@@ -200,7 +128,7 @@ RiscVReg RiscVRegCacheFPU::MapDirtyInTemp(IRReg rd, IRReg rs, bool avoidLoad) {
bool load = !avoidLoad || rd == rs; bool load = !avoidLoad || rd == rs;
MapReg(rd, load ? MIPSMap::DIRTY : MIPSMap::NOINIT); MapReg(rd, load ? MIPSMap::DIRTY : MIPSMap::NOINIT);
MapReg(rs); MapReg(rs);
RiscVReg temp = AllocateReg(); RiscVReg temp = (RiscVReg)(F0 + AllocateReg(MIPSLoc::FREG));
ReleaseSpillLock(rd, rs); ReleaseSpillLock(rd, rs);
return temp; return temp;
} }
@@ -239,44 +167,40 @@ RiscVReg RiscVRegCacheFPU::Map4DirtyInTemp(IRReg rdbase, IRReg rsbase, bool avoi
MapReg(rdbase + i, load ? MIPSMap::DIRTY : MIPSMap::NOINIT); MapReg(rdbase + i, load ? MIPSMap::DIRTY : MIPSMap::NOINIT);
for (int i = 0; i < 4; ++i) for (int i = 0; i < 4; ++i)
MapReg(rsbase + i); MapReg(rsbase + i);
RiscVReg temp = AllocateReg(); RiscVReg temp = (RiscVReg)(F0 + AllocateReg(MIPSLoc::FREG));
for (int i = 0; i < 4; ++i) for (int i = 0; i < 4; ++i)
ReleaseSpillLock(rdbase + i, rsbase + i); ReleaseSpillLock(rdbase + i, rsbase + i);
return temp; return temp;
} }
void RiscVRegCacheFPU::FlushRiscVReg(RiscVReg r) { void RiscVRegCacheFPU::StoreNativeReg(IRNativeReg nreg, IRReg first, int lanes) {
RiscVReg r = (RiscVReg)(F0 + nreg);
_dbg_assert_(r >= F0 && r <= F31); _dbg_assert_(r >= F0 && r <= F31);
int reg = r - F0; // Multilane not yet supported.
if (nr[reg].mipsReg == IRREG_INVALID) { _assert_(lanes == 1);
// Nothing to do, reg not mapped. if (mr[first].loc == MIPSLoc::FREG) {
return; emit_->FS(32, r, CTXREG, GetMipsRegOffset(first));
} else {
_assert_msg_(mr[first].loc == MIPSLoc::FREG, "Cannot store this type: %d", (int)mr[first].loc);
} }
if (nr[reg].isDirty && mr[nr[reg].mipsReg].loc == MIPSLoc::FREG) {
emit_->FS(32, r, CTXREG, GetMipsRegOffset(nr[reg].mipsReg));
}
mr[nr[reg].mipsReg].loc = MIPSLoc::MEM;
mr[nr[reg].mipsReg].nReg = (int)INVALID_REG;
nr[reg].mipsReg = IRREG_INVALID;
nr[reg].isDirty = false;
} }
void RiscVRegCacheFPU::FlushR(IRReg r) { void RiscVRegCacheFPU::FlushR(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidFPR(r));
RiscVReg reg = RiscVRegForFlush(r); RiscVReg reg = RiscVRegForFlush(r);
if (reg != INVALID_REG) if (reg != INVALID_REG)
FlushRiscVReg(reg); FlushNativeReg((IRNativeReg)(reg - F0));
} }
RiscVReg RiscVRegCacheFPU::RiscVRegForFlush(IRReg r) { RiscVReg RiscVRegCacheFPU::RiscVRegForFlush(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidFPR(r));
switch (mr[r].loc) { switch (mr[r + 32].loc) {
case MIPSLoc::FREG: case MIPSLoc::FREG:
_assert_msg_(mr[r].nReg != INVALID_REG, "RiscVRegForFlush: IR %d had bad RiscVReg", r); _assert_msg_(mr[r + 32].nReg != INVALID_REG, "RiscVRegForFlush: IR %d had bad RiscVReg", r + 32);
if (mr[r].nReg == INVALID_REG) { if (mr[r + 32].nReg == INVALID_REG) {
return INVALID_REG; return INVALID_REG;
} }
return (RiscVReg)(F0 + mr[r].nReg); return (RiscVReg)(F0 + mr[r + 32].nReg);
case MIPSLoc::MEM: case MIPSLoc::MEM:
return INVALID_REG; return INVALID_REG;
@@ -295,13 +219,13 @@ void RiscVRegCacheFPU::FlushBeforeCall() {
// These registers are not preserved by function calls. // These registers are not preserved by function calls.
for (int i = 0; i <= 7; ++i) { for (int i = 0; i <= 7; ++i) {
FlushRiscVReg(RiscVReg(F0 + i)); FlushNativeReg(i);
} }
for (int i = 10; i <= 17; ++i) { for (int i = 10; i <= 17; ++i) {
FlushRiscVReg(RiscVReg(F0 + i)); FlushNativeReg(i);
} }
for (int i = 28; i <= 31; ++i) { for (int i = 28; i <= 31; ++i) {
FlushRiscVReg(RiscVReg(F0 + i)); FlushNativeReg(i);
} }
} }
@@ -311,16 +235,16 @@ void RiscVRegCacheFPU::FlushAll() {
return; return;
} }
int numRVRegs = 0; int numRVRegs = 0, baseIndex = 0;
const RiscVReg *order = GetMIPSAllocationOrder(numRVRegs); const int *order = GetAllocationOrder(MIPSLoc::FREG, numRVRegs, baseIndex);
for (int i = 0; i < numRVRegs; i++) { for (int i = 0; i < numRVRegs; i++) {
int a = order[i] - F0; int a = order[i] - baseIndex;
int m = nr[a].mipsReg; int m = nr[a].mipsReg;
if (nr[a].isDirty) { if (nr[a].isDirty) {
_assert_(m != MIPS_REG_INVALID); _assert_(m != MIPS_REG_INVALID);
emit_->FS(32, order[i], CTXREG, GetMipsRegOffset(m)); emit_->FS(32, (RiscVReg)(F0 + a), CTXREG, GetMipsRegOffset(m));
mr[m].loc = MIPSLoc::MEM; mr[m].loc = MIPSLoc::MEM;
mr[m].nReg = (int)INVALID_REG; mr[m].nReg = (int)INVALID_REG;
@@ -339,15 +263,12 @@ void RiscVRegCacheFPU::FlushAll() {
} }
void RiscVRegCacheFPU::DiscardR(IRReg r) { void RiscVRegCacheFPU::DiscardR(IRReg r) {
_dbg_assert_(IsValidReg(r)); _dbg_assert_(IsValidFPR(r));
switch (mr[r].loc) { switch (mr[r + 32].loc) {
case MIPSLoc::FREG: case MIPSLoc::FREG:
_assert_(mr[r].nReg != INVALID_REG); _assert_(mr[r + 32].nReg != INVALID_REG);
if (mr[r].nReg != INVALID_REG) { // Note that we DO NOT write it back here. That's the whole point of Discard.
// Note that we DO NOT write it back here. That's the whole point of Discard. DiscardNativeReg(mr[r + 32].nReg);
nr[mr[r].nReg].isDirty = false;
nr[mr[r].nReg].mipsReg = IRREG_INVALID;
}
break; break;
case MIPSLoc::MEM: case MIPSLoc::MEM:
@@ -358,77 +279,59 @@ void RiscVRegCacheFPU::DiscardR(IRReg r) {
_assert_(false); _assert_(false);
break; break;
} }
mr[r].loc = MIPSLoc::MEM; mr[r + 32].loc = MIPSLoc::MEM;
mr[r].nReg = (int)INVALID_REG; mr[r + 32].nReg = -1;
mr[r].spillLockIRIndex = -1; mr[r + 32].spillLockIRIndex = -1;
} }
int RiscVRegCacheFPU::GetMipsRegOffset(IRReg r) { int RiscVRegCacheFPU::GetMipsRegOffset(IRReg r) {
_assert_(IsValidReg(r)); _assert_(IsValidFPR(r - 32));
// These are offsets within the MIPSState structure. // These are offsets within the MIPSState structure.
// IR gives us an index that is already 32 after the state index (skipping GPRs.) // IR gives us an index that is already 32 after the state index (skipping GPRs.)
return (32 + r) * 4; return (r) * 4;
} }
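Since callers now pass FPR indices already offset by 32, this computes the same r * 4 as the GPR version. A quick worked example under the 4-bytes-per-slot layout the code assumes:

// f1 -> mr/ctx index 1 + 32 = 33 -> CTXREG displacement 33 * 4 = 132.
constexpr int kOffsetF1 = (1 + 32) * 4;
static_assert(kOffsetF1 == 132, "4 bytes per register slot");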
void RiscVRegCacheFPU::SpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) { void RiscVRegCacheFPU::SpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
_dbg_assert_(IsValidReg(r1)); _dbg_assert_(IsValidFPR(r1));
_dbg_assert_(r2 == IRREG_INVALID || IsValidReg(r2)); _dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));
_dbg_assert_(r3 == IRREG_INVALID || IsValidReg(r3)); _dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));
_dbg_assert_(r4 == IRREG_INVALID || IsValidReg(r4)); _dbg_assert_(r4 == IRREG_INVALID || IsValidFPR(r4));
mr[r1].spillLockIRIndex = irIndex_; mr[r1 + 32].spillLockIRIndex = irIndex_;
if (r2 != IRREG_INVALID) if (r2 != IRREG_INVALID)
mr[r2].spillLockIRIndex = irIndex_; mr[r2 + 32].spillLockIRIndex = irIndex_;
if (r3 != IRREG_INVALID) if (r3 != IRREG_INVALID)
mr[r3].spillLockIRIndex = irIndex_; mr[r3 + 32].spillLockIRIndex = irIndex_;
if (r4 != IRREG_INVALID) if (r4 != IRREG_INVALID)
mr[r4].spillLockIRIndex = irIndex_; mr[r4 + 32].spillLockIRIndex = irIndex_;
} }
void RiscVRegCacheFPU::ReleaseSpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) { void RiscVRegCacheFPU::ReleaseSpillLock(IRReg r1, IRReg r2, IRReg r3, IRReg r4) {
_dbg_assert_(IsValidReg(r1)); _dbg_assert_(IsValidFPR(r1));
_dbg_assert_(r2 == IRREG_INVALID || IsValidReg(r2)); _dbg_assert_(r2 == IRREG_INVALID || IsValidFPR(r2));
_dbg_assert_(r3 == IRREG_INVALID || IsValidReg(r3)); _dbg_assert_(r3 == IRREG_INVALID || IsValidFPR(r3));
_dbg_assert_(r4 == IRREG_INVALID || IsValidReg(r4)); _dbg_assert_(r4 == IRREG_INVALID || IsValidFPR(r4));
mr[r1].spillLockIRIndex = -1; mr[r1 + 32].spillLockIRIndex = -1;
if (r2 != IRREG_INVALID) if (r2 != IRREG_INVALID)
mr[r2].spillLockIRIndex = -1; mr[r2 + 32].spillLockIRIndex = -1;
if (r3 != IRREG_INVALID) if (r3 != IRREG_INVALID)
mr[r3].spillLockIRIndex = -1; mr[r3 + 32].spillLockIRIndex = -1;
if (r4 != IRREG_INVALID) if (r4 != IRREG_INVALID)
mr[r4].spillLockIRIndex = -1; mr[r4 + 32].spillLockIRIndex = -1;
} }
RiscVReg RiscVRegCacheFPU::R(IRReg mipsReg) { RiscVReg RiscVRegCacheFPU::R(IRReg mipsReg) {
_dbg_assert_(IsValidReg(mipsReg)); _dbg_assert_(IsValidFPR(mipsReg));
_dbg_assert_(mr[mipsReg].loc == MIPSLoc::FREG); _dbg_assert_(mr[mipsReg + 32].loc == MIPSLoc::FREG);
if (mr[mipsReg].loc == MIPSLoc::FREG) { if (mr[mipsReg + 32].loc == MIPSLoc::FREG) {
return (RiscVReg)(mr[mipsReg].nReg + F0); return (RiscVReg)(mr[mipsReg + 32].nReg + F0);
} else { } else {
ERROR_LOG_REPORT(JIT, "Reg %i not in riscv reg", mipsReg); ERROR_LOG_REPORT(JIT, "Reg %i not in riscv reg", mipsReg);
return INVALID_REG; // BAAAD return INVALID_REG; // BAAAD
} }
} }
bool RiscVRegCacheFPU::IsValidReg(IRReg r) const {
if (r < 0 || r >= NUM_MIPSFPUREG)
return false;
// See MIPSState for these offsets.
int index = r + 32;
// Allow FPU or VFPU regs here.
if (index >= 32 && index < 32 + 32 + 128)
return true;
// Also allow VFPU temps.
if (index >= 224 && index < 224 + 16)
return true;
// Nothing else is allowed for the FPU side cache.
return false;
}
void RiscVRegCacheFPU::SetupInitialRegs() { void RiscVRegCacheFPU::SetupInitialRegs() {
IRNativeRegCache::SetupInitialRegs(); IRNativeRegCacheBase::SetupInitialRegs();
// TODO: Move to a shared cache? // TODO: Move to a shared cache?
mrInitial_[0].loc = MIPSLoc::MEM; mrInitial_[0].loc = MIPSLoc::MEM;

Core/MIPS/RISCV/RiscVRegCacheFPU.h View File

@@ -25,7 +25,7 @@ namespace MIPSComp {
struct JitOptions; struct JitOptions;
} }
class RiscVRegCacheFPU : public IRNativeRegCache { class RiscVRegCacheFPU : public IRNativeRegCacheBase {
public: public:
RiscVRegCacheFPU(MIPSComp::JitOptions *jo); RiscVRegCacheFPU(MIPSComp::JitOptions *jo);
@@ -52,23 +52,19 @@ public:
void FlushBeforeCall(); void FlushBeforeCall();
void FlushAll(); void FlushAll();
void FlushR(IRReg r); void FlushR(IRReg r);
void FlushRiscVReg(RiscVGen::RiscVReg r);
void DiscardR(IRReg r); void DiscardR(IRReg r);
RiscVGen::RiscVReg R(IRReg preg); // Returns a cached register RiscVGen::RiscVReg R(IRReg preg); // Returns a cached register
protected: protected:
void SetupInitialRegs() override; void SetupInitialRegs() override;
const int *GetAllocationOrder(MIPSLoc type, int &count, int &base) const override;
void StoreNativeReg(IRNativeReg nreg, IRReg first, int lanes) override;
private: private:
const RiscVGen::RiscVReg *GetMIPSAllocationOrder(int &count);
RiscVGen::RiscVReg AllocateReg();
RiscVGen::RiscVReg FindBestToSpill(bool unusedOnly, bool *clobbered);
RiscVGen::RiscVReg RiscVRegForFlush(IRReg r); RiscVGen::RiscVReg RiscVRegForFlush(IRReg r);
int GetMipsRegOffset(IRReg r); int GetMipsRegOffset(IRReg r);
bool IsValidReg(IRReg r) const;
RiscVGen::RiscVEmitter *emit_ = nullptr; RiscVGen::RiscVEmitter *emit_ = nullptr;
enum { enum {