ir-jit: Get rid of the regcache. Should be replaced with optimization passes.

Henrik Rydgard 2016-05-07 23:12:53 +02:00
parent 750d520cc7
commit a33f8b68c6
9 changed files with 55 additions and 205 deletions
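
The regcache deleted here tracked known-constant GPR values during translation (gpr.IsImm / gpr.SetImm) and folded operations on them as MIPS instructions were compiled. With it gone, the front-end below emits plain IR unconditionally, and the same folding is meant to happen later as an IR-to-IR pass, in the spirit of the SimplifyInPlace change in the last file. A minimal sketch of what such a constant-folding pass could look like; the IRInst/IROp definitions are simplified stand-ins (only the field names op/dest/src1/src2 and the convention that SetConst keeps its pool index in src1 come from this diff), and the in-place pool mutation is a shortcut a real pass would avoid:

	#include <cstdint>

	// Illustrative stand-ins only, not PPSSPP's actual IRInst/IROp definitions.
	enum class IROp : uint8_t { SetConst, Mov, AddConst };
	struct IRInst {
		IROp op;
		uint8_t dest, src1, src2;  // src1 = pool index for SetConst; src2 = pool index for *Const ALU ops
	};

	// Fold chains of known-constant values across a block, the way the
	// regcache's IsImm/SetImm tracking used to do during translation.
	void ConstantFoldInPlace(IRInst *inst, int count, uint32_t *constPool,
	                         uint32_t knownValue[32], bool isKnown[32]) {
		for (int i = 0; i < count; i++) {
			IRInst &in = inst[i];
			switch (in.op) {
			case IROp::SetConst:
				isKnown[in.dest] = true;
				knownValue[in.dest] = constPool[in.src1];
				break;
			case IROp::Mov:
				isKnown[in.dest] = isKnown[in.src1];
				knownValue[in.dest] = knownValue[in.src1];
				break;
			case IROp::AddConst:
				if (isKnown[in.src1]) {
					// Input known: fold and rewrite as a plain constant load.
					// (A real pass would allocate a fresh pool slot; reusing
					// src2 is wrong if another instruction shares the slot.)
					constPool[in.src2] += knownValue[in.src1];
					in.op = IROp::SetConst;
					in.src1 = in.src2;
					isKnown[in.dest] = true;
					knownValue[in.dest] = constPool[in.src1];
				} else {
					isKnown[in.dest] = false;
				}
				break;
			}
		}
	}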

View File

@@ -20,7 +20,6 @@
 #include "Core/MIPS/MIPS.h"
 #include "Core/MIPS/MIPSCodeUtils.h"
 #include "Core/MIPS/IR/IRJit.h"
-#include "Core/MIPS/IR/IRRegCache.h"
 #include "Common/CPUDetect.h"
 using namespace MIPSAnalyst;
@@ -46,21 +45,6 @@ using namespace MIPSAnalyst;
 namespace MIPSComp {
-void IRJit::CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, IROp OP) {
-	if (gpr.IsImm(rs)) {
-		switch (OP) {
-		case IROp::AddConst: gpr.SetImm(rt, gpr.GetImm(rs) + uimm); break;
-		case IROp::SubConst: gpr.SetImm(rt, gpr.GetImm(rs) - uimm); break;
-		case IROp::AndConst: gpr.SetImm(rt, gpr.GetImm(rs) & uimm); break;
-		case IROp::OrConst: gpr.SetImm(rt, gpr.GetImm(rs) | uimm); break;
-		case IROp::XorConst: gpr.SetImm(rt, gpr.GetImm(rs) ^ uimm); break;
-		}
-	} else {
-		gpr.MapDirtyIn(rt, rs);
-		ir.Write(OP, rt, rs, ir.AddConstant(uimm));
-	}
-}
 void IRJit::Comp_IType(MIPSOpcode op) {
 	CONDITIONAL_DISABLE;
 	s32 simm = (s32)(s16)(op & 0xFFFF); // sign extension
@@ -79,36 +63,26 @@ void IRJit::Comp_IType(MIPSOpcode op) {
 	case 9: // R(rt) = R(rs) + simm; break; //addiu
 		// Special-case for small adjustments of pointerified registers. Commonly for SP but happens for others.
 		if (simm >= 0) {
-			CompImmLogic(rs, rt, simm, IROp::AddConst);
+			ir.Write(IROp::AddConst, rt, rs, ir.AddConstant(simm));
 		} else if (simm < 0) {
-			CompImmLogic(rs, rt, -simm, IROp::SubConst);
+			ir.Write(IROp::SubConst, rt, rs, ir.AddConstant(-simm));
 		}
 		break;
-	case 12: CompImmLogic(rs, rt, uimm, IROp::AndConst); break;
-	case 13: CompImmLogic(rs, rt, uimm, IROp::OrConst); break;
-	case 14: CompImmLogic(rs, rt, uimm, IROp::XorConst); break;
+	case 12: ir.Write(IROp::AndConst, rt, rs, ir.AddConstant(uimm)); break;
+	case 13: ir.Write(IROp::OrConst, rt, rs, ir.AddConstant(uimm)); break;
+	case 14: ir.Write(IROp::XorConst, rt, rs, ir.AddConstant(uimm)); break;
 	case 10: // R(rt) = (s32)R(rs) < simm; break; //slti
-		if (gpr.IsImm(rs)) {
-			gpr.SetImm(rt, (s32)gpr.GetImm(rs) < simm ? 1 : 0);
-			break;
-		}
-		gpr.MapDirtyIn(rt, rs);
 		ir.Write(IROp::SltConst, rt, rs, ir.AddConstant(simm));
 		break;
 	case 11: // R(rt) = R(rs) < suimm; break; //sltiu
-		if (gpr.IsImm(rs)) {
-			gpr.SetImm(rt, gpr.GetImm(rs) < suimm ? 1 : 0);
-			break;
-		}
-		gpr.MapDirtyIn(rt, rs);
 		ir.Write(IROp::SltUConst, rt, rs, ir.AddConstant(suimm));
 		break;
 	case 15: // R(rt) = uimm << 16; //lui
-		gpr.SetImm(rt, uimm << 16);
+		ir.WriteSetConstant(rt, uimm << 16);
 		break;
 	default:
@@ -129,11 +103,9 @@ void IRJit::Comp_RType2(MIPSOpcode op) {
 	switch (op & 63) {
 	case 22: //clz
-		gpr.MapDirtyIn(rd, rs);
 		ir.Write(IROp::Clz, rd, rs);
 		break;
 	case 23: //clo
-		gpr.MapDirtyIn(rd, rs);
 		ir.Write(IROp::Not, IRTEMP_0, rs);
 		ir.Write(IROp::Clz, rd, IRTEMP_0);
 		break;
@@ -142,25 +114,6 @@
 	}
 }
-void IRJit::CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, IROp op, IROp constOp, bool symmetric) {
-	if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-		switch (op) {
-		case IROp::Add: gpr.SetImm(rd, gpr.GetImm(rs) + gpr.GetImm(rt)); break;
-		case IROp::Sub: gpr.SetImm(rd, gpr.GetImm(rs) - gpr.GetImm(rt)); break;
-		case IROp::And: gpr.SetImm(rd, gpr.GetImm(rs) & gpr.GetImm(rt)); break;
-		case IROp::Or: gpr.SetImm(rd, gpr.GetImm(rs) | gpr.GetImm(rt)); break;
-		case IROp::Xor: gpr.SetImm(rd, gpr.GetImm(rs) ^ gpr.GetImm(rt)); break;
-		}
-		return;
-	}
-	// Can't do the RSB optimization on ARM64 - no RSB!
-	// Generic solution. If it's an imm, better to flush at this point.
-	gpr.MapDirtyInIn(rd, rs, rt);
-	ir.Write(op, rd, rs, rt);
-}
 void IRJit::Comp_RType3(MIPSOpcode op) {
 	CONDITIONAL_DISABLE;
@@ -174,83 +127,56 @@ void IRJit::Comp_RType3(MIPSOpcode op) {
 	switch (op & 63) {
 	case 10: //if (!R(rt)) R(rd) = R(rs); break; //movz
-		gpr.MapDirtyInIn(rd, rt, rs);
 		ir.Write(IROp::MovZ, rd, rt, rs);
 		break;
 	case 11: // if (R(rt)) R(rd) = R(rs); break; //movn
-		gpr.MapDirtyInIn(rd, rt, rs);
 		ir.Write(IROp::MovNZ, rd, rt, rs);
 		break;
 	case 32: //R(rd) = R(rs) + R(rt); break; //add
 	case 33: //R(rd) = R(rs) + R(rt); break; //addu
-		CompType3(rd, rs, rt, IROp::Add, IROp::AddConst, true);
+		ir.Write(IROp::Add, rd, rs, rt);
 		break;
 	case 34: //R(rd) = R(rs) - R(rt); break; //sub
 	case 35: //R(rd) = R(rs) - R(rt); break; //subu
-		CompType3(rd, rs, rt, IROp::Sub, IROp::SubConst, false);
+		ir.Write(IROp::Sub, rd, rs, rt);
 		break;
 	case 36: //R(rd) = R(rs) & R(rt); break; //and
-		CompType3(rd, rs, rt, IROp::And, IROp::AndConst, true);
+		ir.Write(IROp::And, rd, rs, rt);
 		break;
 	case 37: //R(rd) = R(rs) | R(rt); break; //or
-		CompType3(rd, rs, rt, IROp::Or, IROp::OrConst, true);
+		ir.Write(IROp::Or, rd, rs, rt);
 		break;
 	case 38: //R(rd) = R(rs) ^ R(rt); break; //xor/eor
-		CompType3(rd, rs, rt, IROp::Xor, IROp::XorConst, true);
+		ir.Write(IROp::Xor, rd, rs, rt);
 		break;
 	case 39: // R(rd) = ~(R(rs) | R(rt)); break; //nor
-		if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-			gpr.SetImm(rd, ~(gpr.GetImm(rs) | gpr.GetImm(rt)));
+		if (rs == 0) {
+			ir.Write(IROp::Not, rd, rt);
+		} else if (rt == 0) {
+			ir.Write(IROp::Not, rd, rs);
 		} else {
-			gpr.MapDirtyInIn(rd, rs, rt);
-			if (rs == 0) {
-				ir.Write(IROp::Not, rd, rt);
-			} else if (rt == 0) {
-				ir.Write(IROp::Not, rd, rs);
-			} else {
-				ir.Write(IROp::Or, IRTEMP_0, rs, rt);
-				ir.Write(IROp::Not, rd, IRTEMP_0);
-			}
+			ir.Write(IROp::Or, IRTEMP_0, rs, rt);
+			ir.Write(IROp::Not, rd, IRTEMP_0);
 		}
 		break;
 	case 42: //R(rd) = (int)R(rs) < (int)R(rt); break; //slt
-		if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-			gpr.SetImm(rd, (s32)gpr.GetImm(rs) < (s32)gpr.GetImm(rt));
-		} else {
-			gpr.MapDirtyInIn(rd, rt, rs);
-			ir.Write(IROp::Slt, rd, rs, rt);
-		}
+		ir.Write(IROp::Slt, rd, rs, rt);
 		break;
 	case 43: //R(rd) = R(rs) < R(rt); break; //sltu
-		if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-			gpr.SetImm(rd, gpr.GetImm(rs) < gpr.GetImm(rt));
-		} else {
-			gpr.MapDirtyInIn(rd, rt, rs);
-			ir.Write(IROp::SltU, rd, rs, rt);
-		}
+		ir.Write(IROp::SltU, rd, rs, rt);
 		break;
 	case 44: //R(rd) = max(R(rs), R(rt); break; //max
-		if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-			gpr.SetImm(rd, std::max(gpr.GetImm(rs), gpr.GetImm(rt)));
-			break;
-		}
-		gpr.MapDirtyInIn(rd, rs, rt);
 		ir.Write(IROp::Max, rd, rs, rt);
 		break;
 	case 45: //R(rd) = min(R(rs), R(rt)); break; //min
-		if (gpr.IsImm(rs) && gpr.IsImm(rt)) {
-			gpr.SetImm(rd, std::min(gpr.GetImm(rs), gpr.GetImm(rt)));
-			break;
-		}
-		gpr.MapDirtyInIn(rd, rs, rt);
 		ir.Write(IROp::Min, rd, rs, rt);
 		break;
@@ -263,39 +189,13 @@ void IRJit::Comp_RType3(MIPSOpcode op) {
 void IRJit::CompShiftImm(MIPSOpcode op, IROp shiftOpConst, int sa) {
 	MIPSGPReg rd = _RD;
 	MIPSGPReg rt = _RT;
-	if (gpr.IsImm(rt)) {
-		switch (shiftOpConst) {
-		case IROp::ShlImm:
-			gpr.SetImm(rd, gpr.GetImm(rt) << sa);
-			break;
-		case IROp::ShrImm:
-			gpr.SetImm(rd, gpr.GetImm(rt) >> sa);
-			break;
-		case IROp::SarImm:
-			gpr.SetImm(rd, (int)gpr.GetImm(rt) >> sa);
-			break;
-		case IROp::RorImm:
-			gpr.SetImm(rd, (gpr.GetImm(rt) >> sa) | (gpr.GetImm(rt) << (32 - sa)));
-			break;
-		default:
-			DISABLE;
-		}
-	} else {
-		gpr.MapDirtyIn(rd, rt);
-		ir.Write(shiftOpConst, rd, rt, sa);
-	}
+	ir.Write(shiftOpConst, rd, rt, sa);
 }
 void IRJit::CompShiftVar(MIPSOpcode op, IROp shiftOp, IROp shiftOpConst) {
 	MIPSGPReg rd = _RD;
 	MIPSGPReg rt = _RT;
 	MIPSGPReg rs = _RS;
-	if (gpr.IsImm(rs)) {
-		int sa = gpr.GetImm(rs) & 0x1F;
-		CompShiftImm(op, shiftOpConst, sa);
-		return;
-	}
-	gpr.MapDirtyInIn(rd, rs, rt);
 	// Not sure if ARM64 wraps like this so let's do it for it. (TODO: According to the ARM ARM, it will indeed mask for us so this is not necessary)
 	// ANDI2R(SCRATCH1, gpr.R(rs), 0x1F, INVALID_REG);
 	ir.Write(IROp::AndConst, IRTEMP_0, rs, ir.AddConstant(31));
@@ -343,12 +243,6 @@ void IRJit::Comp_Special3(MIPSOpcode op) {
 	switch (op & 0x3f) {
 	case 0x0: //ext
-		if (gpr.IsImm(rs)) {
-			gpr.SetImm(rt, (gpr.GetImm(rs) >> pos) & mask);
-			return;
-		}
-		gpr.MapDirtyIn(rt, rs);
 		ir.Write(IROp::Shl, rt, rs);
 		ir.Write(IROp::AndConst, rt, rt, ir.AddConstant(mask));
 		break;
@@ -357,25 +251,10 @@
 		{
 			u32 sourcemask = mask >> pos;
 			u32 destmask = ~(sourcemask << pos);
-			if (gpr.IsImm(rs)) {
-				u32 inserted = (gpr.GetImm(rs) & sourcemask) << pos;
-				if (gpr.IsImm(rt)) {
-					gpr.SetImm(rt, (gpr.GetImm(rt) & destmask) | inserted);
-					return;
-				}
-				gpr.MapDirty(rt);
-				ir.Write(IROp::AndConst, rt, rt, ir.AddConstant(destmask));
-				if (inserted != 0) {
-					ir.Write(IROp::OrConst, rt, rt, inserted);
-				}
-			} else {
-				gpr.MapDirtyIn(rt, rs);
-				ir.Write(IROp::AndConst, IRTEMP_0, rs, ir.AddConstant(sourcemask));
-				ir.Write(IROp::AndConst, rt, rt, ir.AddConstant(destmask));
-				ir.Write(IROp::ShlImm, IRTEMP_0, IRTEMP_0, pos);
-				ir.Write(IROp::Or, rt, rt, IRTEMP_0);
-			}
+			ir.Write(IROp::AndConst, IRTEMP_0, rs, ir.AddConstant(sourcemask));
+			ir.Write(IROp::AndConst, rt, rt, ir.AddConstant(destmask));
+			ir.Write(IROp::ShlImm, IRTEMP_0, IRTEMP_0, pos);
+			ir.Write(IROp::Or, rt, rt, IRTEMP_0);
 		}
 		break;
 	}
@@ -391,20 +270,10 @@ void IRJit::Comp_Allegrex(MIPSOpcode op) {
 	switch ((op >> 6) & 31) {
 	case 16: // seb // R(rd) = (u32)(s32)(s8)(u8)R(rt);
-		if (gpr.IsImm(rt)) {
-			gpr.SetImm(rd, (s32)(s8)(u8)gpr.GetImm(rt));
-			return;
-		}
-		gpr.MapDirtyIn(rd, rt);
 		ir.Write(IROp::Ext8to32, rd, rt);
 		break;
 	case 24: // seh
-		if (gpr.IsImm(rt)) {
-			gpr.SetImm(rd, (s32)(s16)(u16)gpr.GetImm(rt));
-			return;
-		}
-		gpr.MapDirtyIn(rd, rt);
 		ir.Write(IROp::Ext16to32, rd, rt);
 		break;
@@ -425,20 +294,10 @@ void IRJit::Comp_Allegrex2(MIPSOpcode op) {
 	switch (op & 0x3ff) {
 	case 0xA0: //wsbh
-		if (gpr.IsImm(rt)) {
-			gpr.SetImm(rd, ((gpr.GetImm(rt) & 0xFF00FF00) >> 8) | ((gpr.GetImm(rt) & 0x00FF00FF) << 8));
-		} else {
-			gpr.MapDirtyIn(rd, rt);
-			ir.Write(IROp::BSwap16, rd, rt);
-		}
+		ir.Write(IROp::BSwap16, rd, rt);
 		break;
 	case 0xE0: //wsbw
-		if (gpr.IsImm(rt)) {
-			gpr.SetImm(rd, swap32(gpr.GetImm(rt)));
-		} else {
-			gpr.MapDirtyIn(rd, rt);
-			ir.Write(IROp::BSwap16, rd, rt);
-		}
+		ir.Write(IROp::BSwap16, rd, rt);
 		break;
 	default:
 		Comp_Generic(op);
@@ -456,25 +315,21 @@ void IRJit::Comp_MulDivType(MIPSOpcode op) {
 	switch (op & 63) {
 	case 16: // R(rd) = HI; //mfhi
 		if (rd != MIPS_REG_ZERO) {
-			gpr.MapDirty(rd);
 			ir.Write(IROp::MfHi, rd);
 		}
 		break;
 	case 17: // HI = R(rs); //mthi
-		gpr.MapIn(rs);
 		ir.Write(IROp::MtHi, 0, rs);
 		break;
 	case 18: // R(rd) = LO; break; //mflo
 		if (rd != MIPS_REG_ZERO) {
-			gpr.MapDirty(rd);
 			ir.Write(IROp::MfLo, rd);
 		}
 		break;
 	case 19: // LO = R(rs); break; //mtlo
-		gpr.MapIn(rs);
 		ir.Write(IROp::MtLo, 0, rs);
 		break;

View File

@@ -29,7 +29,6 @@
 #include "Core/MIPS/MIPSTables.h"
 #include "Core/MIPS/IR/IRJit.h"
-#include "Core/MIPS/IR/IRRegCache.h"
 #include "Core/MIPS/JitCommon/JitBlockCache.h"
 #include "Common/Arm64Emitter.h"
@@ -74,12 +73,10 @@ void IRJit::BranchRSRTComp(MIPSOpcode op, IRComparison cc, bool likely)
 	MIPSGPReg rhs = rt;
 	if (!delaySlotIsNice) { // if likely, we don't need this
 		if (rs != 0) {
-			gpr.MapIn(rs);
 			ir.Write(IROp::Mov, IRTEMP_0, rs);
 			lhs = (MIPSGPReg)IRTEMP_0;
 		}
 		if (rt != 0) {
-			gpr.MapIn(rt);
 			ir.Write(IROp::Mov, IRTEMP_1, rt);
 			rhs = (MIPSGPReg)IRTEMP_1;
 		}
@@ -88,7 +85,6 @@
 	if (!likely)
 		CompileDelaySlot();
-	gpr.MapInIn(lhs, rhs);
 	FlushAll();
 	ir.Write(ComparisonToExit(cc), ir.AddConstant(GetCompilerPC() + 8), lhs, rhs);
 	// This makes the block "impure" :(
@@ -121,12 +117,11 @@ void IRJit::BranchRSZeroComp(MIPSOpcode op, IRComparison cc, bool andLink, bool likely)
 		lhs = (MIPSGPReg)IRTEMP_0;
 	}
 	if (andLink)
-		gpr.SetImm(MIPS_REG_RA, GetCompilerPC() + 8);
+		ir.WriteSetConstant(MIPS_REG_RA, GetCompilerPC() + 8);
 	if (!likely)
 		CompileDelaySlot();
-	gpr.MapIn(lhs);
 	FlushAll();
 	ir.Write(ComparisonToExit(cc), ir.AddConstant(GetCompilerPC() + 8), lhs);
 	if (likely)
@@ -294,7 +289,7 @@ void IRJit::Comp_Jump(MIPSOpcode op) {
 		break;
 	case 3: //jal
-		gpr.SetImm(MIPS_REG_RA, GetCompilerPC() + 8);
+		ir.WriteSetConstant(MIPS_REG_RA, GetCompilerPC() + 8);
 		CompileDelaySlot();
 		FlushAll();
 		ir.Write(IROp::ExitToConst, ir.AddConstant(targetAddr));
@@ -325,27 +320,24 @@ void IRJit::Comp_JumpReg(MIPSOpcode op) {
 	int destReg;
 	if (IsSyscall(delaySlotOp)) {
-		gpr.MapDirty(rs);
 		ir.Write(IROp::SetPC, 0, rs);
 		if (andLink)
-			gpr.SetImm(rd, GetCompilerPC() + 8);
+			ir.WriteSetConstant(rd, GetCompilerPC() + 8);
 		CompileDelaySlot();
 		// Syscall (the delay slot) does FlushAll.
 		return; // Syscall (delay slot) wrote exit code.
 	} else if (delaySlotIsNice) {
 		if (andLink)
-			gpr.SetImm(rd, GetCompilerPC() + 8);
+			ir.WriteSetConstant(rd, GetCompilerPC() + 8);
 		CompileDelaySlot();
-		gpr.MapDirty(rs);
 		destReg = rs; // Safe because FlushAll doesn't change any regs
 		FlushAll();
 	} else {
 		// Bad delay slot.
-		gpr.MapDirty(rs);
 		ir.Write(IROp::Mov, IRTEMP_0, rs);
 		destReg = IRTEMP_0;
 		if (andLink)
-			gpr.SetImm(rd, GetCompilerPC() + 8);
+			ir.WriteSetConstant(rd, GetCompilerPC() + 8);
 		CompileDelaySlot();
 		FlushAll();
 	}

View File

@@ -82,7 +82,7 @@ void IRJit::Comp_FPUComp(MIPSOpcode op) {
 	int opc = op & 0xF;
 	if (opc >= 8) opc -= 8; // alias
 	if (opc == 0) { // f, sf (signalling false)
-		gpr.SetImm((MIPSGPReg)IRREG_FPCOND, 0);
+		ir.Write(IROp::ZeroFpCond);
 		return;
 	}
@@ -186,7 +186,6 @@ void IRJit::Comp_mxc1(MIPSOpcode op)
 		if (rt == MIPS_REG_ZERO) {
 			return;
 		}
-		gpr.MapDirty(rt);
 		ir.Write(IROp::FMovToGPR, rt, fs);
 		return;
@@ -196,16 +195,16 @@ void IRJit::Comp_mxc1(MIPSOpcode op)
 		}
 		if (fs == 31) {
 			DISABLE;
-		} else if (fs == 0) {
-			gpr.SetImm(rt, MIPSState::FCR0_VALUE);
+		}
+		else if (fs == 0) {
+			ir.Write(IROp::SetConst, rt, ir.AddConstant(MIPSState::FCR0_VALUE));
 		} else {
 			// Unsupported regs are always 0.
-			gpr.SetImm(rt, 0);
+			ir.Write(IROp::SetConst, rt, ir.AddConstant(0));
 		}
 		return;
 	case 4: //FI(fs) = R(rt); break; //mtc1
-		gpr.MapDirty(rt);
 		ir.Write(IROp::FMovFromGPR, fs, rt);
 		return;

View File

@@ -82,8 +82,6 @@ namespace MIPSComp {
 		return;
 	}
-	gpr.MapIn(rs);
-	gpr.MapDirty(rt);
 	int addrReg = IRTEMP_0;
 	switch (o) {
 	// Load

View File

@@ -372,6 +372,9 @@ u32 IRInterpret(MIPSState *mips, const IRInst *inst, const u32 *constPool, int count) {
 		}
 		break; //cvt.w.s
 	}
+	case IROp::ZeroFpCond:
+		mips->fpcond = 0;
+		break;
 	case IROp::FMovFromGPR:
 		memcpy(&mips->f[inst->dest], &mips->r[inst->src1], 4);
@@ -481,6 +484,10 @@ int IRWriter::AddConstant(u32 value) {
 			return (int)i;
 	}
 	constPool_.push_back(value);
+	if (constPool_.size() > 255) {
+		// Cannot have more than 256 constants in a block!
+		Crash();
+	}
 	return (int)constPool_.size() - 1;
 }
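
For context, the top of AddConstant (whose dedup loop header is cut off in the hunk above) presumably looks roughly like this. This is a reconstruction for illustration, not verbatim source, and the byte-wide-operand explanation for the limit is an assumption:

	int IRWriter::AddConstant(u32 value) {
		// Reuse an existing pool slot if this constant is already there.
		for (size_t i = 0; i < constPool_.size(); i++) {
			if (constPool_[i] == value)
				return (int)i;
		}
		constPool_.push_back(value);
		if (constPool_.size() > 255) {
			// Cannot have more than 256 constants in a block!
			// (IR operands are presumably a byte wide, so pool indices must fit in 0-255.)
			Crash();
		}
		return (int)constPool_.size() - 1;
	}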

View File

@@ -121,6 +121,7 @@ enum class IROp : u8 {
 	FpCondToReg,
 	VfpCondToReg,
+	ZeroFpCond,
 	FCmpUnordered,
 	FCmpEqual,
 	FCmpEqualUnordered,

View File

@@ -41,14 +41,14 @@
 namespace MIPSComp
 {
-IRJit::IRJit(MIPSState *mips) : gpr(), mips_(mips) {
+IRJit::IRJit(MIPSState *mips) : mips_(mips) {
 	logBlocks = 0;
 	dontLogBlocks = 0;
 	js.startDefaultPrefix = true;
 	js.currentRoundingFunc = convertS0ToSCRATCH1[0];
 	u32 size = 128 * 1024;
 	blTrampolines_ = kernelMemory.Alloc(size, true, "trampoline");
-	logBlocks = 0;
+	logBlocks = 100;
 	InitIR();
 }
@@ -88,7 +88,7 @@ void IRJit::DoDummyState(PointerWrap &p) {
 }
 void IRJit::FlushAll() {
-	gpr.FlushAll();
+	// gpr.FlushAll();
 	// FlushPrefixV();
 }
@@ -246,8 +243,6 @@ void IRJit::DoJit(u32 em_address, IRBlock *b) {
 	js.PrefixStart();
 	ir.Clear();
-	gpr.Start(&ir);
-	int partialFlushOffset = 0;
 	js.numInstructions = 0;
@@ -273,7 +271,6 @@ void IRJit::DoJit(u32 em_address, IRBlock *b) {
 		}
 	}
 	if (logBlocks > 0 && dontLogBlocks == 0) {
 		ILOG("=============== IR (%d instructions) ===============", js.numInstructions);
 		for (int i = 0; i < ir.GetInstructions().size(); i++) {

View File

@@ -84,16 +84,18 @@ public:
 	int GetNumBlocks() const { return (int)blocks_.size(); }
 	int AllocateBlock(int emAddr) {
 		blocks_.emplace_back(IRBlock(emAddr));
+		size_ = (int)blocks_.size();
 		return (int)blocks_.size() - 1;
 	}
 	IRBlock *GetBlock(int i) {
-		if (i >= 0 && i < blocks_.size()) {
-			return &blocks_[i];
+		if (i >= 0 && i < size_) {
+			return blocks_.data() + i;
 		} else {
 			return nullptr;
 		}
 	}
 private:
+	int size_;
 	std::vector<IRBlock> blocks_;
 };
@@ -231,8 +233,6 @@ private:
 	void BranchRSRTComp(MIPSOpcode op, IRComparison cc, bool likely);
 	// Utilities to reduce duplicated code
-	void CompImmLogic(MIPSGPReg rs, MIPSGPReg rt, u32 uimm, IROp op);
-	void CompType3(MIPSGPReg rd, MIPSGPReg rs, MIPSGPReg rt, IROp op, IROp constOp, bool symmetric = false);
 	void CompShiftImm(MIPSOpcode op, IROp shiftType, int sa);
 	void CompShiftVar(MIPSOpcode op, IROp shiftType, IROp shiftTypeConst);
@@ -258,9 +258,6 @@ private:
 	IRBlockCache blocks_;
-	IRRegCache gpr;
-	// Arm64RegCacheFPU fpr;
 	MIPSState *mips_;
 	int dontLogBlocks;

View File

@@ -6,6 +6,10 @@ void SimplifyInPlace(IRInst *inst, int count, const u32 *constPool) {
 	case IROp::AddConst:
 		if (constPool[inst[i].src2] == 0)
 			inst[i].op = IROp::Mov;
+		else if (inst[i].src1 == 0) {
+			inst[i].op = IROp::SetConst;
+			inst[i].src1 = inst[i].src2;
+		}
 		break;
 	default:
 		break;
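
The new else-if folds the common `addiu rt, zero, imm` idiom: register 0 is hardwired to zero on MIPS, so an AddConst whose src1 is register 0 just materializes its constant, and the pass rewrites the instruction in place into a SetConst that reuses the same constant-pool index (SetConst carries its pool index in src1, hence the src1 = src2 assignment). A worked example, with register and pool-slot numbers made up for illustration:

	before:  { op = AddConst, dest = 4, src1 = 0, src2 = 3 }   // r4 = r0 + pool[3], r0 always reads as 0
	after:   { op = SetConst, dest = 4, src1 = 3 }             // r4 = pool[3]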