diff --git a/Common/Arm64Emitter.cpp b/Common/Arm64Emitter.cpp
index 669123baf..0a86be89f 100644
--- a/Common/Arm64Emitter.cpp
+++ b/Common/Arm64Emitter.cpp
@@ -46,12 +46,12 @@ bool IsPowerOfTwo(uint64_t x) {
 
 bool IsImmArithmetic(uint64_t input, u32 *val, bool *shift) {
 	if (input < 4096) {
-		*val = input;
-		*shift = false;
+		if (val) *val = input;
+		if (shift) *shift = false;
 		return true;
 	} else if ((input & 0xFFF000) == input) {
-		*val = input >> 12;
-		*shift = true;
+		if (val) *val = input >> 12;
+		if (shift) *shift = true;
 		return true;
 	}
 	return false;
diff --git a/Core/MIPS/ARM64/Arm64CompALU.cpp b/Core/MIPS/ARM64/Arm64CompALU.cpp
index a0ddf233a..289b16b40 100644
--- a/Core/MIPS/ARM64/Arm64CompALU.cpp
+++ b/Core/MIPS/ARM64/Arm64CompALU.cpp
@@ -82,10 +82,22 @@ void Arm64Jit::Comp_IType(MIPSOpcode op) {
 	switch (op >> 26) {
 	case 8:	// same as addiu?
 	case 9:	// R(rt) = R(rs) + simm; break;	//addiu
-		if (simm >= 0) {
-			CompImmLogic(rs, rt, simm, &ARM64XEmitter::ADD, &ARM64XEmitter::TryADDI2R, &EvalAdd);
-		} else if (simm < 0) {
-			CompImmLogic(rs, rt, -simm, &ARM64XEmitter::SUB, &ARM64XEmitter::TrySUBI2R, &EvalSub);
+		// Special-case for small adjustments of pointerified registers. Commonly for SP but happens for others.
+		if (rs == rt && gpr.IsMappedAsPointer(rs) && IsImmArithmetic(simm < 0 ? -simm : simm, nullptr, nullptr)) {
+			ARM64Reg r32 = gpr.R(rs);
+			gpr.MarkDirty(r32);
+			ARM64Reg r = EncodeRegTo64(r32);
+			if (simm > 0) {
+				ADDI2R(r, r, simm);
+			} else {
+				SUBI2R(r, r, -simm);
+			}
+		} else {
+			if (simm >= 0) {
+				CompImmLogic(rs, rt, simm, &ARM64XEmitter::ADD, &ARM64XEmitter::TryADDI2R, &EvalAdd);
+			} else if (simm < 0) {
+				CompImmLogic(rs, rt, -simm, &ARM64XEmitter::SUB, &ARM64XEmitter::TrySUBI2R, &EvalSub);
+			}
 		}
 		break;
diff --git a/Core/MIPS/ARM64/Arm64RegCache.cpp b/Core/MIPS/ARM64/Arm64RegCache.cpp
index bb77fd867..5dfc094a4 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.cpp
+++ b/Core/MIPS/ARM64/Arm64RegCache.cpp
@@ -143,6 +143,10 @@ bool Arm64RegCache::IsMappedAsPointer(MIPSGPReg mipsReg) {
 	return false;
 }
 
+void Arm64RegCache::MarkDirty(ARM64Reg reg) {
+	ar[reg].isDirty = true;
+}
+
 void Arm64RegCache::SetRegImm(ARM64Reg reg, u64 imm) {
 	// On ARM64, at least Cortex A57, good old MOVT/MOVW (MOVK in 64-bit) is really fast.
 	emit_->MOVI2R(reg, imm);
diff --git a/Core/MIPS/ARM64/Arm64RegCache.h b/Core/MIPS/ARM64/Arm64RegCache.h
index 9416d7257..56c0eec18 100644
--- a/Core/MIPS/ARM64/Arm64RegCache.h
+++ b/Core/MIPS/ARM64/Arm64RegCache.h
@@ -109,6 +109,7 @@ public:
 	bool IsMapped(MIPSGPReg reg);
 	bool IsMappedAsPointer(MIPSGPReg reg);
+	void MarkDirty(Arm64Gen::ARM64Reg reg);
 	void MapIn(MIPSGPReg rs);
 	void MapInIn(MIPSGPReg rd, MIPSGPReg rs);
 	void MapDirtyIn(MIPSGPReg rd, MIPSGPReg rs, bool avoidLoad = true);
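
The Arm64Emitter change makes IsImmArithmetic safe to call as a pure predicate: the new addiu path only wants to know whether |simm| fits an AArch64 ADD/SUB immediate (12 bits, optionally shifted left by 12), so it passes nullptr for both out-parameters. A minimal standalone sketch of that usage, with the helper body copied from the diff and hypothetical test values:

#include <cstdint>
#include <cstdio>

typedef uint32_t u32;

// Same logic as the patched helper: the out-pointers are now optional.
bool IsImmArithmetic(uint64_t input, u32 *val, bool *shift) {
	if (input < 4096) {
		if (val) *val = input;
		if (shift) *shift = false;
		return true;
	} else if ((input & 0xFFF000) == input) {
		if (val) *val = input >> 12;
		if (shift) *shift = true;
		return true;
	}
	return false;
}

int main() {
	// Check-only usage, as in the new addiu fast path.
	printf("%d\n", IsImmArithmetic(0x10, nullptr, nullptr));     // 1: fits unshifted
	printf("%d\n", IsImmArithmetic(0x10000, nullptr, nullptr));  // 1: fits with the 12-bit shift
	printf("%d\n", IsImmArithmetic(0x10001, nullptr, nullptr));  // 0: needs bits in both halves
	return 0;
}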
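For context on the Arm64CompALU fast path: a register mapped "as pointer" keeps the host memory base plus the 32-bit guest value in a 64-bit host register, so a self-targeted addiu (rs == rt) can be applied directly to that 64-bit pointer, and gpr.MarkDirty(r32) ensures the adjusted value is written back later. A toy sketch of the invariant this relies on, using hypothetical membase/guest stand-ins (it holds as long as the guest-side add does not wrap around 32 bits):

#include <cstdint>
#include <cassert>

int main() {
	// Hypothetical stand-ins for the JIT's host base pointer and a guest GPR value.
	uint64_t membase = 0x100000000ULL;  // host base of emulated RAM (illustrative)
	uint32_t guest = 0x08800010;        // guest register value, e.g. SP
	int32_t simm = -16;                 // small addiu immediate

	// Pointerified mapping: the 64-bit host register holds membase + guest.
	uint64_t host = membase + guest;

	// The fast path adjusts the 64-bit pointer directly (ADDI2R/SUBI2R in the diff)...
	host += simm;

	// ...and the pointer invariant still holds for the updated guest value,
	// so no round trip through a plain 32-bit mapping is needed.
	assert(host == membase + uint64_t(guest + simm));
	return 0;
}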