mirror of https://github.com/hrydgard/ppsspp.git
synced 2025-02-17 04:39:34 +00:00
Merge latest arm emitters from Sonic1's work.
This commit is contained in:
parent de309f6a8b
commit c1338259bc
@@ -78,11 +78,25 @@ bool TryMakeOperand2_AllowNegation(s32 imm, Operand2 &op2, bool *negated)
	}
}

void ARMXEmitter::MOVI2R(ARMReg reg, u32 val)
void ARMXEmitter::MOVI2R(ARMReg reg, u32 val, bool optimize)
{
	Operand2 op2;
	bool inverse;
	if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
	if (!optimize)
	{
		// Only used in backpatch atm
		// Only support ARMv7 right now
		if (cpu_info.bArmV7) {
			MOVW(reg, val & 0xFFFF);
			MOVT(reg, val, true);
		}
		else
		{
			// ARMv6 version won't use backpatch for now
			// Run again with optimizations
			MOVI2R(reg, val);
		}
	} else if (TryMakeOperand2_AllowInverse(val, op2, &inverse)) {
		if (!inverse)
			MOV(reg, op2);
		else
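
Editor's note: the new optimize=false path always emits a MOVW/MOVT pair on ARMv7, so the loaded constant occupies a fixed two instructions and can be re-patched later. A minimal standalone sketch (not part of the emitter; register number and value are illustrative) of how a 32-bit constant splits across that pair:

    #include <cstdint>
    #include <cstdio>

    // Assemble the ARMv7 MOVW/MOVT pair for "load 32-bit constant into Rd",
    // condition AL (0xE). Field layout per the ARM ARM: cond | opcode | imm4 | Rd | imm12.
    static uint32_t EncodeMOVW(unsigned rd, uint32_t val) {
        uint32_t imm16 = val & 0xFFFF;                       // low half goes into MOVW
        return 0xE3000000 | ((imm16 >> 12) << 16) | (rd << 12) | (imm16 & 0xFFF);
    }
    static uint32_t EncodeMOVT(unsigned rd, uint32_t val) {
        uint32_t imm16 = val >> 16;                          // high half goes into MOVT
        return 0xE3400000 | ((imm16 >> 12) << 16) | (rd << 12) | (imm16 & 0xFFF);
    }

    int main() {
        uint32_t val = 0xDEAD1234;
        // Expected: MOVW r0,#0x1234 -> E3001234, MOVT r0,#0xDEAD -> E34D0EAD
        printf("MOVW: %08X\nMOVT: %08X\n", EncodeMOVW(0, val), EncodeMOVT(0, val));
    }
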
@@ -115,7 +129,14 @@ void ARMXEmitter::MOVI2R(ARMReg reg, u32 val)
		}
	}
}

// Moves IMM to memory location
void ARMXEmitter::ARMABI_MOVI2M(Operand2 op, Operand2 val)
{
	// This moves imm to a memory location
	MOVW(R14, val); MOVT(R14, val, true);
	MOVW(R12, op); MOVT(R12, op, true);
	STR(R12, R14); // R10 is what we want to store
}
void ARMXEmitter::QuickCallFunction(ARMReg reg, void *func) {
	MOVI2R(reg, (u32)(func));
	BL(reg);
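
Editor's note: ARMABI_MOVI2M materialises a value and an address with MOVW/MOVT pairs and then stores the value through the address. A hedged C-level sketch of what the emitted sequence accomplishes at runtime (names are illustrative, not emitter API):

    #include <cstdint>
    #include <cstdio>

    int main() {
        // Plain-C++ equivalent of the emitted sequence: load a 32-bit immediate
        // and a 32-bit address into registers, then store (the STR).
        uint32_t slot = 0;                   // stand-in for the target memory location
        volatile uint32_t *addr = &slot;     // "op"  : address materialised via MOVW/MOVT
        uint32_t value = 0xCAFEBABE;         // "val" : immediate materialised via MOVW/MOVT
        *addr = value;                       // the STR
        printf("stored %08X\n", slot);
    }
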
@@ -404,7 +425,7 @@ void ARMXEmitter::MOVT(ARMReg Rd, Operand2 Rm, bool TopBits) { WriteInstruction(

void ARMXEmitter::WriteInstruction (u32 Op, ARMReg Rd, ARMReg Rn, Operand2 Rm, bool SetFlags) // This can get renamed later
{
	u32 op = InstOps[Op][Rm.GetType()]; // Type always decided by last operand
	s32 op = InstOps[Op][Rm.GetType()]; // Type always decided by last operand
	u32 Data = Rm.GetData();
	if (Rm.GetType() == TYPE_IMM)
	{
@@ -419,7 +440,7 @@ void ARMXEmitter::WriteInstruction (u32 Op, ARMReg Rd, ARMReg Rn, Operand2 Rm, b
			break;
		}
	}
	if (op == (u32)-1)
	if (op == -1)
		_assert_msg_(DYNA_REC, false, "%s not yet support %d", InstNames[Op], Rm.GetType());
	Write32(condition | (op << 21) | (SetFlags ? (1 << 20) : 0) | Rn << 16 | Rd << 12 | Data);
}
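
Editor's note: WriteInstruction composes a data-processing word from the opcode bits, the S flag, Rn, Rd and the operand-2 field. A standalone worked example of the same composition under the architectural field layout (the emitter's InstOps table values are not reproduced here): ADD r0, r1, #1.

    #include <cstdint>
    #include <cstdio>

    int main() {
        // ARM data-processing, immediate form:
        // cond[31:28] 00 I[25] opcode[24:21] S[20] Rn[19:16] Rd[15:12] imm[11:0]
        uint32_t cond = 0xE;   // AL
        uint32_t I    = 1;     // operand 2 is an immediate
        uint32_t op   = 0x4;   // ADD
        uint32_t S    = 0;     // don't set flags
        uint32_t Rn = 1, Rd = 0, imm12 = 1;
        uint32_t word = (cond << 28) | (I << 25) | (op << 21) | (S << 20)
                      | (Rn << 16) | (Rd << 12) | imm12;
        printf("add r0, r1, #1 -> %08X\n", word);   // expected E2810001
    }
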
@@ -486,6 +507,10 @@ void ARMXEmitter::REV (ARMReg dest, ARMReg src )
{
	Write32(condition | (107 << 20) | (15 << 16) | (dest << 12) | (243 << 4) | src);
}
void ARMXEmitter::REV16(ARMReg dest, ARMReg src)
{
	Write32(condition | (0x3DF << 16) | (dest << 12) | (0xFD << 4) | src);
}

void ARMXEmitter::_MSR (bool write_nzcvq, bool write_g, Operand2 op2)
{
@@ -526,8 +551,17 @@ void ARMXEmitter::DMB ()
{
	Write32(0xF57FF05E);
}
void ARMXEmitter::SVC(Operand2 op)
{
	Write32(condition | (0x0F << 24) | op.Imm24());
}

void ARMXEmitter::LDR (ARMReg dest, ARMReg src, Operand2 op) { WriteStoreOp(0x41, src, dest, op);}
void ARMXEmitter::LDRH(ARMReg dest, ARMReg src, Operand2 op)
{
	u8 Imm = op.Imm8();
	Write32(condition | (0x05 << 20) | (src << 16) | (dest << 12) | ((Imm >> 4) << 8) | (0xB << 4) | (Imm & 0x0F));
}
void ARMXEmitter::LDRB(ARMReg dest, ARMReg src, Operand2 op) { WriteStoreOp(0x45, src, dest, op);}

void ARMXEmitter::LDR (ARMReg dest, ARMReg base, ARMReg offset, bool Index, bool Add)
@@ -569,19 +603,249 @@ void ARMXEmitter::LDMFD(ARMReg dest, bool WriteBack, const int Regnum, ...)
	WriteRegStoreOp(0x89, dest, WriteBack, RegList);
}

// NEON and ASIMD
ARMReg ARMXEmitter::SubBase(ARMReg Reg)
{
	if (Reg >= S0)
	{
		if (Reg >= D0)
		{
			if (Reg >= Q0)
				return (ARMReg)((Reg - Q0) * 2); // Always gets encoded as a double register
			return (ARMReg)(Reg - D0);
		}
		return (ARMReg)(Reg - S0);
	}
	return Reg;
}
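
Editor's note: SubBase folds the S/D/Q ranges of the ARMReg enum back down to the raw register index the encoder needs; a quad register is rewritten as the index of its first aliased double register (Qn overlaps D2n and D2n+1). A small standalone sketch of the same arithmetic, with the enum bases stood in by plain constants (illustrative values, not the real ARMReg enum):

    #include <cstdio>

    // Illustrative bases only; the real ARMReg enum lays S0/D0/Q0 out contiguously.
    enum { S0 = 100, D0 = 132, Q0 = 164 };

    static int SubBase(int reg) {
        if (reg >= Q0) return (reg - Q0) * 2;   // Qn -> index of D(2n), its first aliased double
        if (reg >= D0) return reg - D0;         // Dn -> n
        if (reg >= S0) return reg - S0;         // Sn -> n
        return reg;                             // core register, unchanged
    }

    int main() {
        printf("Q2 -> D%d\n", SubBase(Q0 + 2)); // Q2 aliases D4:D5, so index 4
        printf("D7 -> %d\n",  SubBase(D0 + 7));
        printf("S3 -> %d\n",  SubBase(S0 + 3));
    }
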
// NEON Specific
void ARMXEmitter::VADD(IntegerSize Size, ARMReg Vd, ARMReg Vn, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd >= Q0, "Pass invalid register to VADD(integer)");
	_assert_msg_(DYNA_REC, cpu_info.bNEON, "Can't use VADD(integer) when CPU doesn't support it");

	// Gets encoded as a double register
	Vd = SubBase(Vd);
	Vn = SubBase(Vn);
	Vm = SubBase(Vm);

	Write32((0xF2 << 24) | ((Vd & 0x10) << 18) | (Size << 20) | ((Vn & 0xF) << 16) \
		| ((Vd & 0xF) << 12) | (0x8 << 8) | ((Vn & 0x10) << 3) | (1 << 6) \
		| ((Vm & 0x10) << 2) | (Vm & 0xF));
}
void ARMXEmitter::VSUB(IntegerSize Size, ARMReg Vd, ARMReg Vn, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd >= Q0, "Pass invalid register to VSUB(integer)");
	_assert_msg_(DYNA_REC, cpu_info.bNEON, "Can't use VSUB(integer) when CPU doesn't support it");

	// Gets encoded as a double register
	Vd = SubBase(Vd);
	Vn = SubBase(Vn);
	Vm = SubBase(Vm);

	Write32((0xF3 << 24) | ((Vd & 0x10) << 18) | (Size << 20) | ((Vn & 0xF) << 16) \
		| ((Vd & 0xF) << 12) | (0x8 << 8) | ((Vn & 0x10) << 3) | (1 << 6) \
		| ((Vm & 0x10) << 2) | (Vm & 0xF));
}

// VFP Specific

void ARMXEmitter::VLDR(ARMReg Dest, ARMReg Base, Operand2 op)
{
	_assert_msg_(DYNA_REC, Dest >= S0 && Dest <= D31, "Passed Invalid dest register to VLDR");
	_assert_msg_(DYNA_REC, Base <= R15, "Passed invalid Base register to VLDR");
	_assert_msg_(DYNA_REC, !(op.Imm12() & 4), "Offset needs to be word aligned");
	bool single_reg = Dest < D0;

	Dest = SubBase(Dest);

	if (single_reg)
		Dest = (ARMReg)(Dest - S0);
	{
		Write32(NO_COND | (0x1B << 23) | ((Dest & 0x1) << 22) | (1 << 20) | (Base << 16) \
			| ((Dest & 0x1E) << 11) | (10 << 8) | (op.Imm12() >> 2));
	}
	else
		Dest = (ARMReg)(Dest - D0);
		Write32(NO_COND | (13 << 24) | ((Dest & 0x10) << 18) | (1 << 20) | (Base << 16) \
			| (5 << 9) | (!single_reg << 8) | op.Imm8());
	{
		Write32(NO_COND | (0x1B << 23) | ((Dest & 0x10) << 18) | (1 << 20) | (Base << 16) \
			| ((Dest & 0xF) << 12) | (11 << 8) | (op.Imm12() >> 2));
	}
}
void ARMXEmitter::VSTR(ARMReg Src, ARMReg Base, Operand2 op)
{
	_assert_msg_(DYNA_REC, Src >= S0 && Src <= D31, "Passed invalid src register to VSTR");
	_assert_msg_(DYNA_REC, Base <= R15, "Passed invalid base register to VSTR");
	_assert_msg_(DYNA_REC, !(op.Imm12() & 4), "Offset needs to be word aligned");
	bool single_reg = Src < D0;

	Src = SubBase(Src);

	if (single_reg)
	{
		Write32(NO_COND | (0x1B << 23) | ((Src & 0x1) << 22) | (Base << 16) \
			| ((Src & 0x1E) << 11) | (10 << 8) | (op.Imm12() >> 2));
	}
	else
	{
		Write32(NO_COND | (0x1B << 23) | ((Src & 0x10) << 18) | (Base << 16) \
			| ((Src & 0xF) << 12) | (11 << 8) | (op.Imm12() >> 2));
	}
}
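
Editor's note: VLDR/VSTR carry their offset as a count of words, which is why the emitter asserts on alignment and shifts op.Imm12() right by two before ORing it into the instruction. A standalone sketch of that offset packing (assumes a positive, word-aligned byte offset of at most 1020, the architectural limit of an 8-bit word count):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Convert a byte offset into the 8-bit word-count field used by VLDR/VSTR.
    static uint32_t EncodeVfpOffset(uint32_t byteOffset) {
        assert((byteOffset & 3) == 0 && "offset must be word aligned");
        assert(byteOffset <= 1020 && "only 8 bits of word count are available");
        return byteOffset >> 2;              // stored as a count of 32-bit words
    }

    int main() {
        printf("offset 64   -> imm8 %u\n", EncodeVfpOffset(64));    // 16
        printf("offset 1020 -> imm8 %u\n", EncodeVfpOffset(1020));  // 255
    }
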
void ARMXEmitter::VCMP(ARMReg Vd, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd < Q0, "Passed invalid Vd to VCMP");
	bool single_reg = Vd < D0;

	Vd = SubBase(Vd);
	Vm = SubBase(Vm);

	if (single_reg)
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x1) << 22) | (0x34 << 16) | ((Vd & 0x1E) << 11) \
			| (0x2B << 6) | ((Vm & 0x1) << 5) | (Vm >> 1));
	}
	else
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x10) << 18) | (0x34 << 16) | ((Vd & 0xF) << 12) \
			| (0x2F << 6) | ((Vm & 0x10) << 1) | (Vm & 0xF));
	}
}
void ARMXEmitter::VCMP(ARMReg Vd)
{
	_assert_msg_(DYNA_REC, Vd < Q0, "Passed invalid Vd to VCMP");
	bool single_reg = Vd < D0;

	Vd = SubBase(Vd);

	if (single_reg)
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x1) << 22) | (0x35 << 16) | ((Vd & 0x1E) << 11) \
			| (0x2B << 6));
	}
	else
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x10) << 18) | (0x35 << 16) | ((Vd & 0xF) << 12) \
			| (0x2F << 6));
	}
}
void ARMXEmitter::VDIV(ARMReg Vd, ARMReg Vn, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd < Q0, "Passed invalid dest register to VDIV");
	_assert_msg_(DYNA_REC, Vn < Q0, "Passed invalid Vn to VDIV");
	_assert_msg_(DYNA_REC, Vm < Q0, "Passed invalid Vm to VDIV");
	bool single_reg = Vd < D0;

	Vd = SubBase(Vd);
	Vn = SubBase(Vn);
	Vm = SubBase(Vm);

	if (single_reg)
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x1) << 22) | ((Vn & 0x1E) << 16) \
			| ((Vd & 0x1E) << 11) | (0xA << 8) | ((Vn & 0x1) << 7) | ((Vm & 0x1) << 5) \
			| (Vm >> 1));
	}
	else
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x10) << 18) | ((Vn & 0xF) << 16) \
			| ((Vd & 0xF) << 12) | (0xB << 8) | ((Vn & 0x10) << 3) | ((Vm & 0x10) << 2) \
			| (Vm & 0xF));
	}
}
void ARMXEmitter::VSQRT(ARMReg Vd, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd < Q0, "Passed invalid dest register to VSQRT");
	_assert_msg_(DYNA_REC, Vm < Q0, "Passed invalid Vm to VSQRT");
	bool single_reg = Vd < D0;

	Vd = SubBase(Vd);
	Vm = SubBase(Vm);

	if (single_reg)
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x1) << 22) | (0x31 << 16) \
			| ((Vd & 0x1E) << 11) | (0x2B << 6) | ((Vm & 0x1) << 5) | (Vm >> 1));
	}
	else
	{
		Write32(NO_COND | (0x1D << 23) | ((Vd & 0x10) << 18) | (0x31 << 16) \
			| ((Vd & 0xF) << 12) | (0x2F << 6) | ((Vm & 0x10) << 2) | (Vm & 0xF));
	}
}
// VFP and ASIMD
void ARMXEmitter::VADD(ARMReg Vd, ARMReg Vn, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd >= S0, "Passed invalid dest register to VADD");
	_assert_msg_(DYNA_REC, Vn >= S0, "Passed invalid Vn to VADD");
	_assert_msg_(DYNA_REC, Vm >= S0, "Passed invalid Vm to VADD");
	bool single_reg = Vd < D0;
	bool double_reg = Vd < Q0;

	Vd = SubBase(Vd);
	Vn = SubBase(Vn);
	Vm = SubBase(Vm);

	if (single_reg)
	{
		Write32(NO_COND | (0x1C << 23) | ((Vd & 0x1) << 22) | (0x3 << 20) \
			| ((Vn & 0x1E) << 15) | ((Vd & 0x1E) << 12) | (0x5 << 9) \
			| ((Vn & 0x1) << 7) | ((Vm & 0x1) << 5) | (Vm >> 1));
	}
	else
	{
		if (double_reg)
		{
			Write32(NO_COND | (0x1C << 23) | ((Vd & 0x10) << 18) | (0x3 << 20) \
				| ((Vn & 0xF) << 16) | ((Vd & 0xF) << 12) | (0xB << 8) \
				| ((Vn & 0x10) << 3) | ((Vm & 0x10) << 2) | (Vm & 0xF));
		}
		else
		{
			_assert_msg_(DYNA_REC, cpu_info.bNEON, "Trying to use VADD with Quad Reg without support!");
			Write32((0xF2 << 24) | ((Vd & 0x10) << 18) | ((Vn & 0xF) << 16) \
				| ((Vd & 0xF) << 12) | (0xD << 8) | ((Vn & 0x10) << 3) \
				| (1 << 6) | ((Vm & 0x10) << 2) | (Vm & 0xF));
		}
	}
}
void ARMXEmitter::VSUB(ARMReg Vd, ARMReg Vn, ARMReg Vm)
{
	_assert_msg_(DYNA_REC, Vd >= S0, "Passed invalid dest register to VSUB");
	_assert_msg_(DYNA_REC, Vn >= S0, "Passed invalid Vn to VSUB");
	_assert_msg_(DYNA_REC, Vm >= S0, "Passed invalid Vm to VSUB");
	bool single_reg = Vd < D0;
	bool double_reg = Vd < Q0;

	Vd = SubBase(Vd);
	Vn = SubBase(Vn);
	Vm = SubBase(Vm);

	if (single_reg)
	{
		Write32(NO_COND | (0x1C << 23) | ((Vd & 0x1) << 22) | (0x3 << 20) \
			| ((Vn & 0x1E) << 15) | ((Vd & 0x1E) << 12) | (0x5 << 9) \
			| ((Vn & 0x1) << 7) | (1 << 6) | ((Vm & 0x1) << 5) | (Vm >> 1));
	}
	else
	{
		if (double_reg)
		{
			Write32(NO_COND | (0x1C << 23) | ((Vd & 0x10) << 18) | (0x3 << 20) \
				| ((Vn & 0xF) << 16) | ((Vd & 0xF) << 12) | (0xB << 8) \
				| ((Vn & 0x10) << 3) | (1 << 6) | ((Vm & 0x10) << 2) | (Vm & 0xF));
		}
		else
		{
			_assert_msg_(DYNA_REC, cpu_info.bNEON, "Trying to use VSUB with Quad Reg without support!");
			Write32((0xF2 << 24) | (1 << 21) | ((Vd & 0x10) << 18) | ((Vn & 0xF) << 16) \
				| ((Vd & 0xF) << 12) | (0xD << 8) | ((Vn & 0x10) << 3) \
				| (1 << 6) | ((Vm & 0x10) << 2) | (Vm & 0xF));
		}
	}
}

void ARMXEmitter::VMOV(ARMReg Dest, ARMReg Src)
{
	if (Dest > R15)
@@ -600,6 +864,7 @@ void ARMXEmitter::VMOV(ARMReg Dest, ARMReg Src)
		{
			// Move 64bit from Arm reg
			_assert_msg_(DYNA_REC, false, "This VMOV doesn't support moving 64bit ARM to NEON");
			return;
		}
	}
}
@@ -613,13 +878,13 @@ void ARMXEmitter::VMOV(ARMReg Dest, ARMReg Src)
			Src = (ARMReg)(Src - S0);
			Write32(NO_COND | (0xE1 << 20) | ((Src & 0x1E) << 15) | (Dest << 12) \
				| (0xA << 8) | ((Src & 0x1) << 7) | (1 << 4));

			return;
		}
		else
		{
			// Move 64bit To Arm reg
			_assert_msg_(DYNA_REC, false, "This VMOV doesn't support moving 64bit ARM From NEON");
			return;
		}
	}
	else
@@ -632,32 +897,35 @@ void ARMXEmitter::VMOV(ARMReg Dest, ARMReg Src)
	int SrcSize = Src < D0 ? 1 : Src < Q0 ? 2 : 4;
	int DestSize = Dest < D0 ? 1 : Dest < Q0 ? 2 : 4;
	bool Single = DestSize == 1;
	bool Quad = DestSize == 4;

	_assert_msg_(DYNA_REC, SrcSize == DestSize, "VMOV doesn't support moving different register sizes");

	Dest = SubBase(Dest);
	Src = SubBase(Src);

	if (Single)
	{
		Dest = (ARMReg)(Dest - S0);
		Src = (ARMReg)(Src - S0);
		Write32(NO_COND | (0x1D << 23) | ((Dest & 0x1) << 22) | (0x3 << 20) | ((Dest & 0x1E) << 11) \
			| (0x5 << 9) | (1 << 6) | ((Src & 0x1) << 5) | ((Src & 0x1E) >> 1));
	}
	else
	{
		// Double and quad
		bool Quad = DestSize == 4;
		if (Quad)
		{
			_assert_msg_(DYNA_REC, cpu_info.bNEON, "Trying to use quad registers when you don't support ASIMD.");
			// Gets encoded as a Double register
			Dest = (ARMReg)((Dest - Q0) * 2);
			Src = (ARMReg)((Src - Q0) * 2);
			Write32((0xF2 << 24) | ((Dest & 0x10) << 18) | (2 << 20) | ((Src & 0xF) << 16) \
				| ((Dest & 0xF) << 12) | (1 << 8) | ((Src & 0x10) << 3) | (1 << 6) \
				| ((Src & 0x10) << 1) | (1 << 4) | (Src & 0xF));
		}
		else
		{
			Dest = (ARMReg)(Dest - D0);
			Src = (ARMReg)(Src - D0);
			Write32(NO_COND | (0x1D << 23) | ((Dest & 0x10) << 18) | (0x3 << 20) | ((Dest & 0xF) << 12) \
				| (0x2D << 6) | ((Src & 0x10) << 1) | (Src & 0xF));
		}
		Write32((0xF2 << 24) | ((Dest & 0x10) << 18) | (1 << 21) | ((Src & 0xF) << 16) \
			| ((Dest & 0xF) << 12) | (1 << 8) | ((Src & 0x10) << 3) | (Quad << 6) \
			| ((Src & 0x10) << 1) | (1 << 4) | (Src & 0xF));
	}
}

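
Editor's note: throughout the new VFP/NEON paths the register class is derived purely from where the value falls in the ARMReg enum: below D0 it is a single (one 32-bit lane), below Q0 a double (two), otherwise a quad (four). A standalone sketch of that classification, using the same illustrative bases as the SubBase example above:

    #include <cstdio>

    enum { S0 = 100, D0 = 132, Q0 = 164 };   // illustrative bases, not the real enum

    // Width of the register in 32-bit words, mirroring "Src < D0 ? 1 : Src < Q0 ? 2 : 4".
    static int RegSizeInWords(int reg) {
        return reg < D0 ? 1 : reg < Q0 ? 2 : 4;
    }

    int main() {
        printf("S5 -> %d word(s)\n", RegSizeInWords(S0 + 5));
        printf("D5 -> %d word(s)\n", RegSizeInWords(D0 + 5));
        printf("Q5 -> %d word(s)\n", RegSizeInWords(Q0 + 5));
    }
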
@@ -97,6 +97,13 @@ enum ShiftType
	ST_ROR = 3,
	ST_RRX = 4
};
enum IntegerSize
{
	I_I8 = 0,
	I_I16,
	I_I32,
	I_I64
};
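
Editor's note: the new IntegerSize enum (0..3) feeds the two NEON element-size bits in VADD/VSUB(integer), so the element width in bits is simply 8 shifted left by the enum value. A tiny standalone check of that relationship:

    #include <cstdio>

    enum IntegerSize { I_I8 = 0, I_I16, I_I32, I_I64 };

    int main() {
        // The NEON size field encodes 8/16/32/64-bit elements as 0..3.
        for (int s = I_I8; s <= I_I64; ++s)
            printf("IntegerSize %d -> %d-bit elements\n", s, 8 << s);
    }
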

enum
{
@@ -309,6 +316,9 @@ bool TryMakeOperand2_AllowNegation(s32 imm, Operand2 &op2, bool *negated);
inline Operand2 R(ARMReg Reg) { return Operand2(Reg, TYPE_REG); }
inline Operand2 IMM(u32 Imm) { return Operand2(Imm, TYPE_IMM); }
inline Operand2 Mem(void *ptr) { return Operand2((u32)ptr, TYPE_IMM); }
//usage: struct {int e;} s; STRUCT_OFFSET(s,e)
#define STRUCT_OFF(str,elem) ((u32)((u32)&(str).elem-(u32)&(str)))

struct FixupBranch
{
@@ -418,6 +428,7 @@ public:
	void SBC (ARMReg dest, ARMReg src, Operand2 op2);
	void SBCS(ARMReg dest, ARMReg src, Operand2 op2);
	void REV (ARMReg dest, ARMReg src);
	void REV16 (ARMReg dest, ARMReg src);
	void RSC (ARMReg dest, ARMReg src, Operand2 op2);
	void RSCS(ARMReg dest, ARMReg src, Operand2 op2);
	void TST ( ARMReg src, Operand2 op2);
@@ -459,6 +470,7 @@ public:
	void LDR (ARMReg dest, ARMReg src, Operand2 op2 = 0);
	// Offset adds to the base register in LDR
	void LDR (ARMReg dest, ARMReg base, ARMReg offset, bool Index, bool Add);
	void LDRH(ARMReg dest, ARMReg src, Operand2 op = 0);
	void LDRB(ARMReg dest, ARMReg src, Operand2 op2 = 0);
	void STR (ARMReg dest, ARMReg src, Operand2 op2 = 0);
	// Offset adds on to the destination register in STR
@@ -473,23 +485,37 @@ public:
	// dest contains the result if the instruction managed to store the value
	void STREX(ARMReg dest, ARMReg base, ARMReg op);
	void DMB ();
	void SVC(Operand2 op);

	// NEON and ASIMD instructions
	// None of these will be created with conditional since ARM
	// is deprecating conditional execution of ASIMD instructions.
	// Some ASIMD instructions don't even have a conditional encoding.
	// ASIMD instructions don't even have a conditional encoding.

	// Subtracts the base from the register to give us the real one
	ARMReg SubBase(ARMReg Reg);
	// NEON Only
	void VADD(IntegerSize Size, ARMReg Vd, ARMReg Vn, ARMReg Vm);
	void VSUB(IntegerSize Size, ARMReg Vd, ARMReg Vn, ARMReg Vm);

	void VLDR(ARMReg dest, ARMReg Base, Operand2 op);
	// VFP Only
	void VLDR(ARMReg Dest, ARMReg Base, Operand2 op);
	void VSTR(ARMReg Src, ARMReg Base, Operand2 op);
	void VCMP(ARMReg Vd, ARMReg Vm);
	// Compares against zero
	void VCMP(ARMReg Vd);
	void VDIV(ARMReg Vd, ARMReg Vn, ARMReg Vm);
	void VSQRT(ARMReg Vd, ARMReg Vm);

	// NEON and VFP
	void VADD(ARMReg Vd, ARMReg Vn, ARMReg Vm);
	void VSUB(ARMReg Vd, ARMReg Vn, ARMReg Vm);
	void VMOV(ARMReg Dest, ARMReg Src);

	void QuickCallFunction(ARMReg scratchreg, void *func);
	// Utility functions
	void MOVI2R(ARMReg reg, u32 val);
	void ARMABI_ShowConditions();
	void ARMABI_Return();

	void UpdateAPSR(bool NZCVQ, u8 Flags, bool GE, u8 GEval);

	void MOVI2R(ARMReg reg, u32 val, bool optimize = true);
	void ARMABI_MOVI2M(Operand2 op, Operand2 val);
}; // class ARMXEmitter