// Copyright 2015 Dolphin Emulator Project
// Licensed under GPLv2+
// Refer to the license.txt file included.

#pragma once

#include <functional>

#include "Common/ArmCommon.h"
#include "Common/BitSet.h"
#include "Common/CodeBlock.h"
#include "Common/CommonTypes.h"
#include "Common/Log.h"

#define DYNA_REC JIT

#ifdef FMAX
#undef FMAX
#endif
#ifdef FMIN
#undef FMIN
#endif

namespace Arm64Gen
{

// X30 serves a dual purpose: it is a general-purpose register and the link register.
// Encoded as <u3:type><u5:reg>
// Types:
// 000 - 32bit GPR
// 001 - 64bit GPR
// 010 - VFP single precision
// 100 - VFP double precision
// 110 - VFP quad precision
enum ARM64Reg
{
  // 32bit registers
  W0 = 0, W1, W2, W3, W4, W5, W6,
  W7, W8, W9, W10, W11, W12, W13, W14,
  W15, W16, W17, W18, W19, W20, W21, W22,
  W23, W24, W25, W26, W27, W28, W29, W30,

  WSP, // 32bit stack pointer

  // 64bit registers
  X0 = 0x20, X1, X2, X3, X4, X5, X6,
  X7, X8, X9, X10, X11, X12, X13, X14,
  X15, X16, X17, X18, X19, X20, X21, X22,
  X23, X24, X25, X26, X27, X28, X29, X30,

  SP, // 64bit stack pointer

  // VFP single precision registers
  S0 = 0x40, S1, S2, S3, S4, S5, S6,
  S7, S8, S9, S10, S11, S12, S13,
  S14, S15, S16, S17, S18, S19, S20,
  S21, S22, S23, S24, S25, S26, S27,
  S28, S29, S30, S31,

  // VFP double precision registers
  D0 = 0x80, D1, D2, D3, D4, D5, D6, D7,
  D8, D9, D10, D11, D12, D13, D14, D15,
  D16, D17, D18, D19, D20, D21, D22, D23,
  D24, D25, D26, D27, D28, D29, D30, D31,

  // ASIMD quad-word registers
  Q0 = 0xC0, Q1, Q2, Q3, Q4, Q5, Q6, Q7,
  Q8, Q9, Q10, Q11, Q12, Q13, Q14, Q15,
  Q16, Q17, Q18, Q19, Q20, Q21, Q22, Q23,
  Q24, Q25, Q26, Q27, Q28, Q29, Q30, Q31,

  // For PRFM (prefetch memory) encoding.
  // This is encoded in the Rt register.
  // Data preload
  PLDL1KEEP = 0, PLDL1STRM,
  PLDL2KEEP, PLDL2STRM,
  PLDL3KEEP, PLDL3STRM,
  // Instruction preload
  PLIL1KEEP = 8, PLIL1STRM,
  PLIL2KEEP, PLIL2STRM,
  PLIL3KEEP, PLIL3STRM,
  // Prepare for store
  PLTL1KEEP = 16, PLTL1STRM,
  PLTL2KEEP, PLTL2STRM,
  PLTL3KEEP, PLTL3STRM,

  WZR = WSP,
  ZR = SP,
  FP = X29,
  LR = X30,

  INVALID_REG = 0xFFFFFFFF
};

// X19-X28. X29 (FP) and X30 (LR) are always saved and FP updated appropriately.
const u32 ALL_CALLEE_SAVED = 0x1FF80000;
const u32 ALL_CALLEE_SAVED_FP = 0x0000FF00; // q8-q15
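
// A minimal usage sketch (illustrative): these masks pair with
// ARM64FloatEmitter::ABI_PushRegisters/ABI_PopRegisters declared below, e.g.
//   fp.ABI_PushRegisters(ALL_CALLEE_SAVED, ALL_CALLEE_SAVED_FP);
//   ... emit the body ...
//   fp.ABI_PopRegisters(ALL_CALLEE_SAVED, ALL_CALLEE_SAVED_FP);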

inline bool Is64Bit(ARM64Reg reg) { return (reg & 0x20) != 0; }
inline bool IsSingle(ARM64Reg reg) { return (reg & 0xC0) == 0x40; }
inline bool IsDouble(ARM64Reg reg) { return (reg & 0xC0) == 0x80; }
inline bool IsScalar(ARM64Reg reg) { return IsSingle(reg) || IsDouble(reg); }
inline bool IsQuad(ARM64Reg reg) { return (reg & 0xC0) == 0xC0; }
inline bool IsVector(ARM64Reg reg) { return (reg & 0xC0) != 0; }
inline bool IsGPR(ARM64Reg reg) { return (int)reg < 0x40; }

int CountLeadingZeros(uint64_t value, int width);

inline ARM64Reg DecodeReg(ARM64Reg reg) { return (ARM64Reg)(reg & 0x1F); }
inline ARM64Reg EncodeRegTo64(ARM64Reg reg) { return (ARM64Reg)(reg | 0x20); }
inline ARM64Reg EncodeRegToSingle(ARM64Reg reg) { return (ARM64Reg)(DecodeReg(reg) + S0); }
inline ARM64Reg EncodeRegToDouble(ARM64Reg reg) { return (ARM64Reg)((reg & ~0xC0) | 0x80); }
inline ARM64Reg EncodeRegToQuad(ARM64Reg reg) { return (ARM64Reg)(reg | 0xC0); }
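
// For example, following the <u3:type><u5:reg> encoding above:
//   DecodeReg(X5) == 5, EncodeRegTo64(W5) == X5,
//   EncodeRegToDouble(Q3) == D3, EncodeRegToQuad(D3) == Q3.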

// For AND/TST/ORR/EOR etc
bool IsImmLogical(uint64_t value, unsigned int width, unsigned int *n, unsigned int *imm_s, unsigned int *imm_r);
// For ADD/SUB
bool IsImmArithmetic(uint64_t input, u32 *val, bool *shift);
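
// Typical pattern (sketch): probe the encoders before choosing an instruction,
// e.g. IsImmArithmetic(imm, &val, &shift) succeeds when imm fits ADD/SUB's
// 12-bit immediate (optionally shifted left by 12); otherwise the value has to
// be materialized in a scratch register first.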

float FPImm8ToFloat(uint8_t bits);
bool FPImm8FromFloat(float value, uint8_t *immOut);
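
// The 8-bit FP immediate covers +/-(16..31)/16 * 2^e for e in [-3, 4], so e.g.
// FPImm8FromFloat(1.0f, &imm8) succeeds while 0.1f cannot be encoded and needs
// MOVI2F with a scratch register instead.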

enum OpType
{
  TYPE_IMM = 0,
  TYPE_REG,
  TYPE_IMMSREG,
  TYPE_RSR,
  TYPE_MEM
};

enum ShiftType
{
  ST_LSL = 0,
  ST_LSR = 1,
  ST_ASR = 2,
  ST_ROR = 3,
};

enum IndexType
{
  INDEX_UNSIGNED = 0,
  INDEX_POST = 1,
  INDEX_PRE = 2,
  INDEX_SIGNED = 3, // used in LDP/STP
};

enum ShiftAmount
{
  SHIFT_0 = 0,
  SHIFT_16 = 1,
  SHIFT_32 = 2,
  SHIFT_48 = 3,
};

enum RoundingMode {
  ROUND_A, // round to nearest, ties to away
  ROUND_M, // round towards -inf
  ROUND_N, // round to nearest, ties to even
  ROUND_P, // round towards +inf
  ROUND_Z, // round towards zero
};

struct FixupBranch
{
  // Pointer to executable code address.
  const u8 *ptr;
  // Type defines
  // 0 = CBZ (32bit)
  // 1 = CBNZ (32bit)
  // 2 = B (conditional)
  // 3 = TBZ
  // 4 = TBNZ
  // 5 = B (unconditional)
  // 6 = BL (unconditional)
  u32 type;

  // Used with B.cond
  CCFlags cond;

  // Used with TBZ/TBNZ
  u8 bit;

  // Used with Test/Compare and Branch
  ARM64Reg reg;
};

enum PStateField
{
  FIELD_SPSel = 0,
  FIELD_DAIFSet,
  FIELD_DAIFClr,
  FIELD_NZCV, // NZCV, FPCR and FPSR below are the only system registers accessible from EL0 (user space)
  FIELD_FPCR = 0x340,
  FIELD_FPSR = 0x341,
};

enum SystemHint
{
  HINT_NOP = 0,
  HINT_YIELD,
  HINT_WFE,
  HINT_WFI,
  HINT_SEV,
  HINT_SEVL,
};

enum BarrierType
{
  OSHLD = 1,
  OSHST = 2,
  OSH = 3,
  NSHLD = 5,
  NSHST = 6,
  NSH = 7,
  ISHLD = 9,
  ISHST = 10,
  ISH = 11,
  LD = 13,
  ST = 14,
  SY = 15,
};

class ArithOption
{
public:
  enum WidthSpecifier
  {
    WIDTH_DEFAULT,
    WIDTH_32BIT,
    WIDTH_64BIT,
  };

  enum ExtendSpecifier
  {
    EXTEND_UXTB = 0x0,
    EXTEND_UXTH = 0x1,
    EXTEND_UXTW = 0x2, /* Also LSL on 32bit width */
    EXTEND_UXTX = 0x3, /* Also LSL on 64bit width */
    EXTEND_SXTB = 0x4,
    EXTEND_SXTH = 0x5,
    EXTEND_SXTW = 0x6,
    EXTEND_SXTX = 0x7,
  };

  enum TypeSpecifier
  {
    TYPE_EXTENDEDREG,
    TYPE_IMM,
    TYPE_SHIFTEDREG,
  };

private:
  ARM64Reg m_destReg;
  WidthSpecifier m_width;
  ExtendSpecifier m_extend = EXTEND_UXTB;
  TypeSpecifier m_type;
  ShiftType m_shifttype;
  u32 m_shift;

public:
  ArithOption(ARM64Reg Rd, bool index = false)
  {
    // Indexed registers are a feature of AArch64: loadstore instructions
    // that take a register offset can treat that register as an index,
    // shifting it left so that it indexes at intervals of the access size:
    // 8-bit: index does nothing
    // 16-bit: index LSL 1
    // 32-bit: index LSL 2
    // 64-bit: index LSL 3
    if (index)
      m_shift = 4;
    else
      m_shift = 0;

    m_destReg = Rd;
    m_type = TYPE_EXTENDEDREG;
    if (Is64Bit(Rd))
    {
      m_width = WIDTH_64BIT;
      m_extend = EXTEND_UXTX;
    }
    else
    {
      m_width = WIDTH_32BIT;
      m_extend = EXTEND_UXTW;
    }
    m_shifttype = ST_LSL;
  }

  ArithOption(ARM64Reg Rd, bool index, bool signExtend) {
    if (index)
      m_shift = 4;
    else
      m_shift = 0;

    m_destReg = Rd;
    m_type = TYPE_EXTENDEDREG;
    if (Is64Bit(Rd)) {
      m_width = WIDTH_64BIT;
      m_extend = EXTEND_UXTX;
    } else {
      m_width = WIDTH_32BIT;
      m_extend = signExtend ? EXTEND_SXTW : EXTEND_UXTW;
    }
    m_shifttype = ST_LSL;
  }

  ArithOption(ARM64Reg Rd, ShiftType shift_type, u32 shift)
  {
    m_destReg = Rd;
    m_shift = shift;
    m_shifttype = shift_type;
    m_type = TYPE_SHIFTEDREG;
    if (Is64Bit(Rd))
    {
      m_width = WIDTH_64BIT;
      if (shift == 64)
        m_shift = 0;
    }
    else
    {
      m_width = WIDTH_32BIT;
      if (shift == 32)
        m_shift = 0;
    }
  }

  TypeSpecifier GetType() const
  {
    return m_type;
  }

  ARM64Reg GetReg() const
  {
    return m_destReg;
  }

  u32 GetData() const
  {
    switch (m_type)
    {
    case TYPE_EXTENDEDREG:
      return (m_extend << 13) |
             (m_shift << 10);
      break;
    case TYPE_SHIFTEDREG:
      return (m_shifttype << 22) |
             (m_shift << 10);
      break;
    default:
      _dbg_assert_msg_(false, "Invalid type in GetData");
      break;
    }
    return 0;
  }
};
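
// Sketch of how the constructors above are typically used (illustrative):
//   ArithOption(W2, true)        // register offset, scaled by the access size
//   ArithOption(W2, true, true)  // as above, but sign-extended (SXTW)
//   ArithOption(X2, ST_LSL, 4)   // shifted-register operand: X2, LSL #4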

class ARM64XEmitter
{
  friend class ARM64FloatEmitter;
  friend class ARM64CodeBlock;

private:
  const u8 *m_code = nullptr;
  u8 *m_writable = nullptr;
  const u8 *m_lastCacheFlushEnd = nullptr;

  void EncodeCompareBranchInst(u32 op, ARM64Reg Rt, const void* ptr);
  void EncodeTestBranchInst(u32 op, ARM64Reg Rt, u8 bits, const void* ptr);
  void EncodeUnconditionalBranchInst(u32 op, const void* ptr);
  void EncodeUnconditionalBranchInst(u32 opc, u32 op2, u32 op3, u32 op4, ARM64Reg Rn);
  void EncodeExceptionInst(u32 instenc, u32 imm);
  void EncodeSystemInst(u32 op0, u32 op1, u32 CRn, u32 CRm, u32 op2, ARM64Reg Rt);
  void EncodeArithmeticInst(u32 instenc, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void EncodeArithmeticCarryInst(u32 op, bool flags, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EncodeCondCompareImmInst(u32 op, ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond);
  void EncodeCondCompareRegInst(u32 op, ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond);
  void EncodeCondSelectInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);
  void EncodeData1SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn);
  void EncodeData2SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EncodeData3SrcInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void EncodeLogicalInst(u32 instenc, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void EncodeLoadRegisterInst(u32 bitop, ARM64Reg Rt, u32 imm);
  void EncodeLoadStoreExcInst(u32 instenc, ARM64Reg Rs, ARM64Reg Rt2, ARM64Reg Rn, ARM64Reg Rt);
  void EncodeLoadStorePairedInst(u32 op, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm);
  void EncodeLoadStoreIndexedInst(u32 op, u32 op2, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void EncodeLoadStoreIndexedInst(u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm, u8 size);
  void EncodeMOVWideInst(u32 op, ARM64Reg Rd, u32 imm, ShiftAmount pos);
  void EncodeBitfieldMOVInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms);
  void EncodeLoadStoreRegisterOffset(u32 size, u32 opc, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void EncodeAddSubImmInst(u32 op, bool flags, u32 shift, u32 imm, ARM64Reg Rn, ARM64Reg Rd);
  void EncodeLogicalImmInst(u32 op, ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, int n);
  void EncodeLoadStorePair(u32 op, u32 load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
  void EncodeAddressInst(u32 op, ARM64Reg Rd, s32 imm);
  void EncodeLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm);

protected:
  inline void Write32(u32 value)
  {
    *(u32 *)m_writable = value;
    m_code += 4;
    m_writable += 4;
  }

public:
  ARM64XEmitter() {}

  ARM64XEmitter(const u8 *codePtr, u8 *writablePtr);

  virtual ~ARM64XEmitter() {}

  void SetCodePointer(const u8 *ptr, u8 *writePtr);
  const u8* GetCodePointer() const;

  void ReserveCodeSpace(u32 bytes);
  const u8* AlignCode16();
  const u8* AlignCodePage();
  const u8 *NopAlignCode16();
  void FlushIcache();
  void FlushIcacheSection(const u8* start, const u8* end);
  u8* GetWritableCodePtr();

  // FixupBranch branching
  void SetJumpTarget(FixupBranch const& branch);
  FixupBranch CBZ(ARM64Reg Rt);
  FixupBranch CBNZ(ARM64Reg Rt);
  FixupBranch B(CCFlags cond);
  FixupBranch TBZ(ARM64Reg Rt, u8 bit);
  FixupBranch TBNZ(ARM64Reg Rt, u8 bit);
  FixupBranch B();
  FixupBranch BL();
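
  // Typical forward-branch pattern (sketch): emit the branch with an unknown
  // target, then patch it once the fall-through code has been emitted:
  //   FixupBranch skip = CBZ(W0);   // branch taken if W0 == 0
  //   ... emit the code to be skipped ...
  //   SetJumpTarget(skip);          // patch the CBZ to land here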

  // Compare and Branch
  void CBZ(ARM64Reg Rt, const void* ptr);
  void CBNZ(ARM64Reg Rt, const void* ptr);

  // Conditional Branch
  void B(CCFlags cond, const void* ptr);

  // Test and Branch
  void TBZ(ARM64Reg Rt, u8 bits, const void* ptr);
  void TBNZ(ARM64Reg Rt, u8 bits, const void* ptr);

  // Unconditional Branch
  void B(const void* ptr);
  void BL(const void* ptr);

  // Unconditional Branch (register)
  void BR(ARM64Reg Rn);
  void BLR(ARM64Reg Rn);
  void RET(ARM64Reg Rn = X30);
  void ERET();
  void DRPS();

  // Exception generation
  void SVC(u32 imm);
  void HVC(u32 imm);
  void SMC(u32 imm);
  void BRK(u32 imm);
  void HLT(u32 imm);
  void DCPS1(u32 imm);
  void DCPS2(u32 imm);
  void DCPS3(u32 imm);

  // System
  void _MSR(PStateField field, u8 imm);
  void _MSR(PStateField field, ARM64Reg Rt);
  void MRS(ARM64Reg Rt, PStateField field);

  void HINT(SystemHint op);
  void CLREX();
  void DSB(BarrierType type);
  void DMB(BarrierType type);
  void ISB(BarrierType type);

  // Add/Subtract (Extended/Shifted register)
  void ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ADDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SUBS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void CMN(ARM64Reg Rn, ARM64Reg Rm);
  void CMN(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);
  void CMP(ARM64Reg Rn, ARM64Reg Rm);
  void CMP(ARM64Reg Rn, ARM64Reg Rm, ArithOption Option);

  // Add/Subtract (with carry)
  void ADC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ADCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SBC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SBCS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);

  // Conditional Compare (immediate)
  void CCMN(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond);
  void CCMP(ARM64Reg Rn, u32 imm, u32 nzcv, CCFlags cond);

  // Conditional Compare (register)
  void CCMN(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond);
  void CCMP(ARM64Reg Rn, ARM64Reg Rm, u32 nzcv, CCFlags cond);

  // Conditional Select
  void CSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);
  void CSINC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);
  void CSINV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);
  void CSNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);

  // Aliases
  void CSET(ARM64Reg Rd, CCFlags cond) {
    ARM64Reg zr = Is64Bit(Rd) ? ZR : WZR;
    CSINC(Rd, zr, zr, (CCFlags)((u32)cond ^ 1));
  }
  void NEG(ARM64Reg Rd, ARM64Reg Rs) {
    SUB(Rd, Is64Bit(Rd) ? ZR : WZR, Rs);
  }

  // Data-Processing 1 source
  void RBIT(ARM64Reg Rd, ARM64Reg Rn);
  void REV16(ARM64Reg Rd, ARM64Reg Rn);
  void REV32(ARM64Reg Rd, ARM64Reg Rn);
  void REV64(ARM64Reg Rd, ARM64Reg Rn);
  void CLZ(ARM64Reg Rd, ARM64Reg Rn);
  void CLS(ARM64Reg Rd, ARM64Reg Rn);

  // Data-Processing 2 source
  void UDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void LSLV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void LSRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ASRV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void RORV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32B(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32H(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32W(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32CB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32CH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32CW(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32X(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CRC32CX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);

  // Data-Processing 3 source
  void MADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void MSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void SMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void SMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void SMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void UMADDL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void UMULL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void UMSUBL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void UMULH(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void MUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void MNEG(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);

  // Logical (shifted register)
  void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);
  void TST(ARM64Reg Rn, ARM64Reg Rm, ArithOption Shift);

  // Wrap the above for saner syntax
  void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { AND(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void BIC(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BIC(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void ORN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ORN(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EOR(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void EON(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { EON(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void ANDS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { ANDS(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void BICS(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm) { BICS(Rd, Rn, Rm, ArithOption(Rd, ST_LSL, 0)); }
  void TST(ARM64Reg Rn, ARM64Reg Rm) { TST(Rn, Rm, ArithOption(Is64Bit(Rn) ? ZR : WZR, ST_LSL, 0)); }

  // Convenience wrappers around ORR. These match the official convenience syntax.
  void MOV(ARM64Reg Rd, ARM64Reg Rm, ArithOption Shift);
  void MOV(ARM64Reg Rd, ARM64Reg Rm);
  void MVN(ARM64Reg Rd, ARM64Reg Rm);

  // Wrapper around ADD reg, reg, imm.
  void MOVfromSP(ARM64Reg Rd);
  void MOVtoSP(ARM64Reg Rn);

  // TODO: These are "slow" as they use arith+shift, should be replaced with UBFM/EXTR variants.
  void LSR(ARM64Reg Rd, ARM64Reg Rm, int shift);
  void LSL(ARM64Reg Rd, ARM64Reg Rm, int shift);
  void ASR(ARM64Reg Rd, ARM64Reg Rm, int shift);
  void ROR(ARM64Reg Rd, ARM64Reg Rm, int shift);

  // Logical (immediate)
  void AND(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
  void ANDS(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
  void EOR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
  void ORR(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);
  void TST(ARM64Reg Rn, u32 immr, u32 imms, bool invert = false);

  // Add/subtract (immediate)
  void ADD(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
  void ADDS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
  void SUB(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
  void SUBS(ARM64Reg Rd, ARM64Reg Rn, u32 imm, bool shift = false);
  void CMP(ARM64Reg Rn, u32 imm, bool shift = false);
  void CMN(ARM64Reg Rn, u32 imm, bool shift = false);

  // Data Processing (Immediate)
  void MOVZ(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0);
  void MOVN(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0);
  void MOVK(ARM64Reg Rd, u32 imm, ShiftAmount pos = SHIFT_0);

  // Bitfield move
  void BFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms);
  void SBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms);
  void UBFM(ARM64Reg Rd, ARM64Reg Rn, u32 immr, u32 imms);
  void BFI(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width);
  void UBFIZ(ARM64Reg Rd, ARM64Reg Rn, u32 lsb, u32 width);

  // Extract register (ROR with two inputs, if same then faster on A67)
  void EXTR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u32 shift);

  // Aliases
  void SXTB(ARM64Reg Rd, ARM64Reg Rn);
  void SXTH(ARM64Reg Rd, ARM64Reg Rn);
  void SXTW(ARM64Reg Rd, ARM64Reg Rn);
  void UXTB(ARM64Reg Rd, ARM64Reg Rn);
  void UXTH(ARM64Reg Rd, ARM64Reg Rn);

  void UBFX(ARM64Reg Rd, ARM64Reg Rn, int lsb, int width) {
    UBFM(Rd, Rn, lsb, lsb + width - 1);
  }
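
  // For example, UBFX(W0, W1, 8, 8) extracts bits [15:8] of W1 into the low
  // byte of W0, i.e. W0 = (W1 >> 8) & 0xFF.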

  // Load Register (Literal)
  void LDR(ARM64Reg Rt, u32 imm);
  void LDRSW(ARM64Reg Rt, u32 imm);
  void PRFM(ARM64Reg Rt, u32 imm);

  // Load/Store Exclusive
  void STXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void STLXRB(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void LDXRB(ARM64Reg Rt, ARM64Reg Rn);
  void LDAXRB(ARM64Reg Rt, ARM64Reg Rn);
  void STLRB(ARM64Reg Rt, ARM64Reg Rn);
  void LDARB(ARM64Reg Rt, ARM64Reg Rn);
  void STXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void STLXRH(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void LDXRH(ARM64Reg Rt, ARM64Reg Rn);
  void LDAXRH(ARM64Reg Rt, ARM64Reg Rn);
  void STLRH(ARM64Reg Rt, ARM64Reg Rn);
  void LDARH(ARM64Reg Rt, ARM64Reg Rn);
  void STXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void STLXR(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rn);
  void STXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn);
  void STLXP(ARM64Reg Rs, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn);
  void LDXR(ARM64Reg Rt, ARM64Reg Rn);
  void LDAXR(ARM64Reg Rt, ARM64Reg Rn);
  void LDXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn);
  void LDAXP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn);
  void STLR(ARM64Reg Rt, ARM64Reg Rn);
  void LDAR(ARM64Reg Rt, ARM64Reg Rn);

  // Load/Store no-allocate pair (offset)
  void STNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm);
  void LDNP(ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, u32 imm);

  // Load/Store register (immediate indexed)
  void STRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDRB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDRSB(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDRH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDRSH(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDR(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDRSW(IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);

  // Load/Store register (register offset)
  void STRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDRB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDRSB(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void STRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDRH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDRSH(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void STR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDR(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDRSW(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void PRFM(ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
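
  // Register-offset sketch: LDR(W0, X1, ArithOption(W2, true)) loads 32 bits
  // from X1 + (W2 zero-extended and scaled by the access size, here LSL #2).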

  // Load/Store register (unscaled offset)
  void STURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDURB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDURSB(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDURH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDURSH(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDUR(ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void LDURSW(ARM64Reg Rt, ARM64Reg Rn, s32 imm);

  // Load/Store pair
  void LDP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
  void LDPSW(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
  void STP(IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);

  // Address of label/page PC-relative
  void ADR(ARM64Reg Rd, s32 imm);
  void ADRP(ARM64Reg Rd, s32 imm);

  // Wrapper around MOVZ+MOVK
  void MOVI2R(ARM64Reg Rd, u64 imm, bool optimize = true);
  template <class P>
  void MOVP2R(ARM64Reg Rd, P *ptr) {
    _assert_msg_(Is64Bit(Rd), "Can't store pointers in 32-bit registers");
    MOVI2R(Rd, (uintptr_t)ptr);
  }
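
  // Assumed semantics of the optimize flag (sketch): MOVI2R(X0, imm) picks a
  // short MOVZ/MOVN/MOVK sequence for the constant, while optimize = false
  // keeps the sequence at a fixed length so the constant can be patched later.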

  // Wrapper around AND x, y, imm etc. If you are sure the imm will work, no need to pass a scratch register.
  void ANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void ANDSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void TSTI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG) { ANDSI2R(Is64Bit(Rn) ? ZR : WZR, Rn, imm, scratch); }
  void ORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void EORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void CMPI2R(ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);

  void ADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void SUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);
  void SUBSI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm, ARM64Reg scratch = INVALID_REG);

  bool TryADDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
  bool TrySUBI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
  bool TryCMPI2R(ARM64Reg Rn, u64 imm);

  bool TryANDI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
  bool TryORRI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
  bool TryEORI2R(ARM64Reg Rd, ARM64Reg Rn, u64 imm);
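
  // Typical use (sketch, W9 is an arbitrary illustrative scratch register):
  //   if (!TryADDI2R(W0, W0, imm))
  //     ADDI2R(W0, W0, imm, W9);  // imm didn't encode; W9 holds it instead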

  // Pseudo-instruction for convenience. PUSH pushes 16 bytes even though we only push a single register.
  // This is so the stack pointer is always 16-byte aligned, which is checked by hardware!
  void PUSH(ARM64Reg Rd);
  void POP(ARM64Reg Rd);
  void PUSH2(ARM64Reg Rd, ARM64Reg Rn);
  void POP2(ARM64Reg Rd, ARM64Reg Rn);
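
  // When saving two registers, PUSH2/POP2 keep the same 16-byte alignment
  // with a single paired access, so PUSH2(X19, X20) is generally preferable
  // to two separate PUSH calls.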

  // Utility to generate a call to a std::function object.
  //
  // Unfortunately, calling operator() directly is undefined behavior in C++
  // (this method might be a thunk in the case of multi-inheritance) so we
  // have to go through a trampoline function.
  template <typename T, typename... Args>
  static void CallLambdaTrampoline(const std::function<T(Args...)>* f,
                                   Args... args)
  {
    (*f)(args...);
  }

  // This function expects you to have set up the state.
  // Overwrites X0 and X30
  template <typename T, typename... Args>
  ARM64Reg ABI_SetupLambda(const std::function<T(Args...)>* f)
  {
    auto trampoline = &ARM64XEmitter::CallLambdaTrampoline<T, Args...>;
    MOVI2R(X30, (uintptr_t)trampoline);
    MOVI2R(X0, (uintptr_t)const_cast<void*>((const void*)f));
    return X30;
  }
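
  // Usage sketch: X0 already carries the std::function pointer (the
  // trampoline's first argument), so remaining arguments start at X1/W1:
  //   static std::function<void(int)> hook = ...;  // must outlive the JIT code
  //   MOVI2R(W1, 42);
  //   BLR(ABI_SetupLambda(&hook));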

  // Plain function call
  void QuickCallFunction(ARM64Reg scratchreg, const void *func);
  template <typename T> void QuickCallFunction(ARM64Reg scratchreg, T func) {
    QuickCallFunction(scratchreg, (const void *)func);
  }
};

class ARM64FloatEmitter
{
public:
  ARM64FloatEmitter(ARM64XEmitter* emit) : m_emit(emit) {}

  void LDR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STR(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);

  // Loadstore unscaled
  void LDUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void STUR(u8 size, ARM64Reg Rt, ARM64Reg Rn, s32 imm);

  // Loadstore single structure
  void LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn);
  void LD1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm);
  void LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn);
  void LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn);
  void LD1R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
  void LD2R(u8 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
  void ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn);
  void ST1(u8 size, ARM64Reg Rt, u8 index, ARM64Reg Rn, ARM64Reg Rm);

  // Loadstore multiple structure
  void LD1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
  void LD1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);
  void ST1(u8 size, u8 count, ARM64Reg Rt, ARM64Reg Rn);
  void ST1(u8 size, u8 count, IndexType type, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm = SP);

  // Loadstore paired
  void LDP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
  void STP(u8 size, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);

  // Loadstore register offset
  void STR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void LDR(u8 size, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);

  // Scalar - 1 Source
  void FABS(ARM64Reg Rd, ARM64Reg Rn);
  void FNEG(ARM64Reg Rd, ARM64Reg Rn);
  void FSQRT(ARM64Reg Rd, ARM64Reg Rn);
  void FMOV(ARM64Reg Rd, ARM64Reg Rn, bool top = false); // Also generalized move between GPR/FP

  // Scalar - pairwise
  void FADDP(ARM64Reg Rd, ARM64Reg Rn);
  void FMAXP(ARM64Reg Rd, ARM64Reg Rn);
  void FMINP(ARM64Reg Rd, ARM64Reg Rn);
  void FMAXNMP(ARM64Reg Rd, ARM64Reg Rn);
  void FMINNMP(ARM64Reg Rd, ARM64Reg Rn);

  // Scalar - 2 Source
  void FADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FDIV(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMAX(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMIN(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMAXNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMINNM(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FNMUL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);

  // Scalar - 3 Source. Note - the accumulator is last on ARM!
  void FMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void FMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void FNMADD(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);
  void FNMSUB(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra);

  // Scalar floating point immediate
  void FMOV(ARM64Reg Rd, uint8_t imm8);

  // Vector
  void AND(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EOR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void BSL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void BIT(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void BIF(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index);
  void FABS(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FADD(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FADDP(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMLA(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMLS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FCVTL(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCVTL2(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCVTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void FCVTZS(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCVTZU(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FDIV(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FNEG(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FRSQRTE(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FSUB(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void NOT(ARM64Reg Rd, ARM64Reg Rn);
  void ORR(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void MOV(ARM64Reg Rd, ARM64Reg Rn) {
    ORR(Rd, Rn, Rn);
  }

  void UMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void UMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SMIN(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void SMAX(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);

  void REV16(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void REV32(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void REV64(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void SCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale);
  void UCVTF(u8 size, ARM64Reg Rd, ARM64Reg Rn, int scale);
  void SQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void SQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void UQXTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void UQXTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void XTN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);
  void XTN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn);

  void CMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMHI(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMHS(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMTST(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void CMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void CMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void CMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void CMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void CMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn);

  // Move
  void DUP(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void INS(u8 size, ARM64Reg Rd, u8 index, ARM64Reg Rn);
  void INS(u8 size, ARM64Reg Rd, u8 index1, ARM64Reg Rn, u8 index2);
  void UMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index);
  void SMOV(u8 size, ARM64Reg Rd, ARM64Reg Rn, u8 index);

  // Vector immediates
  void FMOV(u8 size, ARM64Reg Rd, u8 imm8);
  // MSL means bits shifted in are 1s. For size=64, each bit of imm8 is expanded to 8 actual bits.
  void MOVI(u8 size, ARM64Reg Rd, u8 imm8, u8 shift = 0, bool MSL = false);
  void MVNI(u8 size, ARM64Reg Rd, u8 imm8, u8 shift = 0, bool MSL = false);
  void ORR(u8 size, ARM64Reg Rd, u8 imm8, u8 shift = 0);
  void BIC(u8 size, ARM64Reg Rd, u8 imm8, u8 shift = 0);

  // One source
  void FCVT(u8 size_to, u8 size_from, ARM64Reg Rd, ARM64Reg Rn);

  // Scalar convert float to int, in a lot of variants.
  // Note that the scalar version of this operation has two encodings, one that goes to an integer register
  // and one that outputs to a scalar fp register.
  void FCVTS(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round);
  void FCVTU(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round);

  // Scalar convert int to float. No rounding mode specifier necessary.
  void SCVTF(ARM64Reg Rd, ARM64Reg Rn);
  void UCVTF(ARM64Reg Rd, ARM64Reg Rn);

  // Scalar fixed point to float. scale is the number of fractional bits.
  void SCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale);
  void UCVTF(ARM64Reg Rd, ARM64Reg Rn, int scale);

  // Float comparison
  void FCMP(ARM64Reg Rn, ARM64Reg Rm);
  void FCMP(ARM64Reg Rn);
  void FCMPE(ARM64Reg Rn, ARM64Reg Rm);
  void FCMPE(ARM64Reg Rn);
  void FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FCMEQ(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FCMGE(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void FCMGT(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCMLE(u8 size, ARM64Reg Rd, ARM64Reg Rn);
  void FCMLT(u8 size, ARM64Reg Rd, ARM64Reg Rn);

  // Conditional select
  void FCSEL(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, CCFlags cond);

  // Conditional compare
  void FCCMP(ARM64Reg Rn, ARM64Reg Rm, u8 nzcv, CCFlags cond);
  void FCCMPE(ARM64Reg Rn, ARM64Reg Rm, u8 nzcv, CCFlags cond);

  // Permute
  void UZP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void TRN1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ZIP1(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void UZP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void TRN2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void ZIP2(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  // Related to permute, extract vector from pair (always by byte arrangement.)
  void EXT(ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, int index);

  // Shift by immediate
  void SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void SSHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void USHLL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void SHRN2(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn);
  void SXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn);
  void UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn);
  void UXTL2(u8 src_size, ARM64Reg Rd, ARM64Reg Rn);

  void SHL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void USHR(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);
  void SSHR(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift);

  // vector x indexed element
  void FMUL(u8 size, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index);
  void FMLA(u8 esize, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, u8 index);

  void MOVI2F(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG, bool negate = false);
  void MOVI2FDUP(ARM64Reg Rd, float value, ARM64Reg scratch = INVALID_REG, bool negate = false);

  // ABI related
  void ABI_PushRegisters(uint32_t gpr_registers, uint32_t fp_registers);
  void ABI_PopRegisters(uint32_t gpr_registers, uint32_t fp_registers);

private:
  ARM64XEmitter* m_emit;
  inline void Write32(u32 value) { m_emit->Write32(value); }

  // Emitting functions
  void EmitLoadStoreImmediate(u8 size, u32 opc, IndexType type, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void EmitScalar2Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EmitThreeSame(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EmitCopy(bool Q, u32 op, u32 imm5, u32 imm4, ARM64Reg Rd, ARM64Reg Rn);
  void EmitScalarPairwise(bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void Emit2RegMisc(bool Q, bool U, u32 size, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn);
  void EmitLoadStoreSingleStructure(bool L, bool R, u32 opcode, bool S, u32 size, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
  void Emit1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitConversion(bool sf, bool S, u32 type, u32 rmode, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitConversion2(bool sf, bool S, bool direction, u32 type, u32 rmode, u32 opcode, int scale, ARM64Reg Rd, ARM64Reg Rn);
  void EmitCompare(bool M, bool S, u32 op, u32 opcode2, ARM64Reg Rn, ARM64Reg Rm);
  void EmitCondSelect(bool M, bool S, CCFlags cond, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EmitCondCompare(bool M, bool S, CCFlags cond, int op, u8 nzcv, ARM64Reg Rn, ARM64Reg Rm);
  void EmitPermute(u32 size, u32 op, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EmitScalarImm(bool M, bool S, u32 type, u32 imm5, ARM64Reg Rd, u32 imm8);
  void EmitShiftImm(bool Q, bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitScalarShiftImm(bool U, u32 immh, u32 immb, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitLoadStoreMultipleStructure(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn);
  void EmitLoadStoreMultipleStructurePost(u32 size, bool L, u32 opcode, ARM64Reg Rt, ARM64Reg Rn, ARM64Reg Rm);
  void EmitScalar1Source(bool M, bool S, u32 type, u32 opcode, ARM64Reg Rd, ARM64Reg Rn);
  void EmitVectorxElement(bool U, u32 size, bool L, u32 opcode, bool H, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm);
  void EmitLoadStoreUnscaled(u32 size, u32 op, ARM64Reg Rt, ARM64Reg Rn, s32 imm);
  void EmitConvertScalarToInt(ARM64Reg Rd, ARM64Reg Rn, RoundingMode round, bool sign);
  void EmitScalar3Source(bool isDouble, ARM64Reg Rd, ARM64Reg Rn, ARM64Reg Rm, ARM64Reg Ra, int opcode);
  void EncodeLoadStorePair(u32 size, bool load, IndexType type, ARM64Reg Rt, ARM64Reg Rt2, ARM64Reg Rn, s32 imm);
  void EncodeLoadStoreRegisterOffset(u32 size, bool load, ARM64Reg Rt, ARM64Reg Rn, ArithOption Rm);
  void EncodeModImm(bool Q, u8 op, u8 cmode, u8 o2, ARM64Reg Rd, u8 abcdefgh);

  void SSHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper);
  void USHLL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper);
  void SHRN(u8 dest_size, ARM64Reg Rd, ARM64Reg Rn, u32 shift, bool upper);
  void SXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper);
  void UXTL(u8 src_size, ARM64Reg Rd, ARM64Reg Rn, bool upper);
};

class ARM64CodeBlock : public CodeBlock<ARM64XEmitter>
{
private:
  void PoisonMemory(int offset) override;
};

}  // namespace Arm64Gen