Merge x64 emitter from a newer Dolphin version.

This one can generate slightly smaller code by exploiting EAX-only
encodings and various other short instruction forms, and it adds support
for many newer CPU instructions.
Henrik Rydgård 2014-10-10 20:41:00 +02:00 committed by Henrik Rydgård
parent 3b1476c8ec
commit 7bde976069
4 changed files with 912 additions and 455 deletions
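The smaller code mentioned above comes from standard x86 short forms: several ALU instructions have a dedicated one-byte opcode when the destination is EAX/RAX, which drops the ModRM byte. A hedged sketch using this emitter's API (the buffer is hypothetical; the byte counts follow standard x86 encoding, not measurements of this commit):

    XEmitter emit(buffer);                 // 'buffer' is a hypothetical code buffer
    emit.CMP(32, R(EAX), Imm32(0x1000));   // 3D 00 10 00 00    - 5 bytes, EAX short form
    emit.CMP(32, R(EBX), Imm32(0x1000));   // 81 FB 00 10 00 00 - 6 bytes, generic form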


@@ -49,6 +49,17 @@ void do_cpuid(u32 regs[4], u32 cpuid_leaf) {
#ifdef _M_SSE
#include <xmmintrin.h>
#define _XCR_XFEATURE_ENABLED_MASK 0
static unsigned long long _xgetbv(unsigned int index)
{
unsigned int eax, edx;
__asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(index));
return ((unsigned long long)edx << 32) | eax;
}
#else
#define _XCR_XFEATURE_ENABLED_MASK 0
#endif
#if defined __FreeBSD__
@@ -172,6 +183,38 @@ void CPUInfo::Detect() {
bFMA = true;
}
if ((cpu_id[2] >> 25) & 1) bAES = true;
if ((cpu_id[3] >> 24) & 1)
{
// We can use FXSAVE.
bFXSR = true;
}
// AVX support requires 3 separate checks:
// - Is the AVX bit set in CPUID?
// - Is the XSAVE bit set in CPUID?
// - XGETBV result has the XCR bit set.
if (((cpu_id[2] >> 28) & 1) && ((cpu_id[2] >> 27) & 1))
{
if ((_xgetbv(_XCR_XFEATURE_ENABLED_MASK) & 0x6) == 0x6)
{
bAVX = true;
if ((cpu_id[2] >> 12) & 1)
bFMA = true;
}
}
if (max_std_fn >= 7)
{
do_cpuid(cpu_id, 0x00000007);
// careful; we can't enable AVX2 unless the XSAVE/XGETBV checks above passed
if ((cpu_id[1] >> 5) & 1)
bAVX2 = bAVX;
if ((cpu_id[1] >> 3) & 1)
bBMI1 = true;
if ((cpu_id[1] >> 8) & 1)
bBMI2 = true;
}
}
if (max_ex_fn >= 0x80000004) {
// Extract brand string
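For reference, the three checks above can be exercised standalone; a minimal sketch assuming GCC or Clang on x86 (illustration only, not part of this commit):

    #include <cpuid.h>
    #include <cstdio>

    static unsigned long long xgetbv0() {
        unsigned int eax, edx;
        __asm__ __volatile__("xgetbv" : "=a"(eax), "=d"(edx) : "c"(0));
        return ((unsigned long long)edx << 32) | eax;
    }

    int main() {
        unsigned int a, b, c, d;
        __get_cpuid(1, &a, &b, &c, &d);
        bool avx_bit = (c >> 28) & 1;    // CPUID.1:ECX.AVX
        bool osxsave = (c >> 27) & 1;    // CPUID.1:ECX.OSXSAVE - XGETBV is usable
        // XCR0 bit 1 (SSE state) and bit 2 (AVX state) must both be
        // OS-enabled; that is the 0x6 mask used in the diff above.
        bool avx_usable = avx_bit && osxsave && (xgetbv0() & 0x6) == 0x6;
        printf("AVX usable: %d\n", avx_usable ? 1 : 0);
        return 0;
    }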


@@ -56,10 +56,15 @@ struct CPUInfo {
bool bLZCNT;
bool bSSE4A;
bool bAVX;
bool bAVX2;
bool bFMA;
bool bAES;
bool bLAHFSAHF64;
bool bLongMode;
bool bBMI1;
bool bBMI2;
bool bMOVBE;
bool bFXSR;
// ARM specific CPUInfo
bool bSwp;

File diff suppressed because it is too large.


@@ -22,6 +22,10 @@
#include "Common.h"
#ifdef _M_X64
#define _ARCH_64
#endif
namespace Gen
{
@@ -55,10 +59,10 @@ enum CCFlags
{
CC_O = 0,
CC_NO = 1,
CC_B = 2, CC_C = 2, CC_NAE = 2,
CC_NB = 3, CC_NC = 3, CC_AE = 3,
CC_B = 2, CC_C = 2, CC_NAE = 2,
CC_NB = 3, CC_NC = 3, CC_AE = 3,
CC_Z = 4, CC_E = 4,
CC_NZ = 5, CC_NE = 5,
CC_NZ = 5, CC_NE = 5,
CC_BE = 6, CC_NA = 6,
CC_NBE = 7, CC_A = 7,
CC_S = 8,
@@ -121,6 +125,16 @@ enum {
CMP_ORD = 7,
};
enum FloatOp {
floatLD = 0,
floatST = 2,
floatSTP = 3,
floatLD80 = 5,
floatSTP80 = 7,
floatINVALID = -1,
};
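For reference, these values are the /reg opcode extensions of the x87 memory forms; the escape byte selects the operand size (standard x87 encoding, listed here for orientation, not taken from this diff):

    // FLD  m32 = D9 /0 (floatLD)    FLD  m64 = DD /0    FLD  m80 = DB /5 (floatLD80)
    // FST  m32 = D9 /2 (floatST)    FST  m64 = DD /2
    // FSTP m32 = D9 /3 (floatSTP)   FSTP m64 = DD /3    FSTP m80 = DB /7 (floatSTP80)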
class XEmitter;
// RIP addressing does not benefit from micro op fusion on Core arch
@@ -136,9 +150,15 @@ struct OpArg
//if scale == 0 never mind offsetting
offset = _offset;
}
bool operator==(OpArg b)
{
return operandReg == b.operandReg && scale == b.scale && offsetOrBaseReg == b.offsetOrBaseReg &&
indexReg == b.indexReg && offset == b.offset;
}
void WriteRex(XEmitter *emit, int opBits, int bits, int customOp = -1) const;
void WriteVex(XEmitter* emit, int size, int packed, Gen::X64Reg regOp1, X64Reg regOp2) const;
void WriteRest(XEmitter *emit, int extraBytes=0, X64Reg operandReg=(X64Reg)0xFF, bool warn_64bit_offset = true) const;
void WriteVex(XEmitter* emit, X64Reg regOp1, X64Reg regOp2, int L, int pp, int mmmmm, int W = 0) const;
void WriteRest(XEmitter *emit, int extraBytes=0, X64Reg operandReg=INVALID_REG, bool warn_64bit_offset = true) const;
void WriteFloatModRM(XEmitter *emit, FloatOp op);
void WriteSingleByteOp(XEmitter *emit, u8 op, X64Reg operandReg, int bits);
// This one is public - must be written to
u64 offset; // use RIP-relative as much as possible - 64-bit immediates are not available.
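The parameters of the new WriteVex overload map directly onto the standard VEX prefix fields (listed for reference; this is generic x86 encoding, not specific to this commit):

    // L      vector length: 0 = 128-bit (xmm), 1 = 256-bit (ymm)
    // pp     implied legacy prefix: 0 = none, 1 = 0x66, 2 = 0xF3, 3 = 0xF2
    // mmmmm  implied escape bytes:  1 = 0F,   2 = 0F 38,  3 = 0F 3A
    // W      REX.W analogue; selects 64-bit operand size where applicable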
@@ -147,7 +167,8 @@ struct OpArg
void WriteNormalOp(XEmitter *emit, bool toRM, NormalOp op, const OpArg &operand, int bits) const;
bool IsImm() const {return scale == SCALE_IMM8 || scale == SCALE_IMM16 || scale == SCALE_IMM32 || scale == SCALE_IMM64;}
bool IsSimpleReg() const {return scale == SCALE_NONE;}
bool IsSimpleReg(X64Reg reg) const {
bool IsSimpleReg(X64Reg reg) const
{
if (!IsSimpleReg())
return false;
return GetSimpleReg() == reg;
@@ -195,26 +216,35 @@ private:
u16 indexReg;
};
inline OpArg M(void *ptr) {return OpArg((u64)ptr, (int)SCALE_RIP);}
inline OpArg M(const void *ptr) {return OpArg((u64)ptr, (int)SCALE_RIP);}
template <typename T>
inline OpArg M(const T *ptr) {return OpArg((u64)(const void *)ptr, (int)SCALE_RIP);}
inline OpArg R(X64Reg value) {return OpArg(0, SCALE_NONE, value);}
inline OpArg R(X64Reg value) {return OpArg(0, SCALE_NONE, value);}
inline OpArg MatR(X64Reg value) {return OpArg(0, SCALE_ATREG, value);}
inline OpArg MDisp(X64Reg value, int offset) {
inline OpArg MDisp(X64Reg value, int offset)
{
return OpArg((u32)offset, SCALE_ATREG, value);
}
inline OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset) {
inline OpArg MComplex(X64Reg base, X64Reg scaled, int scale, int offset)
{
return OpArg(offset, scale, base, scaled);
}
inline OpArg MScaled(X64Reg scaled, int scale, int offset) {
inline OpArg MScaled(X64Reg scaled, int scale, int offset)
{
if (scale == SCALE_1)
return OpArg(offset, SCALE_ATREG, scaled);
else
return OpArg(offset, scale | 0x20, RAX, scaled);
}
inline OpArg MRegSum(X64Reg base, X64Reg offset) {
inline OpArg MRegSum(X64Reg base, X64Reg offset)
{
return MComplex(base, offset, 1, 0);
}
inline OpArg Imm8 (u8 imm) {return OpArg(imm, SCALE_IMM8);}
inline OpArg Imm16(u16 imm) {return OpArg(imm, SCALE_IMM16);} //rarely used
inline OpArg Imm32(u32 imm) {return OpArg(imm, SCALE_IMM32);}
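An informal key to these helpers and the addressing mode each produces (assuming the usual SCALE_* constants from this header):

    R(RAX)                            rax                register direct
    MatR(RBX)                         [rbx]
    MDisp(RBX, 16)                    [rbx + 16]
    MScaled(RCX, SCALE_4, 0)          [rcx*4]
    MComplex(RBX, RCX, SCALE_4, 16)   [rbx + rcx*4 + 16]
    MRegSum(RBX, RCX)                 [rbx + rcx]
    Imm32(0x1000)                     32-bit immediate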
@@ -226,19 +256,23 @@ inline OpArg SImmAuto(s32 imm) {
return OpArg(imm, (imm >= 128 || imm < -128) ? SCALE_IMM32 : SCALE_IMM8);
}
#ifdef _M_X64
#ifdef _ARCH_64
inline OpArg ImmPtr(const void* imm) {return Imm64((u64)imm);}
#else
inline OpArg ImmPtr(const void* imm) {return Imm32((u32)imm);}
#endif
inline u32 PtrOffset(const void* ptr, const void* base) {
#ifdef _M_X64
inline u32 PtrOffset(const void* ptr, const void* base)
{
#ifdef _ARCH_64
s64 distance = (s64)ptr-(s64)base;
if (distance >= 0x80000000LL ||
distance < -0x80000000LL) {
_assert_msg_(JIT, 0, "pointer offset out of range");
distance < -0x80000000LL)
{
_assert_msg_(DYNA_REC, 0, "pointer offset out of range");
return 0;
}
return (u32)distance;
#else
return (u32)ptr-(u32)base;
@@ -275,21 +309,31 @@ class XEmitter
friend struct OpArg; // for Write8 etc
private:
u8 *code;
bool flags_locked;
void CheckFlags();
void Rex(int w, int r, int x, int b);
void WriteSimple1Byte(int bits, u8 byte, X64Reg reg);
void WriteSimple2Byte(int bits, u8 byte1, u8 byte2, X64Reg reg);
void WriteMulDivType(int bits, OpArg src, int ext);
void WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2);
void WriteBitSearchType(int bits, X64Reg dest, OpArg src, u8 byte2, bool rep = false);
void WriteShift(int bits, OpArg dest, OpArg &shift, int ext);
void WriteBitTest(int bits, OpArg &dest, OpArg &index, int ext);
void WriteMXCSR(OpArg arg, int ext);
void WriteSSEOp(int size, u8 sseOp, bool packed, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteSSEOp2(int size, u8 sseOp, bool packed, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteAVXOp(int size, u8 sseOp, bool packed, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteAVXOp(int size, u8 sseOp, bool packed, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
void WriteSSEOp(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteSSSE3Op(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteSSE41Op(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp, OpArg arg, int extrabytes = 0);
void WriteAVXOp(u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
void WriteVEXOp(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
void WriteBMI1Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
void WriteBMI2Op(int size, u8 opPrefix, u16 op, X64Reg regOp1, X64Reg regOp2, OpArg arg, int extrabytes = 0);
void WriteFloatLoadStore(int bits, FloatOp op, FloatOp op_80b, OpArg arg);
void WriteNormalOp(XEmitter *emit, int bits, NormalOp op, const OpArg &a1, const OpArg &a2);
void ABI_CalculateFrameSize(u32 mask, size_t rsp_alignment, size_t needed_frame_size, size_t* shadowp, size_t* subtractionp, size_t* xmm_offsetp);
protected:
inline void Write8(u8 value) {*code++ = value;}
inline void Write16(u16 value) {*(u16*)code = (value); code += 2;}
@@ -297,8 +341,8 @@ protected:
inline void Write64(u64 value) {*(u64*)code = (value); code += 8;}
public:
XEmitter() { code = NULL; }
XEmitter(u8 *code_ptr) { code = code_ptr; }
XEmitter() { code = nullptr; flags_locked = false; }
XEmitter(u8 *code_ptr) { code = code_ptr; flags_locked = false; }
virtual ~XEmitter() {}
void WriteModRM(int mod, int rm, int reg);
@@ -312,6 +356,9 @@ public:
const u8 *GetCodePtr() const;
u8 *GetWritableCodePtr();
void LockFlags() { flags_locked = true; }
void UnlockFlags() { flags_locked = false; }
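LockFlags()/UnlockFlags() guard regions where EFLAGS must survive; presumably CheckFlags() asserts when a flag-clobbering instruction is emitted while locked. A hedged usage sketch (CMP and MOV are assumed from the wider emitter API):

    emit.CMP(32, R(EAX), Imm32(0));    // sets EFLAGS
    emit.LockFlags();                  // keep the CMP result alive
    emit.MOV(32, R(ECX), Imm32(1));    // MOV leaves EFLAGS untouched - OK
    emit.UnlockFlags();
    // a conditional branch on the CMP result can now be emitted safely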
// Looking for one of these? It's BANNED!! Some instructions are slow on modern CPU
// INC, DEC, LOOP, LOOPNE, LOOPE, ENTER, LEAVE, XCHG, XLAT, REP MOVSB/MOVSD, REP SCASD + other string instr.,
// INC and DEC are slow on Intel Core, but not on AMD. They create a
@@ -322,7 +369,7 @@ public:
void INT3();
// Do nothing
void NOP(int count = 1); //nop padding - TODO: fast nop slides, for amd and intel (check their manuals)
void NOP(size_t count = 1);
// Save energy in wait-loops on P4 only. Probably not too useful.
void PAUSE();
@@ -459,6 +506,14 @@ public:
void MOVSX(int dbits, int sbits, X64Reg dest, OpArg src); //automatically uses MOVSXD if necessary
void MOVZX(int dbits, int sbits, X64Reg dest, OpArg src);
// Available only on Atom or >= Haswell so far. Test with cpu_info.bMOVBE.
void MOVBE(int dbits, const OpArg& dest, const OpArg& src);
// Available only on AMD >= Phenom or Intel >= Haswell
void LZCNT(int bits, X64Reg dest, OpArg src);
// Note: this one is actually part of BMI1
void TZCNT(int bits, X64Reg dest, OpArg src);
// WARNING - These two take 11-13 cycles and are VectorPath! (AMD64)
void STMXCSR(OpArg memloc);
void LDMXCSR(OpArg memloc);
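A caution on LZCNT/TZCNT: they are encoded as REP-prefixed BSR/BSF (hence the new rep parameter on WriteBitSearchType above), so on CPUs without the feature the prefix is ignored and they silently execute as plain BSR/BSF with different semantics. The cpu_info checks are therefore mandatory, not advisory.

    // LZCNT r32, r/m32 = F3 0F BD /r   (REP BSR)
    // TZCNT r32, r/m32 = F3 0F BC /r   (REP BSF)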
@@ -467,7 +522,31 @@ public:
void LOCK();
void REP();
void REPNE();
void FSOverride();
void GSOverride();
// x87
enum x87StatusWordBits {
x87_InvalidOperation = 0x1,
x87_DenormalizedOperand = 0x2,
x87_DivisionByZero = 0x4,
x87_Overflow = 0x8,
x87_Underflow = 0x10,
x87_Precision = 0x20,
x87_StackFault = 0x40,
x87_ErrorSummary = 0x80,
x87_C0 = 0x100,
x87_C1 = 0x200,
x87_C2 = 0x400,
x87_TopOfStack = 0x2000 | 0x1000 | 0x800,
x87_C3 = 0x4000,
x87_FPUBusy = 0x8000,
};
void FLD(int bits, OpArg src);
void FST(int bits, OpArg dest);
void FSTP(int bits, OpArg dest);
void FNSTSW_AX();
void FWAIT();
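These status-word bits pair with FNSTSW_AX for the classic x87 compare-and-branch pattern; a hedged sketch (TEST and J_CC are assumed from the wider emitter API):

    // ...an x87 compare (not shown in this hunk) sets C0..C3...
    emit.FNSTSW_AX();                        // AX = x87 status word
    emit.TEST(16, R(RAX), Imm16(x87_C0));    // C0 = "below" after a compare
    // branch on the result with J_CC(CC_NZ, ...)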
// SSE/SSE2: Floating point arithmetic
@@ -490,14 +569,6 @@ public:
// SSE/SSE2: Floating point bitwise (yes)
void CMPSS(X64Reg regOp, OpArg arg, u8 compare);
void CMPSD(X64Reg regOp, OpArg arg, u8 compare);
void ANDSS(X64Reg regOp, OpArg arg);
void ANDSD(X64Reg regOp, OpArg arg);
void ANDNSS(X64Reg regOp, OpArg arg);
void ANDNSD(X64Reg regOp, OpArg arg);
void ORSS(X64Reg regOp, OpArg arg);
void ORSD(X64Reg regOp, OpArg arg);
void XORSS(X64Reg regOp, OpArg arg);
void XORSD(X64Reg regOp, OpArg arg);
inline void CMPEQSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_EQ); }
inline void CMPLTSS(X64Reg regOp, OpArg arg) { CMPSS(regOp, arg, CMP_LT); }
@@ -543,11 +614,8 @@ public:
// SSE/SSE2: Useful alternative to shuffle in some cases.
void MOVDDUP(X64Reg regOp, OpArg arg);
// THESE TWO ARE NEW AND UNTESTED
void UNPCKLPS(X64Reg dest, OpArg src);
void UNPCKHPS(X64Reg dest, OpArg src);
// These are OK.
void UNPCKLPD(X64Reg dest, OpArg src);
void UNPCKHPD(X64Reg dest, OpArg src);
@@ -568,7 +636,6 @@ public:
void MOVUPS(OpArg arg, X64Reg regOp);
void MOVUPD(OpArg arg, X64Reg regOp);
// Integers (NOTE: untested - I added these then it turned out I didn't have a use for them after all).
void MOVDQA(X64Reg regOp, OpArg arg);
void MOVDQA(OpArg arg, X64Reg regOp);
void MOVDQU(X64Reg regOp, OpArg arg);
@@ -579,6 +646,14 @@ public:
void MOVSS(OpArg arg, X64Reg regOp);
void MOVSD(OpArg arg, X64Reg regOp);
void MOVLPD(X64Reg regOp, OpArg arg);
void MOVHPD(X64Reg regOp, OpArg arg);
void MOVLPD(OpArg arg, X64Reg regOp);
void MOVHPD(OpArg arg, X64Reg regOp);
void MOVHLPS(X64Reg regOp1, X64Reg regOp2);
void MOVLHPS(X64Reg regOp1, X64Reg regOp2);
void MOVD_xmm(X64Reg dest, const OpArg &arg);
void MOVQ_xmm(X64Reg dest, OpArg arg);
void MOVD_xmm(const OpArg &arg, X64Reg src);
@@ -596,37 +671,34 @@ public:
void CVTPS2PD(X64Reg dest, OpArg src);
void CVTPD2PS(X64Reg dest, OpArg src);
void CVTSS2SD(X64Reg dest, OpArg src);
void CVTSI2SS(X64Reg dest, OpArg src);
void CVTSD2SS(X64Reg dest, OpArg src);
void CVTSD2SI(X64Reg dest, OpArg src);
void CVTSI2SD(X64Reg dest, OpArg src);
void CVTDQ2PD(X64Reg regOp, OpArg arg);
void CVTPD2DQ(X64Reg regOp, OpArg arg);
void CVTDQ2PS(X64Reg regOp, OpArg arg);
void CVTPS2DQ(X64Reg regOp, OpArg arg);
void CVTTSS2SI(X64Reg xregdest, OpArg arg); // Yeah, destination really is a GPR like EAX!
void CVTTPS2DQ(X64Reg regOp, OpArg arg);
void CVTSI2SS(X64Reg xregdest, OpArg arg); // Yeah, destination really is a GPR like EAX!
void CVTSS2SI(X64Reg xregdest, OpArg arg); // Yeah, destination really is a GPR like EAX!
void CVTTSD2SI(X64Reg xregdest, OpArg arg); // Yeah, destination really is a GPR like EAX!
void CVTTPD2DQ(X64Reg xregdest, OpArg arg);
void CVTTPD2DQ(X64Reg regOp, OpArg arg);
// Destinations are X64 regs (rax, rbx, ...) for these instructions.
void CVTSS2SI(X64Reg xregdest, OpArg src);
void CVTSD2SI(X64Reg xregdest, OpArg src);
void CVTTSS2SI(X64Reg xregdest, OpArg arg);
void CVTTSD2SI(X64Reg xregdest, OpArg arg);
// SSE2: Packed integer instructions
void PACKSSDW(X64Reg dest, OpArg arg);
void PACKSSWB(X64Reg dest, OpArg arg);
//void PACKUSDW(X64Reg dest, OpArg arg);
void PACKUSDW(X64Reg dest, OpArg arg);
void PACKUSWB(X64Reg dest, OpArg arg);
void PUNPCKLBW(X64Reg dest, const OpArg &arg);
void PUNPCKLWD(X64Reg dest, const OpArg &arg);
void PUNPCKLDQ(X64Reg dest, const OpArg &arg);
void PMOVSXBW(X64Reg dest, const OpArg &arg);
void PMOVSXBD(X64Reg dest, const OpArg &arg);
void PMOVSXWD(X64Reg dest, const OpArg &arg);
void PMOVZXBW(X64Reg dest, const OpArg &arg);
void PMOVZXBD(X64Reg dest, const OpArg &arg);
void PMOVZXWD(X64Reg dest, const OpArg &arg);
void PTEST(X64Reg dest, OpArg arg);
void PAND(X64Reg dest, OpArg arg);
void PANDN(X64Reg dest, OpArg arg);
void PXOR(X64Reg dest, OpArg arg);
@@ -680,29 +752,75 @@ public:
void PSHUFB(X64Reg dest, OpArg arg);
void PSHUFLW(X64Reg dest, OpArg arg, u8 shuffle);
void PSHUFHW(X64Reg dest, OpArg arg, u8 shuffle);
void PSRLW(X64Reg reg, int shift);
void PSRLD(X64Reg reg, int shift);
void PSRLQ(X64Reg reg, int shift);
void PSRLQ(X64Reg reg, OpArg arg);
void PSRLDQ(X64Reg reg, int shift);
void PSLLW(X64Reg reg, int shift);
void PSLLD(X64Reg reg, int shift);
void PSLLQ(X64Reg reg, int shift);
void PSRLDQ(X64Reg reg, int shift);
void PSLLDQ(X64Reg reg, int shift);
void PSRAW(X64Reg reg, int shift);
void PSRAD(X64Reg reg, int shift);
// SSE4: data type conversions
void PMOVSXBW(X64Reg dest, OpArg arg);
void PMOVSXBD(X64Reg dest, OpArg arg);
void PMOVSXBQ(X64Reg dest, OpArg arg);
void PMOVSXWD(X64Reg dest, OpArg arg);
void PMOVSXWQ(X64Reg dest, OpArg arg);
void PMOVSXDQ(X64Reg dest, OpArg arg);
void PMOVZXBW(X64Reg dest, OpArg arg);
void PMOVZXBD(X64Reg dest, OpArg arg);
void PMOVZXBQ(X64Reg dest, OpArg arg);
void PMOVZXWD(X64Reg dest, OpArg arg);
void PMOVZXWQ(X64Reg dest, OpArg arg);
void PMOVZXDQ(X64Reg dest, OpArg arg);
// SSE4: variable blend instructions (xmm0 implicit argument)
void PBLENDVB(X64Reg dest, OpArg arg);
void BLENDVPS(X64Reg dest, OpArg arg);
void BLENDVPD(X64Reg dest, OpArg arg);
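The variable blends read their selection mask from xmm0 implicitly, so the mask has to be staged there first; a hedged sketch (MOVAPS is assumed from the wider emitter API, and mask/dest/src are hypothetical registers):

    emit.MOVAPS(XMM0, R(mask));     // implicit mask operand lives in xmm0
    emit.BLENDVPS(dest, R(src));    // dest = per-lane mix of dest and src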
// AVX
void VADDSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VSUBSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VMULSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VDIVSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VADDPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VSUBPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VMULPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VDIVPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VSQRTSD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VPAND(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VPANDN(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VPOR(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VPXOR(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VSHUFPD(X64Reg regOp1, X64Reg regOp2, OpArg arg, u8 shuffle);
void VUNPCKLPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
void VUNPCKHPD(X64Reg regOp1, X64Reg regOp2, OpArg arg);
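These wrappers expose the non-destructive three-operand VEX form: presumably regOp1 is the destination and regOp2/arg are the two sources, so neither source is clobbered. For example (hedged sketch):

    emit.VADDSD(XMM0, XMM1, R(XMM2));   // xmm0 = xmm1 + xmm2; xmm1 is preserved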
void RTDSC();
// VEX GPR instructions
void SARX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
void SHLX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
void SHRX(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
void RORX(int bits, X64Reg regOp, OpArg arg, u8 rotate);
void PEXT(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
void PDEP(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
void MULX(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
void BZHI(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
void BLSR(int bits, X64Reg regOp, OpArg arg);
void BLSMSK(int bits, X64Reg regOp, OpArg arg);
void BLSI(int bits, X64Reg regOp, OpArg arg);
void BEXTR(int bits, X64Reg regOp1, OpArg arg, X64Reg regOp2);
void ANDN(int bits, X64Reg regOp1, X64Reg regOp2, OpArg arg);
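Unlike the legacy shifts, the BMI2 forms take the shift count as an explicit register operand and leave EFLAGS untouched, which composes well with the LockFlags() mechanism above. A sketch with the operand order inferred from the declarations (dest/src/count are hypothetical registers):

    emit.SHLX(64, dest, R(src), count);   // dest = src << count; flags preserved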
void RDTSC();
// Utility functions
// The difference between this and CALL is that this aligns the stack
@@ -719,6 +837,7 @@ public:
void ABI_CallFunctionC16(const void *func, u16 param1);
void ABI_CallFunctionCC16(const void *func, u32 param1, u16 param2);
// These only support u32 parameters, but that's enough for a lot of uses.
// These will destroy the 1 or 2 first "parameter regs".
void ABI_CallFunctionC(const void *func, u32 param1);
@@ -736,8 +855,8 @@ public:
void ABI_CallFunctionAA(const void *func, const Gen::OpArg &arg1, const Gen::OpArg &arg2);
// Pass a register as a parameter.
void ABI_CallFunctionR(const void *func, Gen::X64Reg reg1);
void ABI_CallFunctionRR(const void *func, Gen::X64Reg reg1, Gen::X64Reg reg2);
void ABI_CallFunctionR(const void *func, X64Reg reg1);
void ABI_CallFunctionRR(const void *func, X64Reg reg1, X64Reg reg2);
template <typename Tr, typename T1>
void ABI_CallFunctionC(Tr (*func)(T1), u32 param1) {
@@ -822,4 +941,4 @@ public:
} // namespace
#endif // _DOLPHIN_INTEL_CODEGEN_
#endif