mirror of https://github.com/RPCS3/llvm-mirror.git
Add an x86 prefix encoding for instructions that would decode to a different instruction if 0xf2/0xf3/0x66 were in front of them, but don't themselves have a prefix. For now this doesn't change any behavior, but the plan is to use it to fix some bugs in the disassembler.
llvm-svn: 201538
This commit is contained in:
parent 73eacbb02b
commit de78f4304d
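For context, the encoding change can be read directly off the diff below: the OpPrefix field in TSFlags grows from 2 to 3 bits (NoPrfx, PS, PD, XS, XD), and OpMap shifts up accordingly. The following is a minimal C++ sketch of how a consumer would unpack the widened fields, using the shift and mask values visible in the patch; the helper names getOpPrefix/getOpMap are illustrative only and are not part of the commit.

// Sketch only: field layout taken from the X86BaseInfo.h hunk in this commit.
#include <cstdint>

namespace X86IIExample {
  enum : uint64_t {
    OpPrefixShift = 9,
    OpPrefixMask  = 0x7ULL << OpPrefixShift,   // widened from 0x3 (2 bits) by this patch
    PS = 1ULL << OpPrefixShift,
    PD = 2ULL << OpPrefixShift,
    XS = 3ULL << OpPrefixShift,
    XD = 4ULL << OpPrefixShift,

    OpMapShift = OpPrefixShift + 3,            // was OpPrefixShift + 2 before the patch
    OpMapMask  = 0x1fULL << OpMapShift,
  };

  // Hypothetical accessors for illustration; the real helpers live in X86BaseInfo.h.
  inline uint64_t getOpPrefix(uint64_t TSFlags) {
    return (TSFlags & OpPrefixMask) >> OpPrefixShift;
  }
  inline uint64_t getOpMap(uint64_t TSFlags) {
    return (TSFlags & OpMapMask) >> OpMapShift;
  }
}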
@@ -335,21 +335,21 @@ namespace X86II {
// no prefix.
//
OpPrefixShift = 9,
OpPrefixMask = 0x3 << OpPrefixShift,
OpPrefixMask = 0x7 << OpPrefixShift,

// PD - Prefix code for packed double precision vector floating point
// operations performed in the SSE registers.
PD = 1 << OpPrefixShift,
// PS, PD - Prefix code for packed single and double precision vector
// floating point operations performed in the SSE registers.
PS = 1 << OpPrefixShift, PD = 2 << OpPrefixShift,

// XS, XD - These prefix codes are for single and double precision scalar
// floating point operations performed in the SSE registers.
XS = 2 << OpPrefixShift, XD = 3 << OpPrefixShift,
XS = 3 << OpPrefixShift, XD = 4 << OpPrefixShift,

//===------------------------------------------------------------------===//
// OpMap - This field determines which opcode map this instruction
// belongs to. i.e. one-byte, two-byte, 0x0f 0x38, 0x0f 0x3a, etc.
//
OpMapShift = OpPrefixShift + 2,
OpMapShift = OpPrefixShift + 3,
OpMapMask = 0x1f << OpMapShift,

// OB - OneByte - Set if this instruction has a one byte opcode.
@@ -850,7 +850,7 @@ multiclass avx512_cmp_packed<RegisterClass KRC, RegisterClass RC,
}

defm VCMPPSZ : avx512_cmp_packed<VK16, VR512, f512mem, v16f32,
"ps", SSEPackedSingle>, TB, EVEX_4V, EVEX_V512,
"ps", SSEPackedSingle>, PS, EVEX_4V, EVEX_V512,
EVEX_CD8<32, CD8VF>;
defm VCMPPDZ : avx512_cmp_packed<VK8, VR512, f512mem, v8f64,
"pd", SSEPackedDouble>, PD, EVEX_4V, VEX_W, EVEX_V512,
@@ -930,9 +930,9 @@ multiclass avx512_mask_mov_gpr<bits<8> opc_kr, bits<8> opc_rk,

let Predicates = [HasAVX512] in {
defm KMOVW : avx512_mask_mov<0x90, 0x90, 0x91, "kmovw", VK16, v16i1, i16mem>,
VEX, TB;
VEX, PS;
defm KMOVW : avx512_mask_mov_gpr<0x92, 0x93, "kmovw", VK16, GR32>,
VEX, TB;
VEX, PS;
}

let Predicates = [HasAVX512] in {
@@ -1005,7 +1005,7 @@ multiclass avx512_mask_unop<bits<8> opc, string OpcodeStr,
multiclass avx512_mask_unop_w<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode> {
defm W : avx512_mask_unop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
VEX, TB;
VEX, PS;
}

defm KNOT : avx512_mask_unop_w<0x44, "knot", not>;
@@ -1042,7 +1042,7 @@ multiclass avx512_mask_binop<bits<8> opc, string OpcodeStr,
multiclass avx512_mask_binop_w<bits<8> opc, string OpcodeStr,
SDPatternOperator OpNode> {
defm W : avx512_mask_binop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
VEX_4V, VEX_L, TB;
VEX_4V, VEX_L, PS;
}

def andn : PatFrag<(ops node:$i0, node:$i1), (and (not node:$i0), node:$i1)>;
@@ -1140,7 +1140,7 @@ multiclass avx512_mask_testop<bits<8> opc, string OpcodeStr, RegisterClass KRC,

multiclass avx512_mask_testop_w<bits<8> opc, string OpcodeStr, SDNode OpNode> {
defm W : avx512_mask_testop<opc, !strconcat(OpcodeStr, "w"), VK16, OpNode>,
VEX, TB;
VEX, PS;
}

defm KORTEST : avx512_mask_testop_w<0x98, "kortest", X86kortest>;
@@ -1232,14 +1232,14 @@ let Constraints = "$src1 = $dst" in {

defm VMOVAPSZ : avx512_mov_packed<0x28, VR512, VK16WM, f512mem, alignedloadv16f32,
"vmovaps", SSEPackedSingle>,
TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VMOVAPDZ : avx512_mov_packed<0x28, VR512, VK8WM, f512mem, alignedloadv8f64,
"vmovapd", SSEPackedDouble>,
PD, EVEX_V512, VEX_W,
EVEX_CD8<64, CD8VF>;
defm VMOVUPSZ : avx512_mov_packed<0x10, VR512, VK16WM, f512mem, loadv16f32,
"vmovups", SSEPackedSingle>,
TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VMOVUPDZ : avx512_mov_packed<0x10, VR512, VK8WM, f512mem, loadv8f64,
"vmovupd", SSEPackedDouble, 0>,
PD, EVEX_V512, VEX_W,
@@ -1247,7 +1247,7 @@ defm VMOVUPDZ : avx512_mov_packed<0x10, VR512, VK8WM, f512mem, loadv8f64,
def VMOVAPSZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
"vmovaps\t{$src, $dst|$dst, $src}",
[(alignedstore512 (v16f32 VR512:$src), addr:$dst)],
SSEPackedSingle>, EVEX, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSEPackedSingle>, EVEX, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
def VMOVAPDZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
"vmovapd\t{$src, $dst|$dst, $src}",
[(alignedstore512 (v8f64 VR512:$src), addr:$dst)],
@@ -1256,7 +1256,7 @@ def VMOVAPDZmr : AVX512PI<0x29, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$sr
def VMOVUPSZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
"vmovups\t{$src, $dst|$dst, $src}",
[(store (v16f32 VR512:$src), addr:$dst)],
SSEPackedSingle>, EVEX, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSEPackedSingle>, EVEX, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
def VMOVUPDZmr : AVX512PI<0x11, MRMDestMem, (outs), (ins f512mem:$dst, VR512:$src),
"vmovupd\t{$src, $dst|$dst, $src}",
[(store (v8f64 VR512:$src), addr:$dst)],
@@ -1902,13 +1902,13 @@ multiclass avx512_unpack_fp<bits<8> opc, SDNode OpNode, ValueType vt,

defm VUNPCKHPSZ: avx512_unpack_fp<0x15, X86Unpckh, v16f32, memopv8f64,
VR512, f512mem, "vunpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKHPDZ: avx512_unpack_fp<0x15, X86Unpckh, v8f64, memopv8f64,
VR512, f512mem, "vunpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
defm VUNPCKLPSZ: avx512_unpack_fp<0x14, X86Unpckl, v16f32, memopv8f64,
VR512, f512mem, "vunpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VUNPCKLPDZ: avx512_unpack_fp<0x14, X86Unpckl, v8f64, memopv8f64,
VR512, f512mem, "vunpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
@@ -2062,7 +2062,7 @@ multiclass avx512_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,

defm VADDPSZ : avx512_fp_packed<0x58, "addps", fadd, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;

defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
@@ -2071,7 +2071,7 @@ defm VADDPDZ : avx512_fp_packed<0x58, "addpd", fadd, VR512, v8f64, f512mem,

defm VMULPSZ : avx512_fp_packed<0x59, "mulps", fmul, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSE_ALU_ITINS_P.s, 1>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
SSE_ALU_ITINS_P.d, 1>,
@@ -2080,11 +2080,11 @@ defm VMULPDZ : avx512_fp_packed<0x59, "mulpd", fmul, VR512, v8f64, f512mem,
defm VMINPSZ : avx512_fp_packed<0x5D, "minps", X86fmin, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>,
EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VMAXPSZ : avx512_fp_packed<0x5F, "maxps", X86fmax, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 1>,
EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
EVEX_V512, PS, EVEX_CD8<32, CD8VF>;

defm VMINPDZ : avx512_fp_packed<0x5D, "minpd", X86fmin, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
@@ -2097,10 +2097,10 @@ defm VMAXPDZ : avx512_fp_packed<0x5F, "maxpd", X86fmax, VR512, v8f64, f512mem,

defm VSUBPSZ : avx512_fp_packed<0x5C, "subps", fsub, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 0>, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;
defm VDIVPSZ : avx512_fp_packed<0x5E, "divps", fdiv, VR512, v16f32, f512mem,
memopv16f32, f32mem, loadf32, "{1to16}", SSEPackedSingle,
SSE_ALU_ITINS_P.s, 0>, EVEX_V512, TB, EVEX_CD8<32, CD8VF>;
SSE_ALU_ITINS_P.s, 0>, EVEX_V512, PS, EVEX_CD8<32, CD8VF>;

defm VSUBPDZ : avx512_fp_packed<0x5C, "subpd", fsub, VR512, v8f64, f512mem,
memopv8f64, f64mem, loadf64, "{1to8}", SSEPackedDouble,
@@ -2884,7 +2884,7 @@ defm VCVTPD2PSZ : avx512_vcvt_fp_with_rc<0x5A, "vcvtpd2ps", VR512, VR256X, froun

defm VCVTPS2PDZ : avx512_vcvt_fp<0x5A, "vcvtps2pd", VR256X, VR512, fextend,
memopv4f64, f256mem, v8f64, v8f32,
SSEPackedDouble>, EVEX_V512, TB,
SSEPackedDouble>, EVEX_V512, PS,
EVEX_CD8<32, CD8VH>;
def : Pat<(v8f64 (extloadv8f32 addr:$src)),
(VCVTPS2PDZrm addr:$src)>;
@@ -2903,7 +2903,7 @@ def : Pat<(v8f32 (int_x86_avx512_mask_cvtpd2ps_512 (v8f64 VR512:$src),

defm VCVTDQ2PSZ : avx512_vcvt_fp_with_rc<0x5B, "vcvtdq2ps", VR512, VR512, sint_to_fp,
memopv8i64, i512mem, v16f32, v16i32,
SSEPackedSingle>, EVEX_V512, TB,
SSEPackedSingle>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;

defm VCVTDQ2PDZ : avx512_vcvt_fp<0xE6, "vcvtdq2pd", VR256X, VR512, sint_to_fp,
@@ -2923,7 +2923,7 @@ defm VCVTTPD2DQZ : avx512_vcvt_fp<0xE6, "vcvttpd2dq", VR512, VR256X, fp_to_sint,

defm VCVTTPS2UDQZ : avx512_vcvt_fp<0x78, "vcvttps2udq", VR512, VR512, fp_to_uint,
memopv16f32, f512mem, v16i32, v16f32,
SSEPackedSingle>, EVEX_V512, TB,
SSEPackedSingle>, EVEX_V512, PS,
EVEX_CD8<32, CD8VF>;

// cvttps2udq (src, 0, mask-all-ones, sae-current)
@@ -2933,7 +2933,7 @@ def : Pat<(v16i32 (int_x86_avx512_mask_cvttps2udq_512 (v16f32 VR512:$src),

defm VCVTTPD2UDQZ : avx512_vcvt_fp<0x78, "vcvttpd2udq", VR512, VR256X, fp_to_uint,
memopv8f64, f512mem, v8i32, v8f64,
SSEPackedDouble>, EVEX_V512, TB, VEX_W,
SSEPackedDouble>, EVEX_V512, PS, VEX_W,
EVEX_CD8<64, CD8VF>;

// cvttpd2udq (src, 0, mask-all-ones, sae-current)
@@ -3003,10 +3003,10 @@ def : Pat <(v8i32 (int_x86_avx512_mask_cvtpd2dq_512 (v8f64 VR512:$src),

defm VCVTPS2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtps2udq", VR512, VR512,
memopv16f32, f512mem, SSEPackedSingle>,
TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VCVTPD2UDQZ : avx512_vcvt_fp2int<0x79, "vcvtpd2udq", VR512, VR256X,
memopv8f64, f512mem, SSEPackedDouble>, VEX_W,
TB, EVEX_V512, EVEX_CD8<64, CD8VF>;
PS, EVEX_V512, EVEX_CD8<64, CD8VF>;

def : Pat <(v16i32 (int_x86_avx512_mask_cvtps2udq_512 (v16f32 VR512:$src),
(v16i32 immAllZerosV), (i16 -1), imm:$rc)),
@@ -3063,14 +3063,14 @@ def : Pat<(v16f32 (int_x86_avx512_mask_vcvtph2ps_512 (v16i16 VR256X:$src),

let Defs = [EFLAGS], Predicates = [HasAVX512] in {
defm VUCOMISSZ : sse12_ord_cmp<0x2E, FR32X, X86cmp, f32, f32mem, loadf32,
"ucomiss">, TB, EVEX, VEX_LIG,
"ucomiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VUCOMISDZ : sse12_ord_cmp<0x2E, FR64X, X86cmp, f64, f64mem, loadf64,
"ucomisd">, PD, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
let Pattern = []<dag> in {
defm VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, undef, v4f32, f128mem, load,
"comiss">, TB, EVEX, VEX_LIG,
"comiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, undef, v2f64, f128mem, load,
"comisd">, PD, EVEX,
@@ -3078,14 +3078,14 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
}
let isCodeGenOnly = 1 in {
defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, EVEX, VEX_LIG,
load, "ucomiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
load, "ucomisd">, PD, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;

defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
load, "comiss">, TB, EVEX, VEX_LIG,
load, "comiss">, PS, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
load, "comisd">, PD, EVEX,
@@ -3841,7 +3841,7 @@ multiclass avx512_shufp<RegisterClass RC, X86MemOperand x86memop,
}

defm VSHUFPSZ : avx512_shufp<VR512, f512mem, v16f32, "vshufps", memopv16f32,
SSEPackedSingle>, TB, EVEX_V512, EVEX_CD8<32, CD8VF>;
SSEPackedSingle>, PS, EVEX_V512, EVEX_CD8<32, CD8VF>;
defm VSHUFPDZ : avx512_shufp<VR512, f512mem, v8f64, "vshufpd", memopv8f64,
SSEPackedDouble>, PD, VEX_W, EVEX_V512, EVEX_CD8<64, CD8VF>;
@@ -1311,8 +1311,8 @@ multiclass bmi_andn<string mnemonic, RegisterClass RC, X86MemOperand x86memop,
}

let Predicates = [HasBMI], Defs = [EFLAGS] in {
defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32>, T8, VEX_4V;
defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64>, T8, VEX_4V, VEX_W;
defm ANDN32 : bmi_andn<"andn{l}", GR32, i32mem, loadi32>, T8PS, VEX_4V;
defm ANDN64 : bmi_andn<"andn{q}", GR64, i64mem, loadi64>, T8PS, VEX_4V, VEX_W;
}

let Predicates = [HasBMI] in {
@@ -114,13 +114,14 @@ def CD8VT4 : CD8VForm<6>; // v := 4
def CD8VT8 : CD8VForm<7>; // v := 8

// Class specifying the prefix used an opcode extension.
class Prefix<bits<2> val> {
bits<2> Value = val;
class Prefix<bits<3> val> {
bits<3> Value = val;
}
def NoPrfx : Prefix<0>;
def PD : Prefix<1>;
def XS : Prefix<2>;
def XD : Prefix<3>;
def PS : Prefix<1>;
def PD : Prefix<2>;
def XS : Prefix<3>;
def XD : Prefix<4>;

// Class specifying the opcode map.
class Map<bits<5> val> {
@@ -182,15 +183,18 @@ class T8 { Map OpMap = T8; }
class TA { Map OpMap = TA; }
class A6 { Map OpMap = A6; }
class A7 { Map OpMap = A7; }
class XOP8 { Map OpMap = XOP8; }
class XOP9 { Map OpMap = XOP9; }
class XOPA { Map OpMap = XOPA; }
class XOP8 { Map OpMap = XOP8; Prefix OpPrefix = PS; }
class XOP9 { Map OpMap = XOP9; Prefix OpPrefix = PS; }
class XOPA { Map OpMap = XOPA; Prefix OpPrefix = PS; }
class PS : TB { Prefix OpPrefix = PS; }
class PD : TB { Prefix OpPrefix = PD; }
class XD : TB { Prefix OpPrefix = XD; }
class XS : TB { Prefix OpPrefix = XS; }
class T8PS : T8 { Prefix OpPrefix = PS; }
class T8PD : T8 { Prefix OpPrefix = PD; }
class T8XD : T8 { Prefix OpPrefix = XD; }
class T8XS : T8 { Prefix OpPrefix = XS; }
class TAPS : TA { Prefix OpPrefix = PS; }
class TAPD : TA { Prefix OpPrefix = PD; }
class TAXD : TA { Prefix OpPrefix = XD; }
class VEX { Encoding OpEnc = EncVEX; }
@@ -282,31 +286,31 @@ class X86Inst<bits<8> opcod, Format f, ImmType i, dag outs, dag ins,
let TSFlags{5-0} = FormBits;
let TSFlags{7-6} = OpSize.Value;
let TSFlags{8} = hasAdSizePrefix;
let TSFlags{10-9} = OpPrefix.Value;
let TSFlags{15-11} = OpMap.Value;
let TSFlags{16} = hasREX_WPrefix;
let TSFlags{20-17} = ImmT.Value;
let TSFlags{23-21} = FPForm.Value;
let TSFlags{24} = hasLockPrefix;
let TSFlags{25} = hasREPPrefix;
let TSFlags{27-26} = ExeDomain.Value;
let TSFlags{29-28} = OpEnc.Value;
let TSFlags{37-30} = Opcode;
let TSFlags{38} = hasVEX_WPrefix;
let TSFlags{39} = hasVEX_4V;
let TSFlags{40} = hasVEX_4VOp3;
let TSFlags{41} = hasVEX_i8ImmReg;
let TSFlags{42} = hasVEX_L;
let TSFlags{43} = ignoresVEX_L;
let TSFlags{44} = hasEVEX_K;
let TSFlags{45} = hasEVEX_Z;
let TSFlags{46} = hasEVEX_L2;
let TSFlags{47} = hasEVEX_B;
let TSFlags{49-48} = EVEX_CD8E;
let TSFlags{52-50} = EVEX_CD8V;
let TSFlags{53} = has3DNow0F0FOpcode;
let TSFlags{54} = hasMemOp4Prefix;
let TSFlags{55} = hasEVEX_RC;
let TSFlags{11-9} = OpPrefix.Value;
let TSFlags{16-12} = OpMap.Value;
let TSFlags{17} = hasREX_WPrefix;
let TSFlags{21-18} = ImmT.Value;
let TSFlags{24-22} = FPForm.Value;
let TSFlags{25} = hasLockPrefix;
let TSFlags{26} = hasREPPrefix;
let TSFlags{28-27} = ExeDomain.Value;
let TSFlags{30-29} = OpEnc.Value;
let TSFlags{38-31} = Opcode;
let TSFlags{39} = hasVEX_WPrefix;
let TSFlags{40} = hasVEX_4V;
let TSFlags{41} = hasVEX_4VOp3;
let TSFlags{42} = hasVEX_i8ImmReg;
let TSFlags{43} = hasVEX_L;
let TSFlags{44} = ignoresVEX_L;
let TSFlags{45} = hasEVEX_K;
let TSFlags{46} = hasEVEX_Z;
let TSFlags{47} = hasEVEX_L2;
let TSFlags{48} = hasEVEX_B;
let TSFlags{50-49} = EVEX_CD8E;
let TSFlags{53-51} = EVEX_CD8V;
let TSFlags{54} = has3DNow0F0FOpcode;
let TSFlags{55} = hasMemOp4Prefix;
let TSFlags{56} = hasEVEX_RC;
}

class PseudoI<dag oops, dag iops, list<dag> pattern>
@@ -474,10 +478,10 @@ class PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// SSE1 Instruction Templates:
//
// SSI - SSE1 instructions with XS prefix.
// PSI - SSE1 instructions with TB prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and TB prefix.
// PSI - SSE1 instructions with PS prefix.
// PSIi8 - SSE1 instructions with ImmT == Imm8 and PS prefix.
// VSSI - SSE1 instructions with XS prefix in AVX form.
// VPSI - SSE1 instructions with TB prefix in AVX form, packed single.
// VPSI - SSE1 instructions with PS prefix in AVX form, packed single.

class SSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -487,11 +491,11 @@ class SSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
: Ii8<o, F, outs, ins, asm, pattern, itin>, XS, Requires<[UseSSE1]>;
class PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[UseSSE1]>;
class PSIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
: Ii8<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[UseSSE1]>;
class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
@@ -499,7 +503,7 @@ class VSSI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX]>;
class VPSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, TB,
: I<o, F, outs, ins, !strconcat("v", asm), pattern, itin, SSEPackedSingle>, PS,
Requires<[HasAVX]>;

// SSE2 Instruction Templates:
@@ -683,7 +687,7 @@ class AVX2AIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
// AVX5128I - AVX-512 instructions with T8PD prefix.
// AVX512AIi8 - AVX-512 instructions with TAPD prefix and ImmT = Imm8.
// AVX512PDI - AVX-512 instructions with PD, double packed.
// AVX512PSI - AVX-512 instructions with TB, single packed.
// AVX512PSI - AVX-512 instructions with PS, single packed.
// AVX512XS8I - AVX-512 instructions with T8 and XS prefixes.
// AVX512XSI - AVX-512 instructions with XS prefix, generic domain.
// AVX512BI - AVX-512 instructions with PD, int packed domain.
@@ -727,7 +731,7 @@ class AVX512PDI<bits<8> o, Format F, dag outs, dag ins, string asm,
Requires<[HasAVX512]>;
class AVX512PSI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, TB,
: I<o, F, outs, ins, asm, pattern, itin, SSEPackedSingle>, PS,
Requires<[HasAVX512]>;
class AVX512PIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, Domain d, InstrItinClass itin = NoItinerary>
@@ -854,28 +858,28 @@ class VRS2I<bits<8> o, Format F, dag outs, dag ins, string asm,
// MMXI32 - MMX instructions with TB prefix valid only in 32 bit mode.
// MMXI64 - MMX instructions with TB prefix valid only in 64 bit mode.
// MMX2I - MMX / SSE2 instructions with PD prefix.
// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
// MMXIi8 - MMX instructions with ImmT == Imm8 and TB prefix.
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
// MMXIi8 - MMX instructions with ImmT == Imm8 and PS prefix.
// MMXID - MMX instructions with XD prefix.
// MMXIS - MMX instructions with XS prefix.
class MMXI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
class MMXI32<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,Not64BitMode]>;
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,Not64BitMode]>;
class MMXI64<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX,In64BitMode]>;
: I<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX,In64BitMode]>;
class MMXRI<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, TB, REX_W, Requires<[HasMMX]>;
: I<o, F, outs, ins, asm, pattern, itin>, PS, REX_W, Requires<[HasMMX]>;
class MMX2I<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: I<o, F, outs, ins, asm, pattern, itin>, PD, Requires<[HasMMX]>;
class MMXIi8<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, TB, Requires<[HasMMX]>;
: Ii8<o, F, outs, ins, asm, pattern, itin>, PS, Requires<[HasMMX]>;
class MMXID<bits<8> o, Format F, dag outs, dag ins, string asm,
list<dag> pattern, InstrItinClass itin = NoItinerary>
: Ii8<o, F, outs, ins, asm, pattern, itin>, XD, Requires<[HasMMX]>;
@@ -1080,56 +1080,52 @@ let Defs = [EFLAGS] in {
def BSF16rr : I<0xBC, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf GR16:$src))],
IIC_BIT_SCAN_REG>, TB, OpSize16, Sched<[WriteShift]>;
IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
def BSF16rm : I<0xBC, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsf{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsf (loadi16 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB, OpSize16, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
def BSF32rr : I<0xBC, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf GR32:$src))],
IIC_BIT_SCAN_REG>, TB, OpSize32,
Sched<[WriteShift]>;
IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
def BSF32rm : I<0xBC, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsf{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsf (loadi32 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB, OpSize32, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
def BSF64rr : RI<0xBC, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf GR64:$src))],
IIC_BIT_SCAN_REG>, TB, Sched<[WriteShift]>;
IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
def BSF64rm : RI<0xBC, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsf{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsf (loadi64 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;

def BSR16rr : I<0xBD, MRMSrcReg, (outs GR16:$dst), (ins GR16:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr GR16:$src))],
IIC_BIT_SCAN_REG>,
TB, OpSize16, Sched<[WriteShift]>;
IIC_BIT_SCAN_REG>, PS, OpSize16, Sched<[WriteShift]>;
def BSR16rm : I<0xBD, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"bsr{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, EFLAGS, (X86bsr (loadi16 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB,
OpSize16, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, OpSize16, Sched<[WriteShiftLd]>;
def BSR32rr : I<0xBD, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr GR32:$src))],
IIC_BIT_SCAN_REG>, TB, OpSize32,
Sched<[WriteShift]>;
IIC_BIT_SCAN_REG>, PS, OpSize32, Sched<[WriteShift]>;
def BSR32rm : I<0xBD, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"bsr{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, EFLAGS, (X86bsr (loadi32 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB, OpSize32, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, OpSize32, Sched<[WriteShiftLd]>;
def BSR64rr : RI<0xBD, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))], IIC_BIT_SCAN_REG>, TB,
Sched<[WriteShift]>;
[(set GR64:$dst, EFLAGS, (X86bsr GR64:$src))],
IIC_BIT_SCAN_REG>, PS, Sched<[WriteShift]>;
def BSR64rm : RI<0xBD, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"bsr{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, EFLAGS, (X86bsr (loadi64 addr:$src)))],
IIC_BIT_SCAN_MEM>, TB, Sched<[WriteShiftLd]>;
IIC_BIT_SCAN_MEM>, PS, Sched<[WriteShiftLd]>;
} // Defs = [EFLAGS]

let SchedRW = [WriteMicrocoded] in {
@@ -1866,29 +1862,29 @@ let Predicates = [HasMOVBE] in {
def MOVBE16rm : I<0xF0, MRMSrcMem, (outs GR16:$dst), (ins i16mem:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(set GR16:$dst, (bswap (loadi16 addr:$src)))], IIC_MOVBE>,
OpSize16, T8;
OpSize16, T8PS;
def MOVBE32rm : I<0xF0, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(set GR32:$dst, (bswap (loadi32 addr:$src)))], IIC_MOVBE>,
OpSize32, T8;
OpSize32, T8PS;
def MOVBE64rm : RI<0xF0, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(set GR64:$dst, (bswap (loadi64 addr:$src)))], IIC_MOVBE>,
T8;
T8PS;
}
let SchedRW = [WriteStore] in {
def MOVBE16mr : I<0xF1, MRMDestMem, (outs), (ins i16mem:$dst, GR16:$src),
"movbe{w}\t{$src, $dst|$dst, $src}",
[(store (bswap GR16:$src), addr:$dst)], IIC_MOVBE>,
OpSize16, T8;
OpSize16, T8PS;
def MOVBE32mr : I<0xF1, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movbe{l}\t{$src, $dst|$dst, $src}",
[(store (bswap GR32:$src), addr:$dst)], IIC_MOVBE>,
OpSize32, T8;
OpSize32, T8PS;
def MOVBE64mr : RI<0xF1, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movbe{q}\t{$src, $dst|$dst, $src}",
[(store (bswap GR64:$src), addr:$dst)], IIC_MOVBE>,
T8;
T8PS;
}
}

@@ -1991,11 +1987,11 @@ multiclass bmi_bls<string mnemonic, Format RegMRM, Format MemMRM,
let hasSideEffects = 0 in {
def rr : I<0xF3, RegMRM, (outs RC:$dst), (ins RC:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
[]>, T8, VEX_4V;
[]>, T8PS, VEX_4V;
let mayLoad = 1 in
def rm : I<0xF3, MemMRM, (outs RC:$dst), (ins x86memop:$src),
!strconcat(mnemonic, "\t{$src, $dst|$dst, $src}"),
[]>, T8, VEX_4V;
[]>, T8PS, VEX_4V;
}
}

@@ -2036,11 +2032,11 @@ multiclass bmi_bextr_bzhi<bits<8> opc, string mnemonic, RegisterClass RC,
def rr : I<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int RC:$src1, RC:$src2)), (implicit EFLAGS)]>,
T8, VEX_4VOp3;
T8PS, VEX_4VOp3;
def rm : I<opc, MRMSrcMem, (outs RC:$dst), (ins x86memop:$src1, RC:$src2),
!strconcat(mnemonic, "\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[(set RC:$dst, (Int (ld_frag addr:$src1), RC:$src2)),
(implicit EFLAGS)]>, T8, VEX_4VOp3;
(implicit EFLAGS)]>, T8PS, VEX_4VOp3;
}

let Predicates = [HasBMI], Defs = [EFLAGS] in {
@@ -524,13 +524,13 @@ def MMX_PSHUFWmi : MMXIi8<0x70, MRMSrcMem,
// -- Conversion Instructions
defm MMX_CVTPS2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtps2pi,
f64mem, load, "cvtps2pi\t{$src, $dst|$dst, $src}",
MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
MMX_CVT_PS_ITINS, SSEPackedSingle>, PS;
defm MMX_CVTPD2PI : sse12_cvt_pint<0x2D, VR128, VR64, int_x86_sse_cvtpd2pi,
f128mem, memop, "cvtpd2pi\t{$src, $dst|$dst, $src}",
MMX_CVT_PD_ITINS, SSEPackedDouble>, PD;
defm MMX_CVTTPS2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttps2pi,
f64mem, load, "cvttps2pi\t{$src, $dst|$dst, $src}",
MMX_CVT_PS_ITINS, SSEPackedSingle>, TB;
MMX_CVT_PS_ITINS, SSEPackedSingle>, PS;
defm MMX_CVTTPD2PI : sse12_cvt_pint<0x2C, VR128, VR64, int_x86_sse_cvttpd2pi,
f128mem, memop, "cvttpd2pi\t{$src, $dst|$dst, $src}",
MMX_CVT_PD_ITINS, SSEPackedDouble>, PD;
@@ -541,7 +541,7 @@ let Constraints = "$src1 = $dst" in {
defm MMX_CVTPI2PS : sse12_cvt_pint_3addr<0x2A, VR64, VR128,
int_x86_sse_cvtpi2ps,
i64mem, load, "cvtpi2ps\t{$src2, $dst|$dst, $src2}",
SSEPackedSingle>, TB;
SSEPackedSingle>, PS;
}

// Extract / Insert
@@ -812,38 +812,38 @@ let canFoldAsLoad = 1, isReMaterializable = IsReMaterializable in

defm VMOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
TB, VEX;
PS, VEX;
defm VMOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
PD, VEX;
defm VMOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
TB, VEX;
PS, VEX;
defm VMOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
PD, VEX;

defm VMOVAPSY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv8f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
TB, VEX, VEX_L;
PS, VEX, VEX_L;
defm VMOVAPDY : sse12_mov_packed<0x28, VR256, f256mem, alignedloadv4f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
PD, VEX, VEX_L;
defm VMOVUPSY : sse12_mov_packed<0x10, VR256, f256mem, loadv8f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
TB, VEX, VEX_L;
PS, VEX, VEX_L;
defm VMOVUPDY : sse12_mov_packed<0x10, VR256, f256mem, loadv4f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
PD, VEX, VEX_L;
defm MOVAPS : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv4f32,
"movaps", SSEPackedSingle, SSE_MOVA_ITINS>,
TB;
PS;
defm MOVAPD : sse12_mov_packed<0x28, VR128, f128mem, alignedloadv2f64,
"movapd", SSEPackedDouble, SSE_MOVA_ITINS>,
PD;
defm MOVUPS : sse12_mov_packed<0x10, VR128, f128mem, loadv4f32,
"movups", SSEPackedSingle, SSE_MOVU_ITINS>,
TB;
PS;
defm MOVUPD : sse12_mov_packed<0x10, VR128, f128mem, loadv2f64,
"movupd", SSEPackedDouble, SSE_MOVU_ITINS, 0>,
PD;
@@ -1142,7 +1142,7 @@ multiclass sse12_mov_hilo_packed_base<bits<8>opc, SDNode psnode, SDNode pdnode,
[(set VR128:$dst,
(psnode VR128:$src1,
(bc_v4f32 (v2f64 (scalar_to_vector (loadf64 addr:$src2))))))],
itin, SSEPackedSingle>, TB,
itin, SSEPackedSingle>, PS,
Sched<[WriteShuffleLd, ReadAfterLd]>;

def PDrm : PI<opc, MRMSrcMem,
@@ -1721,16 +1721,16 @@ defm CVTSS2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse_cvtss2si64,
defm VCVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
TB, VEX, Requires<[HasAVX]>;
PS, VEX, Requires<[HasAVX]>;
defm VCVTDQ2PSY : sse12_cvt_p<0x5B, VR256, VR256, i256mem,
"vcvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
TB, VEX, VEX_L, Requires<[HasAVX]>;
PS, VEX, VEX_L, Requires<[HasAVX]>;

defm CVTDQ2PS : sse12_cvt_p<0x5B, VR128, VR128, i128mem,
"cvtdq2ps\t{$src, $dst|$dst, $src}",
SSEPackedSingle, SSE_CVT_PS>,
TB, Requires<[UseSSE2]>;
PS, Requires<[UseSSE2]>;

let Predicates = [UseAVX] in {
def : InstAlias<"vcvtss2si{l}\t{$src, $dst|$dst, $src}",
@@ -2128,32 +2128,32 @@ let Predicates = [HasAVX] in {
def VCVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, VEX, Sched<[WriteCvtF2F]>;
IIC_SSE_CVT_PD_RR>, PS, VEX, Sched<[WriteCvtF2F]>;
def VCVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, TB, VEX, Sched<[WriteCvtF2FLd]>;
IIC_SSE_CVT_PD_RM>, PS, VEX, Sched<[WriteCvtF2FLd]>;
def VCVTPS2PDYrr : I<0x5A, MRMSrcReg, (outs VR256:$dst), (ins VR128:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, VEX, VEX_L, Sched<[WriteCvtF2F]>;
IIC_SSE_CVT_PD_RR>, PS, VEX, VEX_L, Sched<[WriteCvtF2F]>;
def VCVTPS2PDYrm : I<0x5A, MRMSrcMem, (outs VR256:$dst), (ins f128mem:$src),
"vcvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR256:$dst,
(int_x86_avx_cvt_ps2_pd_256 (loadv4f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, TB, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
IIC_SSE_CVT_PD_RM>, PS, VEX, VEX_L, Sched<[WriteCvtF2FLd]>;
}

let Predicates = [UseSSE2] in {
def CVTPS2PDrr : I<0x5A, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (int_x86_sse2_cvtps2pd VR128:$src))],
IIC_SSE_CVT_PD_RR>, TB, Sched<[WriteCvtF2F]>;
IIC_SSE_CVT_PD_RR>, PS, Sched<[WriteCvtF2F]>;
def CVTPS2PDrm : I<0x5A, MRMSrcMem, (outs VR128:$dst), (ins f64mem:$src),
"cvtps2pd\t{$src, $dst|$dst, $src}",
[(set VR128:$dst, (v2f64 (extloadv2f32 addr:$src)))],
IIC_SSE_CVT_PD_RM>, TB, Sched<[WriteCvtF2FLd]>;
IIC_SSE_CVT_PD_RM>, PS, Sched<[WriteCvtF2FLd]>;
}

// Convert Packed DW Integers to Packed Double FP
@@ -2391,47 +2391,47 @@ multiclass sse12_ord_cmp<bits<8> opc, RegisterClass RC, SDNode OpNode,

let Defs = [EFLAGS] in {
defm VUCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
"ucomiss">, TB, VEX, VEX_LIG;
"ucomiss">, PS, VEX, VEX_LIG;
defm VUCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
"ucomisd">, PD, VEX, VEX_LIG;
let Pattern = []<dag> in {
defm VCOMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
"comiss">, TB, VEX, VEX_LIG;
"comiss">, PS, VEX, VEX_LIG;
defm VCOMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
"comisd">, PD, VEX, VEX_LIG;
}

let isCodeGenOnly = 1 in {
defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, VEX;
load, "ucomiss">, PS, VEX;
defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, PD, VEX;

defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
load, "comiss">, TB, VEX;
load, "comiss">, PS, VEX;
defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
load, "comisd">, PD, VEX;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
"ucomiss">, TB;
"ucomiss">, PS;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
"ucomisd">, PD;

let Pattern = []<dag> in {
defm COMISS : sse12_ord_cmp<0x2F, VR128, undef, v4f32, f128mem, load,
"comiss">, TB;
"comiss">, PS;
defm COMISD : sse12_ord_cmp<0x2F, VR128, undef, v2f64, f128mem, load,
"comisd">, PD;
}

let isCodeGenOnly = 1 in {
defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB;
load, "ucomiss">, PS;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, PD;

defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
"comiss">, TB;
"comiss">, PS;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
"comisd">, PD;
}
@@ -2468,7 +2468,7 @@ multiclass sse12_cmp_packed<RegisterClass RC, X86MemOperand x86memop,
defm VCMPPS : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedSingle>, TB, VEX_4V;
SSEPackedSingle>, PS, VEX_4V;
defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
@@ -2476,7 +2476,7 @@ defm VCMPPD : sse12_cmp_packed<VR128, f128mem, AVXCC, int_x86_sse2_cmp_pd,
defm VCMPPSY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_ps_256,
"cmp${cc}ps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmpps\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
SSEPackedSingle>, TB, VEX_4V, VEX_L;
SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VCMPPDY : sse12_cmp_packed<VR256, f256mem, AVXCC, int_x86_avx_cmp_pd_256,
"cmp${cc}pd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
"cmppd\t{$cc, $src2, $src1, $dst|$dst, $src1, $src2, $cc}",
@@ -2485,7 +2485,7 @@ let Constraints = "$src1 = $dst" in {
defm CMPPS : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse_cmp_ps,
"cmp${cc}ps\t{$src2, $dst|$dst, $src2}",
"cmpps\t{$cc, $src2, $dst|$dst, $src2, $cc}",
SSEPackedSingle, SSE_ALU_F32P>, TB;
SSEPackedSingle, SSE_ALU_F32P>, PS;
defm CMPPD : sse12_cmp_packed<VR128, f128mem, SSECC, int_x86_sse2_cmp_pd,
"cmp${cc}pd\t{$src2, $dst|$dst, $src2}",
"cmppd\t{$cc, $src2, $dst|$dst, $src2, $cc}",
@@ -2549,10 +2549,10 @@ multiclass sse12_shuffle<RegisterClass RC, X86MemOperand x86memop,

defm VSHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv4f32, SSEPackedSingle>, TB, VEX_4V;
loadv4f32, SSEPackedSingle>, PS, VEX_4V;
defm VSHUFPSY : sse12_shuffle<VR256, f256mem, v8f32,
"shufps\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv8f32, SSEPackedSingle>, TB, VEX_4V, VEX_L;
loadv8f32, SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VSHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}",
loadv2f64, SSEPackedDouble>, PD, VEX_4V;
@@ -2563,7 +2563,7 @@ defm VSHUFPDY : sse12_shuffle<VR256, f256mem, v4f64,
let Constraints = "$src1 = $dst" in {
defm SHUFPS : sse12_shuffle<VR128, f128mem, v4f32,
"shufps\t{$src3, $src2, $dst|$dst, $src2, $src3}",
memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>, TB;
memopv4f32, SSEPackedSingle, 1 /* cvt to pshufd */>, PS;
defm SHUFPD : sse12_shuffle<VR128, f128mem, v2f64,
"shufpd\t{$src3, $src2, $dst|$dst, $src2, $src3}",
memopv2f64, SSEPackedDouble, 1 /* cvt to pshufd */>, PD;
@@ -2638,26 +2638,26 @@ multiclass sse12_unpack_interleave<bits<8> opc, SDNode OpNode, ValueType vt,

defm VUNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, loadv4f32,
VR128, f128mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V;
SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, loadv2f64,
VR128, f128mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, VEX_4V;
defm VUNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, loadv4f32,
VR128, f128mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V;
SSEPackedSingle>, PS, VEX_4V;
defm VUNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, loadv2f64,
VR128, f128mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, VEX_4V;

defm VUNPCKHPSY: sse12_unpack_interleave<0x15, X86Unpckh, v8f32, loadv8f32,
VR256, f256mem, "unpckhps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V, VEX_L;
SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKHPDY: sse12_unpack_interleave<0x15, X86Unpckh, v4f64, loadv4f64,
VR256, f256mem, "unpckhpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, VEX_4V, VEX_L;
defm VUNPCKLPSY: sse12_unpack_interleave<0x14, X86Unpckl, v8f32, loadv8f32,
VR256, f256mem, "unpcklps\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedSingle>, TB, VEX_4V, VEX_L;
SSEPackedSingle>, PS, VEX_4V, VEX_L;
defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
VR256, f256mem, "unpcklpd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
SSEPackedDouble>, PD, VEX_4V, VEX_L;
@@ -2665,13 +2665,13 @@ defm VUNPCKLPDY: sse12_unpack_interleave<0x14, X86Unpckl, v4f64, loadv4f64,
let Constraints = "$src1 = $dst" in {
defm UNPCKHPS: sse12_unpack_interleave<0x15, X86Unpckh, v4f32, memopv4f32,
VR128, f128mem, "unpckhps\t{$src2, $dst|$dst, $src2}",
SSEPackedSingle>, TB;
SSEPackedSingle>, PS;
defm UNPCKHPD: sse12_unpack_interleave<0x15, X86Unpckh, v2f64, memopv2f64,
VR128, f128mem, "unpckhpd\t{$src2, $dst|$dst, $src2}",
SSEPackedDouble>, PD;
defm UNPCKLPS: sse12_unpack_interleave<0x14, X86Unpckl, v4f32, memopv4f32,
VR128, f128mem, "unpcklps\t{$src2, $dst|$dst, $src2}",
SSEPackedSingle>, TB;
SSEPackedSingle>, PS;
defm UNPCKLPD: sse12_unpack_interleave<0x14, X86Unpckl, v2f64, memopv2f64,
VR128, f128mem, "unpcklpd\t{$src2, $dst|$dst, $src2}",
SSEPackedDouble>, PD;
@@ -2730,11 +2730,11 @@ multiclass sse12_extr_sign_mask<RegisterClass RC, Intrinsic Int, string asm,

let Predicates = [HasAVX] in {
defm VMOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps,
"movmskps", SSEPackedSingle>, TB, VEX;
"movmskps", SSEPackedSingle>, PS, VEX;
defm VMOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd,
"movmskpd", SSEPackedDouble>, PD, VEX;
defm VMOVMSKPSY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_ps_256,
"movmskps", SSEPackedSingle>, TB,
"movmskps", SSEPackedSingle>, PS,
VEX, VEX_L;
defm VMOVMSKPDY : sse12_extr_sign_mask<VR256, int_x86_avx_movmsk_pd_256,
"movmskpd", SSEPackedDouble>, PD,
@@ -2753,7 +2753,7 @@ let Predicates = [HasAVX] in {
}

defm MOVMSKPS : sse12_extr_sign_mask<VR128, int_x86_sse_movmsk_ps, "movmskps",
SSEPackedSingle>, TB;
SSEPackedSingle>, PS;
defm MOVMSKPD : sse12_extr_sign_mask<VR128, int_x86_sse2_movmsk_pd, "movmskpd",
SSEPackedDouble>, PD;

@@ -2838,7 +2838,7 @@ multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
SDNode OpNode, OpndItins itins> {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
FR32, f32, f128mem, memopfsf32, SSEPackedSingle, itins, 0>,
TB, VEX_4V;
PS, VEX_4V;

defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
FR64, f64, f128mem, memopfsf64, SSEPackedDouble, itins, 0>,
@@ -2847,7 +2847,7 @@ multiclass sse12_fp_alias_pack_logical<bits<8> opc, string OpcodeStr,
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, FR32,
f32, f128mem, memopfsf32, SSEPackedSingle, itins>,
TB;
PS;

defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, FR64,
f64, f128mem, memopfsf64, SSEPackedDouble, itins>,
@@ -2877,7 +2877,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "ps"), f256mem,
[(set VR256:$dst, (v4i64 (OpNode VR256:$src1, VR256:$src2)))],
[(set VR256:$dst, (OpNode (bc_v4i64 (v8f32 VR256:$src1)),
(loadv4i64 addr:$src2)))], 0>, TB, VEX_4V, VEX_L;
(loadv4i64 addr:$src2)))], 0>, PS, VEX_4V, VEX_L;

defm V#NAME#PDY : sse12_fp_packed_logical_rm<opc, VR256, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f256mem,
@@ -2894,7 +2894,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
defm V#NAME#PS : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedSingle,
!strconcat(OpcodeStr, "ps"), f128mem, [],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
(loadv2i64 addr:$src2)))], 0>, TB, VEX_4V;
(loadv2i64 addr:$src2)))], 0>, PS, VEX_4V;

defm V#NAME#PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
@@ -2909,7 +2909,7 @@ multiclass sse12_fp_packed_logical<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr, "ps"), f128mem,
[(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
[(set VR128:$dst, (OpNode (bc_v2i64 (v4f32 VR128:$src1)),
(memopv2i64 addr:$src2)))]>, TB;
(memopv2i64 addr:$src2)))]>, PS;

defm PD : sse12_fp_packed_logical_rm<opc, VR128, SSEPackedDouble,
!strconcat(OpcodeStr, "pd"), f128mem,
@@ -2947,14 +2947,14 @@ multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
SDNode OpNode, SizeItins itins> {
defm V#NAME#PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode,
VR128, v4f32, f128mem, loadv4f32,
SSEPackedSingle, itins.s, 0>, TB, VEX_4V;
SSEPackedSingle, itins.s, 0>, PS, VEX_4V;
defm V#NAME#PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode,
VR128, v2f64, f128mem, loadv2f64,
SSEPackedDouble, itins.d, 0>, PD, VEX_4V;

defm V#NAME#PSY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"),
OpNode, VR256, v8f32, f256mem, loadv8f32,
SSEPackedSingle, itins.s, 0>, TB, VEX_4V, VEX_L;
SSEPackedSingle, itins.s, 0>, PS, VEX_4V, VEX_L;
defm V#NAME#PDY : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"),
OpNode, VR256, v4f64, f256mem, loadv4f64,
SSEPackedDouble, itins.d, 0>, PD, VEX_4V, VEX_L;
@@ -2962,7 +2962,7 @@ multiclass basic_sse12_fp_binop_p<bits<8> opc, string OpcodeStr,
let Constraints = "$src1 = $dst" in {
defm PS : sse12_fp_packed<opc, !strconcat(OpcodeStr, "ps"), OpNode, VR128,
v4f32, f128mem, memopv4f32, SSEPackedSingle,
itins.s>, TB;
itins.s>, PS;
defm PD : sse12_fp_packed<opc, !strconcat(OpcodeStr, "pd"), OpNode, VR128,
v2f64, f128mem, memopv2f64, SSEPackedDouble,
itins.d>, PD;
@@ -3687,12 +3687,12 @@ def MOVNTImr : I<0xC3, MRMDestMem, (outs), (ins i32mem:$dst, GR32:$src),
"movnti{l}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i32 GR32:$src), addr:$dst)],
IIC_SSE_MOVNT>,
TB, Requires<[HasSSE2]>;
PS, Requires<[HasSSE2]>;
def MOVNTI_64mr : RI<0xC3, MRMDestMem, (outs), (ins i64mem:$dst, GR64:$src),
"movnti{q}\t{$src, $dst|$dst, $src}",
[(nontemporalstore (i64 GR64:$src), addr:$dst)],
IIC_SSE_MOVNT>,
TB, Requires<[HasSSE2]>;
PS, Requires<[HasSSE2]>;
} // SchedRW = [WriteStore]

} // AddedComplexity
@@ -8059,11 +8059,11 @@ let Defs = [YMM0, YMM1, YMM2, YMM3, YMM4, YMM5, YMM6, YMM7,
YMM8, YMM9, YMM10, YMM11, YMM12, YMM13, YMM14, YMM15] in {
// Zero All YMM registers
def VZEROALL : I<0x77, RawFrm, (outs), (ins), "vzeroall",
[(int_x86_avx_vzeroall)]>, TB, VEX, VEX_L, Requires<[HasAVX]>;
[(int_x86_avx_vzeroall)]>, PS, VEX, VEX_L, Requires<[HasAVX]>;

// Zero Upper bits of YMM registers
def VZEROUPPER : I<0x77, RawFrm, (outs), (ins), "vzeroupper",
[(int_x86_avx_vzeroupper)]>, TB, VEX, Requires<[HasAVX]>;
[(int_x86_avx_vzeroupper)]>, PS, VEX, Requires<[HasAVX]>;
}

//===----------------------------------------------------------------------===//
@@ -40,25 +40,25 @@ def VMLAUNCH : I<0x01, MRM_C2, (outs), (ins), "vmlaunch", []>, TB;
// 0F 01 C3
def VMRESUME : I<0x01, MRM_C3, (outs), (ins), "vmresume", []>, TB;
def VMPTRLDm : I<0xC7, MRM6m, (outs), (ins i64mem:$vmcs),
"vmptrld\t$vmcs", []>, TB;
"vmptrld\t$vmcs", []>, PS;
def VMPTRSTm : I<0xC7, MRM7m, (outs i64mem:$vmcs), (ins),
"vmptrst\t$vmcs", []>, TB;
def VMREAD64rm : I<0x78, MRMDestMem, (outs i64mem:$dst), (ins GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD64rr : I<0x78, MRMDestReg, (outs GR64:$dst), (ins GR64:$src),
"vmread{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
"vmread{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMREAD32rm : I<0x78, MRMDestMem, (outs i32mem:$dst), (ins GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMREAD32rr : I<0x78, MRMDestReg, (outs GR32:$dst), (ins GR32:$src),
"vmread{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
"vmread{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMWRITE64rm : I<0x79, MRMSrcMem, (outs GR64:$dst), (ins i64mem:$src),
"vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
"vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMWRITE64rr : I<0x79, MRMSrcReg, (outs GR64:$dst), (ins GR64:$src),
"vmwrite{q}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[In64BitMode]>;
"vmwrite{q}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[In64BitMode]>;
def VMWRITE32rm : I<0x79, MRMSrcMem, (outs GR32:$dst), (ins i32mem:$src),
"vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
"vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
def VMWRITE32rr : I<0x79, MRMSrcReg, (outs GR32:$dst), (ins GR32:$src),
"vmwrite{l}\t{$src, $dst|$dst, $src}", []>, TB, Requires<[Not64BitMode]>;
"vmwrite{l}\t{$src, $dst|$dst, $src}", []>, PS, Requires<[Not64BitMode]>;
// 0F 01 C4
def VMXOFF : I<0x01, MRM_C4, (outs), (ins), "vmxoff", []>, TB;
def VMXON : I<0xC7, MRM6m, (outs), (ins i64mem:$vmxon),
@@ -84,7 +84,7 @@ namespace X86Local {
};

enum {
PD = 1, XS = 2, XD = 3
PS = 1, PD = 2, XS = 3, XD = 4
};

enum {
@@ -259,8 +259,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX_L_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_W_XD);
else
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (HasVEX_LPrefix) {
// VEX_L
if (OpPrefix == X86Local::PD)
@@ -269,8 +273,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX_L_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L_XD);
else
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
else if (HasEVEX_L2Prefix && HasVEX_WPrefix) {
// EVEX_L2 & VEX_W
@@ -280,8 +288,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX_L2_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_L2_W_XD);
else
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (HasEVEX_L2Prefix) {
// EVEX_L2
if (OpPrefix == X86Local::PD)
@@ -290,8 +302,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX_L2_XD);
else if (OpPrefix == X86Local::XS)
insnContext = EVEX_KB(IC_EVEX_L2_XS);
else
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_L2);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
else if (HasVEX_WPrefix) {
// VEX_W
@@ -301,8 +317,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = EVEX_KB(IC_EVEX_W_XS);
else if (OpPrefix == X86Local::XD)
insnContext = EVEX_KB(IC_EVEX_W_XD);
else
else if (OpPrefix == X86Local::PS)
insnContext = EVEX_KB(IC_EVEX_W);
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
}
// No L, no W
else if (OpPrefix == X86Local::PD)
@@ -322,8 +342,12 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = IC_VEX_L_W_XS;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_L_W_XD;
else
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX_L_W;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (OpPrefix == X86Local::PD && HasVEX_LPrefix)
insnContext = IC_VEX_L_OPSIZE;
else if (OpPrefix == X86Local::PD && HasVEX_WPrefix)
@@ -338,16 +362,20 @@ InstructionContext RecognizableInstr::insnContext() const {
insnContext = IC_VEX_W_XS;
else if (HasVEX_WPrefix && OpPrefix == X86Local::XD)
insnContext = IC_VEX_W_XD;
else if (HasVEX_WPrefix)
else if (HasVEX_WPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_W;
else if (HasVEX_LPrefix)
else if (HasVEX_LPrefix && OpPrefix == X86Local::PS)
insnContext = IC_VEX_L;
else if (OpPrefix == X86Local::XD)
insnContext = IC_VEX_XD;
else if (OpPrefix == X86Local::XS)
insnContext = IC_VEX_XS;
else
else if (OpPrefix == X86Local::PS)
insnContext = IC_VEX;
else {
errs() << "Instruction does not use a prefix: " << Name << "\n";
llvm_unreachable("Invalid prefix");
}
} else if (Is64Bit || HasREX_WPrefix) {
if (HasREX_WPrefix && (OpSize == X86Local::OpSize16 || OpPrefix == X86Local::PD))
insnContext = IC_64BIT_REXW_OPSIZE;