Mirror of https://github.com/RPCS3/llvm.git, synced 2024-12-27 22:55:15 +00:00
Mark all x86 Int_ and _Int patterns as isCodeGenOnly so the disassembler table builder doesn't need to string match them to exclude them.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@198323 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in: parent 54ed186e1b, commit e625100c6a
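The change itself is mechanical: each affected Int_*/_Int pattern definition is wrapped in or tagged with let isCodeGenOnly = 1, so the disassembler table emitter can key off that bit instead of string-matching instruction names. A minimal illustrative sketch of the idiom, using a hypothetical FOOrr_Int instruction rather than code taken from this commit:

    // Hypothetical pattern, for illustration only: the isCodeGenOnly bit marks
    // it as a codegen-only alias, so table emitters can skip it without
    // string-matching "_Int" in its name.
    let isCodeGenOnly = 1 in
    def FOOrr_Int : I<0x00, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
                      "foo\t{$src, $dst|$dst, $src}", []>;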
@@ -618,6 +618,7 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, Intrinsic Int,
"\t{$src2, $src1, ${dst} {${mask}}|${dst} {${mask}}, $src1, $src2}"),
[(set RC:$dst, (OpNode KRC:$mask, (vt RC:$src2),
(vt RC:$src1)))]>, EVEX_4V, EVEX_K;
let isCodeGenOnly = 1 in
def rr_Int : AVX5128I<opc, MRMSrcReg, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, RC:$src2),
!strconcat(OpcodeStr,
@@ -633,6 +634,7 @@ multiclass avx512_blendmask<bits<8> opc, string OpcodeStr, Intrinsic Int,
[]>,
EVEX_4V, EVEX_K;

let isCodeGenOnly = 1 in
def rm_Int : AVX5128I<opc, MRMSrcMem, (outs RC:$dst),
(ins KRC:$mask, RC:$src1, x86memop:$src2),
!strconcat(OpcodeStr,
@@ -2588,62 +2590,66 @@ defm VCVTSD2USI64Z: avx512_cvt_s_int<0x79, VR128X, GR64,
sse_load_f64, "cvtsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;

defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
let isCodeGenOnly = 1 in {
defm Int_VCVTSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
defm Int_VCVTSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
defm Int_VCVTSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
defm Int_VCVTSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;

defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
defm Int_VCVTUSI2SSZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_avx512_cvtusi2ss, i32mem, loadi32, "cvtusi2ss{l}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V;
defm Int_VCVTUSI2SS64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_avx512_cvtusi642ss, i64mem, loadi64, "cvtusi2ss{q}",
SSE_CVT_Scalar, 0>, XS, EVEX_4V, VEX_W;
defm Int_VCVTUSI2SDZ : sse12_cvt_sint_3addr<0x2A, GR32, VR128X,
int_x86_avx512_cvtusi2sd, i32mem, loadi32, "cvtusi2sd{l}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V;
defm Int_VCVTUSI2SD64Z : sse12_cvt_sint_3addr<0x2A, GR64, VR128X,
int_x86_avx512_cvtusi642sd, i64mem, loadi64, "cvtusi2sd{q}",
SSE_CVT_Scalar, 0>, XD, EVEX_4V, VEX_W;
} // isCodeGenOnly = 1

// Convert float/double to signed/unsigned int 32/64 with truncation
defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si">,
XS, EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
"cvttss2si">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
sdmem, sse_load_f64, "cvttsd2si">, XD,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
"cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
int_x86_avx512_cvttss2usi64, ssmem,
sse_load_f32, "cvttss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
int_x86_avx512_cvttsd2usi,
sdmem, sse_load_f64, "cvttsd2usi">, XD,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
int_x86_avx512_cvttsd2usi64, sdmem,
sse_load_f64, "cvttsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in {
defm Int_VCVTTSS2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si">,
XS, EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSS2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
int_x86_sse_cvttss2si64, ssmem, sse_load_f32,
"cvttss2si">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSD2SIZ : avx512_cvt_s_int<0x2C, VR128X, GR32, int_x86_sse2_cvttsd2si,
sdmem, sse_load_f64, "cvttsd2si">, XD,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSD2SI64Z : avx512_cvt_s_int<0x2C, VR128X, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSS2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
int_x86_avx512_cvttss2usi, ssmem, sse_load_f32,
"cvttss2usi">, XS, EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSS2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
int_x86_avx512_cvttss2usi64, ssmem,
sse_load_f32, "cvttss2usi">, XS, VEX_W,
EVEX_CD8<32, CD8VT1>;
defm Int_VCVTTSD2USIZ : avx512_cvt_s_int<0x78, VR128X, GR32,
int_x86_avx512_cvttsd2usi,
sdmem, sse_load_f64, "cvttsd2usi">, XD,
EVEX_CD8<64, CD8VT1>;
defm Int_VCVTTSD2USI64Z : avx512_cvt_s_int<0x78, VR128X, GR64,
int_x86_avx512_cvttsd2usi64, sdmem,
sse_load_f64, "cvttsd2usi">, XD, VEX_W,
EVEX_CD8<64, CD8VT1>;
} // isCodeGenOnly = 1

multiclass avx512_cvt_s<bits<8> opc, RegisterClass SrcRC, RegisterClass DstRC,
SDNode OpNode, X86MemOperand x86memop, PatFrag ld_frag,
@@ -2938,19 +2944,21 @@ let Defs = [EFLAGS], Predicates = [HasAVX512] in {
"comisd">, TB, OpSize, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
}
defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in {
defm Int_VUCOMISSZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VUCOMISDZ : sse12_ord_cmp<0x2E, VR128X, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;

defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
load, "comiss">, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
load, "comisd">, TB, OpSize, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
defm Int_VCOMISSZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v4f32, f128mem,
load, "comiss">, TB, EVEX, VEX_LIG,
EVEX_CD8<32, CD8VT1>;
defm Int_VCOMISDZ : sse12_ord_cmp<0x2F, VR128X, X86comi, v2f64, f128mem,
load, "comisd">, TB, OpSize, EVEX,
VEX_LIG, VEX_W, EVEX_CD8<64, CD8VT1>;
}
}

/// avx512_unop_p - AVX-512 unops in packed form.
@@ -2980,6 +2988,7 @@ multiclass avx512_fp_unop_p<bits<8> opc, string OpcodeStr, SDNode OpNode> {
/// avx512_fp_unop_p_int - AVX-512 intrinsics unops in packed forms.
multiclass avx512_fp_unop_p_int<bits<8> opc, string OpcodeStr,
Intrinsic V16F32Int, Intrinsic V8F64Int> {
let isCodeGenOnly = 1 in {
def PSZr_Int : AVX5128I<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
!strconcat(OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
@@ -3002,6 +3011,7 @@ multiclass avx512_fp_unop_p_int<bits<8> opc, string OpcodeStr,
[(set VR512:$dst,
(V8F64Int (memopv8f64 addr:$src)))]>,
EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
} // isCodeGenOnly = 1
}

/// avx512_fp_unop_s - AVX-512 unops in scalar form.
@@ -3018,6 +3028,7 @@ multiclass avx512_fp_unop_s<bits<8> opc, string OpcodeStr> {
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, EVEX_4V, EVEX_CD8<32, CD8VT1>;
let isCodeGenOnly = 1 in
def SSZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
@@ -3035,6 +3046,7 @@ multiclass avx512_fp_unop_s<bits<8> opc, string OpcodeStr> {
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in
def SDZm_Int : AVX5128I<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
@@ -3122,6 +3134,7 @@ multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
(v8f64 (bitconvert (memopv16f32 addr:$src)))))],
itins_d.rm>, EVEX, EVEX_V512, EVEX_CD8<64, CD8VF>;

let isCodeGenOnly = 1 in {
def PSZr_Int : AVX512PSI<opc, MRMSrcReg, (outs VR512:$dst), (ins VR512:$src),
!strconcat(OpcodeStr,
"ps\t{$src, $dst|$dst, $src}"),
@@ -3140,7 +3153,8 @@ multiclass avx512_sqrt_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
!strconcat(OpcodeStr,
"pd\t{$src, $dst|$dst, $src}"),
[(set VR512:$dst, (V8F64Int (memopv8f64 addr:$src)))]>,
EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
EVEX, EVEX_V512, VEX_W, EVEX_CD8<64, CD8VF>;
} // isCodeGenOnly = 1
}

multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
@@ -3151,6 +3165,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[], itins_s.rr>, XS, EVEX_4V;
let isCodeGenOnly = 1 in
def SSZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
@@ -3164,6 +3179,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[], itins_s.rm>, XS, EVEX_4V, EVEX_CD8<32, CD8VT1>;
let isCodeGenOnly = 1 in
def SSZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, ssmem:$src2),
!strconcat(OpcodeStr,
@@ -3177,6 +3193,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
XD, EVEX_4V, VEX_W;
let isCodeGenOnly = 1 in
def SDZr_Int : SIi8<opc, MRMSrcReg, (outs VR128X:$dst),
(ins VR128X:$src1, VR128X:$src2),
!strconcat(OpcodeStr,
@@ -3190,6 +3207,7 @@ multiclass avx512_sqrt_scalar<bits<8> opc, string OpcodeStr,
!strconcat(OpcodeStr,
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"), []>,
XD, EVEX_4V, VEX_W, EVEX_CD8<64, CD8VT1>;
let isCodeGenOnly = 1 in
def SDZm_Int : SIi8<opc, MRMSrcMem, (outs VR128X:$dst),
(ins VR128X:$src1, sdmem:$src2),
!strconcat(OpcodeStr,
@@ -3305,6 +3323,7 @@ let ExeDomain = GenericDomain in {
[]>;

// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SSr_Int : AVX512AIi8<opcss, MRMSrcReg,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,
@@ -3329,6 +3348,7 @@ let ExeDomain = GenericDomain in {
[]>, VEX_W;

// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SDr_Int : AVX512AIi8<opcsd, MRMSrcReg,
(outs VR128X:$dst), (ins VR128X:$src1, VR128X:$src2, i32i8imm:$src3),
!strconcat(OpcodeStr,

@@ -138,19 +138,21 @@ multiclass fma3s_rm<bits<8> opc, string OpcodeStr, X86MemOperand x86memop,
multiclass fma3s_rm_int<bits<8> opc, string OpcodeStr, Operand memop,
ComplexPattern mem_cpat, Intrinsic IntId,
RegisterClass RC> {
let isCommutable = 1 in
def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
VR128:$src3))]>;
def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, memop:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR128:$dst,
(IntId VR128:$src2, VR128:$src1, mem_cpat:$src3))]>;
let isCodeGenOnly = 1 in {
let isCommutable = 1 in
def r_Int : FMA3<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR128:$dst, (IntId VR128:$src2, VR128:$src1,
VR128:$src3))]>;
def m_Int : FMA3<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, memop:$src3),
!strconcat(OpcodeStr,
"\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
[(set VR128:$dst,
(IntId VR128:$src2, VR128:$src1, mem_cpat:$src3))]>;
} // isCodeGenOnly
}
} // Constraints = "$src1 = $dst"

@@ -230,6 +232,7 @@ let isCodeGenOnly = 1, hasSideEffects = 0 in

multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
ComplexPattern mem_cpat, Intrinsic Int> {
let isCodeGenOnly = 1 in {
let isCommutable = 1 in
def rr_Int : FMA4<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2, VR128:$src3),
@@ -249,6 +252,7 @@ multiclass fma4s_int<bits<8> opc, string OpcodeStr, Operand memop,
"\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
[(set VR128:$dst,
(Int VR128:$src1, mem_cpat:$src2, VR128:$src3))]>, VEX_LIG;
} // isCodeGenOnly = 1
}

multiclass fma4p<bits<8> opc, string OpcodeStr, SDNode OpNode,

@@ -210,6 +210,7 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
Operand memopr, ComplexPattern mem_cpat,
OpndItins itins,
bit Is2Addr = 1> {
let isCodeGenOnly = 1 in {
def rr_Int : SI<opc, MRMSrcReg, (outs RC:$dst), (ins RC:$src1, RC:$src2),
!if(Is2Addr,
!strconcat(asm, "\t{$src2, $dst|$dst, $src2}"),
@@ -227,6 +228,7 @@ multiclass sse12_fp_scalar_int<bits<8> opc, string OpcodeStr, RegisterClass RC,
RC:$src1, mem_cpat:$src2))], itins.rm>,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}
}

/// sse12_fp_packed - SSE 1 & 2 packed instructions class
multiclass sse12_fp_packed<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -1632,40 +1634,43 @@ defm CVTSD2SI64 : sse12_cvt_sint<0x2D, VR128, GR64, int_x86_sse2_cvtsd2si64,
sdmem, sse_load_f64, "cvtsd2si", SSE_CVT_SD2SI>, XD, REX_W;


let Predicates = [UseAVX] in {
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
SSE_CVT_Scalar, 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
SSE_CVT_Scalar, 0>, XS, VEX_4V,
VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
SSE_CVT_Scalar, 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD,
VEX_4V, VEX_W;
}
let Constraints = "$src1 = $dst" in {
defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32,
"cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse_cvtsi642ss, i64mem, loadi64,
"cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32,
"cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64,
"cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
}
let isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in {
defm Int_VCVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32, "cvtsi2ss{l}",
SSE_CVT_Scalar, 0>, XS, VEX_4V;
defm Int_VCVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse_cvtsi642ss, i64mem, loadi64, "cvtsi2ss{q}",
SSE_CVT_Scalar, 0>, XS, VEX_4V,
VEX_W;
defm Int_VCVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32, "cvtsi2sd{l}",
SSE_CVT_Scalar, 0>, XD, VEX_4V;
defm Int_VCVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64, "cvtsi2sd{q}",
SSE_CVT_Scalar, 0>, XD,
VEX_4V, VEX_W;
}
let Constraints = "$src1 = $dst" in {
defm Int_CVTSI2SS : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse_cvtsi2ss, i32mem, loadi32,
"cvtsi2ss{l}", SSE_CVT_Scalar>, XS;
defm Int_CVTSI2SS64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse_cvtsi642ss, i64mem, loadi64,
"cvtsi2ss{q}", SSE_CVT_Scalar>, XS, REX_W;
defm Int_CVTSI2SD : sse12_cvt_sint_3addr<0x2A, GR32, VR128,
int_x86_sse2_cvtsi2sd, i32mem, loadi32,
"cvtsi2sd{l}", SSE_CVT_Scalar>, XD;
defm Int_CVTSI2SD64 : sse12_cvt_sint_3addr<0x2A, GR64, VR128,
int_x86_sse2_cvtsi642sd, i64mem, loadi64,
"cvtsi2sd{q}", SSE_CVT_Scalar>, XD, REX_W;
}
} // isCodeGenOnly = 1

/// SSE 1 Only
// Aliases for intrinsics
let isCodeGenOnly = 1 in {
let Predicates = [UseAVX] in {
defm Int_VCVTTSS2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse_cvttss2si,
ssmem, sse_load_f32, "cvttss2si",
@@ -1694,6 +1699,7 @@ defm Int_CVTTSD2SI : sse12_cvt_sint<0x2C, VR128, GR32, int_x86_sse2_cvttsd2si,
defm Int_CVTTSD2SI64 : sse12_cvt_sint<0x2C, VR128, GR64,
int_x86_sse2_cvttsd2si64, sdmem, sse_load_f64,
"cvttsd2si", SSE_CVT_SD2SI>, XD, REX_W;
} // isCodeGenOnly = 1

let Predicates = [UseAVX] in {
defm VCVTSS2SI : sse12_cvt_sint<0x2D, VR128, GR32, int_x86_sse_cvtss2si,
@@ -1792,6 +1798,7 @@ def CVTSD2SSrm : I<0x5A, MRMSrcMem, (outs FR32:$dst), (ins f64mem:$src),
XD,
Requires<[UseSSE2, OptForSize]>, Sched<[WriteCvtF2FLd]>;

let isCodeGenOnly = 1 in {
def Int_VCVTSD2SSrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtsd2ss\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1823,6 +1830,7 @@ def Int_CVTSD2SSrm: I<0x5A, MRMSrcReg,
IIC_SSE_CVT_Scalar_RM>, XD, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1

// Convert scalar single to scalar double
// SSE2 instructions with XS prefix
@@ -1875,6 +1883,7 @@ def : Pat<(fextend (loadf32 addr:$src)),
def : Pat<(extloadf32 addr:$src),
(CVTSS2SDrr (MOVSSrm addr:$src))>, Requires<[UseSSE2, OptForSpeed]>;

let isCodeGenOnly = 1 in {
def Int_VCVTSS2SDrr: I<0x5A, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2),
"vcvtss2sd\t{$src2, $src1, $dst|$dst, $src1, $src2}",
@@ -1905,6 +1914,7 @@ def Int_CVTSS2SDrm: I<0x5A, MRMSrcMem,
IIC_SSE_CVT_Scalar_RM>, XS, Requires<[UseSSE2]>,
Sched<[WriteCvtF2FLd, ReadAfterLd]>;
}
} // isCodeGenOnly = 1

// Convert packed single/double fp to doubleword
def VCVTPS2DQrr : VPDI<0x5B, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
@@ -2338,23 +2348,25 @@ multiclass sse12_cmp_scalar_int<X86MemOperand x86memop, Operand CC,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
}

// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
SSE_ALU_F32S>,
XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
SSE_ALU_F32S>, // same latency as f32
XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $dst|$dst, $src}",
SSE_ALU_F32S>, XS;
defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $dst|$dst, $src}",
SSE_ALU_F64S>,
XD;
let isCodeGenOnly = 1 in {
// Aliases to match intrinsics which expect XMM operand(s).
defm Int_VCMPSS : sse12_cmp_scalar_int<f32mem, AVXCC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $src1, $dst|$dst, $src1, $src}",
SSE_ALU_F32S>,
XS, VEX_4V;
defm Int_VCMPSD : sse12_cmp_scalar_int<f64mem, AVXCC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $src1, $dst|$dst, $src1, $src}",
SSE_ALU_F32S>, // same latency as f32
XD, VEX_4V;
let Constraints = "$src1 = $dst" in {
defm Int_CMPSS : sse12_cmp_scalar_int<f32mem, SSECC, int_x86_sse_cmp_ss,
"cmp${cc}ss\t{$src, $dst|$dst, $src}",
SSE_ALU_F32S>, XS;
defm Int_CMPSD : sse12_cmp_scalar_int<f64mem, SSECC, int_x86_sse2_cmp_sd,
"cmp${cc}sd\t{$src, $dst|$dst, $src}",
SSE_ALU_F64S>,
XD;
}
}

@@ -2387,15 +2399,17 @@ let Defs = [EFLAGS] in {
"comisd">, TB, OpSize, VEX, VEX_LIG;
}

defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, VEX;
defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize, VEX;
let isCodeGenOnly = 1 in {
defm Int_VUCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB, VEX;
defm Int_VUCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize, VEX;

defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
load, "comiss">, TB, VEX;
defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
load, "comisd">, TB, OpSize, VEX;
defm Int_VCOMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem,
load, "comiss">, TB, VEX;
defm Int_VCOMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem,
load, "comisd">, TB, OpSize, VEX;
}
defm UCOMISS : sse12_ord_cmp<0x2E, FR32, X86cmp, f32, f32mem, loadf32,
"ucomiss">, TB;
defm UCOMISD : sse12_ord_cmp<0x2E, FR64, X86cmp, f64, f64mem, loadf64,
@@ -2408,15 +2422,17 @@ let Defs = [EFLAGS] in {
"comisd">, TB, OpSize;
}

defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize;
let isCodeGenOnly = 1 in {
defm Int_UCOMISS : sse12_ord_cmp<0x2E, VR128, X86ucomi, v4f32, f128mem,
load, "ucomiss">, TB;
defm Int_UCOMISD : sse12_ord_cmp<0x2E, VR128, X86ucomi, v2f64, f128mem,
load, "ucomisd">, TB, OpSize;

defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
"comiss">, TB;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
"comisd">, TB, OpSize;
defm Int_COMISS : sse12_ord_cmp<0x2F, VR128, X86comi, v4f32, f128mem, load,
"comiss">, TB;
defm Int_COMISD : sse12_ord_cmp<0x2F, VR128, X86comi, v2f64, f128mem, load,
"comisd">, TB, OpSize;
}
} // Defs = [EFLAGS]

// sse12_cmp_packed - sse 1 & 2 compare packed instructions
@@ -3277,6 +3293,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
let isCodeGenOnly = 1 in
def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3297,6 +3314,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
let isCodeGenOnly = 1 in {
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F32Int VR128:$src))], itins.rr>,
@@ -3306,6 +3324,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
[(set VR128:$dst, (F32Int sse_load_f32:$src))], itins.rm>,
Sched<[itins.Sched.Folded]>;
}
}

/// sse1_fp_unop_s_rw - SSE1 unops where vector form has a read-write operand.
multiclass sse1_fp_unop_rw<bits<8> opc, string OpcodeStr, SDNode OpNode,
@@ -3323,6 +3342,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"ss\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
let isCodeGenOnly = 1 in
def V#NAME#SSm_Int : SSI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, ssmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3343,7 +3363,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "ss\t{$src, $dst|$dst, $src}"),
[(set FR32:$dst, (OpNode (load addr:$src)))], itins.rm>, XS,
Requires<[UseSSE1, OptForSize]>, Sched<[itins.Sched.Folded]>;
let Constraints = "$src1 = $dst" in {
let isCodeGenOnly = 1, Constraints = "$src1 = $dst" in {
def SSr_Int : SSI<opc, MRMSrcReg, (outs VR128:$dst),
(ins VR128:$src1, VR128:$src2),
!strconcat(OpcodeStr, "ss\t{$src2, $dst|$dst, $src2}"),
@@ -3396,6 +3416,7 @@ let Predicates = [HasAVX] in {
multiclass sse1_fp_unop_p_int<bits<8> opc, string OpcodeStr,
Intrinsic V4F32Int, Intrinsic V8F32Int,
OpndItins itins> {
let isCodeGenOnly = 1 in {
let Predicates = [HasAVX] in {
def V#NAME#PSr_Int : PSI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat("v", OpcodeStr,
@@ -3428,6 +3449,7 @@ let Predicates = [HasAVX] in {
!strconcat(OpcodeStr, "ps\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (V4F32Int (memopv4f32 addr:$src)))],
itins.rm>, Sched<[itins.Sched.Folded]>;
} // isCodeGenOnly = 1
}

/// sse2_fp_unop_s - SSE2 unops in scalar form.
@@ -3446,6 +3468,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
"sd\t{$src2, $src1, $dst|$dst, $src1, $src2}"),
[]>, VEX_4V, VEX_LIG,
Sched<[itins.Sched.Folded, ReadAfterLd]>;
let isCodeGenOnly = 1 in
def V#NAME#SDm_Int : SDI<opc, MRMSrcMem, (outs VR128:$dst),
(ins VR128:$src1, sdmem:$src2),
!strconcat("v", OpcodeStr,
@@ -3464,6 +3487,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set FR64:$dst, (OpNode (load addr:$src)))], itins.rm>, XD,
Requires<[UseSSE2, OptForSize]>, Sched<[itins.Sched.Folded]>;
let isCodeGenOnly = 1 in {
def SDr_Int : SDI<opc, MRMSrcReg, (outs VR128:$dst), (ins VR128:$src),
!strconcat(OpcodeStr, "sd\t{$src, $dst|$dst, $src}"),
[(set VR128:$dst, (F64Int VR128:$src))], itins.rr>,
@@ -3473,6 +3497,7 @@ let Predicates = [HasAVX], hasSideEffects = 0 in {
[(set VR128:$dst, (F64Int sse_load_f64:$src))], itins.rm>,
Sched<[itins.Sched.Folded]>;
}
}

/// sse2_fp_unop_p - SSE2 unops in vector forms.
multiclass sse2_fp_unop_p<bits<8> opc, string OpcodeStr,
@@ -6521,6 +6546,7 @@ let ExeDomain = GenericDomain in {
[]>, OpSize;

// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SSr_Int : SS4AIi8<opcss, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,
@@ -6555,6 +6581,7 @@ let ExeDomain = GenericDomain in {
[]>, OpSize;

// Intrinsic operation, reg.
let isCodeGenOnly = 1 in
def SDr_Int : SS4AIi8<opcsd, MRMSrcReg,
(outs VR128:$dst), (ins VR128:$src1, VR128:$src2, i32i8imm:$src3),
!if(Is2Addr,

@@ -503,10 +503,6 @@ RecognizableInstr::filter_ret RecognizableInstr::filter() const {
// Filter out artificial instructions but leave in the LOCK_PREFIX so it is
// printed as a separate "instruction".

if (Name.find("_Int") != Name.npos ||
Name.find("Int_") != Name.npos)
return FILTER_STRONG;

// Filter out instructions with segment override prefixes.
// They're too messy to handle now and we'll special case them if needed.