[X86][AVX512] Converted the MOVDDUP/MOVSLDUP/MOVSHDUP masked intrinsics to generic IR

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@274443 91177308-0d34-0410-b5e6-96231b3b80d8
Simon Pilgrim 2016-07-02 17:16:41 +00:00
parent f9c643b6e0
commit 1dcb078502
3 changed files with 156 additions and 72 deletions
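For reference, the replacement pattern in generic IR: a minimal sketch of the 512-bit MOVDDUP case, reconstructed from the new tests below. The operand order of the old intrinsic (source, passthrough, mask) is an assumption based on the corresponding clang builtin, and the sketch function name is hypothetical.

; Old form: an opaque target-specific intrinsic that carries the write-mask inline
; (assumed operand order: source, passthrough, mask).
declare <8 x double> @llvm.x86.avx512.mask.movddup.512(<8 x double>, <8 x double>, i8)

; New form: the same semantics as a generic shuffle plus select, matching the
; pattern the updated fast-isel tests check for.
define <8 x double> @movddup_pd_512_sketch(<8 x double> %src, <8 x double> %passthru, i8 %mask) {
  ; Duplicate the even double elements pairwise: <0,0,2,2,4,4,6,6>.
  %dup = shufflevector <8 x double> %src, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  ; Apply the write-mask: shuffled lanes where the mask bit is set, passthrough elsewhere.
  %m = bitcast i8 %mask to <8 x i1>
  %res = select <8 x i1> %m, <8 x double> %dup, <8 x double> %passthru
  ret <8 x double> %res
}

Expressed this way the operation is visible to generic IR optimizations, while X86 shuffle lowering still selects the masked VMOVDDUP for it, as the updated CHECK lines below confirm.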

@@ -1527,60 +1527,6 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
          Intrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i32_ty, llvm_v16f32_ty, llvm_i16_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movshdup_128 :
          GCCBuiltin<"__builtin_ia32_movshdup128_mask">,
          Intrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movshdup_256 :
          GCCBuiltin<"__builtin_ia32_movshdup256_mask">,
          Intrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movshdup_512 :
          GCCBuiltin<"__builtin_ia32_movshdup512_mask">,
          Intrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movsldup_128 :
          GCCBuiltin<"__builtin_ia32_movsldup128_mask">,
          Intrinsic<[llvm_v4f32_ty],
          [llvm_v4f32_ty, llvm_v4f32_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movsldup_256 :
          GCCBuiltin<"__builtin_ia32_movsldup256_mask">,
          Intrinsic<[llvm_v8f32_ty],
          [llvm_v8f32_ty, llvm_v8f32_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movsldup_512 :
          GCCBuiltin<"__builtin_ia32_movsldup512_mask">,
          Intrinsic<[llvm_v16f32_ty],
          [llvm_v16f32_ty, llvm_v16f32_ty, llvm_i16_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movddup_128 :
          GCCBuiltin<"__builtin_ia32_movddup128_mask">,
          Intrinsic<[llvm_v2f64_ty],
          [llvm_v2f64_ty, llvm_v2f64_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movddup_256 :
          GCCBuiltin<"__builtin_ia32_movddup256_mask">,
          Intrinsic<[llvm_v4f64_ty],
          [llvm_v4f64_ty, llvm_v4f64_ty, llvm_i8_ty],
          [IntrNoMem]>;
  def int_x86_avx512_mask_movddup_512 :
          GCCBuiltin<"__builtin_ia32_movddup512_mask">,
          Intrinsic<[llvm_v8f64_ty],
          [llvm_v8f64_ty, llvm_v8f64_ty, llvm_i8_ty],
          [IntrNoMem]>;
}
// Vector blend

@@ -818,28 +818,10 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
                     X86ISD::FMIN, X86ISD::FMIN_RND),
  X86_INTRINSIC_DATA(avx512_mask_min_ss_round, INTR_TYPE_SCALAR_MASK_RM,
                     X86ISD::FMIN, X86ISD::FMIN_RND),
  X86_INTRINSIC_DATA(avx512_mask_movddup_128, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVDDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movddup_256, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVDDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movddup_512, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVDDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_move_sd, INTR_TYPE_SCALAR_MASK,
                     X86ISD::MOVSD, 0),
  X86_INTRINSIC_DATA(avx512_mask_move_ss, INTR_TYPE_SCALAR_MASK,
                     X86ISD::MOVSS, 0),
  X86_INTRINSIC_DATA(avx512_mask_movshdup_128, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSHDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movshdup_256, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSHDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movshdup_512, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSHDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movsldup_128, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSLDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movsldup_256, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSLDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_movsldup_512, INTR_TYPE_1OP_MASK,
                     X86ISD::MOVSLDUP, 0),
  X86_INTRINSIC_DATA(avx512_mask_mul_pd_128, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
  X86_INTRINSIC_DATA(avx512_mask_mul_pd_256, INTR_TYPE_2OP_MASK, ISD::FMUL, 0),
  X86_INTRINSIC_DATA(avx512_mask_mul_pd_512, INTR_TYPE_2OP_MASK, ISD::FMUL,

@@ -4,6 +4,162 @@
; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512f-builtins.c
define <8 x double> @test_mm512_movddup_pd(<8 x double> %a0) {
; X32-LABEL: test_mm512_movddup_pd:
; X32: # BB#0:
; X32-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_movddup_pd:
; X64: # BB#0:
; X64-NEXT: vmovddup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
  %res = shufflevector <8 x double> %a0, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  ret <8 x double> %res
}

define <8 x double> @test_mm512_mask_movddup_pd(<8 x double> %a0, i8 %a1, <8 x double> %a2) {
; X32-LABEL: test_mm512_mask_movddup_pd:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_movddup_pd:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
  %arg1 = bitcast i8 %a1 to <8 x i1>
  %res0 = shufflevector <8 x double> %a2, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  %res1 = select <8 x i1> %arg1, <8 x double> %res0, <8 x double> %a0
  ret <8 x double> %res1
}

define <8 x double> @test_mm512_maskz_movddup_pd(i8 %a0, <8 x double> %a1) {
; X32-LABEL: test_mm512_maskz_movddup_pd:
; X32: # BB#0:
; X32-NEXT: movb {{[0-9]+}}(%esp), %al
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_movddup_pd:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovddup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6]
; X64-NEXT: retq
  %arg0 = bitcast i8 %a0 to <8 x i1>
  %res0 = shufflevector <8 x double> %a1, <8 x double> undef, <8 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6>
  %res1 = select <8 x i1> %arg0, <8 x double> %res0, <8 x double> zeroinitializer
  ret <8 x double> %res1
}

define <16 x float> @test_mm512_movehdup_ps(<16 x float> %a0) {
; X32-LABEL: test_mm512_movehdup_ps:
; X32: # BB#0:
; X32-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_movehdup_ps:
; X64: # BB#0:
; X64-NEXT: vmovshdup {{.*#+}} zmm0 = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
  %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
  ret <16 x float> %res
}

define <16 x float> @test_mm512_mask_movehdup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_mask_movehdup_ps:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_movehdup_ps:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} = zmm1[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
  %arg1 = bitcast i16 %a1 to <16 x i1>
  %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
  %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
  ret <16 x float> %res1
}

define <16 x float> @test_mm512_maskz_movehdup_ps(i16 %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_maskz_movehdup_ps:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_movehdup_ps:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovshdup {{.*#+}} zmm0 {%k1} {z} = zmm0[1,1,3,3,5,5,7,7,9,9,11,11,13,13,15,15]
; X64-NEXT: retq
  %arg0 = bitcast i16 %a0 to <16 x i1>
  %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 1, i32 1, i32 3, i32 3, i32 5, i32 5, i32 7, i32 7, i32 9, i32 9, i32 11, i32 11, i32 13, i32 13, i32 15, i32 15>
  %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
  ret <16 x float> %res1
}

define <16 x float> @test_mm512_moveldup_ps(<16 x float> %a0) {
; X32-LABEL: test_mm512_moveldup_ps:
; X32: # BB#0:
; X32-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_moveldup_ps:
; X64: # BB#0:
; X64-NEXT: vmovsldup {{.*#+}} zmm0 = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
  %res = shufflevector <16 x float> %a0, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
  ret <16 x float> %res
}

define <16 x float> @test_mm512_mask_moveldup_ps(<16 x float> %a0, i16 %a1, <16 x float> %a2) {
; X32-LABEL: test_mm512_mask_moveldup_ps:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_mask_moveldup_ps:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} = zmm1[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
  %arg1 = bitcast i16 %a1 to <16 x i1>
  %res0 = shufflevector <16 x float> %a2, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
  %res1 = select <16 x i1> %arg1, <16 x float> %res0, <16 x float> %a0
  ret <16 x float> %res1
}

define <16 x float> @test_mm512_maskz_moveldup_ps(i16 %a0, <16 x float> %a1) {
; X32-LABEL: test_mm512_maskz_moveldup_ps:
; X32: # BB#0:
; X32-NEXT: movw {{[0-9]+}}(%esp), %ax
; X32-NEXT: kmovw %eax, %k1
; X32-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X32-NEXT: retl
;
; X64-LABEL: test_mm512_maskz_moveldup_ps:
; X64: # BB#0:
; X64-NEXT: kmovw %edi, %k1
; X64-NEXT: vmovsldup {{.*#+}} zmm0 {%k1} {z} = zmm0[0,0,2,2,4,4,6,6,8,8,10,10,12,12,14,14]
; X64-NEXT: retq
  %arg0 = bitcast i16 %a0 to <16 x i1>
  %res0 = shufflevector <16 x float> %a1, <16 x float> undef, <16 x i32> <i32 0, i32 0, i32 2, i32 2, i32 4, i32 4, i32 6, i32 6, i32 8, i32 8, i32 10, i32 10, i32 12, i32 12, i32 14, i32 14>
  %res1 = select <16 x i1> %arg0, <16 x float> %res0, <16 x float> zeroinitializer
  ret <16 x float> %res1
}

define <8 x i64> @test_mm512_unpackhi_epi32(<8 x i64> %a0, <8 x i64> %a1) {
; X32-LABEL: test_mm512_unpackhi_epi32:
; X32: # BB#0: