diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 5730036c765..d6e13793376 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -3162,6 +3162,7 @@ let Predicates = [HasAVX512] in {
             (VMOVSSZrr (v4i32 (V_SET0)), (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
   def : Pat<(v2f64 (X86vzmovl (v2f64 (scalar_to_vector FR64X:$src)))),
             (VMOVSDZrr (v2f64 (V_SET0)), FR64X:$src)>;
+  }
 
   // Move low f32 and clear high bits.
   def : Pat<(v8f32 (X86vzmovl (v8f32 VR256X:$src))),
@@ -3172,7 +3173,6 @@ let Predicates = [HasAVX512] in {
             (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (v4i32 (V_SET0)),
              (EXTRACT_SUBREG (v8i32 VR256X:$src), sub_xmm)), sub_xmm)>;
-  }
   def : Pat<(v16f32 (X86vzmovl (v16f32 VR512:$src))),
             (SUBREG_TO_REG (i32 0),
              (VMOVSSZrr (v4f32 (V_SET0)),
diff --git a/test/CodeGen/X86/vector-shuffle-512-v64.ll b/test/CodeGen/X86/vector-shuffle-512-v64.ll
index 9ea5df5c076..4c6a2f4e527 100644
--- a/test/CodeGen/X86/vector-shuffle-512-v64.ll
+++ b/test/CodeGen/X86/vector-shuffle-512-v64.ll
@@ -93,20 +93,16 @@ define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
 ; AVX512F:       # BB#0:
 ; AVX512F-NEXT:    movl $255, %eax
 ; AVX512F-NEXT:    vmovd %eax, %xmm1
-; AVX512F-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512F-NEXT:    vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
-; AVX512F-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; AVX512F-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512F-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512F-NEXT:    vpxor %ymm1, %ymm1, %ymm1
 ; AVX512F-NEXT:    retq
 ;
 ; AVX512BW-LABEL: shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz:
 ; AVX512BW:       # BB#0:
 ; AVX512BW-NEXT:    movl $255, %eax
 ; AVX512BW-NEXT:    vmovd %eax, %xmm1
-; AVX512BW-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512BW-NEXT:    vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
-; AVX512BW-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; AVX512BW-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512BW-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512BW-NEXT:    vpxor %ymm1, %ymm1, %ymm1
 ; AVX512BW-NEXT:    vinserti64x4 $1, %ymm1, %zmm0, %zmm0
 ; AVX512BW-NEXT:    retq
 ;
@@ -114,10 +110,8 @@ define <64 x i8> @shuffle_v64i8_0zzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz(<64 x i8> %a) {
 ; AVX512DQ:       # BB#0:
 ; AVX512DQ-NEXT:    movl $255, %eax
 ; AVX512DQ-NEXT:    vmovd %eax, %xmm1
-; AVX512DQ-NEXT:    vxorps %xmm2, %xmm2, %xmm2
-; AVX512DQ-NEXT:    vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
-; AVX512DQ-NEXT:    vandps %ymm1, %ymm0, %ymm0
-; AVX512DQ-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; AVX512DQ-NEXT:    vpand %ymm1, %ymm0, %ymm0
+; AVX512DQ-NEXT:    vpxor %ymm1, %ymm1, %ymm1
 ; AVX512DQ-NEXT:    retq
   %shuffle = shufflevector <64 x i8> %a, <64 x i8> zeroinitializer, <64 x i32>
   ret <64 x i8> %shuffle