diff --git a/lib/Target/X86/X86InstrAVX512.td b/lib/Target/X86/X86InstrAVX512.td
index 9fa75782499..e087b4e7fab 100644
--- a/lib/Target/X86/X86InstrAVX512.td
+++ b/lib/Target/X86/X86InstrAVX512.td
@@ -4606,10 +4606,6 @@ let Constraints = "$src0 = $dst" in
 let Predicates = [HasAVX512] in {
   let AddedComplexity = 15 in {
-  // Move scalar to XMM zero-extended, zeroing a VR128X then do a
-  // MOVS{S,D} to the lower bits.
-  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32X:$src)))),
-            (VMOVSSZrr (v4f32 (AVX512_128_SET0)), FR32X:$src)>;
   def : Pat<(v4f32 (X86vzmovl (v4f32 VR128X:$src))),
             (VMOVSSZrr (v4f32 (AVX512_128_SET0)),
                        (COPY_TO_REGCLASS VR128X:$src, FR32X))>;
   def : Pat<(v4i32 (X86vzmovl (v4i32 VR128X:$src))),
diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index bbc9d8d6b90..cf9c54d0e84 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -678,8 +678,6 @@ let Predicates = [UseSSE1] in {
 let Predicates = [NoSSE41], AddedComplexity = 15 in {
   // Move scalar to XMM zero-extended, zeroing a VR128 then do a
   // MOVSS to the lower bits.
-  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
-            (MOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
   def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
             (MOVSSrr (v4f32 (V_SET0)), (COPY_TO_REGCLASS VR128:$src, FR32))>;
   def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
@@ -6962,10 +6960,6 @@ let Predicates = [HasAVX2] in {
 // movs[s/d] are 1-2 byte shorter instructions.
 let Predicates = [UseAVX] in {
   let AddedComplexity = 15 in {
-  // Move scalar to XMM zero-extended, zeroing a VR128 then do a
-  // MOVS{S,D} to the lower bits.
-  def : Pat<(v4f32 (X86vzmovl (v4f32 (scalar_to_vector FR32:$src)))),
-            (VMOVSSrr (v4f32 (V_SET0)), FR32:$src)>;
   def : Pat<(v4f32 (X86vzmovl (v4f32 VR128:$src))),
             (VBLENDPSrri (v4f32 (V_SET0)), VR128:$src, (i8 1))>;
   def : Pat<(v4i32 (X86vzmovl (v4i32 VR128:$src))),
diff --git a/test/CodeGen/X86/vec_ss_load_fold.ll b/test/CodeGen/X86/vec_ss_load_fold.ll
index a74a4ed36d7..1010f97ccdb 100644
--- a/test/CodeGen/X86/vec_ss_load_fold.ll
+++ b/test/CodeGen/X86/vec_ss_load_fold.ll
@@ -35,9 +35,9 @@ define i16 @test1(float %f) nounwind {
 ; X32_AVX1-LABEL: test1:
 ; X32_AVX1:       ## BB#0:
 ; X32_AVX1-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32_AVX1-NEXT:    vaddss LCPI0_0, %xmm0, %xmm0
 ; X32_AVX1-NEXT:    vmulss LCPI0_1, %xmm0, %xmm0
+; X32_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32_AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X32_AVX1-NEXT:    vminss LCPI0_2, %xmm0, %xmm0
 ; X32_AVX1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
@@ -47,9 +47,9 @@ define i16 @test1(float %f) nounwind {
 ;
 ; X64_AVX1-LABEL: test1:
 ; X64_AVX1:       ## BB#0:
-; X64_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64_AVX1-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
 ; X64_AVX1-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64_AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X64_AVX1-NEXT:    vminss {{.*}}(%rip), %xmm0, %xmm0
 ; X64_AVX1-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
@@ -60,9 +60,9 @@ define i16 @test1(float %f) nounwind {
 ; X32_AVX512-LABEL: test1:
 ; X32_AVX512:       ## BB#0:
 ; X32_AVX512-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X32_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32_AVX512-NEXT:    vaddss LCPI0_0, %xmm0, %xmm0
 ; X32_AVX512-NEXT:    vmulss LCPI0_1, %xmm0, %xmm0
+; X32_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X32_AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X32_AVX512-NEXT:    vminss LCPI0_2, %xmm0, %xmm0
 ; X32_AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0
@@ -72,9 +72,9 @@ define i16 @test1(float %f) nounwind {
 ;
 ; X64_AVX512-LABEL: test1:
 ; X64_AVX512:       ## BB#0:
-; X64_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64_AVX512-NEXT:    vaddss {{.*}}(%rip), %xmm0, %xmm0
 ; X64_AVX512-NEXT:    vmulss {{.*}}(%rip), %xmm0, %xmm0
+; X64_AVX512-NEXT:    vxorps %xmm1, %xmm1, %xmm1
 ; X64_AVX512-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
 ; X64_AVX512-NEXT:    vminss {{.*}}(%rip), %xmm0, %xmm0
 ; X64_AVX512-NEXT:    vmaxss %xmm1, %xmm0, %xmm0