diff --git a/test/CodeGen/X86/fma-intrinsics-fast-isel.ll b/test/CodeGen/X86/fma-intrinsics-fast-isel.ll index d82fe58ec40..d8183b5ce25 100644 --- a/test/CodeGen/X86/fma-intrinsics-fast-isel.ll +++ b/test/CodeGen/X86/fma-intrinsics-fast-isel.ll @@ -61,6 +61,18 @@ entry: ret <4 x float> %0 } +define <4 x float> @test_mm_fmsub_ps_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fmsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm2, %xmm2 +; CHECK-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x float> %c + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %neg.i) #2 + ret <4 x float> %0 +} + define <2 x double> @test_mm_fmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fmsub_pd: ; CHECK: # %bb.0: # %entry @@ -73,6 +85,18 @@ entry: ret <2 x double> %0 } +define <2 x double> @test_mm_fmsub_pd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fmsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm2, %xmm2 +; CHECK-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <2 x double> %c + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %neg.i) #2 + ret <2 x double> %0 +} + define <4 x float> @test_mm_fmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fmsub_ss: ; CHECK: # %bb.0: # %entry @@ -88,6 +112,21 @@ entry: ret <4 x float> %4 } +define <4 x float> @test_mm_fmsub_ss_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fmsub_ss_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmsub213ss {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <4 x float> %a, i64 0 + %1 = extractelement <4 x float> %b, i64 0 + %.rhs.i = 
extractelement <4 x float> %c, i64 0 + %2 = fneg float %.rhs.i + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2 + %4 = insertelement <4 x float> %a, float %3, i64 0 + ret <4 x float> %4 +} + define <2 x double> @test_mm_fmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fmsub_sd: ; CHECK: # %bb.0: # %entry @@ -103,6 +142,21 @@ entry: ret <2 x double> %4 } +define <2 x double> @test_mm_fmsub_sd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fmsub_sd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmsub213sd {{.*#+}} xmm0 = (xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <2 x double> %a, i64 0 + %1 = extractelement <2 x double> %b, i64 0 + %.rhs.i = extractelement <2 x double> %c, i64 0 + %2 = fneg double %.rhs.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2 + %4 = insertelement <2 x double> %a, double %3, i64 0 + ret <2 x double> %4 +} + define <4 x float> @test_mm_fnmadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fnmadd_ps: ; CHECK: # %bb.0: # %entry @@ -115,6 +169,18 @@ entry: ret <4 x float> %0 } +define <4 x float> @test_mm_fnmadd_ps_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fnmadd_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorps {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: vfmadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x float> %a + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %neg.i, <4 x float> %b, <4 x float> %c) #2 + ret <4 x float> %0 +} + define <2 x double> @test_mm_fnmadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fnmadd_pd: ; CHECK: # %bb.0: # %entry @@ -127,6 +193,18 @@ entry: ret <2 x double> %0 } +define <2 x double> @test_mm_fnmadd_pd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: 
test_mm_fnmadd_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorpd {{.*}}(%rip), %xmm0, %xmm0 +; CHECK-NEXT: vfmadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <2 x double> %a + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %neg.i, <2 x double> %b, <2 x double> %c) #2 + ret <2 x double> %0 +} + define <4 x float> @test_mm_fnmadd_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fnmadd_ss: ; CHECK: # %bb.0: # %entry @@ -142,6 +220,21 @@ entry: ret <4 x float> %4 } +define <4 x float> @test_mm_fnmadd_ss_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fnmadd_ss_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfnmadd213ss {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <4 x float> %a, i64 0 + %.rhs.i = extractelement <4 x float> %b, i64 0 + %1 = fneg float %.rhs.i + %2 = extractelement <4 x float> %c, i64 0 + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2 + %4 = insertelement <4 x float> %a, float %3, i64 0 + ret <4 x float> %4 +} + define <2 x double> @test_mm_fnmadd_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fnmadd_sd: ; CHECK: # %bb.0: # %entry @@ -157,6 +250,21 @@ entry: ret <2 x double> %4 } +define <2 x double> @test_mm_fnmadd_sd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fnmadd_sd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfnmadd213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) + xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <2 x double> %a, i64 0 + %.rhs.i = extractelement <2 x double> %b, i64 0 + %1 = fneg double %.rhs.i + %2 = extractelement <2 x double> %c, i64 0 + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2 + %4 = insertelement <2 x double> %a, double %3, i64 0 + ret <2 x double> %4 +} + define <4 x float> @test_mm_fnmsub_ps(<4 x float> %a, <4 x 
float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fnmsub_ps: ; CHECK: # %bb.0: # %entry @@ -172,6 +280,21 @@ entry: ret <4 x float> %0 } +define <4 x float> @test_mm_fnmsub_ps_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fnmsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmovaps {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; CHECK-NEXT: vxorps %xmm3, %xmm0, %xmm4 +; CHECK-NEXT: vxorps %xmm3, %xmm2, %xmm0 +; CHECK-NEXT: vfmadd231ps {{.*#+}} xmm0 = (xmm1 * xmm4) + xmm0 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x float> %a + %neg1.i = fneg <4 x float> %c + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %neg.i, <4 x float> %b, <4 x float> %neg1.i) #2 + ret <4 x float> %0 +} + define <2 x double> @test_mm_fnmsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fnmsub_pd: ; CHECK: # %bb.0: # %entry @@ -187,6 +310,21 @@ entry: ret <2 x double> %0 } +define <2 x double> @test_mm_fnmsub_pd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fnmsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmovapd {{.*#+}} xmm3 = [-0.0E+0,-0.0E+0] +; CHECK-NEXT: vxorpd %xmm3, %xmm0, %xmm4 +; CHECK-NEXT: vxorpd %xmm3, %xmm2, %xmm0 +; CHECK-NEXT: vfmadd231pd {{.*#+}} xmm0 = (xmm1 * xmm4) + xmm0 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <2 x double> %a + %neg1.i = fneg <2 x double> %c + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %neg.i, <2 x double> %b, <2 x double> %neg1.i) #2 + ret <2 x double> %0 +} + define <4 x float> @test_mm_fnmsub_ss(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fnmsub_ss: ; CHECK: # %bb.0: # %entry @@ -203,6 +341,22 @@ entry: ret <4 x float> %4 } +define <4 x float> @test_mm_fnmsub_ss_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fnmsub_ss_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfnmsub213ss {{.*#+}} xmm0 = -(xmm1 * 
xmm0) - xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <4 x float> %a, i64 0 + %.rhs.i = extractelement <4 x float> %b, i64 0 + %1 = fneg float %.rhs.i + %.rhs2.i = extractelement <4 x float> %c, i64 0 + %2 = fneg float %.rhs2.i + %3 = tail call float @llvm.fma.f32(float %0, float %1, float %2) #2 + %4 = insertelement <4 x float> %a, float %3, i64 0 + ret <4 x float> %4 +} + define <2 x double> @test_mm_fnmsub_sd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fnmsub_sd: ; CHECK: # %bb.0: # %entry @@ -219,6 +373,22 @@ entry: ret <2 x double> %4 } +define <2 x double> @test_mm_fnmsub_sd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fnmsub_sd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfnmsub213sd {{.*#+}} xmm0 = -(xmm1 * xmm0) - xmm2 +; CHECK-NEXT: retq +entry: + %0 = extractelement <2 x double> %a, i64 0 + %.rhs.i = extractelement <2 x double> %b, i64 0 + %1 = fneg double %.rhs.i + %.rhs2.i = extractelement <2 x double> %c, i64 0 + %2 = fneg double %.rhs2.i + %3 = tail call double @llvm.fma.f64(double %0, double %1, double %2) #2 + %4 = insertelement <2 x double> %a, double %3, i64 0 + ret <2 x double> %4 +} + define <4 x float> @test_mm_fmaddsub_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fmaddsub_ps: ; CHECK: # %bb.0: # %entry @@ -232,6 +402,19 @@ entry: ret <4 x float> %3 } +define <4 x float> @test_mm_fmaddsub_ps_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fmaddsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmaddsub213ps {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; CHECK-NEXT: retq +entry: + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2 + %1 = fneg <4 x float> %c + %2 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %1) #2 + %3 = shufflevector <4 x float> %2, <4 x float> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7> + ret <4 x float> %3
+} + define <2 x double> @test_mm_fmaddsub_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fmaddsub_pd: ; CHECK: # %bb.0: # %entry @@ -245,6 +428,19 @@ entry: ret <2 x double> %3 } +define <2 x double> @test_mm_fmaddsub_pd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fmaddsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmaddsub213pd {{.*#+}} xmm0 = (xmm1 * xmm0) +/- xmm2 +; CHECK-NEXT: retq +entry: + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2 + %1 = fneg <2 x double> %c + %2 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %1) #2 + %3 = shufflevector <2 x double> %2, <2 x double> %0, <2 x i32> <i32 0, i32 3> + ret <2 x double> %3 +} + define <4 x float> @test_mm_fmsubadd_ps(<4 x float> %a, <4 x float> %b, <4 x float> %c) { ; CHECK-LABEL: test_mm_fmsubadd_ps: ; CHECK: # %bb.0: # %entry @@ -258,6 +454,19 @@ entry: ret <4 x float> %2 } +define <4 x float> @test_mm_fmsubadd_ps_unary_fneg(<4 x float> %a, <4 x float> %b, <4 x float> %c) { +; CHECK-LABEL: test_mm_fmsubadd_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmsubadd213ps {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x float> %c + %0 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %neg.i) #2 + %1 = tail call <4 x float> @llvm.fma.v4f32(<4 x float> %a, <4 x float> %b, <4 x float> %c) #2 + %2 = shufflevector <4 x float> %1, <4 x float> %0, <4 x i32> <i32 4, i32 1, i32 6, i32 3> + ret <4 x float> %2 +} + define <2 x double> @test_mm_fmsubadd_pd(<2 x double> %a, <2 x double> %b, <2 x double> %c) { ; CHECK-LABEL: test_mm_fmsubadd_pd: ; CHECK: # %bb.0: # %entry @@ -271,6 +480,19 @@ entry: ret <2 x double> %2 } +define <2 x double> @test_mm_fmsubadd_pd_unary_fneg(<2 x double> %a, <2 x double> %b, <2 x double> %c) { +; CHECK-LABEL: test_mm_fmsubadd_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +;
CHECK-NEXT: vfmsubadd213pd {{.*#+}} xmm0 = (xmm1 * xmm0) -/+ xmm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <2 x double> %c + %0 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %neg.i) #2 + %1 = tail call <2 x double> @llvm.fma.v2f64(<2 x double> %a, <2 x double> %b, <2 x double> %c) #2 + %2 = shufflevector <2 x double> %1, <2 x double> %0, <2 x i32> <i32 2, i32 1> + ret <2 x double> %2 +} + define <8 x float> @test_mm256_fmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) { ; CHECK-LABEL: test_mm256_fmadd_ps: ; CHECK: # %bb.0: # %entry @@ -303,6 +525,18 @@ entry: ret <8 x float> %0 } +define <8 x float> @test_mm256_fmsub_ps_unary_fneg(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; CHECK-LABEL: test_mm256_fmsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorps {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <8 x float> %c + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %neg.i) #2 + ret <8 x float> %0 +} + define <4 x double> @test_mm256_fmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { ; CHECK-LABEL: test_mm256_fmsub_pd: ; CHECK: # %bb.0: # %entry @@ -315,6 +549,18 @@ entry: ret <4 x double> %0 } +define <4 x double> @test_mm256_fmsub_pd_unary_fneg(<4 x double> %a, <4 x double> %b, <4 x double> %c) { +; CHECK-LABEL: test_mm256_fmsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorpd {{.*}}(%rip), %ymm2, %ymm2 +; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x double> %c + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %neg.i) #2 + ret <4 x double> %0 +} + define <8 x float> @test_mm256_fnmadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) { ; CHECK-LABEL: test_mm256_fnmadd_ps: ; CHECK: # %bb.0: # %entry @@ -327,6 +573,18 @@ entry: ret <8 x float> %0 }
+define <8 x float> @test_mm256_fnmadd_ps_unary_fneg(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; CHECK-LABEL: test_mm256_fnmadd_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorps {{.*}}(%rip), %ymm0, %ymm0 +; CHECK-NEXT: vfmadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <8 x float> %a + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %neg.i, <8 x float> %b, <8 x float> %c) #2 + ret <8 x float> %0 +} + define <4 x double> @test_mm256_fnmadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { ; CHECK-LABEL: test_mm256_fnmadd_pd: ; CHECK: # %bb.0: # %entry @@ -339,6 +597,18 @@ entry: ret <4 x double> %0 } +define <4 x double> @test_mm256_fnmadd_pd_unary_fneg(<4 x double> %a, <4 x double> %b, <4 x double> %c) { +; CHECK-LABEL: test_mm256_fnmadd_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vxorpd {{.*}}(%rip), %ymm0, %ymm0 +; CHECK-NEXT: vfmadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) + ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x double> %a + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %neg.i, <4 x double> %b, <4 x double> %c) #2 + ret <4 x double> %0 +} + define <8 x float> @test_mm256_fnmsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) { ; CHECK-LABEL: test_mm256_fnmsub_ps: ; CHECK: # %bb.0: # %entry @@ -354,6 +624,21 @@ entry: ret <8 x float> %0 } +define <8 x float> @test_mm256_fnmsub_ps_unary_fneg(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; CHECK-LABEL: test_mm256_fnmsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmovaps {{.*#+}} ymm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; CHECK-NEXT: vxorps %ymm3, %ymm0, %ymm4 +; CHECK-NEXT: vxorps %ymm3, %ymm2, %ymm0 +; CHECK-NEXT: vfmadd231ps {{.*#+}} ymm0 = (ymm1 * ymm4) + ymm0 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <8 x float> %a + %neg1.i = fneg <8 x float> %c + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %neg.i, <8 x float> %b, <8 x float> 
%neg1.i) #2 + ret <8 x float> %0 +} + define <4 x double> @test_mm256_fnmsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { ; CHECK-LABEL: test_mm256_fnmsub_pd: ; CHECK: # %bb.0: # %entry @@ -369,6 +654,21 @@ entry: ret <4 x double> %0 } +define <4 x double> @test_mm256_fnmsub_pd_unary_fneg(<4 x double> %a, <4 x double> %b, <4 x double> %c) { +; CHECK-LABEL: test_mm256_fnmsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmovapd {{.*#+}} ymm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0] +; CHECK-NEXT: vxorpd %ymm3, %ymm0, %ymm4 +; CHECK-NEXT: vxorpd %ymm3, %ymm2, %ymm0 +; CHECK-NEXT: vfmadd231pd {{.*#+}} ymm0 = (ymm1 * ymm4) + ymm0 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x double> %a + %neg1.i = fneg <4 x double> %c + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %neg.i, <4 x double> %b, <4 x double> %neg1.i) #2 + ret <4 x double> %0 +} + define <8 x float> @test_mm256_fmaddsub_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) { ; CHECK-LABEL: test_mm256_fmaddsub_ps: ; CHECK: # %bb.0: # %entry @@ -382,6 +682,19 @@ entry: ret <8 x float> %3 } +define <8 x float> @test_mm256_fmaddsub_ps_unary_fneg(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; CHECK-LABEL: test_mm256_fmaddsub_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmaddsub213ps {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; CHECK-NEXT: retq +entry: + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2 + %1 = fneg <8 x float> %c + %2 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %1) #2 + %3 = shufflevector <8 x float> %2, <8 x float> %0, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15> + ret <8 x float> %3 +} + define <4 x double> @test_mm256_fmaddsub_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { ; CHECK-LABEL: test_mm256_fmaddsub_pd: ; CHECK: # %bb.0: # %entry @@ -395,6 +708,19 @@ entry: ret <4 x double> %3 } +define <4 x double> @test_mm256_fmaddsub_pd_unary_fneg(<4 x double> %a, <4 x double> %b, <4 x double> %c) {
+; CHECK-LABEL: test_mm256_fmaddsub_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmaddsub213pd {{.*#+}} ymm0 = (ymm1 * ymm0) +/- ymm2 +; CHECK-NEXT: retq +entry: + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c) #2 + %1 = fneg <4 x double> %c + %2 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %1) #2 + %3 = shufflevector <4 x double> %2, <4 x double> %0, <4 x i32> <i32 0, i32 5, i32 2, i32 7> + ret <4 x double> %3 +} + define <8 x float> @test_mm256_fmsubadd_ps(<8 x float> %a, <8 x float> %b, <8 x float> %c) { ; CHECK-LABEL: test_mm256_fmsubadd_ps: ; CHECK: # %bb.0: # %entry @@ -408,6 +734,19 @@ entry: ret <8 x float> %2 } +define <8 x float> @test_mm256_fmsubadd_ps_unary_fneg(<8 x float> %a, <8 x float> %b, <8 x float> %c) { +; CHECK-LABEL: test_mm256_fmsubadd_ps_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmsubadd213ps {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <8 x float> %c + %0 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %neg.i) #2 + %1 = tail call <8 x float> @llvm.fma.v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c) #2 + %2 = shufflevector <8 x float> %1, <8 x float> %0, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7> + ret <8 x float> %2 +} + define <4 x double> @test_mm256_fmsubadd_pd(<4 x double> %a, <4 x double> %b, <4 x double> %c) { ; CHECK-LABEL: test_mm256_fmsubadd_pd: ; CHECK: # %bb.0: # %entry @@ -421,6 +760,19 @@ entry: ret <4 x double> %2 } +define <4 x double> @test_mm256_fmsubadd_pd_unary_fneg(<4 x double> %a, <4 x double> %b, <4 x double> %c) { +; CHECK-LABEL: test_mm256_fmsubadd_pd_unary_fneg: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vfmsubadd213pd {{.*#+}} ymm0 = (ymm1 * ymm0) -/+ ymm2 +; CHECK-NEXT: retq +entry: + %neg.i = fneg <4 x double> %c + %0 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %neg.i) #2 + %1 = tail call <4 x double> @llvm.fma.v4f64(<4 x double> %a,
<4 x double> %b, <4 x double> %c) #2 + %2 = shufflevector <4 x double> %1, <4 x double> %0, <4 x i32> <i32 4, i32 1, i32 6, i32 3> + ret <4 x double> %2 +} + declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>) #1 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>) #1 declare float @llvm.fma.f32(float, float, float) #1 diff --git a/test/CodeGen/X86/neg_fp.ll b/test/CodeGen/X86/neg_fp.ll index 9cfe686b277..63e71128a77 100644 --- a/test/CodeGen/X86/neg_fp.ll +++ b/test/CodeGen/X86/neg_fp.ll @@ -1,5 +1,5 @@ ; RUN: llc < %s -mtriple=i686-- -mattr=+sse4.1 -o %t -; RUN: grep xorps %t | count 1 +; RUN: grep xorps %t | count 2 ; Test that when we don't -enable-unsafe-fp-math, we don't do the optimization ; -0 - (A - B) to (B - A) because A==B, -0 != 0 @@ -10,3 +10,10 @@ entry: %neg = fsub float -0.000000e+00, %sub ; [#uses=1] ret float %neg } + +define float @unary_negfp(float %a, float %b) { +entry: + %sub = fsub float %a, %b ; [#uses=1] + %neg = fneg float %sub ; [#uses=1] + ret float %neg +} diff --git a/test/CodeGen/X86/vec_fneg.ll b/test/CodeGen/X86/vec_fneg.ll index 4d5539feef3..b028907281c 100644 --- a/test/CodeGen/X86/vec_fneg.ll +++ b/test/CodeGen/X86/vec_fneg.ll @@ -21,6 +21,20 @@ define <4 x float> @t1(<4 x float> %Q) nounwind { ret <4 x float> %tmp } +define <4 x float> @t1_unary(<4 x float> %Q) nounwind { +; X32-SSE-LABEL: t1_unary: +; X32-SSE: # %bb.0: +; X32-SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; X32-SSE-NEXT: retl +; +; X64-SSE-LABEL: t1_unary: +; X64-SSE: # %bb.0: +; X64-SSE-NEXT: xorps {{.*}}(%rip), %xmm0 +; X64-SSE-NEXT: retq + %tmp = fneg <4 x float> %Q + ret <4 x float> %tmp +} + ; Possibly misplaced test, but since we're checking undef scenarios...
define float @scalar_fsub_neg0_undef(float %x) nounwind { diff --git a/test/CodeGen/X86/vec_logical.ll b/test/CodeGen/X86/vec_logical.ll index ec29d4886a2..1c3f93b0d80 100644 --- a/test/CodeGen/X86/vec_logical.ll +++ b/test/CodeGen/X86/vec_logical.ll @@ -19,6 +19,23 @@ define void @t(<4 x float> %A) { ret void } +define void @t_unary(<4 x float> %A) { +; SSE-LABEL: t_unary: +; SSE: # %bb.0: +; SSE-NEXT: xorps {{\.LCPI.*}}, %xmm0 +; SSE-NEXT: movaps %xmm0, 0 +; SSE-NEXT: retl +; +; AVX-LABEL: t_unary: +; AVX: # %bb.0: +; AVX-NEXT: vxorps {{\.LCPI.*}}, %xmm0, %xmm0 +; AVX-NEXT: vmovaps %xmm0, 0 +; AVX-NEXT: retl + %tmp1277 = fneg <4 x float> %A + store <4 x float> %tmp1277, <4 x float>* null + ret void +} + define <4 x float> @t1(<4 x float> %a, <4 x float> %b) { ; SSE-LABEL: t1: ; SSE: # %bb.0: # %entry diff --git a/test/CodeGen/XCore/fneg.ll b/test/CodeGen/XCore/fneg.ll index 20433da3bbe..d10c283cc4c 100644 --- a/test/CodeGen/XCore/fneg.ll +++ b/test/CodeGen/XCore/fneg.ll @@ -1,4 +1,5 @@ ; RUN: llc < %s -march=xcore | FileCheck %s + define i1 @test(double %F, double %G) nounwind { entry: ; CHECK-LABEL: test: @@ -7,3 +8,12 @@ entry: %1 = fcmp olt double %G, %0 ret i1 %1 } + +define i1 @test_unary(double %F, double %G) nounwind { +entry: +; CHECK-LABEL: test_unary: +; CHECK: xor + %0 = fneg double %F + %1 = fcmp olt double %G, %0 + ret i1 %1 +}