From 97eda18693aeecacd996441c61b3842a228c1f1c Mon Sep 17 00:00:00 2001
From: Chad Rosier
Date: Mon, 9 Dec 2013 22:47:34 +0000
Subject: [PATCH] [AArch64] Refactor NEON scalar reduce pairwise front-end
 codegen to remove unnecessary patterns in tablegen.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@196832 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/AArch64/AArch64InstrNEON.td      | 17 +------
 .../AArch64/neon-scalar-reduce-pairwise.ll  | 44 +++++--------
 2 files changed, 13 insertions(+), 48 deletions(-)

diff --git a/lib/Target/AArch64/AArch64InstrNEON.td b/lib/Target/AArch64/AArch64InstrNEON.td
index 99328c81a05..d9901942647 100644
--- a/lib/Target/AArch64/AArch64InstrNEON.td
+++ b/lib/Target/AArch64/AArch64InstrNEON.td
@@ -5333,27 +5333,12 @@ defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfminnm,
 defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vpfadd,
                                         FADDPvv_S_2S, FADDPvv_D_2D>;
 
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vaddv,
-                                        FADDPvv_S_2S, FADDPvv_D_2D>;
-
-def : Pat<(v1f32 (int_aarch64_neon_vaddv (v4f32 VPR128:$Rn))),
+def : Pat<(v1f32 (int_aarch64_neon_vpfadd (v4f32 VPR128:$Rn))),
           (FADDPvv_S_2S (v2f32
               (EXTRACT_SUBREG
                   (v4f32 (FADDP_4S (v4f32 VPR128:$Rn), (v4f32 VPR128:$Rn))),
                   sub_64)))>;
 
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxv,
-                                        FMAXPvv_S_2S, FMAXPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminv,
-                                        FMINPvv_S_2S, FMINPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vmaxnmv,
-                                        FMAXNMPvv_S_2S, FMAXNMPvv_D_2D>;
-
-defm : Neon_ScalarPair_SD_size_patterns<int_aarch64_neon_vminnmv,
-                                        FMINNMPvv_S_2S, FMINNMPvv_D_2D>;
-
 // Scalar by element Arithmetic
 
 class NeonI_ScalarXIndexedElemArith<string asmop, bits<4> opcode,
diff --git a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
index 401ceec255e..3da90365d6d 100644
--- a/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
+++ b/test/CodeGen/AArch64/neon-scalar-reduce-pairwise.ll
@@ -103,7 +103,7 @@ define <1 x double> @test_fminnmp_v1f64(<2 x double> %a) {
 define float @test_vaddv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vaddv_f32
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -112,7 +112,7 @@ define float @test_vaddvq_f32(<4 x float> %a) {
 ; CHECK-LABEL: test_vaddvq_f32
 ; CHECK: faddp {{v[0-9]+}}.4s, {{v[0-9]+}}.4s, {{v[0-9]+}}.4s
 ; CHECK: faddp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -120,7 +120,7 @@ define float @test_vaddvq_f32(<4 x float> %a) {
 define double @test_vaddvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vaddvq_f64
 ; CHECK: faddp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfadd.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -128,7 +128,7 @@ define double @test_vaddvq_f64(<2 x double> %a) {
 define float @test_vmaxv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxv_f32
 ; CHECK: fmaxp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmax.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -136,7 +136,7 @@ define float @test_vmaxv_f32(<2 x float> %a) {
 define double @test_vmaxvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxvq_f64
 ; CHECK: fmaxp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmax.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -144,7 +144,7 @@ define double @test_vmaxvq_f64(<2 x double> %a) {
 define float @test_vminv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminv_f32
 ; CHECK: fminp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpmin.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -152,7 +152,7 @@ define float @test_vminv_f32(<2 x float> %a) {
 define double @test_vminvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminvq_f64
 ; CHECK: fminp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpmin.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -160,7 +160,7 @@ define double @test_vminvq_f64(<2 x double> %a) {
 define double @test_vmaxnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vmaxnmvq_f64
 ; CHECK: fmaxnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfmaxnm.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -168,7 +168,7 @@ define double @test_vmaxnmvq_f64(<2 x double> %a) {
 define float @test_vmaxnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vmaxnmv_f32
 ; CHECK: fmaxnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfmaxnm.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -176,7 +176,7 @@ define float @test_vmaxnmv_f32(<2 x float> %a) {
 define double @test_vminnmvq_f64(<2 x double> %a) {
 ; CHECK-LABEL: test_vminnmvq_f64
 ; CHECK: fminnmp {{d[0-9]+}}, {{v[0-9]+}}.2d
-  %1 = tail call <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double> %a)
+  %1 = tail call <1 x double> @llvm.aarch64.neon.vpfminnm.v1f64.v2f64(<2 x double> %a)
   %2 = extractelement <1 x double> %1, i32 0
   ret double %2
 }
@@ -184,7 +184,7 @@ define double @test_vminnmvq_f64(<2 x double> %a) {
 define float @test_vminnmv_f32(<2 x float> %a) {
 ; CHECK-LABEL: test_vminnmv_f32
 ; CHECK: fminnmp {{s[0-9]+}}, {{v[0-9]+}}.2s
-  %1 = tail call <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float> %a)
+  %1 = tail call <1 x float> @llvm.aarch64.neon.vpfminnm.v1f32.v2f32(<2 x float> %a)
   %2 = extractelement <1 x float> %1, i32 0
   ret float %2
 }
@@ -223,24 +223,4 @@ declare <1 x i64> @llvm.aarch64.neon.vaddv.v1i64.v2i64(<2 x i64>)
 
 declare <2 x i64> @llvm.arm.neon.vpadd.v2i64(<2 x i64>, <2 x i64>)
 
-declare <1 x float> @llvm.aarch64.neon.vminnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vminnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxnmv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxnmv.v1f64.v2f64(<2 x double>)
-
-declare <1 x double> @llvm.aarch64.neon.vminv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vminv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vmaxv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vmaxv.v1f32.v2f32(<2 x float>)
-
-declare <1 x double> @llvm.aarch64.neon.vaddv.v1f64.v2f64(<2 x double>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v4f32(<4 x float>)
-
-declare <1 x float> @llvm.aarch64.neon.vaddv.v1f32.v2f32(<2 x float>)
\ No newline at end of file
+declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)
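
For illustration, here is a minimal standalone .ll sketch (not part of the patch; the function name is made up) of the one case the rewritten Pat still handles specially: @llvm.aarch64.neon.vpfadd on <4 x float> first reduces the full 128-bit register with a vector faddp, then folds the low half with a scalar faddp, exactly as the two CHECK lines in test_vaddvq_f32 above expect.

; Hypothetical reproducer: vpfadd on <4 x float> should lower to
;   faddp v0.4s, v0.4s, v0.4s   (FADDP_4S: pairwise add within the 4s vector)
;   faddp s0, v0.2s             (FADDPvv_S_2S: fold the low two lanes to a scalar)
declare <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float>)

define float @reduce_fadd_v4f32(<4 x float> %v) {
  %sum = tail call <1 x float> @llvm.aarch64.neon.vpfadd.v1f32.v4f32(<4 x float> %v)
  %res = extractelement <1 x float> %sum, i32 0
  ret float %res
}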