From a23236c360c06019721bf0ccb7a039d14d0d6114 Mon Sep 17 00:00:00 2001
From: Bruno Cardoso Lopes
Date: Thu, 28 Jul 2011 01:26:43 +0000
Subject: [PATCH] Add a few patterns to match allzeros without having to use
 the fp unit.

Take advantage of the fact that the 128-bit vpxor zeroes the upper part
of the destination, and use it. This also fixes PR10491.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@136321 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86InstrSSE.td | 10 ++++++++++
 test/CodeGen/X86/avx-cast.ll  |  2 +-
 2 files changed, 11 insertions(+), 1 deletion(-)

diff --git a/lib/Target/X86/X86InstrSSE.td b/lib/Target/X86/X86InstrSSE.td
index e15d7bd76b8..cb4dbcc9691 100644
--- a/lib/Target/X86/X86InstrSSE.td
+++ b/lib/Target/X86/X86InstrSSE.td
@@ -2098,6 +2098,16 @@ def : Pat<(v16i8 immAllZerosV), (V_SET0PI)>;
 def : Pat<(f32 (vector_extract (v4f32 VR128:$src), (iPTR 0))),
           (f32 (EXTRACT_SUBREG (v4f32 VR128:$src), sub_ss))>;
 
+// AVX has no support for 256-bit integer instructions, but since the 128-bit
+// VPXOR instruction writes zero to its upper part, it's safe to build zeros.
+def : Pat<(v8i32 immAllZerosV), (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
+def : Pat<(bc_v8i32 (v8f32 immAllZerosV)),
+          (SUBREG_TO_REG (i32 0), (AVX_SET0PI), sub_xmm)>;
+
+def : Pat<(v4i64 immAllZerosV), (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
+def : Pat<(bc_v4i64 (v8f32 immAllZerosV)),
+          (SUBREG_TO_REG (i64 0), (AVX_SET0PI), sub_xmm)>;
+
 //===----------------------------------------------------------------------===//
 // SSE 1 & 2 - Load/Store XCSR register
 //===----------------------------------------------------------------------===//
diff --git a/test/CodeGen/X86/avx-cast.ll b/test/CodeGen/X86/avx-cast.ll
index e24cfeb5085..7e368b3d9ee 100644
--- a/test/CodeGen/X86/avx-cast.ll
+++ b/test/CodeGen/X86/avx-cast.ll
@@ -16,7 +16,7 @@ entry:
   ret <4 x double> %shuffle.i
 }
 
-; CHECK: vxorps
+; CHECK: vpxor
 ; CHECK-NEXT: vinsertf128 $0
 define <4 x i64> @castC(<2 x i64> %m) nounwind uwtable readnone ssp {
 entry:
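
For illustration (not part of the commit): a minimal IR sketch that should
exercise the new v4i64 pattern directly, modeled on the castC test above.
The function name @zero256 is hypothetical, and the exact run line is an
assumption; any AVX-enabled llc invocation (e.g. llc -mattr=+avx) should do.

; Hypothetical example, not from this patch: materializing an all-zero
; 256-bit integer vector. With the patterns above, isel can select a single
; 128-bit vpxor (AVX_SET0PI wrapped in SUBREG_TO_REG); the vpxor write
; implicitly zeroes bits 255:128 of the destination ymm register, so no
; vxorps on the fp unit is needed.
define <4 x i64> @zero256() nounwind readnone {
entry:
  ret <4 x i64> zeroinitializer
}

The bc_v8i32/bc_v4i64 variants cover the case where the zero is first built
as a v8f32 and then bitcast to an integer type, so the same vpxor-based
zero is reused there as well.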