From c05f998efeab4494559598347b4c5983e1401475 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Fri, 19 Aug 2016 17:02:00 +0000
Subject: [PATCH] [X86][SSE] Generalised combining to VZEXT_MOVL to any vector size

This doesn't change tests codegen as we already combined to blend+zero
which is what we lower VZEXT_MOVL to on SSE41+ targets, but it does put
us in a better position when we improve shuffling for optsize.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@279273 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86ISelLowering.cpp          | 14 ++---
 .../X86/vector-shuffle-combining-avx2.ll    | 24 +++++++++
 .../X86/vector-shuffle-combining-ssse3.ll   | 53 +++++++++++++++++++
 3 files changed, 85 insertions(+), 6 deletions(-)

diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp
index 7745e6c4ce9..84dade757fc 100644
--- a/lib/Target/X86/X86ISelLowering.cpp
+++ b/lib/Target/X86/X86ISelLowering.cpp
@@ -24906,14 +24906,17 @@ static SDValue combineShuffle256(SDNode *N, SelectionDAG &DAG,
 static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
                                     const X86Subtarget &Subtarget,
                                     unsigned &Shuffle, MVT &ShuffleVT) {
+  unsigned NumMaskElts = Mask.size();
   bool FloatDomain = MaskVT.isFloatingPoint() ||
                      (!Subtarget.hasAVX2() && MaskVT.is256BitVector());
 
-  // Match a 128-bit vector against a VZEXT_MOVL instruction.
-  if (MaskVT.is128BitVector() && Subtarget.hasSSE2() &&
-      isTargetShuffleEquivalent(Mask, {0, SM_SentinelZero})) {
+  // Match against a VZEXT_MOVL instruction, SSE1 only supports 32-bits (MOVSS).
+  if (((MaskVT.getScalarSizeInBits() == 32) ||
+       (MaskVT.getScalarSizeInBits() == 64 && Subtarget.hasSSE2())) &&
+      isUndefOrEqual(Mask[0], 0) &&
+      isUndefOrZeroInRange(Mask, 1, NumMaskElts - 1)) {
     Shuffle = X86ISD::VZEXT_MOVL;
-    ShuffleVT = MaskVT;
+    ShuffleVT = !Subtarget.hasSSE2() ? MVT::v4f32 : MaskVT;
     return true;
   }
 
@@ -24981,8 +24984,7 @@ static bool matchUnaryVectorShuffle(MVT MaskVT, ArrayRef<int> Mask,
   // Attempt to match against broadcast-from-vector.
   if (Subtarget.hasAVX2()) {
-    unsigned NumElts = Mask.size();
-    SmallVector<int, 64> BroadcastMask(NumElts, 0);
+    SmallVector<int, 64> BroadcastMask(NumMaskElts, 0);
     if (isTargetShuffleEquivalent(Mask, BroadcastMask)) {
       ShuffleVT = MaskVT;
       Shuffle = X86ISD::VBROADCAST;
       return true;
diff --git a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
index f2d74dcf2e9..9340b3f6324 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-avx2.ll
@@ -277,6 +277,30 @@ define <8 x float> @combine_permps_as_permpd(<8 x float> %a) {
   ret <8 x float> %1
 }
 
+define <4 x double> @combine_pshufb_as_vzmovl_64(<4 x double> %a0) {
+; CHECK-LABEL: combine_pshufb_as_vzmovl_64:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorpd %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vblendpd {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3]
+; CHECK-NEXT:    retq
+  %1 = bitcast <4 x double> %a0 to <32 x i8>
+  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> )
+  %3 = bitcast <32 x i8> %2 to <4 x double>
+  ret <4 x double> %3
+}
+
+define <8 x float> @combine_pshufb_as_vzmovl_32(<8 x float> %a0) {
+; CHECK-LABEL: combine_pshufb_as_vzmovl_32:
+; CHECK:       # BB#0:
+; CHECK-NEXT:    vxorps %ymm1, %ymm1, %ymm1
+; CHECK-NEXT:    vblendps {{.*#+}} ymm0 = ymm0[0],ymm1[1,2,3,4,5,6,7]
+; CHECK-NEXT:    retq
+  %1 = bitcast <8 x float> %a0 to <32 x i8>
+  %2 = call <32 x i8> @llvm.x86.avx2.pshuf.b(<32 x i8> %1, <32 x i8> )
+  %3 = bitcast <32 x i8> %2 to <8 x float>
+  ret <8 x float> %3
+}
+
 define <32 x i8> @combine_pshufb_as_pslldq(<32 x i8> %a0) {
 ; CHECK-LABEL: combine_pshufb_as_pslldq:
 ; CHECK:       # BB#0:
diff --git a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
index 3ef956f1c61..0d03ab0e559 100644
--- a/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
+++ b/test/CodeGen/X86/vector-shuffle-combining-ssse3.ll
@@ -88,6 +88,59 @@ define <4 x float> @combine_pshufb_as_movss(<4 x float> %a0, <4 x float> %a1) {
   ret <4 x float> %4
 }
 
+define <2 x double> @combine_pshufb_as_vzmovl_64(<2 x double> %a0) {
+; SSE-LABEL: combine_pshufb_as_vzmovl_64:
+; SSE:       # BB#0:
+; SSE-NEXT:    movq {{.*#+}} xmm0 = xmm0[0],zero
+; SSE-NEXT:    retq
+;
+; AVX-LABEL: combine_pshufb_as_vzmovl_64:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovq {{.*#+}} xmm0 = xmm0[0],zero
+; AVX-NEXT:    retq
+  %1 = bitcast <2 x double> %a0 to <16 x i8>
+  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> )
+  %3 = bitcast <16 x i8> %2 to <2 x double>
+  ret <2 x double> %3
+}
+
+define <4 x float> @combine_pshufb_as_vzmovl_32(<4 x float> %a0) {
+; SSSE3-LABEL: combine_pshufb_as_vzmovl_32:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    xorps %xmm1, %xmm1
+; SSSE3-NEXT:    movss {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
+; SSSE3-NEXT:    movaps %xmm1, %xmm0
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: combine_pshufb_as_vzmovl_32:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    xorps %xmm1, %xmm1
+; SSE41-NEXT:    blendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; SSE41-NEXT:    retq
+;
+; AVX1-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX1:       # BB#0:
+; AVX1-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX1-NEXT:    retq
+;
+; AVX2-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX2:       # BB#0:
+; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX2-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX2-NEXT:    retq
+;
+; AVX512F-LABEL: combine_pshufb_as_vzmovl_32:
+; AVX512F:       # BB#0:
+; AVX512F-NEXT:    vxorps %xmm1, %xmm1, %xmm1
+; AVX512F-NEXT:    vmovss {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
+; AVX512F-NEXT:    retq
+  %1 = bitcast <4 x float> %a0 to <16 x i8>
+  %2 = call <16 x i8> @llvm.x86.ssse3.pshuf.b.128(<16 x i8> %1, <16 x i8> )
+  %3 = bitcast <16 x i8> %2 to <4 x float>
+  ret <4 x float> %3
+}
+
 define <4 x float> @combine_pshufb_movddup(<4 x float> %a0) {
 ; SSE-LABEL: combine_pshufb_movddup:
 ; SSE:       # BB#0:
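
For illustration of what the generalised check accepts: any shuffle mask whose first element selects element 0 of the input (or is undef) and whose remaining elements are all undef or zero now matches, regardless of the mask width, where the old code only matched the two-element 128-bit form {0, SM_SentinelZero}. The sketch below is a minimal standalone C++ rendering of that predicate under assumed integer sentinels that mirror LLVM's SM_SentinelUndef/SM_SentinelZero; it is not the LLVM helpers (isUndefOrEqual, isUndefOrZeroInRange) themselves.

#include <cstddef>
#include <vector>

// Assumed sentinels, mirroring llvm::SM_SentinelUndef / llvm::SM_SentinelZero.
constexpr int SentinelUndef = -1;
constexpr int SentinelZero = -2;

// Returns true when Mask has the VZEXT_MOVL shape: element 0 keeps the low
// source element (or is undef) and every remaining element is undef or zero.
// Nothing here depends on Mask.size(), which is what the patch generalises.
bool isVZextMovlMask(const std::vector<int> &Mask) {
  if (Mask.empty())
    return false;
  if (Mask[0] != 0 && Mask[0] != SentinelUndef)
    return false;
  for (std::size_t I = 1, E = Mask.size(); I != E; ++I)
    if (Mask[I] != SentinelUndef && Mask[I] != SentinelZero)
      return false;
  return true;
}

With these sentinels, both isVZextMovlMask({0, SentinelZero}) and the wider isVZextMovlMask({0, SentinelZero, SentinelZero, SentinelZero}) return true, whereas the removed MaskVT.is128BitVector() guard restricted the match to 128-bit vectors.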