From 6e9255f2d059c0fe09358627b68b13fa05706ab6 Mon Sep 17 00:00:00 2001
From: Simon Pilgrim
Date: Tue, 6 Dec 2016 19:09:37 +0000
Subject: [PATCH] [DAGCombine] Add (sext_in_reg (zext x)) -> (sext x) combine

Handle the case where a sign extension has ended up being split into
separate stages (typically to get around vector legal ops) and a
zext + sext_in_reg gets inserted.

Differential Revision: https://reviews.llvm.org/D27461

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@288842 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/CodeGen/SelectionDAG/DAGCombiner.cpp |  9 +++++++++
 test/CodeGen/X86/combine-sext-in-reg.ll  | 12 ------------
 2 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index 590b10b5ce2..0fa0b919355 100644
--- a/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -7137,6 +7137,15 @@ SDValue DAGCombiner::visitSIGN_EXTEND_INREG(SDNode *N) {
       return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
   }
 
+  // fold (sext_in_reg (zext x)) -> (sext x)
+  // iff we are extending the source sign bit.
+  if (N0.getOpcode() == ISD::ZERO_EXTEND) {
+    SDValue N00 = N0.getOperand(0);
+    if (N00.getScalarValueSizeInBits() == EVTBits &&
+        (!LegalOperations || TLI.isOperationLegal(ISD::SIGN_EXTEND, VT)))
+      return DAG.getNode(ISD::SIGN_EXTEND, SDLoc(N), VT, N00, N1);
+  }
+
   // fold (sext_in_reg x) -> (zext_in_reg x) if the sign bit is known zero.
   if (DAG.MaskedValueIsZero(N0, APInt::getBitsSet(VTBits, EVTBits-1, EVTBits)))
     return DAG.getZeroExtendInReg(N0, SDLoc(N), EVT.getScalarType());
diff --git a/test/CodeGen/X86/combine-sext-in-reg.ll b/test/CodeGen/X86/combine-sext-in-reg.ll
index 9407d1bb4e7..3e60f3bf95e 100644
--- a/test/CodeGen/X86/combine-sext-in-reg.ll
+++ b/test/CodeGen/X86/combine-sext-in-reg.ll
@@ -30,24 +30,12 @@ define <4 x i64> @sextinreg_zext_sext_v16i8_4i64(<16 x i8> %a0) {
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm2
 ; SSE-NEXT:    psrld $16, %xmm0
 ; SSE-NEXT:    pmovsxbq %xmm0, %xmm1
-; SSE-NEXT:    psllq $32, %xmm2
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[1,1,3,3]
-; SSE-NEXT:    psrad $31, %xmm2
-; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
-; SSE-NEXT:    psllq $32, %xmm1
-; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
-; SSE-NEXT:    psrad $31, %xmm1
-; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
 ; SSE-NEXT:    movdqa %xmm2, %xmm0
 ; SSE-NEXT:    retq
 ;
 ; AVX-LABEL: sextinreg_zext_sext_v16i8_4i64:
 ; AVX:       # BB#0:
 ; AVX-NEXT:    vpmovsxbq %xmm0, %ymm0
-; AVX-NEXT:    vpsllq $32, %ymm0, %ymm0
-; AVX-NEXT:    vpsrad $31, %ymm0, %ymm1
-; AVX-NEXT:    vpshufd {{.*#+}} ymm0 = ymm0[1,1,3,3,5,5,7,7]
-; AVX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0],ymm1[1],ymm0[2],ymm1[3],ymm0[4],ymm1[5],ymm0[6],ymm1[7]
 ; AVX-NEXT:    retq
   %1 = shufflevector <16 x i8> %a0, <16 x i8> undef, <4 x i32>
   %2 = sext <4 x i8> %1 to <4 x i32>
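
Note (illustrative, not part of the patch): the kind of IR that gives rise to the
(sext_in_reg (zext x)) pattern this fold targets. The function name, types, and
shift widths below are hypothetical; any case where the zext source scalar width
equals the sext_in_reg width qualifies.

    define <4 x i64> @split_sext_v4i32_v4i64(<4 x i32> %a0) {
      ; sign extension written as separate stages:
      ; widen with zext, then restore the sign with shl/ashr by 32
      %1 = zext <4 x i32> %a0 to <4 x i64>
      %2 = shl <4 x i64> %1, <i64 32, i64 32, i64 32, i64 32>
      %3 = ashr <4 x i64> %2, <i64 32, i64 32, i64 32, i64 32>
      ret <4 x i64> %3
    }

During DAG combining the shl/ashr-by-32 pair becomes a SIGN_EXTEND_INREG of the
ZERO_EXTEND; since the inner source scalar width (32 bits) matches the
sext_in_reg width, the new fold rewrites the whole thing to a single
SIGN_EXTEND, avoiding the shift/shuffle/blend sequences removed from the test
checks above.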