[DAGCombiner] Add splatted vector support to (udiv x, (shl pow2, y)) -> x >>u (log2(pow2)+y)

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@284491 91177308-0d34-0410-b5e6-96231b3b80d8
Simon Pilgrim 2016-10-18 16:36:00 +00:00
parent da27c36c5d
commit 08bb504cb9
2 changed files with 22 additions and 49 deletions
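For context, the fold rests on the unsigned identity x udiv (2^k << y) == x >>u (k + y), valid whenever k + y stays below the bit width. The C++ sketch below is an illustration only, not code from this patch; it checks the identity exhaustively for small values with k = 2, matching the splat-of-4 divisor in the test updated further down.

#include <cassert>
#include <cstdint>

// Illustration only (not from this patch): dividing an unsigned value by a
// power of two 2^k shifted left by y is the same as a logical right shift by
// (k + y), provided k + y stays below the bit width.
int main() {
  const uint32_t k = 2; // divisor splat is 4 == 1u << k, as in the test below
  for (uint32_t x = 0; x < 1024; ++x)
    for (uint32_t y = 0; y + k < 32; ++y)
      assert(x / ((1u << k) << y) == x >> (k + y));
  return 0;
}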

lib/CodeGen/SelectionDAG/DAGCombiner.cpp

@@ -2387,6 +2387,7 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
if (SDValue Folded = DAG.FoldConstantArithmetic(ISD::UDIV, DL, VT,
N0C, N1C))
return Folded;
// fold (udiv x, (1 << c)) -> x >>u c
if (N1C && !N1C->isOpaque() && N1C->getAPIntValue().isPowerOf2())
return DAG.getNode(ISD::SRL, DL, VT, N0,
@@ -2395,8 +2396,8 @@ SDValue DAGCombiner::visitUDIV(SDNode *N) {
// fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
if (N1.getOpcode() == ISD::SHL) {
- if (ConstantSDNode *SHC = getAsNonOpaqueConstant(N1.getOperand(0))) {
- if (SHC->getAPIntValue().isPowerOf2()) {
+ if (ConstantSDNode *SHC = isConstOrConstSplat(N1.getOperand(0))) {
+ if (!SHC->isOpaque() && SHC->getAPIntValue().isPowerOf2()) {
EVT ADDVT = N1.getOperand(1).getValueType();
SDValue Add = DAG.getNode(ISD::ADD, DL, ADDVT,
N1.getOperand(1),
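The key change above is swapping getAsNonOpaqueConstant for isConstOrConstSplat, so a splatted power-of-two vector constant such as <4 x i32> <4, 4, 4, 4> now matches the fold: the vector udiv becomes an add of the splatted log2 plus a vector logical shift right. The lane-wise C++ sketch below (illustration only, with arbitrarily chosen lane values) spells out what that rewrite means for the <4 x i32> test updated next.

#include <cassert>
#include <cstdint>

// Illustration only: lane-wise view of the combine for a splat-of-4 divisor
// shifted left by a per-lane amount y[i].  Each lane x[i] / (4u << y[i])
// equals x[i] >> (2 + y[i]), so the vector udiv can be replaced by adding the
// splatted log2(4) == 2 to the shift amounts and doing a variable right shift.
int main() {
  const uint32_t x[4] = {100, 2048, 7, 0xFFFFFFFFu}; // arbitrary sample lanes
  const uint32_t y[4] = {0, 1, 3, 5};                // arbitrary shift amounts
  for (int i = 0; i != 4; ++i)
    assert(x[i] / (4u << y[i]) == x[i] >> (2 + y[i]));
  return 0;
}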

test/CodeGen/X86/combine-udiv.ll

@@ -77,59 +77,31 @@ define <4 x i32> @combine_vec_udiv_by_pow2b(<4 x i32> %x) {
define <4 x i32> @combine_vec_udiv_by_shl_pow2a(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_udiv_by_shl_pow2a:
; SSE: # BB#0:
; SSE-NEXT: pslld $23, %xmm1
; SSE-NEXT: paddd {{.*}}(%rip), %xmm1
; SSE-NEXT: cvttps2dq %xmm1, %xmm2
; SSE-NEXT: pslld $2, %xmm2
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: pextrd $1, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: movd %xmm0, %eax
; SSE-NEXT: movd %xmm2, %esi
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %esi
; SSE-NEXT: movd %eax, %xmm1
; SSE-NEXT: pinsrd $1, %ecx, %xmm1
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: pextrd $2, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $2, %eax, %xmm1
; SSE-NEXT: pextrd $3, %xmm0, %eax
; SSE-NEXT: pextrd $3, %xmm2, %ecx
; SSE-NEXT: xorl %edx, %edx
; SSE-NEXT: divl %ecx
; SSE-NEXT: pinsrd $3, %eax, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld %xmm1, %xmm2
; SSE-NEXT: psrld %xmm3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_udiv_by_shl_pow2a:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm2
; AVX-NEXT: vpsllvd %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpextrd $1, %xmm1, %ecx
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: xorl %edx, %edx
; AVX-NEXT: divl %ecx
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: vmovd %xmm1, %esi
; AVX-NEXT: vmovd %xmm0, %eax
; AVX-NEXT: xorl %edx, %edx
; AVX-NEXT: divl %esi
; AVX-NEXT: vmovd %eax, %xmm2
; AVX-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
; AVX-NEXT: vpextrd $2, %xmm1, %ecx
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: xorl %edx, %edx
; AVX-NEXT: divl %ecx
; AVX-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
; AVX-NEXT: vpextrd $3, %xmm1, %ecx
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: xorl %edx, %edx
; AVX-NEXT: divl %ecx
; AVX-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
; AVX-NEXT: vpaddd %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
%1 = shl <4 x i32> <i32 4, i32 4, i32 4, i32 4>, %y
%2 = udiv <4 x i32> %x, %1