[SelectionDAG] Add support for vector demandedelts in UDIV opcodes

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@286576 91177308-0d34-0410-b5e6-96231b3b80d8
Commit: 5809b919f1 (parent a597cc62da)
Author: Simon Pilgrim
Date:   2016-11-11 10:47:24 +00:00
2 changed files with 6 additions and 52 deletions


@@ -2192,10 +2192,12 @@ void SelectionDAG::computeKnownBits(SDValue Op, APInt &KnownZero,
     // For the purposes of computing leading zeros we can conservatively
     // treat a udiv as a logical right shift by the power of 2 known to
     // be less than the denominator.
-    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, Depth+1);
+    computeKnownBits(Op.getOperand(0), KnownZero2, KnownOne2, DemandedElts,
+                     Depth + 1);
     unsigned LeadZ = KnownZero2.countLeadingOnes();

-    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, Depth+1);
+    computeKnownBits(Op.getOperand(1), KnownZero2, KnownOne2, DemandedElts,
+                     Depth + 1);
     unsigned RHSUnknownLeadingOnes = KnownOne2.countLeadingZeros();
     if (RHSUnknownLeadingOnes != BitWidth)
       LeadZ = std::min(BitWidth,
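
A minimal standalone sketch of the leading-zero bound this hunk computes, using
plain 32-bit scalars instead of APInt (an illustration, not LLVM code; the
helper names are made up):

#include <algorithm>
#include <cstdint>

// Count leading zero bits, defining clz32(0) as 32 to mirror APInt semantics.
static unsigned clz32(uint32_t V) { return V ? __builtin_clz(V) : 32; }

// NumKnownZero: bits known to be zero in the numerator.
// DenomKnownOne: bits known to be one in the denominator.
// If the denominator has a known one bit at position P it is >= 2^P, so the
// quotient gains at least P extra leading zeros on top of the numerator's.
static unsigned udivLeadingZeros(uint32_t NumKnownZero, uint32_t DenomKnownOne) {
  const unsigned BitWidth = 32;
  unsigned LeadZ = clz32(~NumKnownZero);                 // countLeadingOnes(KnownZero)
  unsigned RHSUnknownLeadingOnes = clz32(DenomKnownOne); // countLeadingZeros(KnownOne)
  if (RHSUnknownLeadingOnes != BitWidth)
    LeadZ = std::min(BitWidth, LeadZ + BitWidth - RHSUnknownLeadingOnes - 1);
  return LeadZ;
}

For example, a numerator masked with 32767 has KnownZero = 0xFFFF8000 (17
leading zeros known); with nothing known about the denominator the bound stays
at 17, so every such element is below 2^15 and a later logical shift right by
22 must yield zero. That is the fold the updated test below relies on.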


@@ -242,60 +242,12 @@ define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
 define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
 ; X32-LABEL: knownbits_mask_udiv_shuffle_lshr:
 ; X32: # BB#0:
-; X32-NEXT: pushl %esi
-; X32-NEXT: vpand {{\.LCPI.*}}, %xmm0, %xmm0
-; X32-NEXT: vpextrd $1, %xmm1, %ecx
-; X32-NEXT: vpextrd $1, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: movl %eax, %ecx
-; X32-NEXT: vmovd %xmm1, %esi
-; X32-NEXT: vmovd %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %esi
-; X32-NEXT: vmovd %eax, %xmm2
-; X32-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; X32-NEXT: vpextrd $2, %xmm1, %ecx
-; X32-NEXT: vpextrd $2, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; X32-NEXT: vpextrd $3, %xmm1, %ecx
-; X32-NEXT: vpextrd $3, %xmm0, %eax
-; X32-NEXT: xorl %edx, %edx
-; X32-NEXT: divl %ecx
-; X32-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; X32-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X32-NEXT: vpsrld $22, %xmm0, %xmm0
-; X32-NEXT: popl %esi
+; X32-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X32-NEXT: retl
 ;
 ; X64-LABEL: knownbits_mask_udiv_shuffle_lshr:
 ; X64: # BB#0:
-; X64-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
-; X64-NEXT: vpextrd $1, %xmm1, %ecx
-; X64-NEXT: vpextrd $1, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: movl %eax, %ecx
-; X64-NEXT: vmovd %xmm1, %esi
-; X64-NEXT: vmovd %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %esi
-; X64-NEXT: vmovd %eax, %xmm2
-; X64-NEXT: vpinsrd $1, %ecx, %xmm2, %xmm2
-; X64-NEXT: vpextrd $2, %xmm1, %ecx
-; X64-NEXT: vpextrd $2, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; X64-NEXT: vpextrd $3, %xmm1, %ecx
-; X64-NEXT: vpextrd $3, %xmm0, %eax
-; X64-NEXT: xorl %edx, %edx
-; X64-NEXT: divl %ecx
-; X64-NEXT: vpinsrd $3, %eax, %xmm2, %xmm0
-; X64-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
-; X64-NEXT: vpsrld $22, %xmm0, %xmm0
+; X64-NEXT: vxorps %xmm0, %xmm0, %xmm0
 ; X64-NEXT: retq
   %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
   %2 = udiv <4 x i32> %1, %a1
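
The shuffle in this test reads only lanes 0 and 3 of the udiv result (the old
output shows vpshufd with mask [0,0,3,3]) and then shifts each lane right by
22. A toy model of why the DemandedElts plumbing lets this fold to zero (a
sketch under stated assumptions, not LLVM's APInt-based implementation; names
and layout are illustrative):

#include <array>
#include <bitset>
#include <cstdint>

int main() {
  // Per-lane known-zero masks for %1 = and %a0, <32767, -1, -1, 32767>.
  std::array<uint32_t, 4> KnownZeroPerLane = {0xFFFF8000u, 0u, 0u, 0xFFFF8000u};

  // The shuffle mask [0,0,3,3] demands only source lanes 0 and 3.
  std::bitset<4> DemandedElts;
  DemandedElts.set(0);
  DemandedElts.set(3);

  // Intersect the known-zero bits across the demanded lanes only; the
  // undemanded (unmasked) lanes 1 and 2 no longer spoil the result.
  uint32_t CommonKnownZero = ~0u;
  for (unsigned i = 0; i != 4; ++i)
    if (DemandedElts[i])
      CommonKnownZero &= KnownZeroPerLane[i];

  // 17 leading zeros are known; per the udiv rule above they carry over to
  // the quotient, so a logical shift right by 22 clears every demanded lane.
  unsigned LeadZ = __builtin_clz(~CommonKnownZero); // 17 here; ~value != 0
  return LeadZ >= 32 - 22 ? 0 : 1;                  // expect 0 (success)
}

Without the DemandedElts argument, the intersection would include lanes 1 and 2
(known-zero mask 0), nothing would be known about the leading bits, and the
scalarized division sequence removed above would remain in the output.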