[TargetLowering] SimplifyDemandedBits ZERO_EXTEND_VECTOR_INREG -> ANY_EXTEND_VECTOR_INREG

Simplify ZERO_EXTEND_VECTOR_INREG if the extended bits are not required.

Matches what we already do for ZERO_EXTEND.

Reapplies rL363850, but now with the legality checks added in rL364290.

llvm-svn: 364303
Simon Pilgrim 2019-06-25 12:57:43 +00:00
parent b956364009
commit f92e9771b2
3 changed files with 48 additions and 70 deletions
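
The fold rests on a demanded-bits argument: when every user of the extended value reads at most the low InBits bits (DemandedBits.getActiveBits() <= InBits in the hunk below), a zero extend and an any extend (upper bits unspecified) are indistinguishable, so the cheaper form can be used. A minimal standalone sketch of that equivalence in plain C++ (not the LLVM API; the garbage pattern and test values are made up):

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Zero-extend the low 32 bits of a lane: the upper 32 bits become zero.
uint64_t zeroExtend32(uint32_t X) { return static_cast<uint64_t>(X); }

// "Any" extend: the upper 32 bits are unspecified; model them as garbage.
uint64_t anyExtend32(uint32_t X) { return (0xAAAAAAAAull << 32) | X; }

int main() {
  // Only the low 32 bits are demanded by the (modeled) user.
  const uint64_t DemandedMask = 0xFFFFFFFFull;
  for (uint32_t X : {0u, 1u, 0x12345678u, 0xFFFFFFFFu}) {
    // Under the demanded mask the two extends agree on every input, which
    // is exactly what lets the zext be downgraded to an anyext.
    assert((zeroExtend32(X) & DemandedMask) ==
           (anyExtend32(X) & DemandedMask));
  }
  return 0;
}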

llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp

@@ -1381,7 +1381,6 @@ bool TargetLowering::SimplifyDemandedBits(
     bool IsVecInReg = Op.getOpcode() == ISD::ZERO_EXTEND_VECTOR_INREG;
 
     // If none of the top bits are demanded, convert this into an any_extend.
-    // TODO: Add ZERO_EXTEND_VECTOR_INREG - ANY_EXTEND_VECTOR_INREG fold.
     if (DemandedBits.getActiveBits() <= InBits) {
       // If we only need the non-extended bits of the bottom element
       // then we can just bitcast to the result.
@@ -1390,11 +1389,10 @@ bool TargetLowering::SimplifyDemandedBits(
           TLO.DAG.getDataLayout().isLittleEndian())
         return TLO.CombineTo(Op, TLO.DAG.getBitcast(VT, Src));
 
-      if (!IsVecInReg) {
-        unsigned Opc = ISD::ANY_EXTEND;
-        if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
-          return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
-      }
+      unsigned Opc =
+          IsVecInReg ? ISD::ANY_EXTEND_VECTOR_INREG : ISD::ANY_EXTEND;
+      if (!TLO.LegalOperations() || isOperationLegal(Opc, VT))
+        return TLO.CombineTo(Op, TLO.DAG.getNode(Opc, dl, VT, Src));
     }
 
     APInt InDemandedBits = DemandedBits.trunc(InBits);
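
Net effect of the two hunks above: the TODO is resolved and the previously scalar-only block becomes a single opcode selection covering both forms, still gated on the legality check that rL364290 introduced. A hypothetical plain-C++ mock of that control flow (the names mirror the diff; none of this is the LLVM API):

#include <iostream>

enum Opcode { ANY_EXTEND, ANY_EXTEND_VECTOR_INREG };

// Stand-ins for TLO.LegalOperations() and isOperationLegal(); a real target
// would consult its legalization tables here.
bool legalOperationsOnly() { return true; }
bool isOperationLegal(Opcode) { return true; }

// One ternary now serves the scalar and vector-in-register extends, and the
// fold fires only when the resulting opcode is selectable.
bool tryAnyExtendFold(bool IsVecInReg, Opcode &Opc) {
  Opc = IsVecInReg ? ANY_EXTEND_VECTOR_INREG : ANY_EXTEND;
  return !legalOperationsOnly() || isOperationLegal(Opc);
}

int main() {
  Opcode Opc;
  if (tryAnyExtendFold(/*IsVecInReg=*/true, Opc))
    std::cout << "fold to opcode " << Opc << '\n'; // "fold to opcode 1"
}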

llvm/test/CodeGen/X86/combine-pmuludq.ll

@@ -91,18 +91,15 @@ define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1)
 define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
 ; SSE-LABEL: combine_zext_pmuludq_256:
 ; SSE: # %bb.0:
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[2,2,3,3]
 ; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
 ; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
-; SSE-NEXT: movdqa {{.*#+}} xmm1 = [715827883,715827883]
-; SSE-NEXT: pmuludq %xmm1, %xmm0
-; SSE-NEXT: pmuludq %xmm1, %xmm2
-; SSE-NEXT: pmuludq %xmm1, %xmm4
-; SSE-NEXT: pmuludq %xmm1, %xmm3
-; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm4 = [715827883,715827883]
+; SSE-NEXT: pmuludq %xmm4, %xmm0
+; SSE-NEXT: pmuludq %xmm4, %xmm1
+; SSE-NEXT: pmuludq %xmm4, %xmm2
+; SSE-NEXT: pmuludq %xmm4, %xmm3
 ; SSE-NEXT: retq
 ;
 ; AVX2-LABEL: combine_zext_pmuludq_256:
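
The churn above follows from PMULUDQ's semantics: it multiplies only the low 32 bits of each 64-bit lane, so the zeroed upper halves produced by pmovzxdq are never demanded, and the zero-extends can relax to any-extends that lower to plain pshufd lane copies. A one-lane scalar model in plain C++ (the garbage pattern is made up; 715827883 is the splat constant from the test):

#include <cassert>
#include <cstdint>

// PMULUDQ per-lane semantics: low-32 x low-32 -> full 64-bit product.
uint64_t pmuludqLane(uint64_t A, uint64_t B) {
  return (A & 0xFFFFFFFFull) * (B & 0xFFFFFFFFull);
}

int main() {
  const uint32_t X = 0xDEADBEEFu;
  const uint64_t ZExt = X;                           // upper half zeroed
  const uint64_t AnyExt = 0x5555555500000000ull | X; // upper half garbage
  // Identical products either way: the upper 32 bits of each input lane are
  // not demanded, so zeroing them is wasted work.
  assert(pmuludqLane(ZExt, 715827883u) == pmuludqLane(AnyExt, 715827883u));
  return 0;
}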

llvm/test/CodeGen/X86/pmul.ll

@@ -1016,30 +1016,24 @@ entry:
 define <4 x i32> @mul_v4i64_zero_upper(<4 x i32> %val1, <4 x i32> %val2) {
 ; SSE2-LABEL: mul_v4i64_zero_upper:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: pxor %xmm3, %xmm3
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm3[0],xmm4[1],xmm3[1]
-; SSE2-NEXT: pmuludq %xmm4, %xmm2
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm3[2],xmm1[3],xmm3[3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,3],xmm1[1,3]
-; SSE2-NEXT: movaps %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[0,1,1,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm2, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,3,3]
+; SSE2-NEXT: pmuludq %xmm3, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: mul_v4i64_zero_upper:
 ; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmuludq %xmm2, %xmm4
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pmuludq %xmm3, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm4[1,3]
+; SSE41-NEXT: pmuludq %xmm2, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,3,3]
+; SSE41-NEXT: pmuludq %xmm3, %xmm1
+; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
 ; SSE41-NEXT: retq
 ;
 ; AVX-LABEL: mul_v4i64_zero_upper:
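
The mul_v4i64_zero_upper diff above (and the <8 x i64> variant below, which is the same pattern at twice the width) has the same root cause: the test widens <4 x i32> inputs, multiplies, and keeps only the high dword of each 64-bit product, so pmuludq never demands the zeroed upper input halves, and the SSE2 punpck*dq-with-zero / SSE4.1 pmovzxdq sequences collapse to pshufd shuffles. A per-lane scalar check in plain C++ (the garbage constants are made up):

#include <cassert>
#include <cstdint>

// Reference for one lane: zero-extend both inputs to 64 bits, multiply, and
// return the high dword of the product (what the shufps [1,3] extracts).
uint32_t mulHi32Ref(uint32_t A, uint32_t B) {
  return static_cast<uint32_t>((uint64_t(A) * uint64_t(B)) >> 32);
}

// What the lowered code computes when the extends leave garbage upper halves:
// pmuludq masks them off before multiplying.
uint32_t mulHi32AnyExt(uint64_t LaneA, uint64_t LaneB) {
  const uint64_t Prod = (LaneA & 0xFFFFFFFFull) * (LaneB & 0xFFFFFFFFull);
  return static_cast<uint32_t>(Prod >> 32);
}

int main() {
  const uint32_t A = 0x89ABCDEFu, B = 0x01234567u;
  assert(mulHi32Ref(A, B) ==
         mulHi32AnyExt(0x1111111100000000ull | A, 0x2222222200000000ull | B));
  return 0;
}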
@@ -1171,48 +1165,37 @@ entry:
 define <8 x i32> @mul_v8i64_zero_upper(<8 x i32> %val1, <8 x i32> %val2) {
 ; SSE2-LABEL: mul_v8i64_zero_upper:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: pxor %xmm6, %xmm6
-; SSE2-NEXT: movdqa %xmm0, %xmm4
-; SSE2-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm6[2],xmm0[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm1, %xmm5
-; SSE2-NEXT: punpckldq {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1]
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm6[2],xmm1[3],xmm6[3]
-; SSE2-NEXT: movdqa %xmm2, %xmm7
-; SSE2-NEXT: punpckldq {{.*#+}} xmm7 = xmm7[0],xmm6[0],xmm7[1],xmm6[1]
-; SSE2-NEXT: pmuludq %xmm7, %xmm4
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm2 = xmm2[2],xmm6[2],xmm2[3],xmm6[3]
-; SSE2-NEXT: pmuludq %xmm0, %xmm2
-; SSE2-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,3],xmm2[1,3]
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm6[0],xmm0[1],xmm6[1]
-; SSE2-NEXT: pmuludq %xmm0, %xmm5
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm6[2],xmm3[3],xmm6[3]
-; SSE2-NEXT: pmuludq %xmm1, %xmm3
-; SSE2-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,3],xmm3[1,3]
-; SSE2-NEXT: movaps %xmm4, %xmm0
-; SSE2-NEXT: movaps %xmm5, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,1,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,1,1,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,1,3,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm4, %xmm0
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,1,3,3]
+; SSE2-NEXT: pmuludq %xmm5, %xmm1
+; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
+; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm3[0,1,1,3]
+; SSE2-NEXT: pmuludq %xmm6, %xmm1
+; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,1,3,3]
+; SSE2-NEXT: pmuludq %xmm7, %xmm2
+; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
 ; SSE2-NEXT: retq
 ;
 ; SSE41-LABEL: mul_v8i64_zero_upper:
 ; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm4[0],zero,xmm4[1],zero
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm5 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm7 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; SSE41-NEXT: pmuludq %xmm4, %xmm1
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm0[0],zero,xmm0[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,3,3]
+; SSE41-NEXT: pmovzxdq {{.*#+}} xmm6 = xmm1[0],zero,xmm1[1],zero
+; SSE41-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,2,3,3]
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm2[0],zero,xmm2[1],zero
-; SSE41-NEXT: pmuludq %xmm5, %xmm0
+; SSE41-NEXT: pmuludq %xmm4, %xmm0
+; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm2[2,2,3,3]
+; SSE41-NEXT: pmuludq %xmm5, %xmm1
 ; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,0,1]
-; SSE41-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
-; SSE41-NEXT: pmuludq %xmm6, %xmm2
 ; SSE41-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm3[0],zero,xmm3[1],zero
-; SSE41-NEXT: pmuludq %xmm7, %xmm1
+; SSE41-NEXT: pmuludq %xmm6, %xmm1
+; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm3[2,2,3,3]
+; SSE41-NEXT: pmuludq %xmm7, %xmm2
 ; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
 ; SSE41-NEXT: retq
 ;