[X86][SSE] Simplified blend-with-zero combining

We were being too aggressive in trying to combine a shuffle into a blend-with-zero pattern, often resulting in an endless loop of contrasting combines.

This patch stops the combine if we already have a blend in place (which means we miss some domain corrections).
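
The old guard only skipped the rewrite when the root was an identical blend; a root blend differing in type or immediate would be replaced, other combines could then canonicalize it back, and the two would chase each other indefinitely. The fix is the early-out visible in the diff below, annotated here for clarity:

    // If we are at the root of the shuffle chain (Depth == 1) and a blend
    // is already in place, do not build another blend-with-zero on top of
    // it, even when the existing blend sits in a suboptimal execution domain.
    if (Depth == 1 && Root.getOpcode() == X86ISD::BLENDI)
      return false;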

llvm-svn: 263717
Simon Pilgrim 2016-03-17 15:59:36 +00:00
parent 1aa6c6adf1
commit f9f3d37f61
4 changed files with 63 additions and 20 deletions


@@ -1919,7 +1919,7 @@ EVT X86TargetLowering::getSetCCResultType(const DataLayout &DL,
       EVT LegalVT = getTypeToTransformTo(Context, VT);
       EltVT = LegalVT.getVectorElementType().getSimpleVT();
     }
     if (Subtarget.hasVLX() && EltVT.getSizeInBits() >= 32)
       switch (NumElts) {
       case 2: return MVT::v2i1;
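
This first hunk is in getSetCCResultType: with AVX-512VL available, compares of 32-bit or wider elements return the narrow k-mask types. Assuming the usual MVT mapping, the switch continues along these lines:

    // Sketch, assuming the standard mapping: an N-element compare yields
    // the vNi1 mask type when VLX is available.
    switch (NumElts) {
    case 2: return MVT::v2i1;
    case 4: return MVT::v4i1;
    case 8: return MVT::v8i1;
    }
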
@@ -23958,23 +23958,22 @@ static bool combineX86ShuffleChain(SDValue Input, SDValue Root,
       unsigned ShuffleSize = ShuffleVT.getVectorNumElements();
       unsigned MaskRatio = ShuffleSize / Mask.size();
+      if (Depth == 1 && Root.getOpcode() == X86ISD::BLENDI)
+        return false;
+
       for (unsigned i = 0; i != ShuffleSize; ++i)
         if (Mask[i / MaskRatio] < 0)
           BlendMask |= 1u << i;
 
-      if (Depth != 1 || RootVT != ShuffleVT ||
-          Root.getOpcode() != X86ISD::BLENDI ||
-          Root->getConstantOperandVal(2) != BlendMask) {
-        SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
-        Res = DAG.getBitcast(ShuffleVT, Input);
-        DCI.AddToWorklist(Res.getNode());
-        Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
-                          DAG.getConstant(BlendMask, DL, MVT::i8));
-        DCI.AddToWorklist(Res.getNode());
-        DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
-                      /*AddTo*/ true);
-        return true;
-      }
+      SDValue Zero = getZeroVector(ShuffleVT, Subtarget, DAG, DL);
+      Res = DAG.getBitcast(ShuffleVT, Input);
+      DCI.AddToWorklist(Res.getNode());
+      Res = DAG.getNode(X86ISD::BLENDI, DL, ShuffleVT, Res, Zero,
+                        DAG.getConstant(BlendMask, DL, MVT::i8));
+      DCI.AddToWorklist(Res.getNode());
+      DCI.CombineTo(Root.getNode(), DAG.getBitcast(RootVT, Res),
+                    /*AddTo*/ true);
+      return true;
     }
   }
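
The BlendMask loop above marks every zeroable lane (a negative shuffle-mask entry) for selection from the zero vector, with MaskRatio scaling the mask up when the blend operates on more, narrower lanes than the incoming mask describes. A self-contained sketch of that computation (hypothetical helper, not part of the patch):

    #include <cstdint>
    #include <vector>

    // Build a BLENDI immediate that zeroes every lane whose corresponding
    // (possibly coarser) shuffle-mask entry is negative, i.e. zeroable.
    static uint8_t blendWithZeroImm(const std::vector<int> &Mask,
                                    unsigned ShuffleSize) {
      unsigned MaskRatio = ShuffleSize / Mask.size();
      uint8_t BlendMask = 0;
      for (unsigned i = 0; i != ShuffleSize; ++i)
        if (Mask[i / MaskRatio] < 0)   // negative entry => zeroable lane
          BlendMask |= 1u << i;        // take this lane from the zero vector
      return BlendMask;
    }

For example, Mask = {0, -1} applied at four blend lanes yields 0b1100: lanes 0 and 1 are kept from the input, lanes 2 and 3 are zeroed.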


@@ -278,8 +278,8 @@ define <8 x float> @merge_8f32_2f32_23z5(<2 x float>* %ptr) nounwind uwtable noi
 ; X32-AVX-LABEL: merge_8f32_2f32_23z5:
 ; X32-AVX:       # BB#0:
 ; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax
-; X32-AVX-NEXT:    vxorps %ymm0, %ymm0, %ymm0
-; X32-AVX-NEXT:    vblendps {{.*#+}} ymm0 = mem[0,1,2,3],ymm0[4,5],mem[6,7]
+; X32-AVX-NEXT:    vxorpd %ymm0, %ymm0, %ymm0
+; X32-AVX-NEXT:    vblendpd {{.*#+}} ymm0 = mem[0,1],ymm0[2],mem[3]
 ; X32-AVX-NEXT:    retl
   %ptr0 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 2
   %ptr1 = getelementptr inbounds <2 x float>, <2 x float>* %ptr, i64 3
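
This test change shows one of the domain corrections the patch now misses: the blend-with-zero on <8 x float> data is emitted in the double domain (vxorpd/vblendpd) rather than the float domain. The two forms select the same lanes because each pair of adjacent f32 lanes agrees; a sketch of that immediate equivalence (hypothetical helper, not from this patch):

    #include <cstdint>
    #include <optional>

    // Narrow an 8-lane vblendps immediate to a 4-lane vblendpd immediate.
    // Legal only when both f32 halves of each f64 lane pick the same
    // source, e.g. 0b00110000 (ps lanes 4,5) -> 0b0100 (pd lane 2).
    static std::optional<uint8_t> narrowBlendPsToPd(uint8_t PsImm) {
      uint8_t PdImm = 0;
      for (unsigned i = 0; i != 4; ++i) {
        unsigned Pair = (PsImm >> (2 * i)) & 0x3;
        if (Pair == 0x3)
          PdImm |= 1u << i;     // both halves from the second operand
        else if (Pair != 0x0)
          return std::nullopt;  // halves disagree: not a legal vblendpd
      }
      return PdImm;
    }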


@@ -2819,6 +2819,50 @@ define <4 x float> @combine_insertps4(<4 x float> %a, <4 x float> %b) {
   ret <4 x float> %d
 }
 
+; FIXME: Failed to recognise that the VMOVSD has already zero'd the upper element
+define void @combine_scalar_load_with_blend_with_zero(double* %a0, <4 x float>* %a1) {
+; SSE2-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSE2:       # BB#0:
+; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE2-NEXT:    xorps %xmm1, %xmm1
+; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSE2-NEXT:    movaps %xmm0, (%rsi)
+; SSE2-NEXT:    retq
+;
+; SSSE3-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSSE3:       # BB#0:
+; SSSE3-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSSE3-NEXT:    xorps %xmm1, %xmm1
+; SSSE3-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,0],xmm0[3,0]
+; SSSE3-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[0,2]
+; SSSE3-NEXT:    movaps %xmm0, (%rsi)
+; SSSE3-NEXT:    retq
+;
+; SSE41-LABEL: combine_scalar_load_with_blend_with_zero:
+; SSE41:       # BB#0:
+; SSE41-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
+; SSE41-NEXT:    xorpd %xmm1, %xmm1
+; SSE41-NEXT:    blendpd {{.*#+}} xmm1 = xmm0[0],xmm1[1]
+; SSE41-NEXT:    movapd %xmm1, (%rsi)
+; SSE41-NEXT:    retq
+;
+; AVX-LABEL: combine_scalar_load_with_blend_with_zero:
+; AVX:       # BB#0:
+; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
+; AVX-NEXT:    vxorpd %xmm1, %xmm1, %xmm1
+; AVX-NEXT:    vblendpd {{.*#+}} xmm0 = xmm0[0],xmm1[1]
+; AVX-NEXT:    vmovapd %xmm0, (%rsi)
+; AVX-NEXT:    retq
+  %1 = load double, double* %a0, align 8
+  %2 = insertelement <2 x double> undef, double %1, i32 0
+  %3 = insertelement <2 x double> %2, double 0.000000e+00, i32 1
+  %4 = bitcast <2 x double> %3 to <4 x float>
+  %5 = shufflevector <4 x float> %4, <4 x float> <float 0.000000e+00, float undef, float undef, float undef>, <4 x i32> <i32 0, i32 1, i32 4, i32 3>
+  store <4 x float> %5, <4 x float>* %a1, align 16
+  ret void
+}
+
 define <4 x float> @PR22377(<4 x float> %a, <4 x float> %b) {
 ; SSE-LABEL: PR22377:
 ; SSE:       # BB#0: # %entry
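
The FIXME on the new test records the remaining inefficiency: movsd (and vmovsd) already loads mem[0] and zeroes the upper lane, so the explicit xor-plus-blend that follows is redundant. A sketch of the kind of mask pruning that could catch this (hypothetical, not implemented by this patch; KnownZeroLanes would have to come from something like computeKnownBits):

    #include <cstdint>

    // A blend-with-zero lane is a no-op when the corresponding input lane
    // is already known to be zero, as VMOVSD guarantees for its upper lane.
    static uint8_t pruneKnownZeroLanes(uint8_t BlendMask,
                                       uint8_t KnownZeroLanes) {
      return BlendMask & ~KnownZeroLanes;  // keep the (already zero) input
    }

If the pruned immediate is zero, every lane comes from the input and the blend can be removed entirely.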


@@ -1151,8 +1151,8 @@ define <4 x i64> @shuf_zext_4i32_to_4i64(<4 x i32> %A) nounwind uwtable readnone
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64:
 ; AVX1:       # BB#0: # %entry
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5,6,7]
+; AVX1-NEXT:    vxorpd %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vblendpd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
 ; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[2,0,3,0]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm1, %ymm0
 ; AVX1-NEXT:    retq
@@ -1581,8 +1581,8 @@ define <4 x i64> @shuf_zext_4i32_to_4i64_offset1(<4 x i32> %A) nounwind uwtable
 ; AVX1-LABEL: shuf_zext_4i32_to_4i64_offset1:
 ; AVX1:       # BB#0: # %entry
 ; AVX1-NEXT:    vinsertps {{.*#+}} xmm1 = xmm0[3],zero,zero,zero
-; AVX1-NEXT:    vpxor %xmm2, %xmm2, %xmm2
-; AVX1-NEXT:    vpblendw {{.*#+}} xmm0 = xmm2[0,1,2,3],xmm0[4,5],xmm2[6,7]
+; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
+; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm2[0,1],xmm0[2],xmm2[3]
 ; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm0
 ; AVX1-NEXT:    retq
 ;
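
Both shuf_zext hunks above exercise zero-extension written as a shuffle with a zero vector: each i32 lane widens to an (i32, 0) pair, which is why blend-with-zero patterns appear in this lowering at all. A scalar model of that equivalence (illustrative only, little-endian lane order assumed):

    #include <array>
    #include <cstdint>

    // zext <4 x i32> to <4 x i64> == interleave the elements with zero
    // lanes, then reinterpret each pair of i32 lanes as one i64 lane.
    static std::array<uint64_t, 4> zextViaInterleave(std::array<uint32_t, 4> A) {
      std::array<uint32_t, 8> Lanes{};  // {A[0],0,A[1],0,A[2],0,A[3],0}
      for (int i = 0; i != 4; ++i)
        Lanes[2 * i] = A[i];            // odd (high) lanes stay zero
      std::array<uint64_t, 4> R{};
      for (int i = 0; i != 4; ++i)
        R[i] = uint64_t(Lanes[2 * i]) | (uint64_t(Lanes[2 * i + 1]) << 32);
      return R;
    }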