[SelectionDAG] Add BITCAST handling to ComputeNumSignBits for splatted sign bits.

For cases where we are BITCASTing to vectors of smaller elements: if the entire source was a splatted sign (the source's NumSignBits == SrcBitWidth), then the destination's NumSignBits == DstBitWidth, as we are just splitting those sign bits across multiple elements.
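
To see why (a minimal standalone sketch in plain C++, independent of the SelectionDAG API): a wide element consisting entirely of sign bits is either all-zeros or all-ones, so every narrow element produced by the bitcast is also all-zeros or all-ones, regardless of endianness:

    // Illustration only (not LLVM code): a 64-bit lane that is all sign
    // bits splits into 32-bit lanes that are again all sign bits.
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const uint64_t Wide[2] = {~0ULL, 0ULL}; // v2i64 sign splat: lanes are 0 or -1
      uint32_t Narrow[4];                     // bitcast destination: v4i32
      std::memcpy(Narrow, Wide, sizeof(Wide));
      for (uint32_t Lane : Narrow)
        assert(Lane == 0 || Lane == ~0U);     // 32 sign bits in every i32 lane
      return 0;
    }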

We could generalize this, but at the moment the only use case I have is peeking through bitcasts to vector comparison results.

Differential Revision: https://reviews.llvm.org/D37849

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@313543 91177308-0d34-0410-b5e6-96231b3b80d8
commit 3632da7880 (parent 09f2a0775a)
Author: Simon Pilgrim
Date: 2017-09-18 16:45:05 +00:00
7 changed files with 830 additions and 1146 deletions


@@ -3035,6 +3035,30 @@ unsigned SelectionDAG::ComputeNumSignBits(SDValue Op, const APInt &DemandedElts,
    return Tmp;
  }
  case ISD::BITCAST: {
    SDValue N0 = Op.getOperand(0);
    unsigned SrcBits = N0.getScalarValueSizeInBits();

    // Ignore bitcasts from floating point.
    if (!N0.getValueType().isInteger())
      break;

    // Fast handling of 'identity' bitcasts.
    if (VTBits == SrcBits)
      return ComputeNumSignBits(N0, DemandedElts, Depth + 1);

    // Bitcast 'large element' scalar/vector to 'small element' vector.
    // TODO: Handle cases other than 'sign splat' when we have a use case.
    // Requires handling of DemandedElts and Endianness.
    if ((SrcBits % VTBits) == 0) {
      assert(Op.getValueType().isVector() && "Expected bitcast to vector");
      Tmp = ComputeNumSignBits(N0, Depth + 1);
      if (Tmp == SrcBits)
        return VTBits;
    }
    break;
  }
  case ISD::SIGN_EXTEND:
  case ISD::SIGN_EXTEND_VECTOR_INREG:
    Tmp = VTBits - Op.getOperand(0).getScalarValueSizeInBits();
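
The payoff shows up in the test diffs below: the redundant sign-splat shift pairs (pslld $31/psrad $31, psllw $15/psraw $15, and the psllw $7 plus pcmpgtb sequences) are no longer emitted, because the bitcast of a vector-compare result is now known to already be all sign bits. A simplified sketch of the kind of combine this enables (hypothetical helper name, not the actual DAGCombiner code):

    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    // Sketch only: a sign_extend_inreg is a no-op when the operand already
    // has enough known sign bits, which the new BITCAST handling can now
    // prove for (bitcast (setcc ...)) patterns.
    static SDValue trySkipSExtInReg(SelectionDAG &DAG, SDValue N0, EVT ExtVT) {
      unsigned VTBits = N0.getScalarValueSizeInBits();
      unsigned ExtBits = ExtVT.getScalarSizeInBits();
      if (DAG.ComputeNumSignBits(N0) >= VTBits - ExtBits + 1)
        return N0; // value is already sign-extended in place
      return SDValue();
    }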


@@ -30,8 +30,6 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm2, %xmm0
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; SSE2-SSSE3-NEXT: pslld $31, %xmm0
; SSE2-SSSE3-NEXT: psrad $31, %xmm0
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm7
; SSE2-SSSE3-NEXT: pxor %xmm8, %xmm5
; SSE2-SSSE3-NEXT: movdqa %xmm5, %xmm1
@@ -53,9 +51,7 @@ define i4 @v4i64(<4 x i64> %a, <4 x i64> %b, <4 x i64> %c, <4 x i64> %d) {
; SSE2-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-SSSE3-NEXT: por %xmm4, %xmm2
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; SSE2-SSSE3-NEXT: pslld $31, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
; SSE2-SSSE3-NEXT: pand %xmm0, %xmm2
; SSE2-SSSE3-NEXT: andps %xmm0, %xmm2
; SSE2-SSSE3-NEXT: movmskps %xmm2, %eax
; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
@@ -114,14 +110,10 @@ define i4 @v4f64(<4 x double> %a, <4 x double> %b, <4 x double> %c, <4 x double>
; SSE2-SSSE3-NEXT: cmpltpd %xmm1, %xmm3
; SSE2-SSSE3-NEXT: cmpltpd %xmm0, %xmm2
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE2-SSSE3-NEXT: pslld $31, %xmm2
; SSE2-SSSE3-NEXT: psrad $31, %xmm2
; SSE2-SSSE3-NEXT: cmpltpd %xmm5, %xmm7
; SSE2-SSSE3-NEXT: cmpltpd %xmm4, %xmm6
; SSE2-SSSE3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2-SSSE3-NEXT: pslld $31, %xmm6
; SSE2-SSSE3-NEXT: psrad $31, %xmm6
; SSE2-SSSE3-NEXT: pand %xmm2, %xmm6
; SSE2-SSSE3-NEXT: andps %xmm2, %xmm6
; SSE2-SSSE3-NEXT: movmskps %xmm6, %eax
; SSE2-SSSE3-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE2-SSSE3-NEXT: retq
@@ -165,22 +157,13 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSE2-NEXT: pcmpgtw %xmm2, %xmm0
; SSE2-NEXT: pand %xmm3, %xmm0
; SSE2-NEXT: packuswb %xmm1, %xmm0
; SSE2-NEXT: psllw $7, %xmm0
; SSE2-NEXT: movdqa {{.*#+}} xmm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE2-NEXT: pand %xmm8, %xmm0
; SSE2-NEXT: pxor %xmm2, %xmm2
; SSE2-NEXT: pxor %xmm1, %xmm1
; SSE2-NEXT: pcmpgtb %xmm0, %xmm1
; SSE2-NEXT: pcmpgtw %xmm7, %xmm5
; SSE2-NEXT: pand %xmm3, %xmm5
; SSE2-NEXT: pcmpgtw %xmm6, %xmm4
; SSE2-NEXT: pand %xmm3, %xmm4
; SSE2-NEXT: packuswb %xmm5, %xmm4
; SSE2-NEXT: psllw $7, %xmm4
; SSE2-NEXT: pand %xmm8, %xmm4
; SSE2-NEXT: pcmpgtb %xmm4, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: pmovmskb %xmm2, %eax
; SSE2-NEXT: pand %xmm0, %xmm4
; SSE2-NEXT: pmovmskb %xmm4, %eax
; SSE2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE2-NEXT: retq
;
@@ -192,22 +175,13 @@ define i16 @v16i16(<16 x i16> %a, <16 x i16> %b, <16 x i16> %c, <16 x i16> %d) {
; SSSE3-NEXT: pcmpgtw %xmm2, %xmm0
; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: psllw $7, %xmm0
; SSSE3-NEXT: movdqa {{.*#+}} xmm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSSE3-NEXT: pand %xmm8, %xmm0
; SSSE3-NEXT: pxor %xmm2, %xmm2
; SSSE3-NEXT: pxor %xmm1, %xmm1
; SSSE3-NEXT: pcmpgtb %xmm0, %xmm1
; SSSE3-NEXT: pcmpgtw %xmm7, %xmm5
; SSSE3-NEXT: pshufb %xmm3, %xmm5
; SSSE3-NEXT: pcmpgtw %xmm6, %xmm4
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSSE3-NEXT: psllw $7, %xmm4
; SSSE3-NEXT: pand %xmm8, %xmm4
; SSSE3-NEXT: pcmpgtb %xmm4, %xmm2
; SSSE3-NEXT: pand %xmm1, %xmm2
; SSSE3-NEXT: pmovmskb %xmm2, %eax
; SSSE3-NEXT: pand %xmm0, %xmm4
; SSSE3-NEXT: pmovmskb %xmm4, %eax
; SSSE3-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSSE3-NEXT: retq
;
@@ -270,8 +244,6 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: psllw $15, %xmm0
; SSE2-NEXT: psraw $15, %xmm0
; SSE2-NEXT: pcmpgtd %xmm7, %xmm5
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -281,8 +253,6 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: psllw $15, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm2
@@ -298,15 +268,11 @@ define i8 @v8i32(<8 x i32> %a, <8 x i32> %b, <8 x i32> %c, <8 x i32> %d) {
; SSSE3-NEXT: pcmpgtd %xmm2, %xmm0
; SSSE3-NEXT: pshufb %xmm3, %xmm0
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSSE3-NEXT: psllw $15, %xmm0
; SSSE3-NEXT: psraw $15, %xmm0
; SSSE3-NEXT: pcmpgtd %xmm7, %xmm5
; SSSE3-NEXT: pshufb %xmm3, %xmm5
; SSSE3-NEXT: pcmpgtd %xmm6, %xmm4
; SSSE3-NEXT: pshufb %xmm3, %xmm4
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSSE3-NEXT: psllw $15, %xmm4
; SSSE3-NEXT: psraw $15, %xmm4
; SSSE3-NEXT: pand %xmm0, %xmm4
; SSSE3-NEXT: pshufb {{.*#+}} xmm4 = xmm4[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm4, %eax
@@ -374,8 +340,6 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSE2-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT: psllw $15, %xmm0
; SSE2-NEXT: psraw $15, %xmm0
; SSE2-NEXT: cmpltps %xmm5, %xmm7
; SSE2-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[0,2,2,3,4,5,6,7]
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
@@ -385,8 +349,6 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; SSE2-NEXT: psllw $15, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: pand %xmm0, %xmm2
; SSE2-NEXT: pand {{.*}}(%rip), %xmm2
; SSE2-NEXT: packuswb %xmm2, %xmm2
@@ -402,15 +364,11 @@ define i8 @v8f32(<8 x float> %a, <8 x float> %b, <8 x float> %c, <8 x float> %d)
; SSSE3-NEXT: cmpltps %xmm0, %xmm2
; SSSE3-NEXT: pshufb %xmm1, %xmm2
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSSE3-NEXT: psllw $15, %xmm2
; SSSE3-NEXT: psraw $15, %xmm2
; SSSE3-NEXT: cmpltps %xmm5, %xmm7
; SSSE3-NEXT: pshufb %xmm1, %xmm7
; SSSE3-NEXT: cmpltps %xmm4, %xmm6
; SSSE3-NEXT: pshufb %xmm1, %xmm6
; SSSE3-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; SSSE3-NEXT: psllw $15, %xmm6
; SSSE3-NEXT: psraw $15, %xmm6
; SSSE3-NEXT: pand %xmm2, %xmm6
; SSSE3-NEXT: pshufb {{.*#+}} xmm6 = xmm6[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSSE3-NEXT: pmovmskb %xmm6, %eax


@@ -8,44 +8,40 @@
define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; SSE-LABEL: v8i64:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pcmpgtq %xmm7, %xmm3
; SSE-NEXT: pcmpgtq %xmm6, %xmm2
; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2]
; SSE-NEXT: pslld $31, %xmm2
; SSE-NEXT: psrad $31, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
; SSE-NEXT: pshufb %xmm3, %xmm2
; SSE-NEXT: pcmpgtq %xmm5, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pcmpgtq %xmm4, %xmm0
; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; SSE-NEXT: pslld $31, %xmm0
; SSE-NEXT: psrad $31, %xmm0
; SSE-NEXT: pshufb %xmm3, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: psllw $15, %xmm0
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
; SSE-NEXT: pslld $31, %xmm9
; SSE-NEXT: psrad $31, %xmm9
; SSE-NEXT: pshufb %xmm3, %xmm9
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2]
; SSE-NEXT: pslld $31, %xmm8
; SSE-NEXT: psrad $31, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[0,2]
; SSE-NEXT: pshufb %xmm3, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT: psllw $15, %xmm8
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; SSE-NEXT: pcmpgtq {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5,6,7]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: psllw $15, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE-NEXT: pmovmskb %xmm2, %eax
; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE-NEXT: retq
;
@@ -65,8 +61,6 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0]
; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2
; AVX1-NEXT: vpcmpgtq %xmm1, %xmm2, %xmm1
@@ -80,9 +74,9 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vpsllw $15, %xmm1, %xmm1
; AVX1-NEXT: vpsraw $15, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX1-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -101,8 +95,6 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX2-NEXT: vpcmpgtq %ymm7, %ymm5, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
@@ -112,9 +104,9 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vpsllw $15, %xmm1, %xmm1
; AVX2-NEXT: vpsraw $15, %xmm1, %xmm1
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX2-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -148,44 +140,40 @@ define i8 @v8i64(<8 x i64> %a, <8 x i64> %b, <8 x i64> %c, <8 x i64> %d) {
define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double> %d) {
; SSE-LABEL: v8f64:
; SSE: # BB#0:
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movapd {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: cmpltpd %xmm3, %xmm7
; SSE-NEXT: cmpltpd %xmm2, %xmm6
; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE-NEXT: pslld $31, %xmm6
; SSE-NEXT: psrad $31, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [0,1,4,5,4,5,6,7,0,1,4,5,8,9,12,13]
; SSE-NEXT: pshufb %xmm2, %xmm6
; SSE-NEXT: cmpltpd %xmm1, %xmm5
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: cmpltpd %xmm0, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2],xmm5[0,2]
; SSE-NEXT: pslld $31, %xmm4
; SSE-NEXT: psrad $31, %xmm4
; SSE-NEXT: pshufb %xmm2, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
; SSE-NEXT: psllw $15, %xmm4
; SSE-NEXT: psraw $15, %xmm4
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm6[4,5,6,7]
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2],xmm11[0,2]
; SSE-NEXT: pslld $31, %xmm9
; SSE-NEXT: psrad $31, %xmm9
; SSE-NEXT: pshufb %xmm2, %xmm9
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm10[0,2]
; SSE-NEXT: pslld $31, %xmm8
; SSE-NEXT: psrad $31, %xmm8
; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,2],xmm11[0,2]
; SSE-NEXT: pshufb %xmm2, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT: psllw $15, %xmm8
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pshufb {{.*#+}} xmm8 = xmm8[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm10[0,2,2,3,4,5,6,7]
; SSE-NEXT: cmpltpd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm9[0,2,2,3,4,5,6,7]
; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1]
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm8[4,5,6,7]
; SSE-NEXT: pand %xmm0, %xmm2
; SSE-NEXT: psllw $15, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: pshufb {{.*#+}} xmm2 = xmm2[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; SSE-NEXT: pmovmskb %xmm2, %eax
; SSE-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
; SSE-NEXT: retq
;
@@ -201,8 +189,6 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX12-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX12-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX12-NEXT: vcmpltpd %ymm5, %ymm7, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
@@ -212,9 +198,9 @@ define i8 @v8f64(<8 x double> %a, <8 x double> %b, <8 x double> %c, <8 x double>
; AVX12-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
; AVX12-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX12-NEXT: vpsllw $15, %xmm1, %xmm1
; AVX12-NEXT: vpsraw $15, %xmm1, %xmm1
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpsllw $15, %xmm0, %xmm0
; AVX12-NEXT: vpsraw $15, %xmm0, %xmm0
; AVX12-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u]
; AVX12-NEXT: vpmovmskb %xmm0, %eax
; AVX12-NEXT: # kill: %AL<def> %AL<kill> %EAX<kill>
@@ -249,8 +235,8 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; SSE-LABEL: v32i16:
; SSE: # BB#0:
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: movdqa {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pcmpgtw %xmm5, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm5 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
@@ -258,42 +244,25 @@ define i32 @v32i16(<32 x i16> %a, <32 x i16> %b, <32 x i16> %c, <32 x i16> %d) {
; SSE-NEXT: pcmpgtw %xmm4, %xmm0
; SSE-NEXT: pshufb %xmm5, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: psllw $7, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm12 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm12, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpgtb %xmm0, %xmm4
; SSE-NEXT: pcmpgtw %xmm7, %xmm3
; SSE-NEXT: pshufb %xmm5, %xmm3
; SSE-NEXT: pcmpgtw %xmm6, %xmm2
; SSE-NEXT: pshufb %xmm5, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: psllw $7, %xmm2
; SSE-NEXT: pand %xmm12, %xmm2
; SSE-NEXT: pxor %xmm0, %xmm0
; SSE-NEXT: pcmpgtb %xmm2, %xmm0
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pshufb %xmm5, %xmm11
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pshufb %xmm5, %xmm10
; SSE-NEXT: punpcklqdq {{.*#+}} xmm10 = xmm10[0],xmm11[0]
; SSE-NEXT: psllw $7, %xmm10
; SSE-NEXT: pand %xmm12, %xmm10
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pcmpgtb %xmm10, %xmm2
; SSE-NEXT: pand %xmm4, %xmm2
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: pshufb %xmm5, %xmm9
; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
; SSE-NEXT: pand %xmm0, %xmm9
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pshufb %xmm5, %xmm10
; SSE-NEXT: pcmpgtw {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: pshufb %xmm5, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT: psllw $7, %xmm8
; SSE-NEXT: pand %xmm12, %xmm8
; SSE-NEXT: pcmpgtb %xmm8, %xmm1
; SSE-NEXT: pand %xmm0, %xmm1
; SSE-NEXT: pmovmskb %xmm2, %ecx
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
; SSE-NEXT: pand %xmm2, %xmm8
; SSE-NEXT: pmovmskb %xmm9, %ecx
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: shll $16, %eax
; SSE-NEXT: orl %ecx, %eax
; SSE-NEXT: retq
@@ -675,8 +644,6 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; SSE-NEXT: pcmpgtd %xmm6, %xmm2
; SSE-NEXT: pshufb %xmm7, %xmm2
; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE-NEXT: psllw $15, %xmm2
; SSE-NEXT: psraw $15, %xmm2
; SSE-NEXT: movdqa {{.*#+}} xmm3 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE-NEXT: pshufb %xmm3, %xmm2
; SSE-NEXT: pcmpgtd %xmm5, %xmm1
@@ -684,38 +651,23 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; SSE-NEXT: pcmpgtd %xmm4, %xmm0
; SSE-NEXT: pshufb %xmm7, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: psllw $15, %xmm0
; SSE-NEXT: psraw $15, %xmm0
; SSE-NEXT: pshufb %xmm3, %xmm0
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0]
; SSE-NEXT: psllw $7, %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pcmpgtb %xmm0, %xmm4
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pshufb %xmm7, %xmm11
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: pshufb %xmm7, %xmm9
; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
; SSE-NEXT: psllw $15, %xmm9
; SSE-NEXT: psraw $15, %xmm9
; SSE-NEXT: pshufb %xmm3, %xmm9
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pshufb %xmm7, %xmm10
; SSE-NEXT: pcmpgtd {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: pshufb %xmm7, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
; SSE-NEXT: psllw $15, %xmm8
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: pshufb %xmm3, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT: psllw $7, %xmm8
; SSE-NEXT: pand %xmm2, %xmm8
; SSE-NEXT: pcmpgtb %xmm8, %xmm1
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: pmovmskb %xmm1, %eax
; SSE-NEXT: pand %xmm0, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE-NEXT: retq
;
@@ -735,27 +687,19 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX1-NEXT: vpacksswb %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpshufb %xmm8, %xmm0, %xmm0
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm9[0]
; AVX1-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa {{.*#+}} xmm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX1-NEXT: vpand %xmm9, %xmm0, %xmm0
; AVX1-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX1-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm1
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm5, %xmm3
; AVX1-NEXT: vpacksswb %xmm1, %xmm3, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm5, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpcmpgtd %xmm7, %xmm5, %xmm2
; AVX1-NEXT: vpacksswb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vpshufb %xmm8, %xmm1, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm3
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
; AVX1-NEXT: vpcmpgtd %xmm3, %xmm5, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm4
; AVX1-NEXT: vpacksswb %xmm3, %xmm4, %xmm3
; AVX1-NEXT: vpshufb %xmm8, %xmm3, %xmm3
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm1[0]
; AVX1-NEXT: vpsllw $7, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm9, %xmm1, %xmm1
; AVX1-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX1-NEXT: vextractf128 $1, %ymm6, %xmm2
; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm3
; AVX1-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpcmpgtd %xmm6, %xmm4, %xmm3
; AVX1-NEXT: vpacksswb %xmm2, %xmm3, %xmm2
; AVX1-NEXT: vpshufb %xmm8, %xmm2, %xmm2
; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX1-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vpmovmskb %xmm0, %eax
; AVX1-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -774,23 +718,15 @@ define i16 @v16i32(<16 x i32> %a, <16 x i32> %b, <16 x i32> %c, <16 x i32> %d) {
; AVX2-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX2-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX2-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX2-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm5, %ymm5
; AVX2-NEXT: vextracti128 $1, %ymm5, %xmm7
; AVX2-NEXT: vpacksswb %xmm7, %xmm5, %xmm5
; AVX2-NEXT: vpshufb %xmm3, %xmm5, %xmm5
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm4
; AVX2-NEXT: vextracti128 $1, %ymm4, %xmm6
; AVX2-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
; AVX2-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; AVX2-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX2-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX2-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX2-NEXT: vpcmpgtd %ymm7, %ymm5, %ymm1
; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2
; AVX2-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX2-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX2-NEXT: vpcmpgtd %ymm6, %ymm4, %ymm2
; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm4
; AVX2-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
; AVX2-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX2-NEXT: vpmovmskb %xmm0, %eax
; AVX2-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
@@ -834,8 +770,6 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; SSE-NEXT: cmpltps %xmm2, %xmm6
; SSE-NEXT: pshufb %xmm3, %xmm6
; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm7[0]
; SSE-NEXT: psllw $15, %xmm6
; SSE-NEXT: psraw $15, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u>
; SSE-NEXT: pshufb %xmm2, %xmm6
; SSE-NEXT: cmpltps %xmm1, %xmm5
@@ -843,38 +777,23 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; SSE-NEXT: cmpltps %xmm0, %xmm4
; SSE-NEXT: pshufb %xmm3, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
; SSE-NEXT: psllw $15, %xmm4
; SSE-NEXT: psraw $15, %xmm4
; SSE-NEXT: pshufb %xmm2, %xmm4
; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm6[0]
; SSE-NEXT: psllw $7, %xmm4
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; SSE-NEXT: pand %xmm1, %xmm4
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: pxor %xmm5, %xmm5
; SSE-NEXT: pcmpgtb %xmm4, %xmm5
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm11
; SSE-NEXT: pshufb %xmm3, %xmm11
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm9
; SSE-NEXT: pshufb %xmm3, %xmm9
; SSE-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm11[0]
; SSE-NEXT: psllw $15, %xmm9
; SSE-NEXT: psraw $15, %xmm9
; SSE-NEXT: pshufb %xmm2, %xmm9
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm10
; SSE-NEXT: pshufb %xmm3, %xmm10
; SSE-NEXT: cmpltps {{[0-9]+}}(%rsp), %xmm8
; SSE-NEXT: pshufb %xmm3, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm10[0]
; SSE-NEXT: psllw $15, %xmm8
; SSE-NEXT: psraw $15, %xmm8
; SSE-NEXT: pshufb %xmm2, %xmm8
; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm9[0]
; SSE-NEXT: psllw $7, %xmm8
; SSE-NEXT: pand %xmm1, %xmm8
; SSE-NEXT: pcmpgtb %xmm8, %xmm0
; SSE-NEXT: pand %xmm5, %xmm0
; SSE-NEXT: pmovmskb %xmm0, %eax
; SSE-NEXT: pand %xmm4, %xmm8
; SSE-NEXT: pmovmskb %xmm8, %eax
; SSE-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>
; SSE-NEXT: retq
;
@@ -890,23 +809,15 @@ define i16 @v16f32(<16 x float> %a, <16 x float> %b, <16 x float> %c, <16 x floa
; AVX12-NEXT: vpacksswb %xmm2, %xmm0, %xmm0
; AVX12-NEXT: vpshufb %xmm3, %xmm0, %xmm0
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; AVX12-NEXT: vpsllw $7, %xmm0, %xmm0
; AVX12-NEXT: vmovdqa {{.*#+}} xmm1 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128]
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX12-NEXT: vpcmpgtb %xmm0, %xmm2, %xmm0
; AVX12-NEXT: vcmpltps %ymm5, %ymm7, %ymm5
; AVX12-NEXT: vextractf128 $1, %ymm5, %xmm7
; AVX12-NEXT: vpacksswb %xmm7, %xmm5, %xmm5
; AVX12-NEXT: vpshufb %xmm3, %xmm5, %xmm5
; AVX12-NEXT: vcmpltps %ymm4, %ymm6, %ymm4
; AVX12-NEXT: vextractf128 $1, %ymm4, %xmm6
; AVX12-NEXT: vpacksswb %xmm6, %xmm4, %xmm4
; AVX12-NEXT: vpshufb %xmm3, %xmm4, %xmm3
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm5[0]
; AVX12-NEXT: vpsllw $7, %xmm3, %xmm3
; AVX12-NEXT: vpand %xmm1, %xmm3, %xmm1
; AVX12-NEXT: vpcmpgtb %xmm1, %xmm2, %xmm1
; AVX12-NEXT: vcmpltps %ymm5, %ymm7, %ymm1
; AVX12-NEXT: vextractf128 $1, %ymm1, %xmm2
; AVX12-NEXT: vpacksswb %xmm2, %xmm1, %xmm1
; AVX12-NEXT: vpshufb %xmm3, %xmm1, %xmm1
; AVX12-NEXT: vcmpltps %ymm4, %ymm6, %ymm2
; AVX12-NEXT: vextractf128 $1, %ymm2, %xmm4
; AVX12-NEXT: vpacksswb %xmm4, %xmm2, %xmm2
; AVX12-NEXT: vpshufb %xmm3, %xmm2, %xmm2
; AVX12-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0]
; AVX12-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX12-NEXT: vpmovmskb %xmm0, %eax
; AVX12-NEXT: # kill: %AX<def> %AX<kill> %EAX<kill>


@@ -314,8 +314,6 @@ define void @example25() nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; SSE2-NEXT: psllw $15, %xmm1
; SSE2-NEXT: psraw $15, %xmm1
; SSE2-NEXT: movaps dc+4096(%rax), %xmm2
; SSE2-NEXT: movaps dc+4112(%rax), %xmm3
; SSE2-NEXT: cmpltps dd+4112(%rax), %xmm3
@@ -327,8 +325,6 @@ define void @example25() nounwind {
; SSE2-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,6,6,7]
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE2-NEXT: psllw $15, %xmm2
; SSE2-NEXT: psraw $15, %xmm2
; SSE2-NEXT: pand %xmm1, %xmm2
; SSE2-NEXT: movdqa %xmm2, %xmm1
; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
@@ -357,8 +353,6 @@ define void @example25() nounwind {
; SSE41-NEXT: cmpltps db+4096(%rax), %xmm2
; SSE41-NEXT: pshufb %xmm0, %xmm2
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
; SSE41-NEXT: psllw $15, %xmm2
; SSE41-NEXT: psraw $15, %xmm2
; SSE41-NEXT: movaps dc+4096(%rax), %xmm3
; SSE41-NEXT: movaps dc+4112(%rax), %xmm4
; SSE41-NEXT: cmpltps dd+4112(%rax), %xmm4
@@ -366,8 +360,6 @@ define void @example25() nounwind {
; SSE41-NEXT: cmpltps dd+4096(%rax), %xmm3
; SSE41-NEXT: pshufb %xmm0, %xmm3
; SSE41-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm4[0]
; SSE41-NEXT: psllw $15, %xmm3
; SSE41-NEXT: psraw $15, %xmm3
; SSE41-NEXT: pand %xmm2, %xmm3
; SSE41-NEXT: pmovzxwd {{.*#+}} xmm2 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
; SSE41-NEXT: pand %xmm1, %xmm2


@@ -99,119 +99,100 @@ while.end: ; preds = %while.body, %entry
define <16 x float> @foo(<16 x float> %x) {
; CHECK-LABEL: foo:
; CHECK: ## BB#0: ## %bb
; CHECK-NEXT: movaps %xmm3, %xmm9
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ## 16-byte Spill
; CHECK-NEXT: xorps %xmm3, %xmm3
; CHECK-NEXT: xorps %xmm4, %xmm4
; CHECK-NEXT: pxor %xmm6, %xmm6
; CHECK-NEXT: pcmpgtd %xmm0, %xmm6
; CHECK-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[0,2,2,0,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,4]
; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,1]
; CHECK-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,3,2,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,6,4,5]
; CHECK-NEXT: psllw $15, %xmm4
; CHECK-NEXT: psraw $15, %xmm4
; CHECK-NEXT: pand {{.*}}(%rip), %xmm4
; CHECK-NEXT: packuswb %xmm4, %xmm4
; CHECK-NEXT: psllw $7, %xmm4
; CHECK-NEXT: pand {{.*}}(%rip), %xmm4
; CHECK-NEXT: pxor %xmm15, %xmm15
; CHECK-NEXT: pcmpgtb %xmm4, %xmm15
; CHECK-NEXT: movdqa %xmm0, %xmm12
; CHECK-NEXT: cmpltps %xmm3, %xmm12
; CHECK-NEXT: movdqa %xmm6, %xmm10
; CHECK-NEXT: pxor %xmm12, %xmm10
; CHECK-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,255]
; CHECK-NEXT: pand %xmm6, %xmm5
; CHECK-NEXT: cvttps2dq %xmm0, %xmm13
; CHECK-NEXT: movdqa %xmm0, %xmm10
; CHECK-NEXT: cmpltps %xmm4, %xmm10
; CHECK-NEXT: movdqa %xmm6, %xmm8
; CHECK-NEXT: pxor %xmm10, %xmm8
; CHECK-NEXT: cvttps2dq %xmm1, %xmm14
; CHECK-NEXT: movaps %xmm1, %xmm11
; CHECK-NEXT: cmpltps %xmm3, %xmm11
; CHECK-NEXT: movdqa %xmm6, %xmm4
; CHECK-NEXT: pxor %xmm11, %xmm4
; CHECK-NEXT: cvttps2dq %xmm2, %xmm8
; CHECK-NEXT: cmpltps %xmm3, %xmm2
; CHECK-NEXT: cmpltps %xmm4, %xmm11
; CHECK-NEXT: movdqa %xmm6, %xmm9
; CHECK-NEXT: pxor %xmm11, %xmm9
; CHECK-NEXT: cvttps2dq %xmm2, %xmm1
; CHECK-NEXT: cmpltps %xmm4, %xmm2
; CHECK-NEXT: movdqa %xmm6, %xmm7
; CHECK-NEXT: pxor %xmm2, %xmm7
; CHECK-NEXT: cvttps2dq %xmm9, %xmm13
; CHECK-NEXT: cmpltps %xmm3, %xmm9
; CHECK-NEXT: pxor %xmm9, %xmm6
; CHECK-NEXT: movdqa {{.*#+}} xmm14 = [1,1,1,1]
; CHECK-NEXT: pand %xmm14, %xmm6
; CHECK-NEXT: pand %xmm14, %xmm7
; CHECK-NEXT: pand %xmm14, %xmm4
; CHECK-NEXT: pand %xmm14, %xmm10
; CHECK-NEXT: cvtdq2ps %xmm8, %xmm14
; CHECK-NEXT: cvtdq2ps %xmm13, %xmm1
; CHECK-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) ## 16-byte Spill
; CHECK-NEXT: xorps %xmm5, %xmm5
; CHECK-NEXT: cmpltps %xmm1, %xmm5
; CHECK-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: cmpltps %xmm14, %xmm1
; CHECK-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm5[0]
; CHECK-NEXT: cvttps2dq -{{[0-9]+}}(%rsp), %xmm5 ## 16-byte Folded Reload
; CHECK-NEXT: cvttps2dq %xmm0, %xmm0
; CHECK-NEXT: cvtdq2ps %xmm0, %xmm8
; CHECK-NEXT: cvtdq2ps %xmm5, %xmm13
; CHECK-NEXT: xorps %xmm0, %xmm0
; CHECK-NEXT: cmpltps %xmm13, %xmm0
; CHECK-NEXT: cvttps2dq %xmm3, %xmm12
; CHECK-NEXT: cmpltps %xmm4, %xmm3
; CHECK-NEXT: pxor %xmm3, %xmm6
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [1,1,1,1]
; CHECK-NEXT: pand %xmm0, %xmm6
; CHECK-NEXT: pand %xmm0, %xmm7
; CHECK-NEXT: pand %xmm0, %xmm9
; CHECK-NEXT: pand %xmm0, %xmm8
; CHECK-NEXT: cvtdq2ps %xmm13, %xmm15
; CHECK-NEXT: cvtdq2ps %xmm14, %xmm14
; CHECK-NEXT: cvtdq2ps %xmm1, %xmm13
; CHECK-NEXT: cvtdq2ps %xmm12, %xmm12
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: cmpltps %xmm12, %xmm0
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: xorps %xmm5, %xmm5
; CHECK-NEXT: cmpltps %xmm8, %xmm5
; CHECK-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,2,3]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm0[0]
; CHECK-NEXT: psllw $15, %xmm1
; CHECK-NEXT: psraw $15, %xmm1
; CHECK-NEXT: xorps %xmm1, %xmm1
; CHECK-NEXT: cmpltps %xmm13, %xmm1
; CHECK-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm0[0]
; CHECK-NEXT: pxor %xmm0, %xmm0
; CHECK-NEXT: cmpltps %xmm14, %xmm0
; CHECK-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; CHECK-NEXT: cmpltps %xmm15, %xmm4
; CHECK-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,2,2,3,4,5,6,7]
; CHECK-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,6,7]
; CHECK-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,2,2,3]
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; CHECK-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255]
; CHECK-NEXT: pand %xmm0, %xmm1
; CHECK-NEXT: psllw $15, %xmm5
; CHECK-NEXT: psraw $15, %xmm5
; CHECK-NEXT: pand %xmm0, %xmm5
; CHECK-NEXT: packuswb %xmm1, %xmm5
; CHECK-NEXT: psllw $7, %xmm5
; CHECK-NEXT: pand {{.*}}(%rip), %xmm5
; CHECK-NEXT: pcmpgtb %xmm5, %xmm3
; CHECK-NEXT: pand %xmm15, %xmm3
; CHECK-NEXT: movdqa %xmm3, %xmm1
; CHECK-NEXT: pand %xmm0, %xmm4
; CHECK-NEXT: packuswb %xmm5, %xmm5
; CHECK-NEXT: packuswb %xmm5, %xmm5
; CHECK-NEXT: packuswb %xmm1, %xmm4
; CHECK-NEXT: pand %xmm5, %xmm4
; CHECK-NEXT: movdqa %xmm4, %xmm1
; CHECK-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; CHECK-NEXT: movdqa %xmm1, %xmm0
; CHECK-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: psrad $31, %xmm0
; CHECK-NEXT: pxor %xmm10, %xmm0
; CHECK-NEXT: pxor %xmm8, %xmm0
; CHECK-NEXT: pxor %xmm15, %xmm0
; CHECK-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; CHECK-NEXT: pslld $31, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; CHECK-NEXT: pxor %xmm4, %xmm1
; CHECK-NEXT: pxor %xmm13, %xmm1
; CHECK-NEXT: movdqa %xmm3, %xmm5
; CHECK-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; CHECK-NEXT: pxor %xmm9, %xmm1
; CHECK-NEXT: pxor %xmm14, %xmm1
; CHECK-NEXT: movdqa %xmm4, %xmm5
; CHECK-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
; CHECK-NEXT: pslld $31, %xmm5
; CHECK-NEXT: psrad $31, %xmm5
; CHECK-NEXT: pxor %xmm7, %xmm5
; CHECK-NEXT: pxor %xmm14, %xmm5
; CHECK-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm0[4],xmm3[5],xmm0[5],xmm3[6],xmm0[6],xmm3[7],xmm0[7]
; CHECK-NEXT: pslld $31, %xmm3
; CHECK-NEXT: psrad $31, %xmm3
; CHECK-NEXT: pxor %xmm6, %xmm3
; CHECK-NEXT: pxor -{{[0-9]+}}(%rsp), %xmm3 ## 16-byte Folded Reload
; CHECK-NEXT: pand %xmm9, %xmm3
; CHECK-NEXT: pxor %xmm13, %xmm5
; CHECK-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7]
; CHECK-NEXT: pslld $31, %xmm4
; CHECK-NEXT: psrad $31, %xmm4
; CHECK-NEXT: pxor %xmm6, %xmm4
; CHECK-NEXT: pxor %xmm12, %xmm4
; CHECK-NEXT: pand %xmm3, %xmm4
; CHECK-NEXT: pand %xmm2, %xmm5
; CHECK-NEXT: pand %xmm11, %xmm1
; CHECK-NEXT: pand %xmm12, %xmm0
; CHECK-NEXT: pxor %xmm10, %xmm0
; CHECK-NEXT: pxor %xmm4, %xmm1
; CHECK-NEXT: pand %xmm10, %xmm0
; CHECK-NEXT: pxor %xmm8, %xmm0
; CHECK-NEXT: pxor %xmm9, %xmm1
; CHECK-NEXT: pxor %xmm7, %xmm5
; CHECK-NEXT: pxor %xmm6, %xmm3
; CHECK-NEXT: pxor %xmm6, %xmm4
; CHECK-NEXT: movdqa %xmm5, %xmm2
; CHECK-NEXT: movdqa %xmm4, %xmm3
; CHECK-NEXT: retq
bb:
%v3 = icmp slt <16 x i32> undef, zeroinitializer

File diff suppressed because it is too large.


@@ -26,28 +26,24 @@ define void @t2(<3 x i64>* %dst, <3 x i64> %src1, <3 x i64> %src2) nounwind read
; CHECK-NEXT: movq %r9, %xmm1
; CHECK-NEXT: movq %r8, %xmm0
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; CHECK-NEXT: movq %rdx, %xmm2
; CHECK-NEXT: movq %rsi, %xmm1
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm2[0]
; CHECK-NEXT: movq %rcx, %xmm2
; CHECK-NEXT: movq %rdx, %xmm1
; CHECK-NEXT: movq %rsi, %xmm2
; CHECK-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm1[0]
; CHECK-NEXT: movq %rcx, %xmm1
; CHECK-NEXT: movq {{.*#+}} xmm3 = mem[0],zero
; CHECK-NEXT: pxor %xmm4, %xmm4
; CHECK-NEXT: pcmpeqq %xmm4, %xmm2
; CHECK-NEXT: pcmpeqd %xmm5, %xmm5
; CHECK-NEXT: pxor %xmm5, %xmm2
; CHECK-NEXT: pcmpeqq %xmm4, %xmm1
; CHECK-NEXT: pcmpeqd %xmm5, %xmm5
; CHECK-NEXT: pxor %xmm5, %xmm1
; CHECK-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; CHECK-NEXT: pslld $31, %xmm1
; CHECK-NEXT: psrad $31, %xmm1
; CHECK-NEXT: pcmpeqq %xmm4, %xmm2
; CHECK-NEXT: pxor %xmm5, %xmm2
; CHECK-NEXT: shufps {{.*#+}} xmm2 = xmm2[0,2],xmm1[0,2]
; CHECK-NEXT: pcmpeqq %xmm4, %xmm3
; CHECK-NEXT: pxor %xmm5, %xmm3
; CHECK-NEXT: pcmpeqq %xmm4, %xmm0
; CHECK-NEXT: pxor %xmm5, %xmm0
; CHECK-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm3[0,2]
; CHECK-NEXT: pslld $31, %xmm0
; CHECK-NEXT: psrad $31, %xmm0
; CHECK-NEXT: pand %xmm1, %xmm0
; CHECK-NEXT: andps %xmm2, %xmm0
; CHECK-NEXT: pmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero
; CHECK-NEXT: psllq $63, %xmm1
; CHECK-NEXT: psrad $31, %xmm1