[X86][SSE] Add extra trunc(shl) test cases

The existing trunc_shl_17_v8i16_v8i32 test case should (but currently doesn't) fold to zero: the truncation to i16 keeps only the low 16 bits, and a left shift by 16 or more clears all of them (see the sketch after the list). I've added 2 new test cases:
 - trunc_shl_16_v8i16_v8i32, which folds to zero (this is actually testing the target faux shuffle combine)
 - trunc_shl_15_v8i16_v8i32, which should perform the full shl + truncate
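
For reference, a minimal scalar IR sketch (hypothetical, not part of the patch) of why any shift amount >= 16 makes the truncated result dead:

define i16 @trunc_shl_16_sketch(i32 %x) {
  %shl = shl i32 %x, 16          ; bits [15:0] of %shl are guaranteed zero
  %conv = trunc i32 %shl to i16  ; trunc keeps only bits [15:0]
  ret i16 %conv                  ; always 0, so this should fold to ret i16 0
}

With a shift of 15, bit 0 of %x lands in bit 15 of the truncated result, so the shl + truncate must actually be performed.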

llvm-svn: 334188
Simon Pilgrim 2018-06-07 11:22:52 +00:00
parent fff954c47e
commit d656899c27


@@ -26,8 +26,48 @@ define void @trunc_shl_7_v4i32_v4i64(<4 x i32> addrspace(1)* %out, <4 x i64> add
   ret void
 }
 
-define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
-; SSE2-LABEL: trunc_shl_v8i16_v8i32:
+define <8 x i16> @trunc_shl_15_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_15_v8i16_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    pslld $16, %xmm1
+; SSE2-NEXT:    psrad $16, %xmm1
+; SSE2-NEXT:    pslld $16, %xmm0
+; SSE2-NEXT:    psrad $16, %xmm0
+; SSE2-NEXT:    packssdw %xmm1, %xmm0
+; SSE2-NEXT:    psllw $15, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: trunc_shl_15_v8i16_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]
+; AVX2-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
+; AVX2-NEXT:    vpsllw $15, %xmm0, %xmm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %shl = shl <8 x i32> %a, <i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15, i32 15>
+  %conv = trunc <8 x i32> %shl to <8 x i16>
+  ret <8 x i16> %conv
+}
+
+define <8 x i16> @trunc_shl_16_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_16_v8i16_v8i32:
+; SSE2:       # %bb.0:
+; SSE2-NEXT:    xorps %xmm0, %xmm0
+; SSE2-NEXT:    retq
+;
+; AVX2-LABEL: trunc_shl_16_v8i16_v8i32:
+; AVX2:       # %bb.0:
+; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,ymm0[28,29]
+; AVX2-NEXT:    # kill: def $xmm0 killed $xmm0 killed $ymm0
+; AVX2-NEXT:    vzeroupper
+; AVX2-NEXT:    retq
+  %shl = shl <8 x i32> %a, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
+  %conv = trunc <8 x i32> %shl to <8 x i16>
+  ret <8 x i16> %conv
+}
+
+define <8 x i16> @trunc_shl_17_v8i16_v8i32(<8 x i32> %a) {
+; SSE2-LABEL: trunc_shl_17_v8i16_v8i32:
 ; SSE2:       # %bb.0:
 ; SSE2-NEXT:    pslld $17, %xmm0
 ; SSE2-NEXT:    pslld $17, %xmm1
@@ -38,7 +78,7 @@ define <8 x i16> @trunc_shl_v8i16_v8i32(<8 x i32> %a) {
 ; SSE2-NEXT:    packssdw %xmm1, %xmm0
 ; SSE2-NEXT:    retq
 ;
-; AVX2-LABEL: trunc_shl_v8i16_v8i32:
+; AVX2-LABEL: trunc_shl_17_v8i16_v8i32:
 ; AVX2:       # %bb.0:
 ; AVX2-NEXT:    vpslld $17, %ymm0, %ymm0
 ; AVX2-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31]