[X86][AVX2] Fix v16i16 SHL lowering (PR27730)
The AVX2 v16i16 shift lowering works by unpacking to 2 x v8i32, performing the shift and then truncating the result. The unpacking is used to place the values in the upper 16 bits of each 32-bit lane so that we can correctly sign-extend for SRA shifts. Unfortunately we weren't ensuring that the lower 16 bits were zero, so SHL didn't correctly shift in zero bits.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@271796 91177308-0d34-0410-b5e6-96231b3b80d8

parent 180fafda4a
commit ebbbdf51f2
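
To make the failure mode concrete, here is a scalar model of the lowering for a single lane (an illustrative sketch written for this summary, not code from the commit; the helper names are invented and shift amounts are assumed in range). Each i16 element is widened into the upper half of an i32 lane, shifted at i32 width, then narrowed with a logical right shift by 16:

#include <cstdint>
#include <cstdio>

// Fixed lowering: unpacking R against a zero vector (UNPCKL/UNPCKH(Z, R))
// places the element in the upper 16 bits with zeros in the lower 16 bits.
static uint16_t shl_fixed(uint16_t v, unsigned a) { // assumes a < 16
  uint32_t lane = uint32_t(v) << 16;                // lane = [ v | 0x0000 ]
  return uint16_t((lane << a) >> 16);               // vpsllvd, then vpsrld $16
}

// Buggy lowering: unpacking R with itself (UNPCKL/UNPCKH(R, R)) left a
// second copy of the element in the lower 16 bits, so a left shift dragged
// that copy's top bits into the kept result instead of shifting in zeros.
static uint16_t shl_buggy(uint16_t v, unsigned a) {
  uint32_t lane = (uint32_t(v) << 16) | v;          // lane = [ v | v ]
  return uint16_t((lane << a) >> 16);
}

int main() {
  printf("fixed: 0x%04x\n", shl_fixed(0x8001, 1)); // 0x0002 (correct)
  printf("buggy: 0x%04x\n", shl_buggy(0x8001, 1)); // 0x0003 (bit 15 of the low copy leaked in)
}

Right shifts were unaffected by the garbage in the lower half, since SRA/SRL only ever move those bits further down before they are discarded; only SHL could pull them upward into the result, which is why the fix unpacks R against the zero vector.
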
lib/Target/X86/X86ISelLowering.cpp

@@ -20082,8 +20082,8 @@ static SDValue LowerShift(SDValue Op, const X86Subtarget &Subtarget,
     SDValue Z = getZeroVector(VT, Subtarget, DAG, dl);
     SDValue ALo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Amt, Z);
     SDValue AHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Amt, Z);
-    SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, R, R);
-    SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, R, R);
+    SDValue RLo = DAG.getNode(X86ISD::UNPCKL, dl, VT, Z, R);
+    SDValue RHi = DAG.getNode(X86ISD::UNPCKH, dl, VT, Z, R);
     ALo = DAG.getBitcast(ExtVT, ALo);
     AHi = DAG.getBitcast(ExtVT, AHi);
     RLo = DAG.getBitcast(ExtVT, RLo);

test/CodeGen/X86/avx2-vector-shifts.ll

@@ -290,11 +290,11 @@ define <16 x i16> @shl_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; CHECK-NEXT: vpsllvd %ymm3, %ymm4, %ymm3
 ; CHECK-NEXT: vpsrld $16, %ymm3, %ymm3
 ; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; CHECK-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT: vpsrld $16, %ymm0, %ymm0
 ; CHECK-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

@@ -341,11 +341,11 @@ define <16 x i16> @ashr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; CHECK-NEXT: vpsravd %ymm3, %ymm4, %ymm3
 ; CHECK-NEXT: vpsrld $16, %ymm3, %ymm3
 ; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; CHECK-NEXT: vpsravd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT: vpsrld $16, %ymm0, %ymm0
 ; CHECK-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

@@ -405,11 +405,11 @@ define <16 x i16> @lshr_16i16(<16 x i16> %r, <16 x i16> %a) nounwind {
 ; CHECK: # BB#0:
 ; CHECK-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; CHECK-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; CHECK-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3
 ; CHECK-NEXT: vpsrld $16, %ymm3, %ymm3
 ; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; CHECK-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; CHECK-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
 ; CHECK-NEXT: vpsrld $16, %ymm0, %ymm0
 ; CHECK-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

test/CodeGen/X86/vector-rotate-256.ll

@@ -223,11 +223,11 @@ define <16 x i16> @var_rotate_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; AVX2-NEXT: vpsubw %ymm1, %ymm2, %ymm2
 ; AVX2-NEXT: vpxor %ymm3, %ymm3, %ymm3
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm3[4],ymm1[5],ymm3[5],ymm1[6],ymm3[6],ymm1[7],ymm3[7],ymm1[12],ymm3[12],ymm1[13],ymm3[13],ymm1[14],ymm3[14],ymm1[15],ymm3[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
 ; AVX2-NEXT: vpsllvd %ymm4, %ymm5, %ymm4
 ; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
 ; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm3[0],ymm1[1],ymm3[1],ymm1[2],ymm3[2],ymm1[3],ymm3[3],ymm1[8],ymm3[8],ymm1[9],ymm3[9],ymm1[10],ymm3[10],ymm1[11],ymm3[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
 ; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm1
 ; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
 ; AVX2-NEXT: vpackusdw %ymm4, %ymm1, %ymm1

@@ -531,12 +531,12 @@ define <16 x i16> @constant_rotate_v16i16(<16 x i16> %a) nounwind {
 ; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [16,15,14,13,12,11,10,9,8,7,6,5,4,3,2,1]
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX2-NEXT: vpsrlvd %ymm4, %ymm5, %ymm4
 ; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX2-NEXT: vpsrlvd %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm4, %ymm0, %ymm0
 ; AVX2-NEXT: vpor %ymm0, %ymm1, %ymm0

test/CodeGen/X86/vector-shift-ashr-256.ll

@@ -178,11 +178,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
 ; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

@@ -784,12 +784,12 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX2-NEXT: vpsravd %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX2-NEXT: vpsravd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-NEXT: vpsravd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT: retq

test/CodeGen/X86/vector-shift-ashr-512.ll

@@ -28,20 +28,20 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
 ; AVX512DQ: ## BB#0:
 ; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
 ; AVX512DQ-NEXT: vpsravd %ymm5, %ymm6, %ymm5
 ; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
 ; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
 ; AVX512DQ-NEXT: vpsravd %ymm2, %ymm5, %ymm2
 ; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
 ; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1

@@ -129,7 +129,7 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
 ; ALL-LABEL: splatvar_shift_v16i32:
 ; ALL: ## BB#0:
 ; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; ALL-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; ALL-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
 ; ALL-NEXT: vpsrad %xmm1, %zmm0, %zmm0
 ; ALL-NEXT: retq
 %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer

@@ -238,21 +238,21 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
 ; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-NEXT: vpsravd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX512DQ-NEXT: vpsravd %ymm3, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; AVX512DQ-NEXT: vpsravd %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-NEXT: vpsravd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
+; AVX512DQ-NEXT: vpsravd %ymm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vpsrld $16, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; AVX512DQ-NEXT: vpsravd %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
 ; AVX512DQ-NEXT: retq
 ;
 ; AVX512BW-LABEL: constant_shift_v32i16:

test/CodeGen/X86/vector-shift-lshr-256.ll

@@ -155,11 +155,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX2-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
 ; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

@@ -639,12 +639,12 @@ define <16 x i16> @constant_shift_v16i16(<16 x i16> %a) nounwind {
 ; AVX2-NEXT: vpxor %ymm1, %ymm1, %ymm1
 ; AVX2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
 ; AVX2-NEXT: vpsrlvd %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
 ; AVX2-NEXT: retq

test/CodeGen/X86/vector-shift-lshr-512.ll

@@ -29,20 +29,20 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
 ; AVX512DQ: ## BB#0:
 ; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
 ; AVX512DQ-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5
 ; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
 ; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
 ; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2
 ; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
 ; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1

@@ -110,7 +110,7 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
 ; ALL-LABEL: splatvar_shift_v16i32:
 ; ALL: ## BB#0:
 ; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; ALL-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; ALL-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
 ; ALL-NEXT: vpsrld %xmm1, %zmm0, %zmm0
 ; ALL-NEXT: retq
 %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer

@@ -202,21 +202,21 @@ define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
 ; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
+; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
-; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
-; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
+; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm4
+; AVX512DQ-NEXT: vpsrld $16, %ymm4, %ymm4
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
+; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX512DQ-NEXT: vpackusdw %ymm3, %ymm1, %ymm1
+; AVX512DQ-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
 ; AVX512DQ-NEXT: retq
 ;
 ; AVX512BW-LABEL: constant_shift_v32i16:

test/CodeGen/X86/vector-shift-shl-256.ll

@@ -136,11 +136,11 @@ define <16 x i16> @var_shift_v16i16(<16 x i16> %a, <16 x i16> %b) nounwind {
 ; AVX2: # BB#0:
 ; AVX2-NEXT: vpxor %ymm2, %ymm2, %ymm2
 ; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm2[4],ymm1[5],ymm2[5],ymm1[6],ymm2[6],ymm1[7],ymm2[7],ymm1[12],ymm2[12],ymm1[13],ymm2[13],ymm1[14],ymm2[14],ymm1[15],ymm2[15]
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
 ; AVX2-NEXT: vpsllvd %ymm3, %ymm4, %ymm3
 ; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
 ; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0],ymm2[0],ymm1[1],ymm2[1],ymm1[2],ymm2[2],ymm1[3],ymm2[3],ymm1[8],ymm2[8],ymm1[9],ymm2[9],ymm1[10],ymm2[10],ymm1[11],ymm2[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
 ; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0

test/CodeGen/X86/vector-shift-shl-512.ll

@@ -29,20 +29,20 @@ define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
 ; AVX512DQ: ## BB#0:
 ; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm0[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
 ; AVX512DQ-NEXT: vpsllvd %ymm5, %ymm6, %ymm5
 ; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
 ; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
 ; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
-; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm1[4,4,5,5,6,6,7,7,12,12,13,13,14,14,15,15]
+; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
 ; AVX512DQ-NEXT: vpsllvd %ymm2, %ymm5, %ymm2
 ; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
 ; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
-; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm1[0,0,1,1,2,2,3,3,8,8,9,9,10,10,11,11]
+; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
 ; AVX512DQ-NEXT: vpsllvd %ymm3, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
 ; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1

@@ -106,7 +106,7 @@ define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind
 ; ALL-LABEL: splatvar_shift_v16i32:
 ; ALL: ## BB#0:
 ; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
-; ALL-NEXT: vmovss %xmm1, %xmm2, %xmm1
+; ALL-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
 ; ALL-NEXT: vpslld %xmm1, %zmm0, %zmm0
 ; ALL-NEXT: retq
 %splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer