ebbbdf51f2

The AVX2 v16i16 shift lowering works by unpacking to 2 x v8i32, performing the shift and then truncating the result. The unpacking places the values in the upper 16 bits of each 32-bit lane so that SRA shifts sign-extend correctly. Unfortunately, we weren't ensuring that the lower 16 bits were zero, which SHL requires in order to shift in zero bits from below.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@271796 91177308-0d34-0410-b5e6-96231b3b80d8
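The invariant is easiest to see on a single lane. The sketch below models the widen/shift/narrow scheme in plain IR for one i16 element (the function is illustrative only, not code taken from the lowering): the value is placed in the upper half of an i32, the shift is performed at 32 bits, and the upper half is extracted afterwards. Positioning the value with a shl also zeroes the lower 16 bits, which is exactly the invariant the SHL case was missing.

  define i16 @shl16_via_i32(i16 %x, i16 %amt) {
    ; widen: value goes to bits [16,31]; bits [0,15] are guaranteed zero
    %wx   = zext i16 %x to i32
    %hi   = shl i32 %wx, 16
    %wamt = zext i16 %amt to i32
    ; 32-bit shift: zeros, not stale bits, are pulled in from the low half
    %sh   = shl i32 %hi, %wamt
    ; narrow: move the result back down and truncate
    %lo   = lshr i32 %sh, 16
    %res  = trunc i32 %lo to i16
    ret i16 %res
  }

For SRA the inner shl becomes ashr: because the value sits in the upper half, the arithmetic shift sees the correct sign bit and extends it.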
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512dq | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512DQ
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512 --check-prefix=AVX512BW
;
; Variable Shifts
;
define <8 x i64> @var_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
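; AVX512F provides variable shifts natively at i32/i64 granularity
; (vpsrlvd/vpsrlvq), so this and the v16i32 case below each lower to a
; single instruction.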
; ALL-LABEL: var_shift_v8i64:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlvq %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, %b
ret <8 x i64> %shift
}
define <16 x i32> @var_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
; ALL-LABEL: var_shift_v16i32:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, %b
ret <16 x i32> %shift
}
define <32 x i16> @var_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
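; v32i16 is only legal with AVX512BW, which adds vpsrlvw. Without it the
; vector is split into two 256-bit halves, and each half is widened to
; v8i32 lanes: the data words are unpacked against zero into the high half
; of each dword, the amounts are zero-extended into the low half, the
; dwords are shifted with vpsrlvd, shifted back down by 16, and re-packed
; with vpackusdw.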
; AVX512DQ-LABEL: var_shift_v32i16:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpxor %ymm4, %ymm4, %ymm4
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm0[4],ymm4[5],ymm0[5],ymm4[6],ymm0[6],ymm4[7],ymm0[7],ymm4[12],ymm0[12],ymm4[13],ymm0[13],ymm4[14],ymm0[14],ymm4[15],ymm0[15]
; AVX512DQ-NEXT: vpsrlvd %ymm5, %ymm6, %ymm5
; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm4[0],ymm0[0],ymm4[1],ymm0[1],ymm4[2],ymm0[2],ymm4[3],ymm0[3],ymm4[8],ymm0[8],ymm4[9],ymm0[9],ymm4[10],ymm0[10],ymm4[11],ymm0[11]
; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm2 = ymm3[4],ymm4[4],ymm3[5],ymm4[5],ymm3[6],ymm4[6],ymm3[7],ymm4[7],ymm3[12],ymm4[12],ymm3[13],ymm4[13],ymm3[14],ymm4[14],ymm3[15],ymm4[15]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm4[4],ymm1[4],ymm4[5],ymm1[5],ymm4[6],ymm1[6],ymm4[7],ymm1[7],ymm4[12],ymm1[12],ymm4[13],ymm1[13],ymm4[14],ymm1[14],ymm4[15],ymm1[15]
; AVX512DQ-NEXT: vpsrlvd %ymm2, %ymm5, %ymm2
; AVX512DQ-NEXT: vpsrld $16, %ymm2, %ymm2
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm4[0],ymm3[1],ymm4[1],ymm3[2],ymm4[2],ymm3[3],ymm4[3],ymm3[8],ymm4[8],ymm3[9],ymm4[9],ymm3[10],ymm4[10],ymm3[11],ymm4[11]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm4[0],ymm1[0],ymm4[1],ymm1[1],ymm4[2],ymm1[2],ymm4[3],ymm1[3],ymm4[8],ymm1[8],ymm4[9],ymm1[9],ymm4[10],ymm1[10],ymm4[11],ymm1[11]
; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX512DQ-NEXT: vpackusdw %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: var_shift_v32i16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsrlvw %zmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, %b
ret <32 x i16> %shift
}
define <64 x i8> @var_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
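; No byte-granularity shift exists, so the lowering is bit-serial: shift
; the whole vector right by 4, 2, then 1 at word granularity, mask off the
; bits that crossed a byte boundary, and after each step use vpblendvb to
; keep the shifted bytes only where the corresponding bit of the shift
; amount is set. vpsllw $5 moves the amount's bit 2 into each byte's sign
; bit (which vpblendvb tests); the vpaddb doublings expose bit 1 and bit 0
; in turn.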
; AVX512DQ-LABEL: var_shift_v64i8:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm6 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512DQ-NEXT: vpand %ymm6, %ymm4, %ymm4
; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm4
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512DQ-NEXT: vpand %ymm7, %ymm4, %ymm4
; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm4, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpsllw $5, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm6, %ymm2, %ymm2
; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512DQ-NEXT: vpaddb %ymm3, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm3, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
%shift = lshr <64 x i8> %a, %b
ret <64 x i8> %shift
}
;
; Uniform Variable Shifts
;
define <8 x i64> @splatvar_shift_v8i64(<8 x i64> %a, <8 x i64> %b) nounwind {
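; With a splatted amount the shift count can stay scalar: vpsrlq takes its
; count from the low 64 bits of the xmm operand and applies it to every
; lane.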
; ALL-LABEL: splatvar_shift_v8i64:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlq %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%splat = shufflevector <8 x i64> %b, <8 x i64> undef, <8 x i32> zeroinitializer
%shift = lshr <8 x i64> %a, %splat
ret <8 x i64> %shift
}
define <16 x i32> @splatvar_shift_v16i32(<16 x i32> %a, <16 x i32> %b) nounwind {
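; vpsrld also reads a 64-bit count, so element 1 of the splat (bits 32-63
; of that count) is zeroed first via the vmovss blend with a zero vector.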
; ALL-LABEL: splatvar_shift_v16i32:
; ALL: ## BB#0:
; ALL-NEXT: vxorps %xmm2, %xmm2, %xmm2
; ALL-NEXT: vmovss {{.*#+}} xmm1 = xmm1[0],xmm2[1,2,3]
; ALL-NEXT: vpsrld %xmm1, %zmm0, %zmm0
; ALL-NEXT: retq
%splat = shufflevector <16 x i32> %b, <16 x i32> undef, <16 x i32> zeroinitializer
%shift = lshr <16 x i32> %a, %splat
ret <16 x i32> %shift
}
define <32 x i16> @splatvar_shift_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind {
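; vpsrlw likewise consumes a 64-bit count, so the splatted word is
; zero-extended through a GPR (vmovd/movzwl/vmovd) first; without AVX512BW
; the shift is then applied to each 256-bit half separately.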
; AVX512DQ-LABEL: splatvar_shift_v32i16:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vmovd %xmm2, %eax
; AVX512DQ-NEXT: movzwl %ax, %eax
; AVX512DQ-NEXT: vmovd %eax, %xmm2
; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw %xmm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatvar_shift_v32i16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vmovd %xmm1, %eax
; AVX512BW-NEXT: movzwl %ax, %eax
; AVX512BW-NEXT: vmovd %eax, %xmm1
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%splat = shufflevector <32 x i16> %b, <32 x i16> undef, <32 x i32> zeroinitializer
%shift = lshr <32 x i16> %a, %splat
ret <32 x i16> %shift
}
define <64 x i8> @splatvar_shift_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
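; The same bit-serial blend sequence as in var_shift_v64i8, but since both
; halves share one splatted amount, the blend masks (ymm2, ymm6, ymm8) are
; computed once and reused for the second half.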
; AVX512DQ-LABEL: splatvar_shift_v64i8:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpbroadcastb %xmm2, %ymm2
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpsllw $5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512DQ-NEXT: vpand %ymm5, %ymm3, %ymm3
; AVX512DQ-NEXT: vpaddb %ymm2, %ymm2, %ymm6
; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm3
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512DQ-NEXT: vpand %ymm7, %ymm3, %ymm3
; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm3
; AVX512DQ-NEXT: vpand %ymm4, %ymm3, %ymm3
; AVX512DQ-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
%splat = shufflevector <64 x i8> %b, <64 x i8> undef, <64 x i32> zeroinitializer
%shift = lshr <64 x i8> %a, %splat
ret <64 x i8> %shift
}
;
; Constant Shifts
;
define <8 x i64> @constant_shift_v8i64(<8 x i64> %a) nounwind {
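; Constant per-lane amounts become a constant-pool vector folded into the
; memory operand of the variable shift.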
; ALL-LABEL: constant_shift_v8i64:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlvq {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, <i64 1, i64 7, i64 31, i64 62, i64 1, i64 7, i64 31, i64 62>
ret <8 x i64> %shift
}
define <16 x i32> @constant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: constant_shift_v16i32:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlvd {{.*}}(%rip), %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, <i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 8, i32 7>
ret <16 x i32> %shift
}
define <32 x i16> @constant_shift_v32i16(<32 x i16> %a) nounwind {
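; The widening scheme again; the constant amounts are materialized once
; (ymm3 = [0..15]) and the unpacked amount vectors are reused for both
; halves.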
; AVX512DQ-LABEL: constant_shift_v32i16:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm2[4],ymm3[5],ymm2[5],ymm3[6],ymm2[6],ymm3[7],ymm2[7],ymm3[12],ymm2[12],ymm3[13],ymm2[13],ymm3[14],ymm2[14],ymm3[15],ymm2[15]
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm0[4],ymm2[5],ymm0[5],ymm2[6],ymm0[6],ymm2[7],ymm0[7],ymm2[12],ymm0[12],ymm2[13],ymm0[13],ymm2[14],ymm0[14],ymm2[15],ymm0[15]
; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm5
; AVX512DQ-NEXT: vpsrld $16, %ymm5, %ymm5
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm3 = ymm3[0],ymm2[0],ymm3[1],ymm2[1],ymm3[2],ymm2[2],ymm3[3],ymm2[3],ymm3[8],ymm2[8],ymm3[9],ymm2[9],ymm3[10],ymm2[10],ymm3[11],ymm2[11]
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm2[0],ymm0[0],ymm2[1],ymm0[1],ymm2[2],ymm0[2],ymm2[3],ymm0[3],ymm2[8],ymm0[8],ymm2[9],ymm0[9],ymm2[10],ymm0[10],ymm2[11],ymm0[11]
; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX512DQ-NEXT: vpackusdw %ymm5, %ymm0, %ymm0
; AVX512DQ-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm1[4],ymm2[5],ymm1[5],ymm2[6],ymm1[6],ymm2[7],ymm1[7],ymm2[12],ymm1[12],ymm2[13],ymm1[13],ymm2[14],ymm1[14],ymm2[15],ymm1[15]
; AVX512DQ-NEXT: vpsrlvd %ymm4, %ymm5, %ymm4
; AVX512DQ-NEXT: vpsrld $16, %ymm4, %ymm4
; AVX512DQ-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm1[0],ymm2[1],ymm1[1],ymm2[2],ymm1[2],ymm2[3],ymm1[3],ymm2[8],ymm1[8],ymm2[9],ymm1[9],ymm2[10],ymm1[10],ymm2[11],ymm1[11]
; AVX512DQ-NEXT: vpsrlvd %ymm3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrld $16, %ymm1, %ymm1
; AVX512DQ-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: constant_shift_v32i16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsrlvw {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, <i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15, i16 0, i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8, i16 9, i16 10, i16 11, i16 12, i16 13, i16 14, i16 15>
ret <32 x i16> %shift
}
define <64 x i8> @constant_shift_v64i8(<64 x i8> %a) nounwind {
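; Constant byte amounts still go through the bit-serial blend sequence; the
; amounts simply start life as a constant-pool vector.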
; AVX512DQ-LABEL: constant_shift_v64i8:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpsrlw $4, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0,0,1,2,3,4,5,6,7,7,6,5,4,3,2,1,0]
; AVX512DQ-NEXT: vpsllw $5, %ymm4, %ymm4
; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $2, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm5 = [63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63,63]
; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpaddb %ymm4, %ymm4, %ymm6
; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $1, %ymm0, %ymm2
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm7 = [127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127,127]
; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512DQ-NEXT: vpaddb %ymm6, %ymm6, %ymm8
; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $4, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm3, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm4, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $2, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm5, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm6, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: vpsrlw $1, %ymm1, %ymm2
; AVX512DQ-NEXT: vpand %ymm7, %ymm2, %ymm2
; AVX512DQ-NEXT: vpblendvb %ymm8, %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
%shift = lshr <64 x i8> %a, <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0, i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 7, i8 6, i8 5, i8 4, i8 3, i8 2, i8 1, i8 0>
ret <64 x i8> %shift
}
;
; Uniform Constant Shifts
;
define <8 x i64> @splatconstant_shift_v8i64(<8 x i64> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v8i64:
; ALL: ## BB#0:
; ALL-NEXT: vpsrlq $7, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <8 x i64> %a, <i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7, i64 7>
ret <8 x i64> %shift
}
define <16 x i32> @splatconstant_shift_v16i32(<16 x i32> %a) nounwind {
; ALL-LABEL: splatconstant_shift_v16i32:
; ALL: ## BB#0:
; ALL-NEXT: vpsrld $5, %zmm0, %zmm0
; ALL-NEXT: retq
%shift = lshr <16 x i32> %a, <i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5, i32 5>
ret <16 x i32> %shift
}
define <32 x i16> @splatconstant_shift_v32i16(<32 x i16> %a) nounwind {
; AVX512DQ-LABEL: splatconstant_shift_v32i16:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v32i16:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <32 x i16> %a, <i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3, i16 3>
ret <32 x i16> %shift
}
define <64 x i8> @splatconstant_shift_v64i8(<64 x i8> %a) nounwind {
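; A uniform byte shift is done at word granularity: after vpsrlw $3 the top
; three bits of each byte hold bits shifted in from the byte above, so an
; AND with 31 (0x1F) clears them.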
; AVX512DQ-LABEL: splatconstant_shift_v64i8:
; AVX512DQ: ## BB#0:
; AVX512DQ-NEXT: vpsrlw $3, %ymm0, %ymm0
; AVX512DQ-NEXT: vmovdqa {{.*#+}} ymm2 = [31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31,31]
; AVX512DQ-NEXT: vpand %ymm2, %ymm0, %ymm0
; AVX512DQ-NEXT: vpsrlw $3, %ymm1, %ymm1
; AVX512DQ-NEXT: vpand %ymm2, %ymm1, %ymm1
; AVX512DQ-NEXT: retq
;
; AVX512BW-LABEL: splatconstant_shift_v64i8:
; AVX512BW: ## BB#0:
; AVX512BW-NEXT: vpsrlw $3, %zmm0, %zmm0
; AVX512BW-NEXT: vpandq {{.*}}(%rip), %zmm0, %zmm0
; AVX512BW-NEXT: retq
%shift = lshr <64 x i8> %a, <i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3, i8 3>
ret <64 x i8> %shift
}