[x86][icelake]vbmi2

Introducing VBMI2, consisting of:
vpcompress{b,w}
vpexpand{b,w}
vpsh{l,r}d{w,d,q}
vpsh{l,r}dv{w,d,q}

Differential Revision: https://reviews.llvm.org/D40206

llvm-svn: 318745
Coby Tayree 2017-11-21 09:48:44 +00:00
parent be536e28e1
commit c6c4bff339
15 changed files with 6956 additions and 10 deletions
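
For orientation (an illustrative sketch, not part of the patch): the new concat-and-shift intrinsics can be called from LLVM IR as below. vpshldd concatenates each pair of dword lanes from the two sources, shifts the 64-bit result left by the immediate, and keeps the upper half. The signature matches the int_x86_avx512_mask_vpshld_d_512 definition added in this commit; the function name and shift count are my own.

define <16 x i32> @shld_d_example(<16 x i32> %a, <16 x i32> %b) {
  ; All-ones mask (i16 -1), so the passthru operand is ignored here.
  %r = call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %a,
              <16 x i32> %b, i32 7, <16 x i32> zeroinitializer, i16 -1)
  ret <16 x i32> %r
}
declare <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)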


@@ -5164,6 +5164,56 @@ let TargetPrefix = "x86" in {
Intrinsic<[], [llvm_ptr_ty, llvm_v2i64_ty,
llvm_i8_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_b_512 :
GCCBuiltin<"__builtin_ia32_compressqi512_mask">,
Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
llvm_i64_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_w_512 :
GCCBuiltin<"__builtin_ia32_compresshi512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_b_256 :
GCCBuiltin<"__builtin_ia32_compressqi256_mask">,
Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_w_256 :
GCCBuiltin<"__builtin_ia32_compresshi256_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_b_128 :
GCCBuiltin<"__builtin_ia32_compressqi128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_w_128 :
GCCBuiltin<"__builtin_ia32_compresshi128_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_compress_store_b_512 :
GCCBuiltin<"__builtin_ia32_compressstoreqi512_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v64i8_ty,
llvm_i64_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_store_w_512 :
GCCBuiltin<"__builtin_ia32_compressstorehi512_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_store_b_256 :
GCCBuiltin<"__builtin_ia32_compressstoreqi256_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v32i8_ty,
llvm_i32_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_store_w_256 :
GCCBuiltin<"__builtin_ia32_compressstorehi256_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_store_b_128 :
GCCBuiltin<"__builtin_ia32_compressstoreqi128_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v16i8_ty,
llvm_i16_ty], [IntrArgMemOnly]>;
def int_x86_avx512_mask_compress_store_w_128 :
GCCBuiltin<"__builtin_ia32_compressstorehi128_mask">,
Intrinsic<[], [llvm_ptr_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrArgMemOnly]>;
// expand
def int_x86_avx512_mask_expand_ps_512 :
GCCBuiltin<"__builtin_ia32_expandsf512_mask">,
@@ -5265,6 +5315,304 @@ let TargetPrefix = "x86" in {
Intrinsic<[llvm_v2i64_ty], [llvm_ptr_ty, llvm_v2i64_ty,
llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_b_512 :
GCCBuiltin<"__builtin_ia32_expandqi512_mask">,
Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty,
llvm_i64_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_w_512 :
GCCBuiltin<"__builtin_ia32_expandhi512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_b_256 :
GCCBuiltin<"__builtin_ia32_expandqi256_mask">,
Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty, llvm_v32i8_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_w_256 :
GCCBuiltin<"__builtin_ia32_expandhi256_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_b_128 :
GCCBuiltin<"__builtin_ia32_expandqi128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_w_128 :
GCCBuiltin<"__builtin_ia32_expandhi128_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_expand_load_b_512 :
GCCBuiltin<"__builtin_ia32_expandloadqi512_mask">,
Intrinsic<[llvm_v64i8_ty], [llvm_ptr_ty, llvm_v64i8_ty,
llvm_i64_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_load_w_512 :
GCCBuiltin<"__builtin_ia32_expandloadhi512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_ptr_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_load_b_256 :
GCCBuiltin<"__builtin_ia32_expandloadqi256_mask">,
Intrinsic<[llvm_v32i8_ty], [llvm_ptr_ty, llvm_v32i8_ty,
llvm_i32_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_load_w_256 :
GCCBuiltin<"__builtin_ia32_expandloadhi256_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_ptr_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_load_b_128 :
GCCBuiltin<"__builtin_ia32_expandloadqi128_mask">,
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_v16i8_ty,
llvm_i16_ty], [IntrReadMem, IntrArgMemOnly]>;
def int_x86_avx512_mask_expand_load_w_128 :
GCCBuiltin<"__builtin_ia32_expandloadhi128_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_ptr_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrReadMem, IntrArgMemOnly]>;
}
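As a usage sketch (my example, not from the patch): the new byte and word compress/expand intrinsics mirror the existing dword/qword forms, taking the data vector, a passthru vector, and an integer mask with one bit per element.

define <64 x i8> @compress_b_example(<64 x i8> %data, <64 x i8> %passthru, i64 %mask) {
  ; Bytes selected by %mask are packed toward element 0; the remaining
  ; tail elements are taken from %passthru.
  %r = call <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8> %data,
              <64 x i8> %passthru, i64 %mask)
  ret <64 x i8> %r
}
declare <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8>, <64 x i8>, i64)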
// VBMI2 Concat & Shift
let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
def int_x86_avx512_mask_vpshld_q_512 :
GCCBuiltin<"__builtin_ia32_vpshldq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_q_256 :
GCCBuiltin<"__builtin_ia32_vpshldq256_mask">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_q_128 :
GCCBuiltin<"__builtin_ia32_vpshldq128_mask">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_d_512 :
GCCBuiltin<"__builtin_ia32_vpshldd512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_d_256 :
GCCBuiltin<"__builtin_ia32_vpshldd256_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_d_128 :
GCCBuiltin<"__builtin_ia32_vpshldd128_mask">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_w_512 :
GCCBuiltin<"__builtin_ia32_vpshldw512_mask">,
Intrinsic<[llvm_v32i16_ty],
[llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_w_256 :
GCCBuiltin<"__builtin_ia32_vpshldw256_mask">,
Intrinsic<[llvm_v16i16_ty],
[llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshld_w_128 :
GCCBuiltin<"__builtin_ia32_vpshldw128_mask">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_q_512 :
GCCBuiltin<"__builtin_ia32_vpshrdq512_mask">,
Intrinsic<[llvm_v8i64_ty],
[llvm_v8i64_ty, llvm_v8i64_ty, llvm_i32_ty, llvm_v8i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_q_256 :
GCCBuiltin<"__builtin_ia32_vpshrdq256_mask">,
Intrinsic<[llvm_v4i64_ty],
[llvm_v4i64_ty, llvm_v4i64_ty, llvm_i32_ty, llvm_v4i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_q_128 :
GCCBuiltin<"__builtin_ia32_vpshrdq128_mask">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty, llvm_v2i64_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_d_512 :
GCCBuiltin<"__builtin_ia32_vpshrdd512_mask">,
Intrinsic<[llvm_v16i32_ty],
[llvm_v16i32_ty, llvm_v16i32_ty, llvm_i32_ty, llvm_v16i32_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_d_256 :
GCCBuiltin<"__builtin_ia32_vpshrdd256_mask">,
Intrinsic<[llvm_v8i32_ty],
[llvm_v8i32_ty, llvm_v8i32_ty, llvm_i32_ty, llvm_v8i32_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_d_128 :
GCCBuiltin<"__builtin_ia32_vpshrdd128_mask">,
Intrinsic<[llvm_v4i32_ty],
[llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty, llvm_v4i32_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_w_512 :
GCCBuiltin<"__builtin_ia32_vpshrdw512_mask">,
Intrinsic<[llvm_v32i16_ty],
[llvm_v32i16_ty, llvm_v32i16_ty, llvm_i32_ty, llvm_v32i16_ty,
llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_w_256 :
GCCBuiltin<"__builtin_ia32_vpshrdw256_mask">,
Intrinsic<[llvm_v16i16_ty],
[llvm_v16i16_ty, llvm_v16i16_ty, llvm_i32_ty, llvm_v16i16_ty,
llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrd_w_128 :
GCCBuiltin<"__builtin_ia32_vpshrdw128_mask">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_v8i16_ty, llvm_i32_ty, llvm_v8i16_ty,
llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_w_128 :
GCCBuiltin<"__builtin_ia32_vpshldvw128_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_w_128 :
GCCBuiltin<"__builtin_ia32_vpshldvw128_maskz">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_w_256 :
GCCBuiltin<"__builtin_ia32_vpshldvw256_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_w_256 :
GCCBuiltin<"__builtin_ia32_vpshldvw256_maskz">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_w_512 :
GCCBuiltin<"__builtin_ia32_vpshldvw512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_w_512 :
GCCBuiltin<"__builtin_ia32_vpshldvw512_maskz">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_q_128 :
GCCBuiltin<"__builtin_ia32_vpshldvq128_mask">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_q_128 :
GCCBuiltin<"__builtin_ia32_vpshldvq128_maskz">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_q_256 :
GCCBuiltin<"__builtin_ia32_vpshldvq256_mask">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_q_256 :
GCCBuiltin<"__builtin_ia32_vpshldvq256_maskz">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_q_512 :
GCCBuiltin<"__builtin_ia32_vpshldvq512_mask">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_q_512 :
GCCBuiltin<"__builtin_ia32_vpshldvq512_maskz">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_d_128 :
GCCBuiltin<"__builtin_ia32_vpshldvd128_mask">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_d_128 :
GCCBuiltin<"__builtin_ia32_vpshldvd128_maskz">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_d_256 :
GCCBuiltin<"__builtin_ia32_vpshldvd256_mask">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_d_256 :
GCCBuiltin<"__builtin_ia32_vpshldvd256_maskz">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshldv_d_512 :
GCCBuiltin<"__builtin_ia32_vpshldvd512_mask">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshldv_d_512 :
GCCBuiltin<"__builtin_ia32_vpshldvd512_maskz">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_w_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvw128_mask">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_w_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvw128_maskz">,
Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty, llvm_v8i16_ty,
llvm_v8i16_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_w_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvw256_mask">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_w_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvw256_maskz">,
Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty, llvm_v16i16_ty,
llvm_v16i16_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_w_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvw512_mask">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_w_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvw512_maskz">,
Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_q_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvq128_mask">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_q_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvq128_maskz">,
Intrinsic<[llvm_v2i64_ty], [llvm_v2i64_ty, llvm_v2i64_ty,
llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_q_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvq256_mask">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_q_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvq256_maskz">,
Intrinsic<[llvm_v4i64_ty], [llvm_v4i64_ty, llvm_v4i64_ty,
llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_q_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvq512_mask">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_q_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvq512_maskz">,
Intrinsic<[llvm_v8i64_ty], [llvm_v8i64_ty, llvm_v8i64_ty,
llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_d_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvd128_mask">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_d_128 :
GCCBuiltin<"__builtin_ia32_vpshrdvd128_maskz">,
Intrinsic<[llvm_v4i32_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
llvm_v4i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_d_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvd256_mask">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_d_256 :
GCCBuiltin<"__builtin_ia32_vpshrdvd256_maskz">,
Intrinsic<[llvm_v8i32_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
llvm_v8i32_ty, llvm_i8_ty], [IntrNoMem]>;
def int_x86_avx512_mask_vpshrdv_d_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvd512_mask">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
def int_x86_avx512_maskz_vpshrdv_d_512 :
GCCBuiltin<"__builtin_ia32_vpshrdvd512_maskz">,
Intrinsic<[llvm_v16i32_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
llvm_v16i32_ty, llvm_i16_ty], [IntrNoMem]>;
}
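A short sketch of the mask/maskz pairing used above for the variable-shift forms, with illustrative function names: the mask variant merges unselected lanes from its first operand, while the maskz variant zeroes them.

define <32 x i16> @shldv_w_example(<32 x i16> %acc, <32 x i16> %b, <32 x i16> %amt, i32 %k) {
  ; Merge-masking: lanes cleared in %k keep their value from %acc.
  %m = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %acc,
              <32 x i16> %b, <32 x i16> %amt, i32 %k)
  ; Zero-masking: lanes cleared in %k become zero.
  %z = call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %acc,
              <32 x i16> %b, <32 x i16> %amt, i32 %k)
  %r = add <32 x i16> %m, %z
  ret <32 x i16> %r
}
declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)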
// truncate


@@ -1258,6 +1258,7 @@ bool sys::getHostCPUFeatures(StringMap<bool> &Features) {
Features["prefetchwt1"] = HasLeaf7 && ((ECX >> 0) & 1);
Features["avx512vbmi"] = HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save;
Features["avx512vbmi2"] = HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save;
Features["avx512vpopcntdq"] = HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save;
Features["pku"] = HasLeaf7 && ((ECX >> 4) & 1);
Features["vaes"] = HasLeaf7 && ((ECX >> 9) & 1) && HasAVXSave;


@@ -152,6 +152,9 @@ def FeatureVLX : SubtargetFeature<"avx512vl", "HasVLX", "true",
def FeatureVBMI : SubtargetFeature<"avx512vbmi", "HasVBMI", "true",
"Enable AVX-512 Vector Byte Manipulation Instructions",
[FeatureBWI]>;
def FeatureVBMI2 : SubtargetFeature<"avx512vbmi2", "HasVBMI2", "true",
"Enable AVX-512 further Vector Byte Manipulation Instructions",
[FeatureBWI]>;
def FeatureIFMA : SubtargetFeature<"avx512ifma", "HasIFMA", "true",
"Enable AVX-512 Integer Fused Multiple-Add",
[FeatureAVX512]>;


@@ -25090,6 +25090,10 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
case X86ISD::PACKUS: return "X86ISD::PACKUS";
case X86ISD::PALIGNR: return "X86ISD::PALIGNR";
case X86ISD::VALIGN: return "X86ISD::VALIGN";
case X86ISD::VSHLD: return "X86ISD::VSHLD";
case X86ISD::VSHRD: return "X86ISD::VSHRD";
case X86ISD::VSHLDV: return "X86ISD::VSHLDV";
case X86ISD::VSHRDV: return "X86ISD::VSHRDV";
case X86ISD::PSHUFD: return "X86ISD::PSHUFD";
case X86ISD::PSHUFHW: return "X86ISD::PSHUFHW";
case X86ISD::PSHUFLW: return "X86ISD::PSHUFLW";


@@ -391,6 +391,11 @@ namespace llvm {
PSHUFHW,
PSHUFLW,
SHUFP,
// VBMI2 Concat & Shift.
VSHLD,
VSHRD,
VSHLDV,
VSHRDV,
//Shuffle Packed Values at 128-bit granularity.
SHUF128,
MOVDDUP,


@@ -8491,11 +8491,13 @@ multiclass compress_by_vec_width_lowering<X86VectorVTInfo _ > {
}
multiclass compress_by_elt_width<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo> {
AVX512VLVectorVTInfo VTInfo,
Predicate Pred = HasAVX512> {
let Predicates = [Pred] in
defm Z : compress_by_vec_width_common<opc, VTInfo.info512, OpcodeStr>,
compress_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;
let Predicates = [HasVLX] in {
let Predicates = [Pred, HasVLX] in {
defm Z256 : compress_by_vec_width_common<opc, VTInfo.info256, OpcodeStr>,
compress_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
defm Z128 : compress_by_vec_width_common<opc, VTInfo.info128, OpcodeStr>,
@@ -8539,11 +8541,13 @@ multiclass expand_by_vec_width_lowering<X86VectorVTInfo _ > {
}
multiclass expand_by_elt_width<bits<8> opc, string OpcodeStr,
AVX512VLVectorVTInfo VTInfo> {
AVX512VLVectorVTInfo VTInfo,
Predicate Pred = HasAVX512> {
let Predicates = [Pred] in
defm Z : expand_by_vec_width<opc, VTInfo.info512, OpcodeStr>,
expand_by_vec_width_lowering<VTInfo.info512>, EVEX_V512;
let Predicates = [HasVLX] in {
let Predicates = [Pred, HasVLX] in {
defm Z256 : expand_by_vec_width<opc, VTInfo.info256, OpcodeStr>,
expand_by_vec_width_lowering<VTInfo.info256>, EVEX_V256;
defm Z128 : expand_by_vec_width<opc, VTInfo.info128, OpcodeStr>,
@@ -8748,12 +8752,13 @@ multiclass avx512_common_fp_sae_packed_imm<string OpcodeStr,
}
multiclass avx512_common_3Op_rm_imm8<bits<8> opc, SDNode OpNode, string OpStr,
AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo>{
let Predicates = [HasBWI] in {
AVX512VLVectorVTInfo DestInfo, AVX512VLVectorVTInfo SrcInfo,
Predicate Pred = HasBWI> {
let Predicates = [Pred] in {
defm Z : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info512,
SrcInfo.info512>, EVEX_V512, AVX512AIi8Base, EVEX_4V;
}
let Predicates = [HasBWI, HasVLX] in {
let Predicates = [Pred, HasVLX] in {
defm Z128 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info128,
SrcInfo.info128>, EVEX_V128, AVX512AIi8Base, EVEX_4V;
defm Z256 : avx512_3Op_rm_imm8<opc, OpStr, OpNode, DestInfo.info256,
@@ -8762,11 +8767,12 @@ multiclass avx512_common_3Op_rm_imm8<bits<8> opc, SDNode OpNode, string OpStr,
}
multiclass avx512_common_3Op_imm8<string OpcodeStr, AVX512VLVectorVTInfo _,
bits<8> opc, SDNode OpNode>{
let Predicates = [HasAVX512] in {
bits<8> opc, SDNode OpNode,
Predicate Pred = HasAVX512> {
let Predicates = [Pred] in {
defm Z : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info512>, EVEX_V512;
}
let Predicates = [HasAVX512, HasVLX] in {
let Predicates = [Pred, HasVLX] in {
defm Z128 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info128>, EVEX_V128;
defm Z256 : avx512_3Op_imm8<opc, OpcodeStr, OpNode, _.info256>, EVEX_V256;
}
@@ -10063,3 +10069,94 @@ defm : vpclmulqdq_aliases<"VPCLMULQDQZ", VR512, i512mem>;
defm : vpclmulqdq_aliases<"VPCLMULQDQZ128", VR128X, i128mem>;
defm : vpclmulqdq_aliases<"VPCLMULQDQZ256", VR256X, i256mem>;
//===----------------------------------------------------------------------===//
// VBMI2
//===----------------------------------------------------------------------===//
multiclass VBMI2_shift_var_rm<bits<8> Op, string OpStr, SDNode OpNode,
X86VectorVTInfo VTI> {
let Constraints = "$src1 = $dst",
ExeDomain = VTI.ExeDomain in {
defm r: AVX512_maskable_3src<Op, MRMSrcReg, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.RC:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2, VTI.RC:$src3))>,
AVX512FMA3Base;
defm m: AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.MemOp:$src3), OpStr,
"$src3, $src2", "$src2, $src3",
(VTI.VT (OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (bitconvert (VTI.LdFrag addr:$src3)))))>,
AVX512FMA3Base;
}
}
multiclass VBMI2_shift_var_rmb<bits<8> Op, string OpStr, SDNode OpNode,
X86VectorVTInfo VTI>
: VBMI2_shift_var_rm<Op, OpStr, OpNode, VTI> {
let Constraints = "$src1 = $dst",
ExeDomain = VTI.ExeDomain in
defm mb: AVX512_maskable_3src<Op, MRMSrcMem, VTI, (outs VTI.RC:$dst),
(ins VTI.RC:$src2, VTI.ScalarMemOp:$src3), OpStr,
"${src3}"##VTI.BroadcastStr##", $src2",
"$src2, ${src3}"##VTI.BroadcastStr,
(OpNode VTI.RC:$src1, VTI.RC:$src2,
(VTI.VT (X86VBroadcast (VTI.ScalarLdFrag addr:$src3))))>,
AVX512FMA3Base, EVEX_B;
}
multiclass VBMI2_shift_var_rm_common<bits<8> Op, string OpStr, SDNode OpNode,
AVX512VLVectorVTInfo VTI> {
let Predicates = [HasVBMI2] in
defm Z : VBMI2_shift_var_rm<Op, OpStr, OpNode, VTI.info512>, EVEX_V512;
let Predicates = [HasVBMI2, HasVLX] in {
defm Z256 : VBMI2_shift_var_rm<Op, OpStr, OpNode, VTI.info256>, EVEX_V256;
defm Z128 : VBMI2_shift_var_rm<Op, OpStr, OpNode, VTI.info128>, EVEX_V128;
}
}
multiclass VBMI2_shift_var_rmb_common<bits<8> Op, string OpStr, SDNode OpNode,
AVX512VLVectorVTInfo VTI> {
let Predicates = [HasVBMI2] in
defm Z : VBMI2_shift_var_rmb<Op, OpStr, OpNode, VTI.info512>, EVEX_V512;
let Predicates = [HasVBMI2, HasVLX] in {
defm Z256 : VBMI2_shift_var_rmb<Op, OpStr, OpNode, VTI.info256>, EVEX_V256;
defm Z128 : VBMI2_shift_var_rmb<Op, OpStr, OpNode, VTI.info128>, EVEX_V128;
}
}
multiclass VBMI2_shift_var<bits<8> wOp, bits<8> dqOp, string Prefix,
SDNode OpNode> {
defm W : VBMI2_shift_var_rm_common<wOp, Prefix##"w", OpNode,
avx512vl_i16_info>, VEX_W, EVEX_CD8<16, CD8VF>;
defm D : VBMI2_shift_var_rmb_common<dqOp, Prefix##"d", OpNode,
avx512vl_i32_info>, EVEX_CD8<32, CD8VF>;
defm Q : VBMI2_shift_var_rmb_common<dqOp, Prefix##"q", OpNode,
avx512vl_i64_info>, VEX_W, EVEX_CD8<64, CD8VF>;
}
multiclass VBMI2_shift_imm<bits<8> wOp, bits<8> dqOp, string Prefix,
SDNode OpNode> {
defm W : avx512_common_3Op_rm_imm8<wOp, OpNode, Prefix##"w", avx512vl_i16_info,
avx512vl_i16_info, HasVBMI2>, VEX_W, EVEX_CD8<16, CD8VF>;
defm D : avx512_common_3Op_imm8<Prefix##"d", avx512vl_i32_info, dqOp,
OpNode, HasVBMI2>, AVX512AIi8Base, EVEX_4V, EVEX_CD8<32, CD8VF>;
defm Q : avx512_common_3Op_imm8<Prefix##"q", avx512vl_i64_info, dqOp, OpNode,
HasVBMI2>, AVX512AIi8Base, EVEX_4V, EVEX_CD8<64, CD8VF>, VEX_W;
}
// Concat & Shift
defm VPSHLDV : VBMI2_shift_var<0x70, 0x71, "vpshldv", X86VShldv>;
defm VPSHRDV : VBMI2_shift_var<0x72, 0x73, "vpshrdv", X86VShrdv>;
defm VPSHLD : VBMI2_shift_imm<0x70, 0x71, "vpshld", X86VShld>;
defm VPSHRD : VBMI2_shift_imm<0x72, 0x73, "vpshrd", X86VShrd>;
// Compress
defm VPCOMPRESSB : compress_by_elt_width <0x63, "vpcompressb", avx512vl_i8_info,
HasVBMI2>, EVEX;
defm VPCOMPRESSW : compress_by_elt_width <0x63, "vpcompressw", avx512vl_i16_info,
HasVBMI2>, EVEX, VEX_W;
// Expand
defm VPEXPANDB : expand_by_elt_width <0x62, "vpexpandb", avx512vl_i8_info,
HasVBMI2>, EVEX;
defm VPEXPANDW : expand_by_elt_width <0x62, "vpexpandw", avx512vl_i16_info,
HasVBMI2>, EVEX, VEX_W;
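To show what the broadcast (rmb) forms above accept, a hedged IR sketch: a splatted scalar load feeding the variable-shift intrinsic, which the EVEX_B broadcast memory form of vpshldvq can fold. The function name is hypothetical.

define <8 x i64> @shldv_q_bcast(<8 x i64> %a, <8 x i64> %b, i64* %p) {
  ; Broadcast one 64-bit shift amount to all lanes.
  %s = load i64, i64* %p
  %ins = insertelement <8 x i64> undef, i64 %s, i32 0
  %amt = shufflevector <8 x i64> %ins, <8 x i64> undef, <8 x i32> zeroinitializer
  %r = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %a,
              <8 x i64> %b, <8 x i64> %amt, i8 -1)
  ret <8 x i64> %r
}
declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)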


@@ -350,6 +350,19 @@ def X86PAlignr : SDNode<"X86ISD::PALIGNR",
SDTCisVT<3, i8>]>>;
def X86VAlign : SDNode<"X86ISD::VALIGN", SDTShuff3OpI>;
def X86VShld : SDNode<"X86ISD::VSHLD", SDTShuff3OpI>;
def X86VShrd : SDNode<"X86ISD::VSHRD", SDTShuff3OpI>;
def X86VShldv : SDNode<"X86ISD::VSHLDV",
SDTypeProfile<1, 3, [SDTCisVec<0>,
SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>,
SDTCisSameAs<0,3>]>>;
def X86VShrdv : SDNode<"X86ISD::VSHRDV",
SDTypeProfile<1, 3, [SDTCisVec<0>,
SDTCisSameAs<0,1>,
SDTCisSameAs<0,2>,
SDTCisSameAs<0,3>]>>;
def X86Conflict : SDNode<"X86ISD::CONFLICT", SDTIntUnaryOp>;
def X86PShufd : SDNode<"X86ISD::PSHUFD", SDTShuff2OpI>;


@@ -862,6 +862,7 @@ def HasBMI2 : Predicate<"Subtarget->hasBMI2()">;
def NoBMI2 : Predicate<"!Subtarget->hasBMI2()">;
def HasVBMI : Predicate<"Subtarget->hasVBMI()">,
AssemblerPredicate<"FeatureVBMI", "AVX-512 VBMI ISA">;
def HasVBMI2 : Predicate<"Subtarget->hasVBMI2()">;
def HasIFMA : Predicate<"Subtarget->hasIFMA()">,
AssemblerPredicate<"FeatureIFMA", "AVX-512 IFMA ISA">;
def HasRTM : Predicate<"Subtarget->hasRTM()">;


@@ -120,6 +120,12 @@ static const IntrinsicData IntrinsicsWithChain[] = {
X86_INTRINSIC_DATA(avx512_gatherpf_qps_512, PREFETCH,
X86::VGATHERPF0QPSm, X86::VGATHERPF1QPSm),
X86_INTRINSIC_DATA(avx512_mask_compress_store_b_128,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_b_256,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_b_512,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_d_128,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_d_256,
@@ -144,6 +150,18 @@ static const IntrinsicData IntrinsicsWithChain[] = {
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_q_512,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_w_128,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_w_256,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_store_w_512,
COMPRESS_TO_MEM, X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_b_128,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_b_256,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_b_512,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_d_128,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_d_256,
@@ -168,6 +186,12 @@ static const IntrinsicData IntrinsicsWithChain[] = {
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_q_512,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_w_128,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_w_256,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_load_w_512,
EXPAND_FROM_MEM, X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_128, TRUNCATE_TO_MEM_VI8,
X86ISD::VTRUNC, 0),
X86_INTRINSIC_DATA(avx512_mask_pmov_db_mem_256, TRUNCATE_TO_MEM_VI8,
@@ -479,6 +503,13 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FSETCCM, X86ISD::FSETCCM_RND),
X86_INTRINSIC_DATA(avx512_mask_cmp_ss, CMP_MASK_SCALAR_CC,
X86ISD::FSETCCM, X86ISD::FSETCCM_RND),
X86_INTRINSIC_DATA(avx512_mask_compress_b_128, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_b_256, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_b_512, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_d_128, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_d_256, COMPRESS_EXPAND_IN_REG,
@@ -503,6 +534,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_q_512, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_w_128, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_w_256, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_compress_w_512, COMPRESS_EXPAND_IN_REG,
X86ISD::COMPRESS, 0),
X86_INTRINSIC_DATA(avx512_mask_conflict_d_128, INTR_TYPE_1OP_MASK,
X86ISD::CONFLICT, 0),
X86_INTRINSIC_DATA(avx512_mask_conflict_d_256, INTR_TYPE_1OP_MASK,
@@ -677,6 +714,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::FDIVS_RND, 0),
X86_INTRINSIC_DATA(avx512_mask_div_ss_round, INTR_TYPE_SCALAR_MASK_RM,
X86ISD::FDIVS_RND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_b_128, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_b_256, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_b_512, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_d_128, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_d_256, COMPRESS_EXPAND_IN_REG,
@@ -701,6 +744,12 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_q_512, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_w_128, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_w_256, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_expand_w_512, COMPRESS_EXPAND_IN_REG,
X86ISD::EXPAND, 0),
X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_128, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_256, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
X86_INTRINSIC_DATA(avx512_mask_fixupimm_pd_512, FIXUPIMM, X86ISD::VFIXUPIMM, 0),
@@ -1192,6 +1241,44 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_mask_vpmadd52l_uq_512 , IFMA_OP_MASK,
X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_d_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_d_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_d_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_q_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_q_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_q_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_w_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_w_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshld_w_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHLD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_d_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_q_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_128, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_256, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshldv_w_512, FMA_OP_MASK, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_d_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_d_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_d_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_q_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_q_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_q_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_w_128, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_w_256, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrd_w_512, INTR_TYPE_3OP_IMM8_MASK, X86ISD::VSHRD, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_d_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_q_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_128, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_256, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask_vpshrdv_w_512, FMA_OP_MASK, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_128, FMA_OP_MASK3, ISD::FMA, 0),
X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_256, FMA_OP_MASK3, ISD::FMA, 0),
X86_INTRINSIC_DATA(avx512_mask3_vfmadd_pd_512, FMA_OP_MASK3, ISD::FMA,
@@ -1338,6 +1425,26 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpmadd52l_uq_512, IFMA_OP_MASKZ,
X86ISD::VPMADD52L, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_d_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_q_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_128, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_256, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshldv_w_512, FMA_OP_MASKZ, X86ISD::VSHLDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_d_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_q_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_128, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_256, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_maskz_vpshrdv_w_512, FMA_OP_MASKZ, X86ISD::VSHRDV, 0),
X86_INTRINSIC_DATA(avx512_packssdw_512, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx512_packsswb_512, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx512_packusdw_512, INTR_TYPE_2OP, X86ISD::PACKUS, 0),


@@ -313,6 +313,7 @@ void X86Subtarget::initializeEnvironment() {
HasBMI = false;
HasBMI2 = false;
HasVBMI = false;
HasVBMI2 = false;
HasIFMA = false;
HasRTM = false;
HasERI = false;


@@ -167,6 +167,9 @@ protected:
/// Processor has VBMI instructions.
bool HasVBMI;
/// Processor has VBMI2 instructions.
bool HasVBMI2;
/// Processor has Integer Fused Multiply Add
bool HasIFMA;
@@ -483,6 +486,7 @@ public:
bool hasBMI() const { return HasBMI; }
bool hasBMI2() const { return HasBMI2; }
bool hasVBMI() const { return HasVBMI; }
bool hasVBMI2() const { return HasVBMI2; }
bool hasIFMA() const { return HasIFMA; }
bool hasRTM() const { return HasRTM; }
bool hasADX() const { return HasADX; }


@@ -0,0 +1,327 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vbmi2 | FileCheck %s
define <32 x i16> @test_expand_load_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_expand_load_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8* %addr, <32 x i16> %data, i32 %mask)
ret <32 x i16> %res
}
declare <32 x i16> @llvm.x86.avx512.mask.expand.load.w.512(i8* %addr, <32 x i16> %data, i32 %mask)
define void @test_compress_store_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_compress_store_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %zmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.w.512(i8* %addr, <32 x i16> %data, i32 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.w.512(i8* %addr, <32 x i16> %data, i32 %mask)
define <64 x i8> @test_expand_load_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_expand_load_b_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %zmm0 {%k1}
; CHECK-NEXT: retq
%res = call <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8* %addr, <64 x i8> %data, i64 %mask)
ret <64 x i8> %res
}
declare <64 x i8> @llvm.x86.avx512.mask.expand.load.b.512(i8* %addr, <64 x i8> %data, i64 %mask)
define void @test_compress_store_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_compress_store_b_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpcompressb %zmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.b.512(i8* %addr, <64 x i8> %data, i64 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.b.512(i8* %addr, <64 x i8> %data, i64 %mask)
define <32 x i16> @test_compress_w_512(<32 x i16> %data, <32 x i16> %src, i32 %mask) {
; CHECK-LABEL: test_compress_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16> %data, <32 x i16> %src, i32 %mask)
ret <32 x i16> %res
}
declare <32 x i16> @llvm.x86.avx512.mask.compress.w.512(<32 x i16>, <32 x i16>, i32)
define <64 x i8> @test_compress_b_512(<64 x i8> %data, <64 x i8> %src, i64 %mask) {
; CHECK-LABEL: test_compress_b_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovq %rdi, %k1
; CHECK-NEXT: vpcompressb %zmm0, %zmm1 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
; CHECK-NEXT: retq
%res = call <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8> %data, <64 x i8> %src, i64 %mask)
ret <64 x i8> %res
}
declare <64 x i8> @llvm.x86.avx512.mask.compress.b.512(<64 x i8>, <64 x i8>, i64)
define <32 x i16> @test_expand_w_512(i8* %addr, <32 x i16> %data, i32 %mask) {
; CHECK-LABEL: test_expand_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16> %data, <32 x i16> zeroinitializer, i32 %mask)
ret <32 x i16> %res
}
declare <32 x i16> @llvm.x86.avx512.mask.expand.w.512(<32 x i16>, <32 x i16>, i32)
define <64 x i8> @test_expand_b_512(i8* %addr, <64 x i8> %data, i64 %mask) {
; CHECK-LABEL: test_expand_b_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovq %rsi, %k1
; CHECK-NEXT: vpexpandb %zmm0, %zmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8> %data, <64 x i8> zeroinitializer, i64 %mask)
ret <64 x i8> %res
}
declare <64 x i8> @llvm.x86.avx512.mask.expand.b.512(<64 x i8>, <64 x i8>, i64)
define <16 x i32>@test_int_x86_avx512_mask_vpshld_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldd $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}
declare <16 x i32> @llvm.x86.avx512.mask.vpshld.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)
define <8 x i64>@test_int_x86_avx512_mask_vpshld_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldq $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}
declare <8 x i64> @llvm.x86.avx512.mask.vpshld.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)
define <32 x i16>@test_int_x86_avx512_mask_vpshld_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshldw $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 %x4)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 -1)
%res2 = add <32 x i16> %res, %res1
ret <32 x i16> %res2
}
declare <32 x i16> @llvm.x86.avx512.mask.vpshld.w.512(<32 x i16>, <32 x i16>, i32, <32 x i16>, i32)
define <16 x i32>@test_int_x86_avx512_mask_vpshrd_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdd $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 %x4)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32> %x0, <16 x i32> %x1, i32 22, <16 x i32> %x3, i16 -1)
%res2 = add <16 x i32> %res, %res1
ret <16 x i32> %res2
}
declare <16 x i32> @llvm.x86.avx512.mask.vpshrd.d.512(<16 x i32>, <16 x i32>, i32, <16 x i32>, i16)
define <8 x i64>@test_int_x86_avx512_mask_vpshrd_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 %x4)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64> %x0, <8 x i64> %x1, i32 22, <8 x i64> %x3, i8 -1)
%res2 = add <8 x i64> %res, %res1
ret <8 x i64> %res2
}
declare <8 x i64> @llvm.x86.avx512.mask.vpshrd.q.512(<8 x i64>, <8 x i64>, i32, <8 x i64>, i8)
define <32 x i16>@test_int_x86_avx512_mask_vpshrd_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x3, i32 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %zmm1, %zmm0, %zmm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %zmm1, %zmm0, %zmm0
; CHECK-NEXT: vpaddw %zmm0, %zmm2, %zmm0
; CHECK-NEXT: retq
%res = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 %x4)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16> %x0, <32 x i16> %x1, i32 22, <32 x i16> %x3, i32 -1)
%res2 = add <32 x i16> %res, %res1
ret <32 x i16> %res2
}
declare <32 x i16> @llvm.x86.avx512.mask.vpshrd.w.512(<32 x i16>, <32 x i16>, i32, <32 x i16>, i32)
declare <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
declare <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_vpshrdv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvd (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshrdvd %zmm2, %zmm1, %zmm4
; CHECK-NEXT: vpshrdvd %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2p
%res = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
%res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 %x3)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res2, %res3
ret <16 x i32> %res4
}
declare <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
declare <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_vpshrdv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvq (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshrdvq %zmm2, %zmm1, %zmm4
; CHECK-NEXT: vpshrdvq %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <8 x i64>, <8 x i64>* %x2p
%res = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
%res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 %x3)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res2, %res3
ret <8 x i64> %res4
}
declare <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
declare <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_vpshrdv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshrdvw (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshrdvw %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpshrdvw %zmm2, %zmm1, %zmm0
; CHECK-NEXT: vpaddw %zmm4, %zmm0, %zmm0
; CHECK-NEXT: vpaddw %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <32 x i16>, <32 x i16>* %x2p
%res = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
%res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 %x3)
%res3 = add <32 x i16> %res, %res1
%res4 = add <32 x i16> %res2, %res3
ret <32 x i16> %res4
}
declare <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
declare <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32>, <16 x i32>, <16 x i32>, i16)
define <16 x i32>@test_int_x86_avx512_mask_vpshldv_d_512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32>* %x2p, <16 x i32> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvd (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshldvd %zmm2, %zmm1, %zmm4
; CHECK-NEXT: vpshldvd %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddd %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <16 x i32>, <16 x i32>* %x2p
%res = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x2, i16 %x3)
%res1 = call <16 x i32> @llvm.x86.avx512.mask.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 -1)
%res2 = call <16 x i32> @llvm.x86.avx512.maskz.vpshldv.d.512(<16 x i32> %x0, <16 x i32> %x1, <16 x i32> %x4, i16 %x3)
%res3 = add <16 x i32> %res, %res1
%res4 = add <16 x i32> %res2, %res3
ret <16 x i32> %res4
}
declare <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
declare <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64>, <8 x i64>, <8 x i64>, i8)
define <8 x i64>@test_int_x86_avx512_mask_vpshldv_q_512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64>* %x2p, <8 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvq (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshldvq %zmm2, %zmm1, %zmm4
; CHECK-NEXT: vpshldvq %zmm2, %zmm1, %zmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %zmm0, %zmm4, %zmm0
; CHECK-NEXT: vpaddq %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <8 x i64>, <8 x i64>* %x2p
%res = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x2, i8 %x3)
%res1 = call <8 x i64> @llvm.x86.avx512.mask.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 -1)
%res2 = call <8 x i64> @llvm.x86.avx512.maskz.vpshldv.q.512(<8 x i64> %x0, <8 x i64> %x1, <8 x i64> %x4, i8 %x3)
%res3 = add <8 x i64> %res, %res1
%res4 = add <8 x i64> %res2, %res3
ret <8 x i64> %res4
}
declare <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
declare <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16>, <32 x i16>, <32 x i16>, i32)
define <32 x i16>@test_int_x86_avx512_mask_vpshldv_w_512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16>* %x2p, <32 x i16> %x4, i32 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_512:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm3
; CHECK-NEXT: vpshldvw (%rdi), %zmm1, %zmm3 {%k1}
; CHECK-NEXT: vmovdqa64 %zmm0, %zmm4
; CHECK-NEXT: vpshldvw %zmm2, %zmm1, %zmm4 {%k1} {z}
; CHECK-NEXT: vpshldvw %zmm2, %zmm1, %zmm0
; CHECK-NEXT: vpaddw %zmm4, %zmm0, %zmm0
; CHECK-NEXT: vpaddw %zmm0, %zmm3, %zmm0
; CHECK-NEXT: retq
%x2 = load <32 x i16>, <32 x i16>* %x2p
%res = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x2, i32 %x3)
%res1 = call <32 x i16> @llvm.x86.avx512.mask.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 -1)
%res2 = call <32 x i16> @llvm.x86.avx512.maskz.vpshldv.w.512(<32 x i16> %x0, <32 x i16> %x1, <32 x i16> %x4, i32 %x3)
%res3 = add <32 x i16> %res, %res1
%res4 = add <32 x i16> %res2, %res3
ret <32 x i16> %res4
}


@@ -0,0 +1,657 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512vl,+avx512vbmi2 | FileCheck %s
define <16 x i16> @test_compress_w_256(<16 x i16> %src, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_compress_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16> %data, <16 x i16> %src, i16 %mask)
ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx512.mask.compress.w.256(<16 x i16>, <16 x i16>, i16)
define <8 x i16> @test_compress_w_128(<8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_compress_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressw %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16> %data, <8 x i16> zeroinitializer, i8 %mask)
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.avx512.mask.compress.w.128(<8 x i16>, <8 x i16>, i8)
define <32 x i8> @test_compress_b_256(<32 x i8> %src, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_compress_b_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressb %ymm1, %ymm0 {%k1}
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8> %data, <32 x i8> %src, i32 %mask)
ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx512.mask.compress.b.256(<32 x i8>, <32 x i8>, i32)
define <16 x i8> @test_compress_b_128(<16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_compress_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpcompressb %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8> %data, <16 x i8> zeroinitializer, i16 %mask)
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.avx512.mask.compress.b.128(<16 x i8>, <16 x i8>, i16)
define <32 x i8> @test_expand_b_256(<32 x i8> %data, <32 x i8> %src, i32 %mask) {
; CHECK-LABEL: test_expand_b_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandb %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.x86.avx512.mask.expand.b.256( <32 x i8> %data, <32 x i8> %src, i32 %mask)
ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx512.mask.expand.b.256(<32 x i8>, <32 x i8>, i32)
define <16 x i8> @test_expand_b_128(<16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_expand_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandb %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8> %data, <16 x i8> zeroinitializer, i16 %mask)
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.avx512.mask.expand.b.128(<16 x i8>, <16 x i8>, i16)
define <16 x i16> @test_expand_w_256(<16 x i16> %data, <16 x i16> %src, i16 %mask) {
; CHECK-LABEL: test_expand_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandw %ymm0, %ymm1 {%k1}
; CHECK-NEXT: vmovdqa %ymm1, %ymm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx512.mask.expand.w.256( <16 x i16> %data, <16 x i16> %src, i16 %mask)
ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx512.mask.expand.w.256(<16 x i16>, <16 x i16>, i16)
define <8 x i16> @test_expand_w_128(<8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_expand_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpexpandw %xmm0, %xmm0 {%k1} {z}
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16> %data, <8 x i16> zeroinitializer, i8 %mask)
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.avx512.mask.expand.w.128(<8 x i16>, <8 x i16>, i8)
define <16 x i16> @test_expand_load_w_256(i8* %addr, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_expand_load_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8* %addr, <16 x i16> %data, i16 %mask)
ret <16 x i16> %res
}
declare <16 x i16> @llvm.x86.avx512.mask.expand.load.w.256(i8* %addr, <16 x i16> %data, i16 %mask)
define <8 x i16> @test_expand_load_w_128(i8* %addr, <8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_expand_load_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandw (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8* %addr, <8 x i16> %data, i8 %mask)
ret <8 x i16> %res
}
declare <8 x i16> @llvm.x86.avx512.mask.expand.load.w.128(i8* %addr, <8 x i16> %data, i8 %mask)
define void @test_compress_store_w_256(i8* %addr, <16 x i16> %data, i16 %mask) {
; CHECK-LABEL: test_compress_store_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %ymm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.w.256(i8* %addr, <16 x i16> %data, i16 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.w.256(i8* %addr, <16 x i16> %data, i16 %mask)
define void @test_compress_store_w_128(i8* %addr, <8 x i16> %data, i8 %mask) {
; CHECK-LABEL: test_compress_store_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressw %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.w.128(i8* %addr, <8 x i16> %data, i8 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.w.128(i8* %addr, <8 x i16> %data, i8 %mask)
define <32 x i8> @test_expand_load_b_256(i8* %addr, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_expand_load_b_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %ymm0 {%k1}
; CHECK-NEXT: retq
%res = call <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8* %addr, <32 x i8> %data, i32 %mask)
ret <32 x i8> %res
}
declare <32 x i8> @llvm.x86.avx512.mask.expand.load.b.256(i8* %addr, <32 x i8> %data, i32 %mask)
define <16 x i8> @test_expand_load_b_128(i8* %addr, <16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_expand_load_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpexpandb (%rdi), %xmm0 {%k1}
; CHECK-NEXT: retq
%res = call <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8* %addr, <16 x i8> %data, i16 %mask)
ret <16 x i8> %res
}
declare <16 x i8> @llvm.x86.avx512.mask.expand.load.b.128(i8* %addr, <16 x i8> %data, i16 %mask)
define void @test_compress_store_b_256(i8* %addr, <32 x i8> %data, i32 %mask) {
; CHECK-LABEL: test_compress_store_b_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressb %ymm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.b.256(i8* %addr, <32 x i8> %data, i32 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.b.256(i8* %addr, <32 x i8> %data, i32 %mask)
define void @test_compress_store_b_128(i8* %addr, <16 x i8> %data, i16 %mask) {
; CHECK-LABEL: test_compress_store_b_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vpcompressb %xmm0, (%rdi) {%k1}
; CHECK-NEXT: retq
call void @llvm.x86.avx512.mask.compress.store.b.128(i8* %addr, <16 x i8> %data, i16 %mask)
ret void
}
declare void @llvm.x86.avx512.mask.compress.store.b.128(i8* %addr, <16 x i8> %data, i16 %mask)
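
; VPSHLD{D,Q,W}: concatenated shift left by immediate. Each element pair is
; concatenated (first source in the upper half, second in the lower), shifted
; left by the immediate, and the upper half is kept. The tests exercise the
; merge-masked, all-ones-mask, and (for the 128-bit dword case) zero-masked
; lowerings, summing the results so one function covers every form.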
define <4 x i32>@test_int_x86_avx512_mask_vpshld_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vpshldd $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshldd $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> %x3, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> %x3, i8 -1)
%res2 = call <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> zeroinitializer, i8 %x4)
%res3 = add <4 x i32> %res, %res1
%res4 = add <4 x i32> %res3, %res2
ret <4 x i32> %res4
}
declare <4 x i32> @llvm.x86.avx512.mask.vpshld.d.128(<4 x i32>, <4 x i32>, i32, <4 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vpshld_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldd $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldd $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 %x4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 -1)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}
declare <8 x i32> @llvm.x86.avx512.mask.vpshld.d.256(<8 x i32>, <8 x i32>, i32, <8 x i32>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vpshld_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshldq $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 %x4)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 -1)
%res2 = add <2 x i64> %res, %res1
ret <2 x i64> %res2
}
declare <2 x i64> @llvm.x86.avx512.mask.vpshld.q.128(<2 x i64>, <2 x i64>, i32, <2 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vpshld_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldq $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldq $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 -1)
%res2 = add <4 x i64> %res, %res1
ret <4 x i64> %res2
}
declare <4 x i64> @llvm.x86.avx512.mask.vpshld.q.256(<4 x i64>, <4 x i64>, i32, <4 x i64>, i8)
define <8 x i16>@test_int_x86_avx512_mask_vpshld_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshldw $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 %x4)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 -1)
%res2 = add <8 x i16> %res, %res1
ret <8 x i16> %res2
}
declare <8 x i16> @llvm.x86.avx512.mask.vpshld.w.128(<8 x i16>, <8 x i16>, i32, <8 x i16>, i8)
define <16 x i16>@test_int_x86_avx512_mask_vpshld_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshld_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshldw $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshldw $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 %x4)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 -1)
%res2 = add <16 x i16> %res, %res1
ret <16 x i16> %res2
}
declare <16 x i16> @llvm.x86.avx512.mask.vpshld.w.256(<16 x i16>, <16 x i16>, i32, <16 x i16>, i16)
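
; VPSHRD{D,Q,W}: concatenated shift right by immediate; the concatenated
; element pair is shifted right and the lower half is kept. Same
; masked/unmasked test structure as the VPSHLD cases above.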
define <4 x i32>@test_int_x86_avx512_mask_vpshrd_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %xmm1, %xmm0, %xmm3 {%k1} {z}
; CHECK-NEXT: vpshrdd $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshrdd $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm2, %xmm0
; CHECK-NEXT: vpaddd %xmm3, %xmm0, %xmm0
; CHECK-NEXT: retq
%res = call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> %x3, i8 %x4)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> %x3, i8 -1)
%res2 = call <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32> %x0, <4 x i32> %x1, i32 22, <4 x i32> zeroinitializer, i8 %x4)
%res3 = add <4 x i32> %res, %res1
%res4 = add <4 x i32> %res3, %res2
ret <4 x i32> %res4
}
declare <4 x i32> @llvm.x86.avx512.mask.vpshrd.d.128(<4 x i32>, <4 x i32>, i32, <4 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vpshrd_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdd $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdd $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddd %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 %x4)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32> %x0, <8 x i32> %x1, i32 22, <8 x i32> %x3, i8 -1)
%res2 = add <8 x i32> %res, %res1
ret <8 x i32> %res2
}
declare <8 x i32> @llvm.x86.avx512.mask.vpshrd.d.256(<8 x i32>, <8 x i32>, i32, <8 x i32>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vpshrd_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddq %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res = call <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 %x4)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64> %x0, <2 x i64> %x1, i32 22, <2 x i64> %x3, i8 -1)
%res2 = add <2 x i64> %res, %res1
ret <2 x i64> %res2
}
declare <2 x i64> @llvm.x86.avx512.mask.vpshrd.q.128(<2 x i64>, <2 x i64>, i32, <2 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vpshrd_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdq $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdq $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddq %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 %x4)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64> %x0, <4 x i64> %x1, i32 22, <4 x i64> %x3, i8 -1)
%res2 = add <4 x i64> %res, %res1
ret <4 x i64> %res2
}
declare <4 x i64> @llvm.x86.avx512.mask.vpshrd.q.256(<4 x i64>, <4 x i64>, i32, <4 x i64>, i8)
define <8 x i16>@test_int_x86_avx512_mask_vpshrd_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x3, i8 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %xmm1, %xmm0, %xmm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %xmm1, %xmm0, %xmm0
; CHECK-NEXT: vpaddw %xmm0, %xmm2, %xmm0
; CHECK-NEXT: retq
%res = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 %x4)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16> %x0, <8 x i16> %x1, i32 22, <8 x i16> %x3, i8 -1)
%res2 = add <8 x i16> %res, %res1
ret <8 x i16> %res2
}
declare <8 x i16> @llvm.x86.avx512.mask.vpshrd.w.128(<8 x i16>, <8 x i16>, i32, <8 x i16>, i8)
define <16 x i16>@test_int_x86_avx512_mask_vpshrd_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x3, i16 %x4) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrd_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %edi, %k1
; CHECK-NEXT: vpshrdw $22, %ymm1, %ymm0, %ymm2 {%k1}
; CHECK-NEXT: vpshrdw $22, %ymm1, %ymm0, %ymm0
; CHECK-NEXT: vpaddw %ymm0, %ymm2, %ymm0
; CHECK-NEXT: retq
%res = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 %x4)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16> %x0, <16 x i16> %x1, i32 22, <16 x i16> %x3, i16 -1)
%res2 = add <16 x i16> %res, %res1
ret <16 x i16> %res2
}
declare <16 x i16> @llvm.x86.avx512.mask.vpshrd.w.256(<16 x i16>, <16 x i16>, i32, <16 x i16>, i16)
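
; VPSHRDV{D,Q,W}: variable concatenated shift right, with per-element shift
; counts taken from the third vector operand. Each test covers a merge-masked
; form with a memory operand, an unmasked form, and a zero-masked ("maskz")
; form, and sums the three results.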
declare <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
declare <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vpshrdv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvd (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshrdvd %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshrdvd %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddd %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddd %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <8 x i32>, <8 x i32>* %x2p
%res = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
%res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 %x3)
%res3 = add <8 x i32> %res, %res1
%res4 = add <8 x i32> %res2, %res3
ret <8 x i32> %res4
}
declare <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
declare <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_vpshrdv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvd (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshrdvd %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshrdvd %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <4 x i32>, <4 x i32>* %x2p
%res = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
%res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshrdv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 %x3)
%res3 = add <4 x i32> %res, %res1
%res4 = add <4 x i32> %res2, %res3
ret <4 x i32> %res4
}
declare <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
declare <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vpshrdv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvq (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshrdvq %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshrdvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <4 x i64>, <4 x i64>* %x2p
%res = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
%res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 %x3)
%res3 = add <4 x i64> %res, %res1
%res4 = add <4 x i64> %res2, %res3
ret <4 x i64> %res4
}
declare <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
declare <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vpshrdv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvq (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshrdvq %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshrdvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <2 x i64>, <2 x i64>* %x2p
%res = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
%res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshrdv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 %x3)
%res3 = add <2 x i64> %res, %res1
%res4 = add <2 x i64> %res2, %res3
ret <2 x i64> %res4
}
declare <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
declare <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_vpshrdv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshrdvw (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshrdvw %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshrdvw %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddw %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddw %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <16 x i16>, <16 x i16>* %x2p
%res = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
%res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 %x3)
%res3 = add <16 x i16> %res, %res1
%res4 = add <16 x i16> %res2, %res3
ret <16 x i16> %res4
}
declare <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
declare <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_vpshrdv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshrdv_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshrdvw (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshrdvw %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshrdvw %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddw %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddw %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <8 x i16>, <8 x i16>* %x2p
%res = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
%res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshrdv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 %x3)
%res3 = add <8 x i16> %res, %res1
%res4 = add <8 x i16> %res2, %res3
ret <8 x i16> %res4
}
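
; VPSHLDV{D,Q,W}: the variable-count counterpart of VPSHLD, following the
; same merge-masked/unmasked/zero-masked test pattern as VPSHRDV above.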
declare <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
declare <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32>, <8 x i32>, <8 x i32>, i8)
define <8 x i32>@test_int_x86_avx512_mask_vpshldv_d_256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32>* %x2p, <8 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvd (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshldvd %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshldvd %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddd %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddd %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <8 x i32>, <8 x i32>* %x2p
%res = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x2, i8 %x3)
%res1 = call <8 x i32> @llvm.x86.avx512.mask.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 -1)
%res2 = call <8 x i32> @llvm.x86.avx512.maskz.vpshldv.d.256(<8 x i32> %x0, <8 x i32> %x1, <8 x i32> %x4, i8 %x3)
%res3 = add <8 x i32> %res, %res1
%res4 = add <8 x i32> %res2, %res3
ret <8 x i32> %res4
}
declare <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
declare <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32>, <4 x i32>, <4 x i32>, i8)
define <4 x i32>@test_int_x86_avx512_mask_vpshldv_d_128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32>* %x2p, <4 x i32> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_d_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvd (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshldvd %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshldvd %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddd %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddd %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <4 x i32>, <4 x i32>* %x2p
%res = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x2, i8 %x3)
%res1 = call <4 x i32> @llvm.x86.avx512.mask.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 -1)
%res2 = call <4 x i32> @llvm.x86.avx512.maskz.vpshldv.d.128(<4 x i32> %x0, <4 x i32> %x1, <4 x i32> %x4, i8 %x3)
%res3 = add <4 x i32> %res, %res1
%res4 = add <4 x i32> %res2, %res3
ret <4 x i32> %res4
}
declare <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
declare <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64>, <4 x i64>, <4 x i64>, i8)
define <4 x i64>@test_int_x86_avx512_mask_vpshldv_q_256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64>* %x2p, <4 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvq (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshldvq %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshldvq %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddq %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddq %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <4 x i64>, <4 x i64>* %x2p
%res = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x2, i8 %x3)
%res1 = call <4 x i64> @llvm.x86.avx512.mask.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 -1)
%res2 = call <4 x i64> @llvm.x86.avx512.maskz.vpshldv.q.256(<4 x i64> %x0, <4 x i64> %x1, <4 x i64> %x4, i8 %x3)
%res3 = add <4 x i64> %res, %res1
%res4 = add <4 x i64> %res2, %res3
ret <4 x i64> %res4
}
declare <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
declare <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64>, <2 x i64>, <2 x i64>, i8)
define <2 x i64>@test_int_x86_avx512_mask_vpshldv_q_128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64>* %x2p, <2 x i64> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_q_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvq (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshldvq %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshldvq %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddq %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddq %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <2 x i64>, <2 x i64>* %x2p
%res = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x2, i8 %x3)
%res1 = call <2 x i64> @llvm.x86.avx512.mask.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 -1)
%res2 = call <2 x i64> @llvm.x86.avx512.maskz.vpshldv.q.128(<2 x i64> %x0, <2 x i64> %x1, <2 x i64> %x4, i8 %x3)
%res3 = add <2 x i64> %res, %res1
%res4 = add <2 x i64> %res2, %res3
ret <2 x i64> %res4
}
declare <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
declare <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16>, <16 x i16>, <16 x i16>, i16)
define <16 x i16>@test_int_x86_avx512_mask_vpshldv_w_256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16>* %x2p, <16 x i16> %x4, i16 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_256:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %ymm0, %ymm3
; CHECK-NEXT: vpshldvw (%rdi), %ymm1, %ymm3 {%k1}
; CHECK-NEXT: vmovdqa %ymm0, %ymm4
; CHECK-NEXT: vpshldvw %ymm2, %ymm1, %ymm4
; CHECK-NEXT: vpshldvw %ymm2, %ymm1, %ymm0 {%k1} {z}
; CHECK-NEXT: vpaddw %ymm0, %ymm4, %ymm0
; CHECK-NEXT: vpaddw %ymm0, %ymm3, %ymm0
; CHECK-NEXT: retq
%x2 = load <16 x i16>, <16 x i16>* %x2p
%res = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x2, i16 %x3)
%res1 = call <16 x i16> @llvm.x86.avx512.mask.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 -1)
%res2 = call <16 x i16> @llvm.x86.avx512.maskz.vpshldv.w.256(<16 x i16> %x0, <16 x i16> %x1, <16 x i16> %x4, i16 %x3)
%res3 = add <16 x i16> %res, %res1
%res4 = add <16 x i16> %res2, %res3
ret <16 x i16> %res4
}
declare <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
declare <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16>, <8 x i16>, <8 x i16>, i8)
define <8 x i16>@test_int_x86_avx512_mask_vpshldv_w_128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16>* %x2p, <8 x i16> %x4, i8 %x3) {
; CHECK-LABEL: test_int_x86_avx512_mask_vpshldv_w_128:
; CHECK: ## BB#0:
; CHECK-NEXT: kmovd %esi, %k1
; CHECK-NEXT: vmovdqa %xmm0, %xmm3
; CHECK-NEXT: vpshldvw (%rdi), %xmm1, %xmm3 {%k1}
; CHECK-NEXT: vmovdqa %xmm0, %xmm4
; CHECK-NEXT: vpshldvw %xmm2, %xmm1, %xmm4
; CHECK-NEXT: vpshldvw %xmm2, %xmm1, %xmm0 {%k1} {z}
; CHECK-NEXT: vpaddw %xmm0, %xmm4, %xmm0
; CHECK-NEXT: vpaddw %xmm0, %xmm3, %xmm0
; CHECK-NEXT: retq
%x2 = load <8 x i16>, <8 x i16>* %x2p
%res = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x2, i8 %x3)
%res1 = call <8 x i16> @llvm.x86.avx512.mask.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 -1)
%res2 = call <8 x i16> @llvm.x86.avx512.maskz.vpshldv.w.128(<8 x i16> %x0, <8 x i16> %x1, <8 x i16> %x4, i8 %x3)
%res3 = add <8 x i16> %res, %res1
%res4 = add <8 x i16> %res2, %res3
ret <8 x i16> %res4
}