[X86][SSE] Auto upgrade PADDS/PSUBS intrinsics to SADD_SAT/SSUB_SAT generic intrinsics (llvm)
Pulled out of D55894 to match the clang changes in D55890.

Differential Revision: https://reviews.llvm.org/D55890

llvm-svn: 349744
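In practice the upgrade is a one-for-one call rewrite: old bitcode that calls the x86-specific saturating add/sub intrinsics is rewritten to the target-independent saturating intrinsics, which the X86 backend still lowers to the same PADDS/PSUBS instructions (the CHECK lines below confirm this). A minimal before/after sketch in LLVM IR — the function names @sat_add_old and @sat_add_new are illustrative, not part of the commit:

; Before: target-specific SSE2 saturating add, as emitted by older clang.
declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>)

define <16 x i8> @sat_add_old(<16 x i8> %a, <16 x i8> %b) {
  %r = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %r
}

; After auto-upgrade: the generic saturating intrinsic with the same semantics.
declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)

define <16 x i8> @sat_add_new(<16 x i8> %a, <16 x i8> %b) {
  %r = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
  ret <16 x i8> %r
}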
@@ -364,16 +364,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
 
 // Integer arithmetic ops.
 let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
-  def int_x86_sse2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb128">,
+  def int_x86_sse2_padds_b :
       Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                  llvm_v16i8_ty], [IntrNoMem, Commutative]>;
-  def int_x86_sse2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw128">,
+  def int_x86_sse2_padds_w :
       Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                  llvm_v8i16_ty], [IntrNoMem, Commutative]>;
-  def int_x86_sse2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb128">,
+  def int_x86_sse2_psubs_b :
       Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty,
                  llvm_v16i8_ty], [IntrNoMem]>;
-  def int_x86_sse2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw128">,
+  def int_x86_sse2_psubs_w :
       Intrinsic<[llvm_v8i16_ty], [llvm_v8i16_ty,
                  llvm_v8i16_ty], [IntrNoMem]>;
   def int_x86_sse2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw128">,
@@ -1346,16 +1346,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
 
 // Integer arithmetic ops.
 let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
-  def int_x86_avx2_padds_b : GCCBuiltin<"__builtin_ia32_paddsb256">,
+  def int_x86_avx2_padds_b :
       Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                  llvm_v32i8_ty], [IntrNoMem, Commutative]>;
-  def int_x86_avx2_padds_w : GCCBuiltin<"__builtin_ia32_paddsw256">,
+  def int_x86_avx2_padds_w :
       Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                  llvm_v16i16_ty], [IntrNoMem, Commutative]>;
-  def int_x86_avx2_psubs_b : GCCBuiltin<"__builtin_ia32_psubsb256">,
+  def int_x86_avx2_psubs_b :
       Intrinsic<[llvm_v32i8_ty], [llvm_v32i8_ty,
                  llvm_v32i8_ty], [IntrNoMem]>;
-  def int_x86_avx2_psubs_w : GCCBuiltin<"__builtin_ia32_psubsw256">,
+  def int_x86_avx2_psubs_w :
       Intrinsic<[llvm_v16i16_ty], [llvm_v16i16_ty,
                  llvm_v16i16_ty], [IntrNoMem]>;
   def int_x86_avx2_pmulhu_w : GCCBuiltin<"__builtin_ia32_pmulhuw256">,
@@ -3677,16 +3677,16 @@ let TargetPrefix = "x86" in { // All intrinsics start with "llvm.x86.".
 }
 // Integer arithmetic ops
 let TargetPrefix = "x86" in {
-  def int_x86_avx512_padds_b_512 : GCCBuiltin<"__builtin_ia32_paddsb512">,
+  def int_x86_avx512_padds_b_512 :
       Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                 [IntrNoMem]>;
-  def int_x86_avx512_padds_w_512 : GCCBuiltin<"__builtin_ia32_paddsw512">,
+  def int_x86_avx512_padds_w_512 :
       Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                 [IntrNoMem]>;
-  def int_x86_avx512_psubs_b_512 : GCCBuiltin<"__builtin_ia32_psubsb512">,
+  def int_x86_avx512_psubs_b_512 :
       Intrinsic<[llvm_v64i8_ty], [llvm_v64i8_ty, llvm_v64i8_ty],
                 [IntrNoMem]>;
-  def int_x86_avx512_psubs_w_512 : GCCBuiltin<"__builtin_ia32_psubsw512">,
+  def int_x86_avx512_psubs_w_512 :
       Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty],
                 [IntrNoMem]>;
   def int_x86_avx512_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512">,

@@ -98,11 +98,11 @@ define <4 x i64> @test_mm256_adds_epi8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %res = call <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8> %arg0, <32 x i8> %arg1)
+  %res = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
   %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <32 x i8> @llvm.x86.avx2.padds.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_adds_epi16:
@@ -111,11 +111,11 @@ define <4 x i64> @test_mm256_adds_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %res = call <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16> %arg0, <16 x i16> %arg1)
+  %res = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
   %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <16 x i16> @llvm.x86.avx2.padds.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <4 x i64> @test_mm256_adds_epu8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_adds_epu8:
@@ -2527,11 +2527,11 @@ define <4 x i64> @test_mm256_subs_epi8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <32 x i8>
   %arg1 = bitcast <4 x i64> %a1 to <32 x i8>
-  %res = call <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8> %arg0, <32 x i8> %arg1)
+  %res = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %arg0, <32 x i8> %arg1)
   %bc = bitcast <32 x i8> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <32 x i8> @llvm.x86.avx2.psubs.b(<32 x i8>, <32 x i8>) nounwind readnone
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>) nounwind readnone
 
 define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_subs_epi16:
@@ -2540,11 +2540,11 @@ define <4 x i64> @test_mm256_subs_epi16(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-NEXT: ret{{[l|q]}}
   %arg0 = bitcast <4 x i64> %a0 to <16 x i16>
   %arg1 = bitcast <4 x i64> %a1 to <16 x i16>
-  %res = call <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16> %arg0, <16 x i16> %arg1)
+  %res = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %arg0, <16 x i16> %arg1)
   %bc = bitcast <16 x i16> %res to <4 x i64>
   ret <4 x i64> %bc
 }
-declare <16 x i16> @llvm.x86.avx2.psubs.w(<16 x i16>, <16 x i16>) nounwind readnone
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>) nounwind readnone
 
 define <4 x i64> @test_mm256_subs_epu8(<4 x i64> %a0, <4 x i64> %a1) {
 ; CHECK-LABEL: test_mm256_subs_epu8:

@@ -2,8 +2,292 @@
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512BW
 ; RUN: llc < %s -mtriple=i386-unknown-linux-gnu -mcpu=knl -mattr=+avx512bw | FileCheck %s --check-prefix=ALL --check-prefix=AVX512F-32
 
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512bw-builtins.c
 
+;
+; Signed Saturation
+;
+
+define <32 x i16> @test_mask_adds_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rr_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rr_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+  %res = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %res
+}
+declare <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16>, <32 x i16>)
+
+define <32 x i16> @test_mask_adds_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rrk_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rrk_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rrkz_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rrkz_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpaddsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rm_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rm_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %1
+}
+
+define <32 x i16> @test_mask_adds_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rmk_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rmk_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> %passThru
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_adds_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rmkz_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpaddsw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rmkz_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpaddsw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %1 = call <32 x i16> @llvm.sadd.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i16> %1, <32 x i16> zeroinitializer
+  ret <32 x i16> %3
+}
+
+define <32 x i16> @test_mask_subs_epi16_rr_512(<32 x i16> %a, <32 x i16> %b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rr_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rr_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %sub
+}
+declare <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16>, <32 x i16>)
+
+define <32 x i16> @test_mask_subs_epi16_rrk_512(<32 x i16> %a, <32 x i16> %b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rrk_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rrk_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm2 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm2, %zmm0
+; AVX512F-32-NEXT: retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rrkz_512(<32 x i16> %a, <32 x i16> %b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rrkz_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %edi, %k1
+; AVX512BW-NEXT: vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rrkz_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpsubsw %zmm1, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rm_512(<32 x i16> %a, <32 x i16>* %ptr_b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rm_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rm_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm0
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  ret <32 x i16> %sub
+}
+
+define <32 x i16> @test_mask_subs_epi16_rmk_512(<32 x i16> %a, <32 x i16>* %ptr_b, <32 x i16> %passThru, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rmk_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm1 {%k1}
+; AVX512BW-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rmk_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm1 {%k1}
+; AVX512F-32-NEXT: vmovdqa64 %zmm1, %zmm0
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> %passThru
+  ret <32 x i16> %res
+}
+
+define <32 x i16> @test_mask_subs_epi16_rmkz_512(<32 x i16> %a, <32 x i16>* %ptr_b, i32 %mask) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rmkz_512:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: kmovd %esi, %k1
+; AVX512BW-NEXT: vpsubsw (%rdi), %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rmkz_512:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: movl {{[0-9]+}}(%esp), %eax
+; AVX512F-32-NEXT: kmovd {{[0-9]+}}(%esp), %k1
+; AVX512F-32-NEXT: vpsubsw (%eax), %zmm0, %zmm0 {%k1} {z}
+; AVX512F-32-NEXT: retl
+  %b = load <32 x i16>, <32 x i16>* %ptr_b
+  %sub = call <32 x i16> @llvm.ssub.sat.v32i16(<32 x i16> %a, <32 x i16> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i16> %sub, <32 x i16> zeroinitializer
+  ret <32 x i16> %res
+}
+
+
+define <64 x i16> @test_mask_adds_epi16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
+; AVX512BW-LABEL: test_mask_adds_epi16_rr_1024:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpaddsw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpaddsw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_adds_epi16_rr_1024:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: pushl %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
+; AVX512F-32-NEXT: .cfi_offset %ebp, -8
+; AVX512F-32-NEXT: movl %esp, %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa_register %ebp
+; AVX512F-32-NEXT: andl $-64, %esp
+; AVX512F-32-NEXT: subl $64, %esp
+; AVX512F-32-NEXT: vpaddsw %zmm2, %zmm0, %zmm0
+; AVX512F-32-NEXT: vpaddsw 8(%ebp), %zmm1, %zmm1
+; AVX512F-32-NEXT: movl %ebp, %esp
+; AVX512F-32-NEXT: popl %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa %esp, 4
+; AVX512F-32-NEXT: retl
+  %1 = call <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+  ret <64 x i16> %1
+}
+declare <64 x i16> @llvm.sadd.sat.v64i16(<64 x i16>, <64 x i16>)
+
+define <64 x i16> @test_mask_subs_epi16_rr_1024(<64 x i16> %a, <64 x i16> %b) {
+; AVX512BW-LABEL: test_mask_subs_epi16_rr_1024:
+; AVX512BW: ## %bb.0:
+; AVX512BW-NEXT: vpsubsw %zmm2, %zmm0, %zmm0
+; AVX512BW-NEXT: vpsubsw %zmm3, %zmm1, %zmm1
+; AVX512BW-NEXT: retq
+;
+; AVX512F-32-LABEL: test_mask_subs_epi16_rr_1024:
+; AVX512F-32: # %bb.0:
+; AVX512F-32-NEXT: pushl %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa_offset 8
+; AVX512F-32-NEXT: .cfi_offset %ebp, -8
+; AVX512F-32-NEXT: movl %esp, %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa_register %ebp
+; AVX512F-32-NEXT: andl $-64, %esp
+; AVX512F-32-NEXT: subl $64, %esp
+; AVX512F-32-NEXT: vpsubsw %zmm2, %zmm0, %zmm0
+; AVX512F-32-NEXT: vpsubsw 8(%ebp), %zmm1, %zmm1
+; AVX512F-32-NEXT: movl %ebp, %esp
+; AVX512F-32-NEXT: popl %ebp
+; AVX512F-32-NEXT: .cfi_def_cfa %esp, 4
+; AVX512F-32-NEXT: retl
+  %sub = call <64 x i16> @llvm.ssub.sat.v64i16(<64 x i16> %a, <64 x i16> %b)
+  ret <64 x i16> %sub
+}
+declare <64 x i16> @llvm.ssub.sat.v64i16(<64 x i16>, <64 x i16>);
+
+;
+; Unsigned Saturation
+;
+
 define <32 x i16> @test_mask_adds_epu16_rr_512(<32 x i16> %a, <32 x i16> %b) {
 ; AVX512BW-LABEL: test_mask_adds_epu16_rr_512:

@@ -1,9 +1,593 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl -mattr=+avx512bw -mattr=+avx512vl --show-mc-encoding| FileCheck %s
 
-; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/sse2-builtins.c
+; NOTE: This should use IR equivalent to what is generated by clang/test/CodeGen/avx512vlbw-builtins.c
 
 
+;
+; Signed Saturation
+;
+
+define <8 x i16> @test_mask_adds_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_adds_epi16_rr_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
+}
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_mask_adds_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi16_rm_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xed,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %1
+}
+
+define <8 x i16> @test_mask_adds_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xed,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> %passThru
+  ret <8 x i16> %3
+}
+
+define <8 x i16> @test_mask_adds_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xed,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %1 = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %2 = bitcast i8 %mask to <8 x i1>
+  %3 = select <8 x i1> %2, <8 x i16> %1, <8 x i16> zeroinitializer
+  ret <8 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_adds_epi16_rr_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
+}
+declare <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @test_mask_adds_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rrkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi16_rm_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xed,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %1
+}
+
+define <16 x i16> @test_mask_adds_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xed,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> %passThru
+  ret <16 x i16> %3
+}
+
+define <16 x i16> @test_mask_adds_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi16_rmkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xed,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %1 = call <16 x i16> @llvm.sadd.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i16> %1, <16 x i16> zeroinitializer
+  ret <16 x i16> %3
+}
+
+define <8 x i16> @test_mask_subs_epi16_rr_128(<8 x i16> %a, <8 x i16> %b) {
+; CHECK-LABEL: test_mask_subs_epi16_rr_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %sub
+}
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>)
+
+define <8 x i16> @test_mask_subs_epi16_rrk_128(<8 x i16> %a, <8 x i16> %b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rrkz_128(<8 x i16> %a, <8 x i16> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsw %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rm_128(<8 x i16> %a, <8 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi16_rm_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe9,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  ret <8 x i16> %sub
+}
+
+define <8 x i16> @test_mask_subs_epi16_rmk_128(<8 x i16> %a, <8 x i16>* %ptr_b, <8 x i16> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe9,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> %passThru
+  ret <8 x i16> %res
+}
+
+define <8 x i16> @test_mask_subs_epi16_rmkz_128(<8 x i16> %a, <8 x i16>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsw (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe9,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <8 x i16>, <8 x i16>* %ptr_b
+  %sub = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %a, <8 x i16> %b)
+  %bc = bitcast i8 %mask to <8 x i1>
+  %res = select <8 x i1> %bc, <8 x i16> %sub, <8 x i16> zeroinitializer
+  ret <8 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rr_256(<16 x i16> %a, <16 x i16> %b) {
+; CHECK-LABEL: test_mask_subs_epi16_rr_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %sub
+}
+declare <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16>, <16 x i16>)
+
+define <16 x i16> @test_mask_subs_epi16_rrk_256(<16 x i16> %a, <16 x i16> %b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rrkz_256(<16 x i16> %a, <16 x i16> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rrkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsw %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rm_256(<16 x i16> %a, <16 x i16>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi16_rm_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe9,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  ret <16 x i16> %sub
+}
+
+define <16 x i16> @test_mask_subs_epi16_rmk_256(<16 x i16> %a, <16 x i16>* %ptr_b, <16 x i16> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe9,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> %passThru
+  ret <16 x i16> %res
+}
+
+define <16 x i16> @test_mask_subs_epi16_rmkz_256(<16 x i16> %a, <16 x i16>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi16_rmkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsw (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe9,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i16>, <16 x i16>* %ptr_b
+  %sub = call <16 x i16> @llvm.ssub.sat.v16i16(<16 x i16> %a, <16 x i16> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i16> %sub, <16 x i16> zeroinitializer
+  ret <16 x i16> %res
+}
+
+define <16 x i8> @test_mask_adds_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_mask_adds_epi8_rr_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
+}
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_mask_adds_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi8_rm_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xec,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %1
+}
+
+define <16 x i8> @test_mask_adds_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xec,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> %passThru
+  ret <16 x i8> %3
+}
+
+define <16 x i8> @test_mask_adds_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xec,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %1 = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %2 = bitcast i16 %mask to <16 x i1>
+  %3 = select <16 x i1> %2, <16 x i8> %1, <16 x i8> zeroinitializer
+  ret <16 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: test_mask_adds_epi8_rr_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
+}
+declare <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @test_mask_adds_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rrkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpaddsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_adds_epi8_rm_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xec,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %1
+}
+
+define <32 x i8> @test_mask_adds_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xec,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> %passThru
+  ret <32 x i8> %3
+}
+
+define <32 x i8> @test_mask_adds_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_adds_epi8_rmkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpaddsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xec,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %1 = call <32 x i8> @llvm.sadd.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %2 = bitcast i32 %mask to <32 x i1>
+  %3 = select <32 x i1> %2, <32 x i8> %1, <32 x i8> zeroinitializer
+  ret <32 x i8> %3
+}
+
+define <16 x i8> @test_mask_subs_epi8_rr_128(<16 x i8> %a, <16 x i8> %b) {
+; CHECK-LABEL: test_mask_subs_epi8_rr_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %sub
+}
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>)
+
+define <16 x i8> @test_mask_subs_epi8_rrk_128(<16 x i8> %a, <16 x i8> %b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0xd1]
+; CHECK-NEXT: vmovdqa %xmm2, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rrkz_128(<16 x i8> %a, <16 x i8> %b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsb %xmm1, %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rm_128(<16 x i8> %a, <16 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi8_rm_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0xe8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  ret <16 x i8> %sub
+}
+
+define <16 x i8> @test_mask_subs_epi8_rmk_128(<16 x i8> %a, <16 x i8>* %ptr_b, <16 x i8> %passThru, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmk_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x09,0xe8,0x0f]
+; CHECK-NEXT: vmovdqa %xmm1, %xmm0 ## EVEX TO VEX Compression encoding: [0xc5,0xf9,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> %passThru
+  ret <16 x i8> %res
+}
+
+define <16 x i8> @test_mask_subs_epi8_rmkz_128(<16 x i8> %a, <16 x i8>* %ptr_b, i16 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmkz_128:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsb (%rdi), %xmm0, %xmm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0x89,0xe8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <16 x i8>, <16 x i8>* %ptr_b
+  %sub = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %a, <16 x i8> %b)
+  %bc = bitcast i16 %mask to <16 x i1>
+  %res = select <16 x i1> %bc, <16 x i8> %sub, <16 x i8> zeroinitializer
+  ret <16 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rr_256(<32 x i8> %a, <32 x i8> %b) {
+; CHECK-LABEL: test_mask_subs_epi8_rr_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %sub
+}
+declare <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8>, <32 x i8>)
+
+define <32 x i8> @test_mask_subs_epi8_rrk_256(<32 x i8> %a, <32 x i8> %b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm2 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0xd1]
+; CHECK-NEXT: vmovdqa %ymm2, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc2]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rrkz_256(<32 x i8> %a, <32 x i8> %b, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rrkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %edi, %k1 ## encoding: [0xc5,0xfb,0x92,0xcf]
+; CHECK-NEXT: vpsubsb %ymm1, %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rm_256(<32 x i8> %a, <32 x i8>* %ptr_b) {
+; CHECK-LABEL: test_mask_subs_epi8_rm_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0xe8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  ret <32 x i8> %sub
+}
+
+define <32 x i8> @test_mask_subs_epi8_rmk_256(<32 x i8> %a, <32 x i8>* %ptr_b, <32 x i8> %passThru, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmk_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm1 {%k1} ## encoding: [0x62,0xf1,0x7d,0x29,0xe8,0x0f]
+; CHECK-NEXT: vmovdqa %ymm1, %ymm0 ## EVEX TO VEX Compression encoding: [0xc5,0xfd,0x6f,0xc1]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> %passThru
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @test_mask_subs_epi8_rmkz_256(<32 x i8> %a, <32 x i8>* %ptr_b, i32 %mask) {
+; CHECK-LABEL: test_mask_subs_epi8_rmkz_256:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: kmovd %esi, %k1 ## encoding: [0xc5,0xfb,0x92,0xce]
+; CHECK-NEXT: vpsubsb (%rdi), %ymm0, %ymm0 {%k1} {z} ## encoding: [0x62,0xf1,0x7d,0xa9,0xe8,0x07]
+; CHECK-NEXT: retq ## encoding: [0xc3]
+  %b = load <32 x i8>, <32 x i8>* %ptr_b
+  %sub = call <32 x i8> @llvm.ssub.sat.v32i8(<32 x i8> %a, <32 x i8> %b)
+  %bc = bitcast i32 %mask to <32 x i1>
+  %res = select <32 x i1> %bc, <32 x i8> %sub, <32 x i8> zeroinitializer
+  ret <32 x i8> %res
+}
+
+;
+; Unsigned Saturation
+;
+
 define <8 x i16> @test_mask_adds_epu16_rr_128(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: test_mask_adds_epu16_rr_128:
 ; CHECK: ## %bb.0:

@@ -151,11 +151,11 @@ define <2 x i64> @test_mm_adds_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8> %arg0, <16 x i8> %arg1)
+  %res = call <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.sse2.padds.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.sadd.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_adds_epi16:
@@ -174,11 +174,11 @@ define <2 x i64> @test_mm_adds_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16> %arg0, <8 x i16> %arg1)
+  %res = call <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.sse2.padds.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.sadd.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_adds_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_adds_epu8:
@@ -6174,11 +6174,11 @@ define <2 x i64> @test_mm_subs_epi8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <16 x i8>
   %arg1 = bitcast <2 x i64> %a1 to <16 x i8>
-  %res = call <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8> %arg0, <16 x i8> %arg1)
+  %res = call <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8> %arg0, <16 x i8> %arg1)
   %bc = bitcast <16 x i8> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <16 x i8> @llvm.x86.sse2.psubs.b(<16 x i8>, <16 x i8>) nounwind readnone
+declare <16 x i8> @llvm.ssub.sat.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_subs_epi16:
@@ -6197,11 +6197,11 @@ define <2 x i64> @test_mm_subs_epi16(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; AVX512-NEXT: ret{{[l|q]}} # encoding: [0xc3]
   %arg0 = bitcast <2 x i64> %a0 to <8 x i16>
   %arg1 = bitcast <2 x i64> %a1 to <8 x i16>
-  %res = call <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16> %arg0, <8 x i16> %arg1)
+  %res = call <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16> %arg0, <8 x i16> %arg1)
   %bc = bitcast <8 x i16> %res to <2 x i64>
   ret <2 x i64> %bc
 }
-declare <8 x i16> @llvm.x86.sse2.psubs.w(<8 x i16>, <8 x i16>) nounwind readnone
+declare <8 x i16> @llvm.ssub.sat.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
 
 define <2 x i64> @test_mm_subs_epu8(<2 x i64> %a0, <2 x i64> %a1) nounwind {
 ; SSE-LABEL: test_mm_subs_epu8: