mirror of https://github.com/RPCS3/llvm.git (synced 2024-11-28 22:20:43 +00:00)
[AVX-512] Remove masked pmuldq and pmuludq intrinsics and autoupgrade them to unmasked intrinsics plus a select.
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@290583 91177308-0d34-0410-b5e6-96231b3b80d8
This commit is contained in: parent b7ae55eb2c, commit 48ee9a80ae
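What the upgrade does, in IR terms: a call to one of the removed masked intrinsics becomes a call to the corresponding unmasked intrinsic followed by a vector select on the mask. A minimal sketch of the 512-bit signed case (value names are illustrative, not taken from the commit; a constant all-ones mask skips the select entirely):

  ; before: the old masked intrinsic
  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passthru, i8 %mask)

  ; after: unmasked multiply plus an explicit select on the mask bits
  %mul = call <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32> %a, <16 x i32> %b)
  %m   = bitcast i8 %mask to <8 x i1>
  %res = select <8 x i1> %m, <8 x i64> %mul, <8 x i64> %passthru

Instruction selection then folds the select back into a masked vpmuldq, which is what the updated tests below verify.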
include/llvm/IR/IntrinsicsX86.td
@@ -5187,26 +5187,8 @@ let TargetPrefix = "x86" in {
   def int_x86_avx512_mask_psubus_w_512 : GCCBuiltin<"__builtin_ia32_psubusw512_mask">,
           Intrinsic<[llvm_v32i16_ty], [llvm_v32i16_ty, llvm_v32i16_ty,
                      llvm_v32i16_ty, llvm_i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmulu_dq_128 : GCCBuiltin<"__builtin_ia32_pmuludq128_mask">,
-          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                     llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmul_dq_128 : GCCBuiltin<"__builtin_ia32_pmuldq128_mask">,
-          Intrinsic<[llvm_v2i64_ty], [llvm_v4i32_ty, llvm_v4i32_ty,
-                     llvm_v2i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmulu_dq_256 : GCCBuiltin<"__builtin_ia32_pmuludq256_mask">,
-          Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                     llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmul_dq_256 : GCCBuiltin<"__builtin_ia32_pmuldq256_mask">,
-          Intrinsic<[llvm_v4i64_ty], [llvm_v8i32_ty, llvm_v8i32_ty,
-                     llvm_v4i64_ty, llvm_i8_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512_mask">,
-          Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                     llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
   def int_x86_avx512_pmulu_dq_512 : GCCBuiltin<"__builtin_ia32_pmuludq512">,
           Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
-  def int_x86_avx512_mask_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512_mask">,
-          Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty,
-                     llvm_v8i64_ty, llvm_i8_ty], [IntrNoMem]>;
   def int_x86_avx512_pmul_dq_512 : GCCBuiltin<"__builtin_ia32_pmuldq512">,
           Intrinsic<[llvm_v8i64_ty], [llvm_v16i32_ty, llvm_v16i32_ty], [IntrNoMem]>;
   def int_x86_avx512_mask_pmulhu_w_512 : GCCBuiltin<"__builtin_ia32_pmulhuw512_mask">,
lib/IR/AutoUpgrade.cpp
@@ -296,6 +296,8 @@ static bool UpgradeIntrinsicFunction1(Function *F, Function *&NewFn) {
          Name.startswith("avx512.mask.pmull.") || // Added in 4.0
          Name.startswith("avx512.mask.cvtdq2pd.") || // Added in 4.0
          Name.startswith("avx512.mask.cvtudq2pd.") || // Added in 4.0
+         Name.startswith("avx512.mask.pmul.dq.") || // Added in 4.0
+         Name.startswith("avx512.mask.pmulu.dq.") || // Added in 4.0
          Name == "avx512.mask.add.pd.128" || // Added in 4.0
          Name == "avx512.mask.add.pd.256" || // Added in 4.0
          Name == "avx512.mask.add.ps.128" || // Added in 4.0
@@ -1452,6 +1454,30 @@ void llvm::UpgradeIntrinsicCall(CallInst *CI, Function *NewFn) {
       else
         llvm_unreachable("Unexpected intrinsic");

       Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
                                { CI->getArgOperand(0), CI->getArgOperand(1) });
       Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
                          CI->getArgOperand(2));
+    } else if (IsX86 && (Name.startswith("avx512.mask.pmul.dq.") ||
+                         Name.startswith("avx512.mask.pmulu.dq."))) {
+      bool IsUnsigned = Name[16] == 'u';
+      VectorType *VecTy = cast<VectorType>(CI->getType());
+      Intrinsic::ID IID;
+      if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128)
+        IID = Intrinsic::x86_sse41_pmuldq;
+      else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256)
+        IID = Intrinsic::x86_avx2_pmul_dq;
+      else if (!IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512)
+        IID = Intrinsic::x86_avx512_pmul_dq_512;
+      else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 128)
+        IID = Intrinsic::x86_sse2_pmulu_dq;
+      else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 256)
+        IID = Intrinsic::x86_avx2_pmulu_dq;
+      else if (IsUnsigned && VecTy->getPrimitiveSizeInBits() == 512)
+        IID = Intrinsic::x86_avx512_pmulu_dq_512;
+      else
+        llvm_unreachable("Unexpected intrinsic");
+
+      Rep = Builder.CreateCall(Intrinsic::getDeclaration(F->getParent(), IID),
+                               { CI->getArgOperand(0), CI->getArgOperand(1) });
+      Rep = EmitX86Select(Builder, CI->getArgOperand(3), Rep,
+                         CI->getArgOperand(2));
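A note on the matching and the select above: at this point Name has had the leading "llvm." and "x86." prefixes stripped, so Name[16] is 'u' exactly for the pmulu.dq names and '.' for pmul.dq. EmitX86Select (a helper defined earlier in AutoUpgrade.cpp) bitcasts the scalar i8 mask to <8 x i1> and, for results with fewer than eight lanes, extracts the low lanes before selecting. A sketch of the 128-bit case, assuming that helper's behavior at this revision (value names are illustrative):

  ; a <2 x i64> result needs only the low two mask bits
  %m8  = bitcast i8 %mask to <8 x i1>
  %m2  = shufflevector <8 x i1> %m8, <8 x i1> %m8, <2 x i32> <i32 0, i32 1>
  %res = select <2 x i1> %m2, <2 x i64> %mul, <2 x i64> %passthru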
lib/Target/X86/X86IntrinsicsInfo.h
@@ -1077,12 +1077,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
                      X86ISD::VTRUNCUS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmovus_wb_512, INTR_TYPE_1OP_MASK,
                      X86ISD::VTRUNCUS, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmul_dq_128, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULDQ, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmul_dq_256, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULDQ, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmul_dq_512, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULDQ, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmul_hr_sw_128, INTR_TYPE_2OP_MASK, X86ISD::MULHRS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmul_hr_sw_256, INTR_TYPE_2OP_MASK, X86ISD::MULHRS, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmul_hr_sw_512, INTR_TYPE_2OP_MASK, X86ISD::MULHRS, 0),
@@ -1098,12 +1092,6 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
                      X86ISD::MULTISHIFT, 0),
   X86_INTRINSIC_DATA(avx512_mask_pmultishift_qb_512, INTR_TYPE_2OP_MASK,
                      X86ISD::MULTISHIFT, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_128, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULUDQ, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_256, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULUDQ, 0),
-  X86_INTRINSIC_DATA(avx512_mask_pmulu_dq_512, INTR_TYPE_2OP_MASK,
-                     X86ISD::PMULUDQ, 0),
   X86_INTRINSIC_DATA(avx512_mask_prol_d_128, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
   X86_INTRINSIC_DATA(avx512_mask_prol_d_256, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
   X86_INTRINSIC_DATA(avx512_mask_prol_d_512, INTR_TYPE_2OP_IMM8_MASK, X86ISD::VROTLI, 0),
test/CodeGen/X86/avx512-intrinsics-upgrade.ll
@@ -2655,3 +2655,216 @@ define <16 x float>@test_int_x86_avx512_mask_vpermilvar_ps_512_constant_pool(<16
   ret <16 x float> %res4
 }

+define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
+; CHECK-LABEL: test_mask_mul_epi32_rr:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rrk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rrkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_mul_epi32_rm:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rmk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rmkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
+; CHECK-LABEL: test_mask_mul_epi32_rmb:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rmbk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epi32_rmbkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
+
+define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
+; CHECK-LABEL: test_mask_mul_epu32_rr:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rrk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rrkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %edi, %k1
+; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
+; CHECK-LABEL: test_mask_mul_epu32_rm:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rmk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rmkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %b = load <16 x i32>, <16 x i32>* %ptr_b
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
+; CHECK-LABEL: test_mask_mul_epu32_rmb:
+; CHECK: ## BB#0:
+; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rmbk:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
+; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
+; CHECK-LABEL: test_mask_mul_epu32_rmbkz:
+; CHECK: ## BB#0:
+; CHECK-NEXT: kmovw %esi, %k1
+; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
+; CHECK-NEXT: retq
+  %q = load i64, i64* %ptr_b
+  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
+  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
+  %b = bitcast <8 x i64> %b64 to <16 x i32>
+  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
+  ret < 8 x i64> %res
+}
+
+declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
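The tests above exercise the upgrade path: they still call the removed llvm.x86.avx512.mask.pmul.dq.512 and llvm.x86.avx512.mask.pmulu.dq.512 names, so the auto-upgrader rewrites each call when the module is parsed, and the CHECK lines confirm that codegen folds the resulting select back into a masked vpmuldq/vpmuludq. A typical RUN line for a test of this kind (assumed here; the excerpt does not show the file header):

  ; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=knl | FileCheck %s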
test/CodeGen/X86/avx512-intrinsics.ll
@@ -1632,113 +1632,6 @@ define <8 x i64> @test_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {

 declare <8 x i64> @llvm.x86.avx512.pmul.dq.512(<16 x i32>, <16 x i32>)

-define <8 x i64> @test_mask_mul_epi32_rr(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: test_mask_mul_epi32_rr:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rrk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rrkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmuldq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
-; CHECK-LABEL: test_mask_mul_epi32_rm:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rmk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rmkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuldq (%rdi), %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rmb(<16 x i32> %a, i64* %ptr_b) {
-; CHECK-LABEL: test_mask_mul_epi32_rmb:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rmbk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epi32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epi32_rmbkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuldq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-declare <8 x i64> @llvm.x86.avx512.mask.pmul.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
-
 define <8 x i64> @test_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
 ; CHECK-LABEL: test_mul_epu32_rr:
 ; CHECK: ## BB#0:
@@ -1858,113 +1751,6 @@ define <8 x i64> @test_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {

 declare <8 x i64> @llvm.x86.avx512.pmulu.dq.512(<16 x i32>, <16 x i32>)

-define <8 x i64> @test_mask_mul_epu32_rr(<16 x i32> %a, <16 x i32> %b) {
-; CHECK-LABEL: test_mask_mul_epu32_rr:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rrk(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rrk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm2 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm2, %zmm0
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rrkz(<16 x i32> %a, <16 x i32> %b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rrkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %edi, %k1
-; CHECK-NEXT: vpmuludq %zmm1, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rm(<16 x i32> %a, <16 x i32>* %ptr_b) {
-; CHECK-LABEL: test_mask_mul_epu32_rm:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rmk(<16 x i32> %a, <16 x i32>* %ptr_b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rmk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rmkz(<16 x i32> %a, <16 x i32>* %ptr_b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rmkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuludq (%rdi), %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %b = load <16 x i32>, <16 x i32>* %ptr_b
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rmb(<16 x i32> %a, i64* %ptr_b) {
-; CHECK-LABEL: test_mask_mul_epu32_rmb:
-; CHECK: ## BB#0:
-; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 -1)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rmbk(<16 x i32> %a, i64* %ptr_b, <8 x i64> %passThru, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rmbk:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm1 {%k1}
-; CHECK-NEXT: vmovdqa64 %zmm1, %zmm0
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> %passThru, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-define <8 x i64> @test_mask_mul_epu32_rmbkz(<16 x i32> %a, i64* %ptr_b, i8 %mask) {
-; CHECK-LABEL: test_mask_mul_epu32_rmbkz:
-; CHECK: ## BB#0:
-; CHECK-NEXT: kmovw %esi, %k1
-; CHECK-NEXT: vpmuludq (%rdi){1to8}, %zmm0, %zmm0 {%k1} {z}
-; CHECK-NEXT: retq
-  %q = load i64, i64* %ptr_b
-  %vecinit.i = insertelement <8 x i64> undef, i64 %q, i32 0
-  %b64 = shufflevector <8 x i64> %vecinit.i, <8 x i64> undef, <8 x i32> zeroinitializer
-  %b = bitcast <8 x i64> %b64 to <16 x i32>
-  %res = call <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32> %a, <16 x i32> %b, <8 x i64> zeroinitializer, i8 %mask)
-  ret < 8 x i64> %res
-}
-
-declare <8 x i64> @llvm.x86.avx512.mask.pmulu.dq.512(<16 x i32>, <16 x i32>, <8 x i64>, i8)
-
 define <16 x float> @test_mm512_maskz_add_round_ps_rn_sae(<16 x float> %a0, <16 x float> %a1, i16 %mask) {
 ; CHECK-LABEL: test_mm512_maskz_add_round_ps_rn_sae:
 ; CHECK: ## BB#0: