[Clang][RISCV] Guard vmulh, vsmul correctly
According to the V spec v1.0, `vmulh`, `vmulhu`, `vmulhsu` and `vsmul` are NOT supported for EEW=64 in Zve64*. This patch guards these builtins accordingly.

Authored by: Craig Topper <craig.topper@sifive.com> @craig.topper
Co-authored by: Eop Chen <eop.chen@sifive.com> @eopXD

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D117913
This commit is contained in:
Parent: c415ff186d
Commit: 970a191203
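For context, the `vmulh`, `vmulhu` and `vmulhsu` builtins being guarded are the overloaded C intrinsics exercised by the new test file below (`vsmul` is guarded the same way but is not part of that file). The following is a minimal sketch, not taken from the patch, of what an EEW=64 use looks like; the helper name `high_parts_i64` is illustrative only, and after this change such code needs the full V extension (the FullMultiply feature), not just Zve64*.

#include <riscv_vector.h>
#include <stddef.h>

// EEW=64 multiply-high operations: after this patch these overloaded calls
// require the FullMultiply feature (full V extension), not merely Zve64*.
vint64m1_t high_parts_i64(vint64m1_t a, vint64m1_t b, vuint64m1_t c, size_t vl) {
  vint64m1_t hi_signed = vmulh(a, b, vl);    // signed * signed, upper 64 bits
  vint64m1_t hi_mixed  = vmulhsu(a, c, vl);  // signed * unsigned, upper 64 bits
  vuint64m1_t hi_uns   = vmulhu(c, c, vl);   // unsigned * unsigned, upper 64 bits
  (void)hi_uns;
  return vmulh(hi_signed, hi_mixed, vl);     // still an EEW=64 operation
}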
@@ -1668,11 +1668,13 @@ defm vmax : RVVSignedBinBuiltinSet;
 
 // 12.10. Vector Single-Width Integer Multiply Instructions
 defm vmul : RVVIntBinBuiltinSet;
+let RequiredFeatures = ["FullMultiply"] in {
 defm vmulh : RVVSignedBinBuiltinSet;
 defm vmulhu : RVVUnsignedBinBuiltinSet;
 defm vmulhsu : RVVOutOp1BuiltinSet<"vmulhsu", "csil",
                                    [["vv", "v", "vvUv"],
                                     ["vx", "v", "vvUe"]]>;
+}
 
 // 12.11. Vector Integer Divide Instructions
 defm vdivu : RVVUnsignedBinBuiltinSet;
@@ -1759,7 +1761,9 @@ defm vasubu : RVVUnsignedBinBuiltinSet;
 defm vasub : RVVSignedBinBuiltinSet;
 
 // 13.3. Vector Single-Width Fractional Multiply with Rounding and Saturation
+let RequiredFeatures = ["FullMultiply"] in {
 defm vsmul : RVVSignedBinBuiltinSet;
+}
 
 // 13.4. Vector Single-Width Scaling Shift Instructions
 defm vssrl : RVVUnsignedShiftBuiltinSet;
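The guard is deliberately narrow: only the EEW=64 variants move behind FullMultiply, while narrower element widths remain available under Zve64*. Below is a sketch, not from the patch, of the EEW=32 case, which the existing overloaded test (retargeted to `-target-feature +zve64x` in the RUN-line hunk further down) keeps covering; the helper name is illustrative only.

#include <riscv_vector.h>
#include <stddef.h>

// EEW=32 multiply-high: still legal with Zve64x after this change.
vint32m1_t high_part_i32(vint32m1_t a, vint32m1_t b, size_t vl) {
  return vmulh(a, b, vl);
}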
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmul-eew64.c (new file, 440 lines)
@@ -0,0 +1,440 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu.
// NOTE: The purpose of separating these 3 instructions from vmul.c is that
// eew=64 versions only enable when V extension is specified. (Not for zve)

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
@@ -1,6 +1,6 @@
 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
 // REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
 
 #include <riscv_vector.h>
 
@@ -1120,78 +1120,6 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vmulh(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1516,78 +1444,6 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@@ -1912,78 +1768,6 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3100,78 +2884,6 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
  return vmulh(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3496,78 +3208,6 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
|
||||||
return vmulhu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@@ -3891,75 +3531,3 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
|
|
||||||
return vmulhsu(mask, maskedoff, op1, op2, vl);
|
|
||||||
}

159
clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vsmul-eew64.c
Normal file

@@ -0,0 +1,159 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// NOTE: The purpose of separating these 3 instructions from vsmul.c is that
// eew=64 versions only enable when V extension is specified. (Not for zve)

#include <riscv_vector.h>

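For context, a minimal standalone sketch of what the note above implies (this snippet is not part of the commit): with the new guard, the EEW=64 forms of these builtins are only accepted when the full V extension is enabled, which is why this file is driven with -target-feature +v while the trimmed vsmul.c further below switches to +zve64x. The driver-level -march strings in the comments are assumptions; the tests themselves invoke cc1 directly.

// eew64_demo.c, illustration only (hypothetical file name, not part of this commit).
// Assumed to compile with:      clang --target=riscv64 -march=rv64gcv -c eew64_demo.c
// Assumed to be rejected under: clang --target=riscv64 -march=rv64imac_zve64x -c eew64_demo.c
#include <riscv_vector.h>

vint64m1_t demo_vsmul(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsmul(op1, op2, vl);   // EEW=64 vsmul needs the V extension, not just Zve64x
}

vint64m1_t demo_vmulh(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vmulh(op1, op2, vl);   // the same guard covers vmulh, vmulhu and vmulhsu
}
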
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
|
||||||
|
vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
|
||||||
|
vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
|
||||||
|
vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
|
||||||
|
vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
|
||||||
|
vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
|
||||||
|
vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
|
||||||
|
vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
|
||||||
|
vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vsmul(mask, maskedoff, op1, op2, vl);
|
||||||
|
}

@@ -1,6 +1,6 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

@@ -328,78 +328,6 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vsmul(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(op1, op2, vl);
|
|
||||||
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)

@@ -762,83 +690,3 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, int32_t op2, size_t vl) {
  return vsmul(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
|
|
||||||
vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
|
|
||||||
vint64m1_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
|
|
||||||
vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
|
|
||||||
vint64m2_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
|
|
||||||
vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
|
|
||||||
vint64m4_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
|
|
||||||
vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
|
|
||||||
vint64m8_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vsmul(mask, maskedoff, op1, op2, vl);
|
|
||||||
}

440
clang/test/CodeGen/RISCV/rvv-intrinsics/vmul-eew64.c
Normal file

@@ -0,0 +1,440 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// NOTE: This test file contains eew=64 of vmulh, vmulhu, vmulhsu.
// NOTE: The purpose of separating these 3 instructions from vmul.c is that
// eew=64 versions only enable when V extension is specified. (Not for zve)

#include <riscv_vector.h>

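A brief note on the spellings that the non-overloaded tests below exercise: the operand form and element type are encoded in the intrinsic name, and the masked variants add an _m suffix and take (mask, maskedoff, op1, op2, vl). A minimal sketch, not part of the commit, using only calls that appear in this diff (the wrapper names are hypothetical):

#include <riscv_vector.h>

// Unmasked vector-scalar form: high half of the signed 64x64 product, for all elements.
vint64m1_t demo_unmasked(vint64m1_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m1(op1, op2, vl);
}

// Masked form (_m suffix): elements whose mask bit is clear come from maskedoff.
vint64m1_t demo_masked(vbool64_t mask, vint64m1_t maskedoff,
                       vint64m1_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
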
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m1(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m2(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m4(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m8(op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
||||||
|
return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
|
||||||
|
return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}
|
||||||
|
|
||||||
|
// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
|
||||||
|
// CHECK-RV64-NEXT: entry:
|
||||||
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
|
||||||
|
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
||||||
|
//
|
||||||
|
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
|
||||||
|
return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
|
||||||
|
}

@@ -1,6 +1,6 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
-// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

@@ -1120,78 +1120,6 @@ vint32m8_t test_vmulh_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vmulh_vx_i32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
|
|
||||||
return vmulh_vv_i64m1(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m1_t test_vmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh_vx_i64m1(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
|
|
||||||
return vmulh_vv_i64m2(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m2_t test_vmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh_vx_i64m2(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
|
|
||||||
return vmulh_vv_i64m4(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m4_t test_vmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh_vx_i64m4(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
|
|
||||||
return vmulh_vv_i64m8(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8(
|
|
||||||
// CHECK-RV64-NEXT: entry:
|
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
|
|
||||||
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
|
|
||||||
//
|
|
||||||
vint64m8_t test_vmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
|
|
||||||
return vmulh_vx_i64m8(op1, op2, vl);
|
|
||||||
}
|
|
||||||
|
|
||||||
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
|
// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8(
|
||||||
// CHECK-RV64-NEXT: entry:
|
// CHECK-RV64-NEXT: entry:
|
||||||
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
|
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
|
||||||
@ -1516,78 +1444,6 @@ vuint32m8_t test_vmulhu_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhu_vx_u32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhu_vv_u64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhu_vv_u64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhu_vv_u64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhu_vv_u64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
@ -1912,78 +1768,6 @@ vint32m8_t test_vmulhsu_vx_i32m8(vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu_vx_i32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1(vint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhsu_vv_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1(vint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2(vint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhsu_vv_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2(vint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4(vint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhsu_vv_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4(vint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8(vint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhsu_vv_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8(vint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@ -3100,78 +2884,6 @@ vint32m8_t test_vmulh_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t
  return vmulh_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vmulh_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vmulh_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vmulh_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vmulh_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulh_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
  return vmulh_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@ -3496,78 +3208,6 @@ vuint32m8_t test_vmulhu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32
  return vmulhu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmulhu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmulhu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmulhu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vv_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhu_vx_u64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmulhu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmulhsu.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@ -3891,75 +3531,3 @@ vint32m8_t test_vmulhsu_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8
vint32m8_t test_vmulhsu_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, uint32_t op2, size_t vl) {
  return vmulhsu_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t op2, size_t vl) {
  return vmulhsu_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmulhsu.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmulhsu_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t op2, size_t vl) {
  return vmulhsu_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmulhsu.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmulhsu_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t op2, size_t vl) {
  return vmulhsu_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmulhsu.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmulhsu_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t op2, size_t vl) {
  return vmulhsu_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vmulhsu_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmulhsu.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmulhsu_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, uint64_t op2, size_t vl) {
  return vmulhsu_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
159
clang/test/CodeGen/RISCV/rvv-intrinsics/vsmul-eew64.c
Normal file
@ -0,0 +1,159 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// NOTE: The purpose of separating these 3 instructions from vsmul.c is that
// eew=64 versions only enable when V extension is specified. (Not for zve)

#include <riscv_vector.h>

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsmul_vv_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vsmul_vv_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vsmul_vv_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vsmul_vv_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
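The NOTE at the top of this new file is the point of the whole split: after this patch the EEW=64 vsmul intrinsics are only exposed when the full V extension is in effect, not under a Zve64* profile. As a hypothetical user-side sketch (the wrapper name and the fallback are illustrative and not part of this commit), code that must also build for Zve64x can test the predefined macro the emitter keys on before touching the EEW=64 form:

#include <riscv_vector.h>

// Hypothetical wrapper: only the full V extension exposes the EEW=64 vsmul.
static inline vint64m1_t fixed_point_mul_i64m1(vint64m1_t a, vint64m1_t b,
                                               size_t vl) {
#if defined(__riscv_v)
  return vsmul_vv_i64m1(a, b, vl); // guarded: needs V, not just Zve64x
#else
  return a; // placeholder fallback for Zve64* builds (assumption, not from the commit)
#endif
}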
@ -1,6 +1,6 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
// RUN: %clang_cc1 -triple riscv64 -target-feature +zve64x -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
@ -328,78 +328,6 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
  return vsmul_vx_i32m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsmul_vv_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m1(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vsmul_vv_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m2(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vsmul_vv_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m4(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vsmul_vv_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m8(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i8mf8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
@ -762,83 +690,3 @@ vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, int32_t op2, size_t vl) {
  return vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, vint64m1_t op2, size_t vl) {
  return vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, vint64m2_t op2, size_t vl) {
  return vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, vint64m4_t op2, size_t vl) {
  return vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, vint64m8_t op2, size_t vl) {
  return vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vsmul_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, int64_t op2, size_t vl) {
  return vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
@ -141,11 +141,12 @@ using RISCVPredefinedMacroT = uint8_t;

enum RISCVPredefinedMacro : RISCVPredefinedMacroT {
  Basic = 0,
  Zfh = 1 << 1,
  V = 1 << 1,
  RV64 = 1 << 2,
  Zfh = 1 << 2,
  VectorMaxELen64 = 1 << 3,
  RV64 = 1 << 3,
  VectorMaxELenFp32 = 1 << 4,
  VectorMaxELen64 = 1 << 4,
  VectorMaxELenFp64 = 1 << 5,
  VectorMaxELenFp32 = 1 << 5,
  VectorMaxELenFp64 = 1 << 6,
};

// TODO refactor RVVIntrinsic class design after support all intrinsic
@ -808,6 +809,11 @@ RVVIntrinsic::RVVIntrinsic(StringRef NewName, StringRef Suffix,
  for (auto Feature : RequiredFeatures) {
    if (Feature == "RV64")
      RISCVPredefinedMacros |= RISCVPredefinedMacro::RV64;
    // Note: Full multiply instruction (mulh, mulhu, mulhsu, smul) for EEW=64
    // require V.
    if (Feature == "FullMultiply" &&
        (RISCVPredefinedMacros & RISCVPredefinedMacro::VectorMaxELen64))
      RISCVPredefinedMacros |= RISCVPredefinedMacro::V;
  }

  // Init OutputType and InputTypes
@ -1314,6 +1320,8 @@ bool RVVEmitter::emitMacroRestrictionStr(RISCVPredefinedMacroT PredefinedMacros,
    return false;
  OS << "#if ";
  ListSeparator LS(" && ");
  if (PredefinedMacros & RISCVPredefinedMacro::V)
    OS << LS << "defined(__riscv_v)";
  if (PredefinedMacros & RISCVPredefinedMacro::Zfh)
    OS << LS << "defined(__riscv_zfh)";
  if (PredefinedMacros & RISCVPredefinedMacro::RV64)
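Taken together, these emitter changes mean that a builtin tagged RequiredFeatures = ["FullMultiply"] whose type info carries VectorMaxELen64 now also sets the new V predefined-macro bit, and emitMacroRestrictionStr turns that bit into a defined(__riscv_v) test (possibly joined with other tests via " && "). A minimal sketch of the resulting guard in the generated riscv_vector.h; the declaration shown is illustrative only, not the emitter's exact output:

#if defined(__riscv_v)
// EEW=64 forms of vmulh/vmulhu/vmulhsu/vsmul are only declared here, e.g.:
vint64m1_t vmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl);
#endif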