[RISCV] Merge RV32/RV64 CHECK lines in strided vp load/store tests. NFC

Author: Luke Lau
Date: 2023-09-19 12:24:32 +01:00
parent a0768b8237
commit 73c2cb5999
2 changed files with 474 additions and 1012 deletions
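The change is mechanical: each FileCheck invocation now lists a shared CHECK prefix ahead of the per-target prefix (-check-prefixes=CHECK,CHECK-RV32 and -check-prefixes=CHECK,CHECK-RV64), so update_llc_test_checks.py emits a single merged CHECK block wherever RV32 and RV64 codegen is identical and keeps separate CHECK-RV32/CHECK-RV64 blocks only where they diverge. A minimal sketch of the resulting pattern, using a hypothetical function name and a trimmed -mattr list (the real RUN lines and test functions appear in the diff below):

; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s \
; RUN:   -check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s \
; RUN:   -check-prefixes=CHECK,CHECK-RV64

declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

; Codegen is identical on both targets, so one merged CHECK block suffices.
define <vscale x 1 x i8> @example_strided_load(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: example_strided_load:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %v
}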


@@ -1,23 +1,19 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+v,+zvfh \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV32
; RUN: -verify-machineinstrs < %s | FileCheck %s \
; RUN: -check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+v,+zvfh \
; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK-RV64
; RUN: -verify-machineinstrs < %s | FileCheck %s \
; RUN: -check-prefixes=CHECK,CHECK-RV64
declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i8_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i8> %load
}
@@ -25,17 +21,11 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride,
declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr, i16, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i8_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i8> %load
}
@@ -79,33 +69,21 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 s
declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x i8> @strided_vpload_nxv1i8(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i8> %load
}
define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i8_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
@@ -115,17 +93,11 @@ define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signe
declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x i8> @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i8:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i8:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i8> %load
}
@@ -133,17 +105,11 @@ define <vscale x 2 x i8> @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, <
declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x i8> @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i8:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i8:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i8> %load
}
@@ -151,33 +117,21 @@ define <vscale x 4 x i8> @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, <
declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i8:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i8:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i8> %load
}
define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i8_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i8_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8i8_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
@@ -187,17 +141,11 @@ define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signe
declare <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x i16> @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i16> %load
}
@@ -205,33 +153,21 @@ define <vscale x 1 x i16> @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride,
declare <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x i16> @strided_vpload_nxv2i16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i16> %load
}
define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i16_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i16_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2i16_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
@@ -241,17 +177,11 @@ define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 sig
declare <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i16> %load
}
@@ -259,17 +189,11 @@ define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride,
declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i16> %load
}
@@ -277,17 +201,11 @@ define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride,
declare <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x i32> @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i32> %load
}
@@ -295,17 +213,11 @@ define <vscale x 1 x i32> @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride,
declare <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i32> %load
}
@@ -313,33 +225,21 @@ define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride,
declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x i32> @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i32> %load
}
define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i32_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i32_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4i32_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
@@ -349,17 +249,11 @@ define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 sig
declare <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x i32> @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i32> %load
}
@@ -367,33 +261,21 @@ define <vscale x 8 x i32> @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride,
declare <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x i64> %load
}
define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i64_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i64_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1i64_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
%load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
@@ -403,17 +285,11 @@ define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 sig
declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x i64> @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x i64> %load
}
@@ -421,17 +297,11 @@ define <vscale x 2 x i64> @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride,
declare <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x i64> @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x i64> %load
}
@@ -439,17 +309,11 @@ define <vscale x 4 x i64> @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride,
declare <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x i64> @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x i64> %load
}
@@ -457,17 +321,11 @@ define <vscale x 8 x i64> @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride,
declare <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x half> @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x half> %load
}
@@ -475,33 +333,21 @@ define <vscale x 1 x half> @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride
declare <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x half> @strided_vpload_nxv2f16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x half> %load
}
define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f16_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f16_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2f16_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
%load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
@@ -511,17 +357,11 @@ define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 si
declare <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x half> %load
}
@@ -529,17 +369,11 @@ define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride
declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f16:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f16:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8f16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x half> %load
}
@@ -547,17 +381,11 @@ define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride
declare <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x float> @strided_vpload_nxv1f32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x float> %load
}
@@ -565,17 +393,11 @@ define <vscale x 1 x float> @strided_vpload_nxv1f32(ptr %ptr, i32 signext %strid
declare <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x float> %load
}
@@ -583,17 +405,11 @@ define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %strid
declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x float> %load
}
@@ -601,33 +417,21 @@ define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %strid
declare <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x float> @strided_vpload_nxv8f32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f32:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f32:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x float> %load
}
define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f32_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f32_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8f32_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vlse32.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
%load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
@@ -637,17 +441,11 @@ define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 s
declare <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)
define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
ret <vscale x 1 x double> %load
}
@@ -655,17 +453,11 @@ define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stri
declare <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x double> @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
ret <vscale x 2 x double> %load
}
@@ -673,33 +465,21 @@ define <vscale x 2 x double> @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stri
declare <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x double> @strided_vpload_nxv4f64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
ret <vscale x 4 x double> %load
}
define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f64_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f64_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv4f64_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: ret
%a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
%b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
%load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
@@ -709,50 +489,32 @@ define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32
declare <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x double> @strided_vpload_nxv8f64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
ret <vscale x 8 x double> %load
}
; Widening
define <vscale x 3 x double> @strided_vpload_nxv3f64(ptr %ptr, i32 signext %stride, <vscale x 3 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv3f64:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv3f64:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv3f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %mask, i32 %evl)
ret <vscale x 3 x double> %v
}
define <vscale x 3 x double> @strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv3f64_allones_mask:
; CHECK-RV32: # %bb.0:
; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv3f64_allones_mask:
; CHECK-RV64: # %bb.0:
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1
; CHECK-RV64-NEXT: ret
; CHECK-LABEL: strided_vpload_nxv3f64_allones_mask:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1
; CHECK-NEXT: ret
%one = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
%allones = shufflevector <vscale x 3 x i1> %one, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
%v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %allones, i32 %evl)

The diff of the second changed file is suppressed because it is too large.