[RISCV] Add tests for illegal fixed length vectors that need to be widened

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D148518
Luke Lau 2023-04-14 19:49:41 +01:00
parent 31ec0a6845
commit bd6fa8656a
6 changed files with 3558 additions and 102 deletions
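For context, a minimal sketch of what "illegal fixed length vector" means here (a hypothetical example, not taken from the patch): an element count like 6 is not a power of two, so with -riscv-v-vector-bits-min=128 the type legalizer widens the value to the next legal type, performs the operation at the widened width, and narrows again on the way back to memory:

define void @widen_example(ptr %x) {
  %a = load <6 x i16>, ptr %x    ; <6 x i16> is illegal: widened to <8 x i16>
  %b = add <6 x i16> %a, %a      ; the add runs on the full <8 x i16>
  store <6 x i16> %b, ptr %x     ; only the low 6 lanes are written back
  ret void
}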


@@ -36,6 +36,41 @@ define void @abs_v8i16(ptr %x) {
}
declare <8 x i16> @llvm.abs.v8i16(<8 x i16>, i1)
define void @abs_v6i16(ptr %x) {
; LMULMAX1-RV32-LABEL: abs_v6i16:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV32-NEXT: vle16.v v8, (a0)
; LMULMAX1-RV32-NEXT: vrsub.vi v9, v8, 0
; LMULMAX1-RV32-NEXT: vmax.vv v8, v8, v9
; LMULMAX1-RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1-RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1-RV32-NEXT: addi a1, a0, 8
; LMULMAX1-RV32-NEXT: vse32.v v9, (a1)
; LMULMAX1-RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1-RV32-NEXT: vse16.v v8, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: abs_v6i16:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; LMULMAX1-RV64-NEXT: vle16.v v8, (a0)
; LMULMAX1-RV64-NEXT: vrsub.vi v9, v8, 0
; LMULMAX1-RV64-NEXT: vmax.vv v8, v8, v9
; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX1-RV64-NEXT: vse64.v v8, (a0)
; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1-RV64-NEXT: vslidedown.vi v8, v8, 2
; LMULMAX1-RV64-NEXT: addi a0, a0, 8
; LMULMAX1-RV64-NEXT: vse32.v v8, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = load <6 x i16>, ptr %x
%b = call <6 x i16> @llvm.abs.v6i16(<6 x i16> %a, i1 false)
store <6 x i16> %b, ptr %x
ret void
}
declare <6 x i16> @llvm.abs.v6i16(<6 x i16>, i1)
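The checks above show how the widened <8 x i16> result is written back as two pieces: lanes 0-3 as one 8-byte chunk (a vl=4 vse16 on RV32, a single e64 store on RV64) and lanes 4-5 as one 4-byte chunk at offset 8. Roughly the equivalent IR (a sketch with hypothetical names, not from the patch):

define void @store_v6i16_split(<8 x i16> %widened, ptr %x) {
  %lo = shufflevector <8 x i16> %widened, <8 x i16> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  store <4 x i16> %lo, ptr %x         ; lanes 0-3: 8 bytes at offset 0
  %hi = shufflevector <8 x i16> %widened, <8 x i16> poison, <2 x i32> <i32 4, i32 5>
  %p = getelementptr inbounds i8, ptr %x, i64 8
  store <2 x i16> %hi, ptr %p         ; lanes 4-5: 4 bytes at offset 8
  ret void
}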
define void @abs_v4i32(ptr %x) {
; CHECK-LABEL: abs_v4i32:
; CHECK: # %bb.0:

File diff suppressed because it is too large


@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64
define void @fp2si_v2f32_v2i32(ptr %x, ptr %y) {
; CHECK-LABEL: fp2si_v2f32_v2i32:
@@ -78,6 +78,348 @@ define <2 x i1> @fp2ui_v2f32_v2i1(<2 x float> %x) {
ret <2 x i1> %z
}
define void @fp2si_v3f32_v3i32(ptr %x, ptr %y) {
; LMULMAX8RV32-LABEL: fp2si_v3f32_v3i32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
; LMULMAX8RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV32-NEXT: addi a0, a1, 8
; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: fp2si_v3f32_v3i32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
; LMULMAX8RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV64-NEXT: addi a0, a1, 8
; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: fp2si_v3f32_v3i32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
; LMULMAX1RV32-NEXT: vfcvt.rtz.x.f.v v8, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV32-NEXT: addi a0, a1, 8
; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: fp2si_v3f32_v3i32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
; LMULMAX1RV64-NEXT: vfcvt.rtz.x.f.v v8, v8
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV64-NEXT: addi a0, a1, 8
; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1RV64-NEXT: ret
%a = load <3 x float>, ptr %x
%d = fptosi <3 x float> %a to <3 x i32>
store <3 x i32> %d, ptr %y
ret void
}
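Both the fptosi and fptoui variants of this <3 x float> to <3 x i32> test follow the same shape: the convert runs at the widened vl=4, and the 12-byte store splits into lanes 0-1 (one e64 store on RV64, a 2-element vse32 on RV32) plus lane 2 at byte offset 8. As IR, the split is roughly (a sketch, hypothetical names):

define void @fp2si_v3_widened(<4 x float> %x, ptr %y) {
  %conv = fptosi <4 x float> %x to <4 x i32>   ; converted at the widened width
  %lo = shufflevector <4 x i32> %conv, <4 x i32> poison, <2 x i32> <i32 0, i32 1>
  store <2 x i32> %lo, ptr %y                  ; lanes 0-1: 8 bytes
  %e2 = extractelement <4 x i32> %conv, i32 2
  %p = getelementptr inbounds i8, ptr %y, i64 8
  store i32 %e2, ptr %p                        ; lane 2: 4 bytes at offset 8
  ret void
}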
define void @fp2ui_v3f32_v3i32(ptr %x, ptr %y) {
; LMULMAX8RV32-LABEL: fp2ui_v3f32_v3i32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
; LMULMAX8RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV32-NEXT: addi a0, a1, 8
; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: fp2ui_v3f32_v3i32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
; LMULMAX8RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV64-NEXT: addi a0, a1, 8
; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: fp2ui_v3f32_v3i32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
; LMULMAX1RV32-NEXT: vfcvt.rtz.xu.f.v v8, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV32-NEXT: addi a0, a1, 8
; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: fp2ui_v3f32_v3i32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
; LMULMAX1RV64-NEXT: vfcvt.rtz.xu.f.v v8, v8
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV64-NEXT: addi a0, a1, 8
; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1RV64-NEXT: ret
%a = load <3 x float>, ptr %x
%d = fptoui <3 x float> %a to <3 x i32>
store <3 x i32> %d, ptr %y
ret void
}
define <3 x i1> @fp2si_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2si_v3f32_v3i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%z = fptosi <3 x float> %x to <3 x i1>
ret <3 x i1> %z
}
; FIXME: This is expanded when it could be widened + promoted
define <3 x i15> @fp2si_v3f32_v3i15(<3 x float> %x) {
; LMULMAX8RV32-LABEL: fp2si_v3f32_v3i15:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX8RV32-NEXT: vmv.x.s a1, v8
; LMULMAX8RV32-NEXT: slli a2, a1, 17
; LMULMAX8RV32-NEXT: srli a2, a2, 19
; LMULMAX8RV32-NEXT: sh a2, 4(a0)
; LMULMAX8RV32-NEXT: vmv.x.s a2, v9
; LMULMAX8RV32-NEXT: lui a3, 8
; LMULMAX8RV32-NEXT: addi a3, a3, -1
; LMULMAX8RV32-NEXT: and a2, a2, a3
; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX8RV32-NEXT: vmv.x.s a4, v8
; LMULMAX8RV32-NEXT: and a3, a4, a3
; LMULMAX8RV32-NEXT: slli a3, a3, 15
; LMULMAX8RV32-NEXT: slli a1, a1, 30
; LMULMAX8RV32-NEXT: or a1, a2, a1
; LMULMAX8RV32-NEXT: or a1, a1, a3
; LMULMAX8RV32-NEXT: sw a1, 0(a0)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: fp2si_v3f32_v3i15:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX8RV64-NEXT: vmv.x.s a1, v9
; LMULMAX8RV64-NEXT: lui a2, 8
; LMULMAX8RV64-NEXT: addiw a2, a2, -1
; LMULMAX8RV64-NEXT: and a1, a1, a2
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
; LMULMAX8RV64-NEXT: and a2, a3, a2
; LMULMAX8RV64-NEXT: slli a2, a2, 15
; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
; LMULMAX8RV64-NEXT: slli a3, a3, 30
; LMULMAX8RV64-NEXT: or a1, a1, a3
; LMULMAX8RV64-NEXT: or a1, a1, a2
; LMULMAX8RV64-NEXT: sw a1, 0(a0)
; LMULMAX8RV64-NEXT: slli a1, a1, 19
; LMULMAX8RV64-NEXT: srli a1, a1, 51
; LMULMAX8RV64-NEXT: sh a1, 4(a0)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: fp2si_v3f32_v3i15:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX1RV32-NEXT: vmv.x.s a1, v8
; LMULMAX1RV32-NEXT: slli a2, a1, 17
; LMULMAX1RV32-NEXT: srli a2, a2, 19
; LMULMAX1RV32-NEXT: sh a2, 4(a0)
; LMULMAX1RV32-NEXT: vmv.x.s a2, v9
; LMULMAX1RV32-NEXT: lui a3, 8
; LMULMAX1RV32-NEXT: addi a3, a3, -1
; LMULMAX1RV32-NEXT: and a2, a2, a3
; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX1RV32-NEXT: vmv.x.s a4, v8
; LMULMAX1RV32-NEXT: and a3, a4, a3
; LMULMAX1RV32-NEXT: slli a3, a3, 15
; LMULMAX1RV32-NEXT: slli a1, a1, 30
; LMULMAX1RV32-NEXT: or a1, a2, a1
; LMULMAX1RV32-NEXT: or a1, a1, a3
; LMULMAX1RV32-NEXT: sw a1, 0(a0)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: fp2si_v3f32_v3i15:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1RV64-NEXT: vmv.x.s a1, v9
; LMULMAX1RV64-NEXT: lui a2, 8
; LMULMAX1RV64-NEXT: addiw a2, a2, -1
; LMULMAX1RV64-NEXT: and a1, a1, a2
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
; LMULMAX1RV64-NEXT: and a2, a3, a2
; LMULMAX1RV64-NEXT: slli a2, a2, 15
; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
; LMULMAX1RV64-NEXT: slli a3, a3, 30
; LMULMAX1RV64-NEXT: or a1, a1, a3
; LMULMAX1RV64-NEXT: or a1, a1, a2
; LMULMAX1RV64-NEXT: sw a1, 0(a0)
; LMULMAX1RV64-NEXT: slli a1, a1, 19
; LMULMAX1RV64-NEXT: srli a1, a1, 51
; LMULMAX1RV64-NEXT: sh a1, 4(a0)
; LMULMAX1RV64-NEXT: ret
%z = fptosi <3 x float> %x to <3 x i15>
ret <3 x i15> %z
}
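The scalar tail above is the expansion the FIXME complains about: each 15-bit lane is extracted, masked, and packed into a 45-bit payload (lane 0 at bit 0, lane 1 at bit 15, lane 2 at bit 30) that goes out as an sw plus an sh. The packing arithmetic, as a sketch with a hypothetical name:

define i45 @pack_v3i15(i15 %e0, i15 %e1, i15 %e2) {
  %z0 = zext i15 %e0 to i45
  %z1 = zext i15 %e1 to i45
  %z2 = zext i15 %e2 to i45
  %s1 = shl i45 %z1, 15      ; lane 1 occupies bits 15-29
  %s2 = shl i45 %z2, 30      ; lane 2 occupies bits 30-44
  %t  = or i45 %z0, %s1
  %r  = or i45 %t, %s2
  ret i45 %r
}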
; FIXME: This is expanded when it could be widened + promoted
define <3 x i15> @fp2ui_v3f32_v3i15(<3 x float> %x) {
; LMULMAX8RV32-LABEL: fp2ui_v3f32_v3i15:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX8RV32-NEXT: vmv.x.s a1, v8
; LMULMAX8RV32-NEXT: slli a2, a1, 17
; LMULMAX8RV32-NEXT: srli a2, a2, 19
; LMULMAX8RV32-NEXT: sh a2, 4(a0)
; LMULMAX8RV32-NEXT: vmv.x.s a2, v9
; LMULMAX8RV32-NEXT: lui a3, 16
; LMULMAX8RV32-NEXT: addi a3, a3, -1
; LMULMAX8RV32-NEXT: and a2, a2, a3
; LMULMAX8RV32-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX8RV32-NEXT: vmv.x.s a4, v8
; LMULMAX8RV32-NEXT: and a3, a4, a3
; LMULMAX8RV32-NEXT: slli a3, a3, 15
; LMULMAX8RV32-NEXT: slli a1, a1, 30
; LMULMAX8RV32-NEXT: or a1, a2, a1
; LMULMAX8RV32-NEXT: or a1, a1, a3
; LMULMAX8RV32-NEXT: sw a1, 0(a0)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: fp2ui_v3f32_v3i15:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX8RV64-NEXT: vmv.x.s a1, v9
; LMULMAX8RV64-NEXT: lui a2, 16
; LMULMAX8RV64-NEXT: addiw a2, a2, -1
; LMULMAX8RV64-NEXT: and a1, a1, a2
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
; LMULMAX8RV64-NEXT: and a2, a3, a2
; LMULMAX8RV64-NEXT: slli a2, a2, 15
; LMULMAX8RV64-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX8RV64-NEXT: vmv.x.s a3, v8
; LMULMAX8RV64-NEXT: slli a3, a3, 30
; LMULMAX8RV64-NEXT: or a1, a1, a3
; LMULMAX8RV64-NEXT: or a1, a1, a2
; LMULMAX8RV64-NEXT: sw a1, 0(a0)
; LMULMAX8RV64-NEXT: slli a1, a1, 19
; LMULMAX8RV64-NEXT: srli a1, a1, 51
; LMULMAX8RV64-NEXT: sh a1, 4(a0)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: fp2ui_v3f32_v3i15:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX1RV32-NEXT: vmv.x.s a1, v8
; LMULMAX1RV32-NEXT: slli a2, a1, 17
; LMULMAX1RV32-NEXT: srli a2, a2, 19
; LMULMAX1RV32-NEXT: sh a2, 4(a0)
; LMULMAX1RV32-NEXT: vmv.x.s a2, v9
; LMULMAX1RV32-NEXT: lui a3, 16
; LMULMAX1RV32-NEXT: addi a3, a3, -1
; LMULMAX1RV32-NEXT: and a2, a2, a3
; LMULMAX1RV32-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX1RV32-NEXT: vmv.x.s a4, v8
; LMULMAX1RV32-NEXT: and a3, a4, a3
; LMULMAX1RV32-NEXT: slli a3, a3, 15
; LMULMAX1RV32-NEXT: slli a1, a1, 30
; LMULMAX1RV32-NEXT: or a1, a2, a1
; LMULMAX1RV32-NEXT: or a1, a1, a3
; LMULMAX1RV32-NEXT: sw a1, 0(a0)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: fp2ui_v3f32_v3i15:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vfncvt.rtz.x.f.w v9, v8
; LMULMAX1RV64-NEXT: vmv.x.s a1, v9
; LMULMAX1RV64-NEXT: lui a2, 16
; LMULMAX1RV64-NEXT: addiw a2, a2, -1
; LMULMAX1RV64-NEXT: and a1, a1, a2
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 1
; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
; LMULMAX1RV64-NEXT: and a2, a3, a2
; LMULMAX1RV64-NEXT: slli a2, a2, 15
; LMULMAX1RV64-NEXT: vslidedown.vi v8, v9, 2
; LMULMAX1RV64-NEXT: vmv.x.s a3, v8
; LMULMAX1RV64-NEXT: slli a3, a3, 30
; LMULMAX1RV64-NEXT: or a1, a1, a3
; LMULMAX1RV64-NEXT: or a1, a1, a2
; LMULMAX1RV64-NEXT: sw a1, 0(a0)
; LMULMAX1RV64-NEXT: slli a1, a1, 19
; LMULMAX1RV64-NEXT: srli a1, a1, 51
; LMULMAX1RV64-NEXT: sh a1, 4(a0)
; LMULMAX1RV64-NEXT: ret
%z = fptoui <3 x float> %x to <3 x i15>
ret <3 x i15> %z
}
define <3 x i1> @fp2ui_v3f32_v3i1(<3 x float> %x) {
; CHECK-LABEL: fp2ui_v3f32_v3i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfncvt.rtz.xu.f.w v9, v8
; CHECK-NEXT: vand.vi v8, v9, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
%z = fptoui <3 x float> %x to <3 x i1>
ret <3 x i1> %z
}
define void @fp2si_v8f32_v8i32(ptr %x, ptr %y) {
; LMULMAX8-LABEL: fp2si_v8f32_v8i32:
; LMULMAX8: # %bb.0:


@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=8 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX8,LMULMAX8RV64
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -riscv-v-fixed-length-vector-lmul-max=1 -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,LMULMAX1,LMULMAX1RV64
define void @si2fp_v2i32_v2f32(ptr %x, ptr %y) {
; CHECK-LABEL: si2fp_v2i32_v2f32:
@@ -84,6 +84,324 @@ define <2 x float> @ui2fp_v2i1_v2f32(<2 x i1> %x) {
ret <2 x float> %z
}
define void @si2fp_v3i32_v3f32(ptr %x, ptr %y) {
; LMULMAX8RV32-LABEL: si2fp_v3i32_v3f32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
; LMULMAX8RV32-NEXT: vfcvt.f.x.v v8, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV32-NEXT: addi a0, a1, 8
; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: si2fp_v3i32_v3f32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
; LMULMAX8RV64-NEXT: vfcvt.f.x.v v8, v8
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV64-NEXT: addi a0, a1, 8
; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: si2fp_v3i32_v3f32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
; LMULMAX1RV32-NEXT: vfcvt.f.x.v v8, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV32-NEXT: addi a0, a1, 8
; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: si2fp_v3i32_v3f32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
; LMULMAX1RV64-NEXT: vfcvt.f.x.v v8, v8
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV64-NEXT: addi a0, a1, 8
; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1RV64-NEXT: ret
%a = load <3 x i32>, ptr %x
%d = sitofp <3 x i32> %a to <3 x float>
store <3 x float> %d, ptr %y
ret void
}
define void @ui2fp_v3i32_v3f32(ptr %x, ptr %y) {
; LMULMAX8RV32-LABEL: ui2fp_v3i32_v3f32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vle32.v v8, (a0)
; LMULMAX8RV32-NEXT: vfcvt.f.xu.v v8, v8
; LMULMAX8RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV32-NEXT: addi a0, a1, 8
; LMULMAX8RV32-NEXT: vse32.v v9, (a0)
; LMULMAX8RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX8RV32-NEXT: vse32.v v8, (a1)
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: ui2fp_v3i32_v3f32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vle32.v v8, (a0)
; LMULMAX8RV64-NEXT: vfcvt.f.xu.v v8, v8
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX8RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX8RV64-NEXT: addi a0, a1, 8
; LMULMAX8RV64-NEXT: vse32.v v9, (a0)
; LMULMAX8RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX8RV64-NEXT: vse64.v v8, (a1)
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: ui2fp_v3i32_v3f32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vle32.v v8, (a0)
; LMULMAX1RV32-NEXT: vfcvt.f.xu.v v8, v8
; LMULMAX1RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV32-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV32-NEXT: addi a0, a1, 8
; LMULMAX1RV32-NEXT: vse32.v v9, (a0)
; LMULMAX1RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1RV32-NEXT: vse32.v v8, (a1)
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: ui2fp_v3i32_v3f32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vle32.v v8, (a0)
; LMULMAX1RV64-NEXT: vfcvt.f.xu.v v8, v8
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; LMULMAX1RV64-NEXT: vslidedown.vi v9, v8, 2
; LMULMAX1RV64-NEXT: addi a0, a1, 8
; LMULMAX1RV64-NEXT: vse32.v v9, (a0)
; LMULMAX1RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; LMULMAX1RV64-NEXT: vse64.v v8, (a1)
; LMULMAX1RV64-NEXT: ret
%a = load <3 x i32>, ptr %x
%d = uitofp <3 x i32> %a to <3 x float>
store <3 x float> %d, ptr %y
ret void
}
define <3 x float> @si2fp_v3i1_v3f32(<3 x i1> %x) {
; CHECK-LABEL: si2fp_v3i1_v3f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v9, v8, -1, v0
; CHECK-NEXT: vfwcvt.f.x.v v8, v9
; CHECK-NEXT: ret
%z = sitofp <3 x i1> %x to <3 x float>
ret <3 x float> %z
}
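The i1 cases stay compact: the mask selects between splat(-1) (splat(1) in the unsigned test further down) and splat(0) at e16, and one widening convert produces the floats. As IR (a sketch, hypothetical name):

define <3 x float> @si2fp_v3i1_sketch(<3 x i1> %x) {
  %ext = select <3 x i1> %x, <3 x i16> <i16 -1, i16 -1, i16 -1>, <3 x i16> zeroinitializer
  %z = sitofp <3 x i16> %ext to <3 x float>   ; the vfwcvt.f.x.v above
  ret <3 x float> %z
}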
; FIXME: This gets expanded instead of widened + promoted
define <3 x float> @si2fp_v3i7_v3f32(<3 x i7> %x) {
; LMULMAX8RV32-LABEL: si2fp_v3i7_v3f32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: addi sp, sp, -16
; LMULMAX8RV32-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8RV32-NEXT: lw a1, 8(a0)
; LMULMAX8RV32-NEXT: sb a1, 14(sp)
; LMULMAX8RV32-NEXT: lw a1, 4(a0)
; LMULMAX8RV32-NEXT: sb a1, 13(sp)
; LMULMAX8RV32-NEXT: lw a0, 0(a0)
; LMULMAX8RV32-NEXT: sb a0, 12(sp)
; LMULMAX8RV32-NEXT: addi a0, sp, 12
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX8RV32-NEXT: vle8.v v8, (a0)
; LMULMAX8RV32-NEXT: vadd.vv v8, v8, v8
; LMULMAX8RV32-NEXT: vsra.vi v8, v8, 1
; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vsext.vf2 v9, v8
; LMULMAX8RV32-NEXT: vfwcvt.f.x.v v8, v9
; LMULMAX8RV32-NEXT: addi sp, sp, 16
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: si2fp_v3i7_v3f32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: addi sp, sp, -16
; LMULMAX8RV64-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8RV64-NEXT: ld a1, 16(a0)
; LMULMAX8RV64-NEXT: sb a1, 14(sp)
; LMULMAX8RV64-NEXT: ld a1, 8(a0)
; LMULMAX8RV64-NEXT: sb a1, 13(sp)
; LMULMAX8RV64-NEXT: ld a0, 0(a0)
; LMULMAX8RV64-NEXT: sb a0, 12(sp)
; LMULMAX8RV64-NEXT: addi a0, sp, 12
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX8RV64-NEXT: vle8.v v8, (a0)
; LMULMAX8RV64-NEXT: vadd.vv v8, v8, v8
; LMULMAX8RV64-NEXT: vsra.vi v8, v8, 1
; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vsext.vf2 v9, v8
; LMULMAX8RV64-NEXT: vfwcvt.f.x.v v8, v9
; LMULMAX8RV64-NEXT: addi sp, sp, 16
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: si2fp_v3i7_v3f32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: addi sp, sp, -16
; LMULMAX1RV32-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1RV32-NEXT: lw a1, 8(a0)
; LMULMAX1RV32-NEXT: sb a1, 14(sp)
; LMULMAX1RV32-NEXT: lw a1, 4(a0)
; LMULMAX1RV32-NEXT: sb a1, 13(sp)
; LMULMAX1RV32-NEXT: lw a0, 0(a0)
; LMULMAX1RV32-NEXT: sb a0, 12(sp)
; LMULMAX1RV32-NEXT: addi a0, sp, 12
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1RV32-NEXT: vle8.v v8, (a0)
; LMULMAX1RV32-NEXT: vadd.vv v8, v8, v8
; LMULMAX1RV32-NEXT: vsra.vi v8, v8, 1
; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vsext.vf2 v9, v8
; LMULMAX1RV32-NEXT: vfwcvt.f.x.v v8, v9
; LMULMAX1RV32-NEXT: addi sp, sp, 16
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: si2fp_v3i7_v3f32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: addi sp, sp, -16
; LMULMAX1RV64-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1RV64-NEXT: ld a1, 16(a0)
; LMULMAX1RV64-NEXT: sb a1, 14(sp)
; LMULMAX1RV64-NEXT: ld a1, 8(a0)
; LMULMAX1RV64-NEXT: sb a1, 13(sp)
; LMULMAX1RV64-NEXT: ld a0, 0(a0)
; LMULMAX1RV64-NEXT: sb a0, 12(sp)
; LMULMAX1RV64-NEXT: addi a0, sp, 12
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1RV64-NEXT: vle8.v v8, (a0)
; LMULMAX1RV64-NEXT: vadd.vv v8, v8, v8
; LMULMAX1RV64-NEXT: vsra.vi v8, v8, 1
; LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vsext.vf2 v9, v8
; LMULMAX1RV64-NEXT: vfwcvt.f.x.v v8, v9
; LMULMAX1RV64-NEXT: addi sp, sp, 16
; LMULMAX1RV64-NEXT: ret
%z = sitofp <3 x i7> %x to <3 x float>
ret <3 x float> %z
}
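The vadd.vv/vsra.vi pair in these checks is an in-lane sign extension of the 7-bit payload: doubling the byte shifts bit 6 into the i8 sign position, and the arithmetic shift right replicates it back down. As IR (a sketch, hypothetical name):

define <4 x i8> @sext_i7_in_i8(<4 x i8> %v) {
  %shl = shl <4 x i8> %v, <i8 1, i8 1, i8 1, i8 1>    ; vadd.vv v8, v8, v8
  %sra = ashr <4 x i8> %shl, <i8 1, i8 1, i8 1, i8 1> ; vsra.vi v8, v8, 1
  ret <4 x i8> %sra
}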
; FIXME: This gets expanded instead of widened + promoted
define <3 x float> @ui2fp_v3i7_v3f32(<3 x i7> %x) {
; LMULMAX8RV32-LABEL: ui2fp_v3i7_v3f32:
; LMULMAX8RV32: # %bb.0:
; LMULMAX8RV32-NEXT: addi sp, sp, -16
; LMULMAX8RV32-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8RV32-NEXT: lw a1, 8(a0)
; LMULMAX8RV32-NEXT: sb a1, 14(sp)
; LMULMAX8RV32-NEXT: lw a1, 4(a0)
; LMULMAX8RV32-NEXT: sb a1, 13(sp)
; LMULMAX8RV32-NEXT: lw a0, 0(a0)
; LMULMAX8RV32-NEXT: sb a0, 12(sp)
; LMULMAX8RV32-NEXT: addi a0, sp, 12
; LMULMAX8RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX8RV32-NEXT: vle8.v v8, (a0)
; LMULMAX8RV32-NEXT: li a0, 127
; LMULMAX8RV32-NEXT: vand.vx v8, v8, a0
; LMULMAX8RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX8RV32-NEXT: vzext.vf2 v9, v8
; LMULMAX8RV32-NEXT: vfwcvt.f.xu.v v8, v9
; LMULMAX8RV32-NEXT: addi sp, sp, 16
; LMULMAX8RV32-NEXT: ret
;
; LMULMAX8RV64-LABEL: ui2fp_v3i7_v3f32:
; LMULMAX8RV64: # %bb.0:
; LMULMAX8RV64-NEXT: addi sp, sp, -16
; LMULMAX8RV64-NEXT: .cfi_def_cfa_offset 16
; LMULMAX8RV64-NEXT: ld a1, 16(a0)
; LMULMAX8RV64-NEXT: sb a1, 14(sp)
; LMULMAX8RV64-NEXT: ld a1, 8(a0)
; LMULMAX8RV64-NEXT: sb a1, 13(sp)
; LMULMAX8RV64-NEXT: ld a0, 0(a0)
; LMULMAX8RV64-NEXT: sb a0, 12(sp)
; LMULMAX8RV64-NEXT: addi a0, sp, 12
; LMULMAX8RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX8RV64-NEXT: vle8.v v8, (a0)
; LMULMAX8RV64-NEXT: li a0, 127
; LMULMAX8RV64-NEXT: vand.vx v8, v8, a0
; LMULMAX8RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX8RV64-NEXT: vzext.vf2 v9, v8
; LMULMAX8RV64-NEXT: vfwcvt.f.xu.v v8, v9
; LMULMAX8RV64-NEXT: addi sp, sp, 16
; LMULMAX8RV64-NEXT: ret
;
; LMULMAX1RV32-LABEL: ui2fp_v3i7_v3f32:
; LMULMAX1RV32: # %bb.0:
; LMULMAX1RV32-NEXT: addi sp, sp, -16
; LMULMAX1RV32-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1RV32-NEXT: lw a1, 8(a0)
; LMULMAX1RV32-NEXT: sb a1, 14(sp)
; LMULMAX1RV32-NEXT: lw a1, 4(a0)
; LMULMAX1RV32-NEXT: sb a1, 13(sp)
; LMULMAX1RV32-NEXT: lw a0, 0(a0)
; LMULMAX1RV32-NEXT: sb a0, 12(sp)
; LMULMAX1RV32-NEXT: addi a0, sp, 12
; LMULMAX1RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1RV32-NEXT: vle8.v v8, (a0)
; LMULMAX1RV32-NEXT: li a0, 127
; LMULMAX1RV32-NEXT: vand.vx v8, v8, a0
; LMULMAX1RV32-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1RV32-NEXT: vzext.vf2 v9, v8
; LMULMAX1RV32-NEXT: vfwcvt.f.xu.v v8, v9
; LMULMAX1RV32-NEXT: addi sp, sp, 16
; LMULMAX1RV32-NEXT: ret
;
; LMULMAX1RV64-LABEL: ui2fp_v3i7_v3f32:
; LMULMAX1RV64: # %bb.0:
; LMULMAX1RV64-NEXT: addi sp, sp, -16
; LMULMAX1RV64-NEXT: .cfi_def_cfa_offset 16
; LMULMAX1RV64-NEXT: ld a1, 16(a0)
; LMULMAX1RV64-NEXT: sb a1, 14(sp)
; LMULMAX1RV64-NEXT: ld a1, 8(a0)
; LMULMAX1RV64-NEXT: sb a1, 13(sp)
; LMULMAX1RV64-NEXT: ld a0, 0(a0)
; LMULMAX1RV64-NEXT: sb a0, 12(sp)
; LMULMAX1RV64-NEXT: addi a0, sp, 12
; LMULMAX1RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; LMULMAX1RV64-NEXT: vle8.v v8, (a0)
; LMULMAX1RV64-NEXT: li a0, 127
; LMULMAX1RV64-NEXT: vand.vx v8, v8, a0
; LMULMAX1RV64-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; LMULMAX1RV64-NEXT: vzext.vf2 v9, v8
; LMULMAX1RV64-NEXT: vfwcvt.f.xu.v v8, v9
; LMULMAX1RV64-NEXT: addi sp, sp, 16
; LMULMAX1RV64-NEXT: ret
%z = uitofp <3 x i7> %x to <3 x float>
ret <3 x float> %z
}
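The unsigned variant needs no shift pair: a single vand.vx with 127 clears bit 7 (an `and` with splat(127) in IR terms) before the vzext.vf2/vfwcvt.f.xu chain.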
define <3 x float> @ui2fp_v3i1_v3f32(<3 x i1> %x) {
; CHECK-LABEL: ui2fp_v3i1_v3f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v9, v8, 1, v0
; CHECK-NEXT: vfwcvt.f.xu.v v8, v9
; CHECK-NEXT: ret
%z = uitofp <3 x i1> %x to <3 x float>
ret <3 x float> %z
}
define void @si2fp_v8i32_v8f32(ptr %x, ptr %y) {
; LMULMAX8-LABEL: si2fp_v8i32_v8f32:
; LMULMAX8: # %bb.0:

File diff suppressed because it is too large


@@ -1,6 +1,521 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+experimental-zvfh,+f,+d -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64
define void @vselect_vv_v6i32(ptr %a, ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vv_v6i32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a2, 0(a2)
; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: srli a1, a2, 5
; RV32-NEXT: sb a1, 13(sp)
; RV32-NEXT: andi a1, a2, 1
; RV32-NEXT: sb a1, 8(sp)
; RV32-NEXT: slli a1, a2, 27
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 12(sp)
; RV32-NEXT: slli a1, a2, 28
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 11(sp)
; RV32-NEXT: slli a1, a2, 29
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 10(sp)
; RV32-NEXT: slli a2, a2, 30
; RV32-NEXT: srli a2, a2, 31
; RV32-NEXT: sb a2, 9(sp)
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vle8.v v10, (a1)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; RV32-NEXT: vle32.v v8, (a0), v0.t
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a3, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a3)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vv_v6i32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a2, 0(a2)
; RV64-NEXT: vle32.v v8, (a1)
; RV64-NEXT: srli a1, a2, 5
; RV64-NEXT: sb a1, 13(sp)
; RV64-NEXT: andi a1, a2, 1
; RV64-NEXT: sb a1, 8(sp)
; RV64-NEXT: slli a1, a2, 59
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 12(sp)
; RV64-NEXT: slli a1, a2, 60
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 11(sp)
; RV64-NEXT: slli a1, a2, 61
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 10(sp)
; RV64-NEXT: slli a2, a2, 62
; RV64-NEXT: srli a2, a2, 63
; RV64-NEXT: sb a2, 9(sp)
; RV64-NEXT: addi a1, sp, 8
; RV64-NEXT: vle8.v v10, (a1)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; RV64-NEXT: vle32.v v8, (a0), v0.t
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a3, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a3)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%va = load <6 x i32>, ptr %a
%vb = load <6 x i32>, ptr %b
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb
store <6 x i32> %vsel, ptr %z
ret void
}
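A <6 x i1> condition is not byte-addressable, so each of these vselect tests loads the mask byte, extracts the six bits with scalar shift pairs, spills them as bytes to the stack, and reloads them as a small i8 vector to build v0. Per lane the extraction amounts to (a sketch, hypothetical name):

define i8 @extract_mask_bit(i8 %byte, i8 %lane) {
  %sh  = lshr i8 %byte, %lane   ; the slli/srli (or srli/andi) pairs above
  %bit = and i8 %sh, 1          ; cleaned up again by vand.vi before vmsne.vi
  ret i8 %bit
}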
define void @vselect_vx_v6i32(i32 %a, ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vx_v6i32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a2, 0(a2)
; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: srli a1, a2, 5
; RV32-NEXT: sb a1, 13(sp)
; RV32-NEXT: andi a1, a2, 1
; RV32-NEXT: sb a1, 8(sp)
; RV32-NEXT: slli a1, a2, 27
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 12(sp)
; RV32-NEXT: slli a1, a2, 28
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 11(sp)
; RV32-NEXT: slli a1, a2, 29
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 10(sp)
; RV32-NEXT: slli a2, a2, 30
; RV32-NEXT: srli a2, a2, 31
; RV32-NEXT: sb a2, 9(sp)
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vle8.v v10, (a1)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vmerge.vxm v8, v8, a0, v0
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a3, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a3)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vx_v6i32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a2, 0(a2)
; RV64-NEXT: vle32.v v8, (a1)
; RV64-NEXT: srli a1, a2, 5
; RV64-NEXT: sb a1, 13(sp)
; RV64-NEXT: andi a1, a2, 1
; RV64-NEXT: sb a1, 8(sp)
; RV64-NEXT: slli a1, a2, 59
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 12(sp)
; RV64-NEXT: slli a1, a2, 60
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 11(sp)
; RV64-NEXT: slli a1, a2, 61
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 10(sp)
; RV64-NEXT: slli a2, a2, 62
; RV64-NEXT: srli a2, a2, 63
; RV64-NEXT: sb a2, 9(sp)
; RV64-NEXT: addi a1, sp, 8
; RV64-NEXT: vle8.v v10, (a1)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vmerge.vxm v8, v8, a0, v0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a3, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a3)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%vb = load <6 x i32>, ptr %b
%ahead = insertelement <6 x i32> poison, i32 %a, i32 0
%va = shufflevector <6 x i32> %ahead, <6 x i32> poison, <6 x i32> zeroinitializer
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb
store <6 x i32> %vsel, ptr %z
ret void
}
define void @vselect_vi_v6i32(ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vi_v6i32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a1, 0(a1)
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: srli a0, a1, 5
; RV32-NEXT: sb a0, 13(sp)
; RV32-NEXT: andi a0, a1, 1
; RV32-NEXT: sb a0, 8(sp)
; RV32-NEXT: slli a0, a1, 27
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 12(sp)
; RV32-NEXT: slli a0, a1, 28
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 11(sp)
; RV32-NEXT: slli a0, a1, 29
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 10(sp)
; RV32-NEXT: slli a1, a1, 30
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 9(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vle8.v v10, (a0)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vmerge.vim v8, v8, -1, v0
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a2, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a2)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vi_v6i32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a1, 0(a1)
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: srli a0, a1, 5
; RV64-NEXT: sb a0, 13(sp)
; RV64-NEXT: andi a0, a1, 1
; RV64-NEXT: sb a0, 8(sp)
; RV64-NEXT: slli a0, a1, 59
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 12(sp)
; RV64-NEXT: slli a0, a1, 60
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 11(sp)
; RV64-NEXT: slli a0, a1, 61
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 10(sp)
; RV64-NEXT: slli a1, a1, 62
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 9(sp)
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: vle8.v v10, (a0)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vmerge.vim v8, v8, -1, v0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a2, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a2)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%vb = load <6 x i32>, ptr %b
%a = insertelement <6 x i32> poison, i32 -1, i32 0
%va = shufflevector <6 x i32> %a, <6 x i32> poison, <6 x i32> zeroinitializer
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x i32> %va, <6 x i32> %vb
store <6 x i32> %vsel, ptr %z
ret void
}
define void @vselect_vv_v6f32(ptr %a, ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vv_v6f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a2, 0(a2)
; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: srli a1, a2, 5
; RV32-NEXT: sb a1, 13(sp)
; RV32-NEXT: andi a1, a2, 1
; RV32-NEXT: sb a1, 8(sp)
; RV32-NEXT: slli a1, a2, 27
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 12(sp)
; RV32-NEXT: slli a1, a2, 28
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 11(sp)
; RV32-NEXT: slli a1, a2, 29
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 10(sp)
; RV32-NEXT: slli a2, a2, 30
; RV32-NEXT: srli a2, a2, 31
; RV32-NEXT: sb a2, 9(sp)
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vle8.v v10, (a1)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; RV32-NEXT: vle32.v v8, (a0), v0.t
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a3, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a3)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vv_v6f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a2, 0(a2)
; RV64-NEXT: vle32.v v8, (a1)
; RV64-NEXT: srli a1, a2, 5
; RV64-NEXT: sb a1, 13(sp)
; RV64-NEXT: andi a1, a2, 1
; RV64-NEXT: sb a1, 8(sp)
; RV64-NEXT: slli a1, a2, 59
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 12(sp)
; RV64-NEXT: slli a1, a2, 60
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 11(sp)
; RV64-NEXT: slli a1, a2, 61
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 10(sp)
; RV64-NEXT: slli a2, a2, 62
; RV64-NEXT: srli a2, a2, 63
; RV64-NEXT: sb a2, 9(sp)
; RV64-NEXT: addi a1, sp, 8
; RV64-NEXT: vle8.v v10, (a1)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; RV64-NEXT: vle32.v v8, (a0), v0.t
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a3, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a3)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%va = load <6 x float>, ptr %a
%vb = load <6 x float>, ptr %b
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb
store <6 x float> %vsel, ptr %z
ret void
}
define void @vselect_vx_v6f32(float %a, ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vx_v6f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a1, 0(a1)
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: srli a0, a1, 5
; RV32-NEXT: sb a0, 13(sp)
; RV32-NEXT: andi a0, a1, 1
; RV32-NEXT: sb a0, 8(sp)
; RV32-NEXT: slli a0, a1, 27
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 12(sp)
; RV32-NEXT: slli a0, a1, 28
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 11(sp)
; RV32-NEXT: slli a0, a1, 29
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 10(sp)
; RV32-NEXT: slli a1, a1, 30
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 9(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vle8.v v10, (a0)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vfmerge.vfm v8, v8, fa0, v0
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a2, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a2)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vx_v6f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a1, 0(a1)
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: srli a0, a1, 5
; RV64-NEXT: sb a0, 13(sp)
; RV64-NEXT: andi a0, a1, 1
; RV64-NEXT: sb a0, 8(sp)
; RV64-NEXT: slli a0, a1, 59
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 12(sp)
; RV64-NEXT: slli a0, a1, 60
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 11(sp)
; RV64-NEXT: slli a0, a1, 61
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 10(sp)
; RV64-NEXT: slli a1, a1, 62
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 9(sp)
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: vle8.v v10, (a0)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vfmerge.vfm v8, v8, fa0, v0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a2, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a2)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%vb = load <6 x float>, ptr %b
%ahead = insertelement <6 x float> poison, float %a, i32 0
%va = shufflevector <6 x float> %ahead, <6 x float> poison, <6 x i32> zeroinitializer
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb
store <6 x float> %vsel, ptr %z
ret void
}
define void @vselect_vfpzero_v6f32(ptr %b, ptr %cc, ptr %z) {
; RV32-LABEL: vselect_vfpzero_v6f32:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT: lbu a1, 0(a1)
; RV32-NEXT: vle32.v v8, (a0)
; RV32-NEXT: srli a0, a1, 5
; RV32-NEXT: sb a0, 13(sp)
; RV32-NEXT: andi a0, a1, 1
; RV32-NEXT: sb a0, 8(sp)
; RV32-NEXT: slli a0, a1, 27
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 12(sp)
; RV32-NEXT: slli a0, a1, 28
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 11(sp)
; RV32-NEXT: slli a0, a1, 29
; RV32-NEXT: srli a0, a0, 31
; RV32-NEXT: sb a0, 10(sp)
; RV32-NEXT: slli a1, a1, 30
; RV32-NEXT: srli a1, a1, 31
; RV32-NEXT: sb a1, 9(sp)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vle8.v v10, (a0)
; RV32-NEXT: vand.vi v10, v10, 1
; RV32-NEXT: vmsne.vi v0, v10, 0
; RV32-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT: vmerge.vim v8, v8, 0, v0
; RV32-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v10, v8, 4
; RV32-NEXT: addi a0, a2, 16
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vse32.v v10, (a0)
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a2)
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vselect_vfpzero_v6f32:
; RV64: # %bb.0:
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; RV64-NEXT: lbu a1, 0(a1)
; RV64-NEXT: vle32.v v8, (a0)
; RV64-NEXT: srli a0, a1, 5
; RV64-NEXT: sb a0, 13(sp)
; RV64-NEXT: andi a0, a1, 1
; RV64-NEXT: sb a0, 8(sp)
; RV64-NEXT: slli a0, a1, 59
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 12(sp)
; RV64-NEXT: slli a0, a1, 60
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 11(sp)
; RV64-NEXT: slli a0, a1, 61
; RV64-NEXT: srli a0, a0, 63
; RV64-NEXT: sb a0, 10(sp)
; RV64-NEXT: slli a1, a1, 62
; RV64-NEXT: srli a1, a1, 63
; RV64-NEXT: sb a1, 9(sp)
; RV64-NEXT: addi a0, sp, 8
; RV64-NEXT: vle8.v v10, (a0)
; RV64-NEXT: vand.vi v10, v10, 1
; RV64-NEXT: vmsne.vi v0, v10, 0
; RV64-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT: vmerge.vim v8, v8, 0, v0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v10, v8, 2
; RV64-NEXT: addi a0, a2, 16
; RV64-NEXT: vse64.v v10, (a0)
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a2)
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%vb = load <6 x float>, ptr %b
%a = insertelement <6 x float> poison, float 0.0, i32 0
%va = shufflevector <6 x float> %a, <6 x float> poison, <6 x i32> zeroinitializer
%vcc = load <6 x i1>, ptr %cc
%vsel = select <6 x i1> %vcc, <6 x float> %va, <6 x float> %vb
store <6 x float> %vsel, ptr %z
ret void
}
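Note that the 0.0 splat never touches an FP register: because +0.0 has an all-zero bit pattern, the select is lowered as an integer vmerge.vim with immediate 0 instead of a vfmerge.vfm.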
define void @vselect_vv_v8i32(ptr %a, ptr %b, ptr %cc, ptr %z) {
; CHECK-LABEL: vselect_vv_v8i32: