[AArch64][SVE] Add SVE intrinsics for masked loads & stores
Summary:
Implements the following intrinsics for contiguous loads & stores:
  - @llvm.aarch64.sve.ld1
  - @llvm.aarch64.sve.st1

Reviewers: sdesmalen, andwar, efriedma, cameron.mcinally, dancgr, rengolin

Reviewed By: cameron.mcinally

Subscribers: tschuett, kristof.beyls, hiraditya, rkruppe, psnobl, danielkiss, cfe-commits, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D76688
parent 644dc31c55
commit 1b630dbb38
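
For orientation, here is a minimal LLVM IR sketch (not part of the patch) showing the two new intrinsics used together as a predicated copy; the signatures follow the nxv4i32 overloads exercised by the test added below, and the function name is only illustrative.

; Hypothetical example: predicated copy of one <vscale x 4 x i32> vector
; using the new contiguous load/store intrinsics. Inactive lanes are
; neither loaded nor stored.
define void @copy_nxv4i32(<vscale x 4 x i1> %pred, i32* %src, i32* %dst) {
  %v = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred, i32* %src)
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %v, <vscale x 4 x i1> %pred, i32* %dst)
  ret void
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)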
@@ -1282,6 +1282,8 @@ class SVE_gather_prf_vector_base_scalar_offset
 // Loads
 //
 
+def int_aarch64_sve_ld1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
+
 def int_aarch64_sve_ldnt1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldnf1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
@@ -1290,6 +1292,8 @@ def int_aarch64_sve_ldff1 : AdvSIMD_1Vec_PredLoad_Intrinsic;
 // Stores
 //
 
+def int_aarch64_sve_st1 : AdvSIMD_1Vec_PredStore_Intrinsic;
+
 def int_aarch64_sve_stnt1 : AdvSIMD_1Vec_PredStore_Intrinsic;
 
 //
@@ -8978,6 +8978,7 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.align = Align(16);
     Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MOVolatile;
     return true;
+  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(1)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -8985,9 +8986,12 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.ptrVal = I.getArgOperand(1);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOLoad;
+    if (Intrinsic == Intrinsic::aarch64_sve_ldnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
+  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1: {
     PointerType *PtrTy = cast<PointerType>(I.getArgOperand(2)->getType());
     Info.opc = ISD::INTRINSIC_W_CHAIN;
@@ -8995,7 +8999,9 @@ bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
     Info.ptrVal = I.getArgOperand(2);
     Info.offset = 0;
     Info.align = MaybeAlign(DL.getABITypeAlignment(PtrTy->getElementType()));
-    Info.flags = MachineMemOperand::MOStore | MachineMemOperand::MONonTemporal;
+    Info.flags = MachineMemOperand::MOStore;
+    if (Intrinsic == Intrinsic::aarch64_sve_stnt1)
+      Info.flags |= MachineMemOperand::MONonTemporal;
     return true;
   }
   default:
@@ -11514,7 +11520,7 @@ static MVT getSVEContainerType(EVT ContentTy) {
   }
 }
 
-static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performLD1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
   EVT VT = N->getValueType(0);
   EVT PtrTy = N->getOperand(3).getValueType();
@@ -11539,7 +11545,7 @@ static SDValue performLDNT1Combine(SDNode *N, SelectionDAG &DAG) {
   return L;
 }
 
-static SDValue performSTNT1Combine(SDNode *N, SelectionDAG &DAG) {
+static SDValue performST1Combine(SDNode *N, SelectionDAG &DAG) {
   SDLoc DL(N);
 
   SDValue Data = N->getOperand(2);
@@ -13130,8 +13136,9 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   case Intrinsic::aarch64_neon_st3lane:
   case Intrinsic::aarch64_neon_st4lane:
     return performNEONPostLDSTCombine(N, DCI, DAG);
+  case Intrinsic::aarch64_sve_ld1:
   case Intrinsic::aarch64_sve_ldnt1:
-    return performLDNT1Combine(N, DAG);
+    return performLD1Combine(N, DAG);
   case Intrinsic::aarch64_sve_ldnt1_gather_scalar_offset:
     return performGatherLoadCombine(N, DAG, AArch64ISD::GLDNT1);
   case Intrinsic::aarch64_sve_ldnt1_gather:
@@ -13144,8 +13151,9 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
     return performLDNF1Combine(N, DAG, AArch64ISD::LDNF1);
   case Intrinsic::aarch64_sve_ldff1:
     return performLDNF1Combine(N, DAG, AArch64ISD::LDFF1);
+  case Intrinsic::aarch64_sve_st1:
   case Intrinsic::aarch64_sve_stnt1:
-    return performSTNT1Combine(N, DAG);
+    return performST1Combine(N, DAG);
   case Intrinsic::aarch64_sve_stnt1_scatter_scalar_offset:
     return performScatterStoreCombine(N, DAG, AArch64ISD::SSTNT1);
   case Intrinsic::aarch64_sve_stnt1_scatter_uxtw:
test/CodeGen/AArch64/sve-intrinsics-ldst1.ll (new file, 182 lines)
@@ -0,0 +1,182 @@
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s

;
; LD1B
;

define <vscale x 16 x i8> @ld1b_i8(<vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: ld1b_i8:
; CHECK: ld1b { z0.b }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1> %pred,
                                                               i8* %addr)
  ret <vscale x 16 x i8> %res
}

;
; LD1H
;

define <vscale x 8 x i16> @ld1h_i16(<vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: ld1h_i16:
; CHECK: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1> %pred,
                                                               i16* %addr)
  ret <vscale x 8 x i16> %res
}

define <vscale x 8 x half> @ld1h_f16(<vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: ld1h_f16:
; CHECK: ld1h { z0.h }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1> %pred,
                                                                half* %addr)
  ret <vscale x 8 x half> %res
}

;
; LD1W
;

define <vscale x 4 x i32> @ld1w_i32(<vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: ld1w_i32:
; CHECK: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1> %pred,
                                                               i32* %addr)
  ret <vscale x 4 x i32> %res
}

define <vscale x 4 x float> @ld1w_f32(<vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: ld1w_f32:
; CHECK: ld1w { z0.s }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1> %pred,
                                                                 float* %addr)
  ret <vscale x 4 x float> %res
}

;
; LD1D
;

define <vscale x 2 x i64> @ld1d_i64(<vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: ld1d_i64:
; CHECK: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1> %pred,
                                                               i64* %addr)
  ret <vscale x 2 x i64> %res
}

define <vscale x 2 x double> @ld1d_f64(<vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: ld1d_f64:
; CHECK: ld1d { z0.d }, p0/z, [x0]
; CHECK-NEXT: ret
  %res = call <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1> %pred,
                                                                  double* %addr)
  ret <vscale x 2 x double> %res
}

;
; ST1B
;

define void @st1b_i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %pred, i8* %addr) {
; CHECK-LABEL: st1b_i8:
; CHECK: st1b { z0.b }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8> %data,
                                          <vscale x 16 x i1> %pred,
                                          i8* %addr)
  ret void
}

;
; ST1H
;

define void @st1h_i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %pred, i16* %addr) {
; CHECK-LABEL: st1h_i16:
; CHECK: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16> %data,
                                          <vscale x 8 x i1> %pred,
                                          i16* %addr)
  ret void
}

define void @st1h_f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %pred, half* %addr) {
; CHECK-LABEL: st1h_f16:
; CHECK: st1h { z0.h }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half> %data,
                                          <vscale x 8 x i1> %pred,
                                          half* %addr)
  ret void
}

;
; ST1W
;

define void @st1w_i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %pred, i32* %addr) {
; CHECK-LABEL: st1w_i32:
; CHECK: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32> %data,
                                          <vscale x 4 x i1> %pred,
                                          i32* %addr)
  ret void
}

define void @st1w_f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %pred, float* %addr) {
; CHECK-LABEL: st1w_f32:
; CHECK: st1w { z0.s }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float> %data,
                                          <vscale x 4 x i1> %pred,
                                          float* %addr)
  ret void
}

;
; ST1D
;

define void @st1d_i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %pred, i64* %addr) {
; CHECK-LABEL: st1d_i64:
; CHECK: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64> %data,
                                          <vscale x 2 x i1> %pred,
                                          i64* %addr)
  ret void
}

define void @st1d_f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %pred, double* %addr) {
; CHECK-LABEL: st1d_f64:
; CHECK: st1d { z0.d }, p0, [x0]
; CHECK-NEXT: ret
  call void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double> %data,
                                          <vscale x 2 x i1> %pred,
                                          double* %addr)
  ret void
}

declare <vscale x 16 x i8> @llvm.aarch64.sve.ld1.nxv16i8(<vscale x 16 x i1>, i8*)
declare <vscale x 8 x i16> @llvm.aarch64.sve.ld1.nxv8i16(<vscale x 8 x i1>, i16*)
declare <vscale x 4 x i32> @llvm.aarch64.sve.ld1.nxv4i32(<vscale x 4 x i1>, i32*)
declare <vscale x 2 x i64> @llvm.aarch64.sve.ld1.nxv2i64(<vscale x 2 x i1>, i64*)
declare <vscale x 8 x half> @llvm.aarch64.sve.ld1.nxv8f16(<vscale x 8 x i1>, half*)
declare <vscale x 4 x float> @llvm.aarch64.sve.ld1.nxv4f32(<vscale x 4 x i1>, float*)
declare <vscale x 2 x double> @llvm.aarch64.sve.ld1.nxv2f64(<vscale x 2 x i1>, double*)

declare void @llvm.aarch64.sve.st1.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8*)
declare void @llvm.aarch64.sve.st1.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16*)
declare void @llvm.aarch64.sve.st1.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32*)
declare void @llvm.aarch64.sve.st1.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64*)
declare void @llvm.aarch64.sve.st1.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half*)
declare void @llvm.aarch64.sve.st1.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float*)
declare void @llvm.aarch64.sve.st1.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double*)