target/arm: Implement SVE2 complex integer multiply-add

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-id: 20210525010358.152808-38-richard.henderson@linaro.org
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>

commit d782d3ca9f (parent 45a32e80b9)
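
CMLA treats each pair of vector elements as (real, imaginary) and accumulates one of the cross products, selected by the two-bit rotation field (rot values 0..3 encode 0, 90, 180 and 270 degrees); two CMLA instructions with rotations 0 and 90 together accumulate a full complex product. SQRDCMLAH is the saturating, rounding, doubling variant. The following is a standalone sketch (not QEMU code) of the per-pair arithmetic the new helpers implement, using the same rot-to-select/sign mapping as the DO_CMLA_FUNC macro added below:

    /*
     * Standalone sketch: the per-pair arithmetic performed by the new
     * sve2_cmla_zzzz_* helpers.  "rot" is reduced to an operand-select
     * bit and two sign flags, exactly as in DO_CMLA_FUNC in the patch.
     */
    #include <stdint.h>
    #include <stdio.h>

    static void cmla_pair_ref(int32_t *d, const int32_t *n, const int32_t *m,
                              const int32_t *a, int rot)
    {
        int sel_a = rot & 1, sel_b = sel_a ^ 1;      /* which n/m element */
        int sub_r = (rot == 1 || rot == 2) ? -1 : 1; /* sign, real lane */
        int sub_i = (rot >= 2) ? -1 : 1;             /* sign, imag lane */

        d[0] = a[0] + sub_r * n[sel_a] * m[sel_a];
        d[1] = a[1] + sub_i * n[sel_a] * m[sel_b];
    }

    int main(void)
    {
        /* (1 + 2i) against (3 + 4i), accumulator 0, all four rotations. */
        int32_t n[2] = { 1, 2 }, m[2] = { 3, 4 }, a[2] = { 0, 0 }, d[2];
        for (int rot = 0; rot < 4; rot++) {
            cmla_pair_ref(d, n, m, a, rot);
            printf("rot=%d -> (%d, %d)\n", rot, (int)d[0], (int)d[1]);
        }
        return 0;
    }

For these inputs the sketch prints (3, 4), (-8, 6), (-3, -4) and (8, -6) for rotations 0, 90, 180 and 270 respectively; summing the rot=0 and rot=90 results gives the real and imaginary parts of the full complex product (-5 + 10i).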

@@ -2601,3 +2601,21 @@ DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_s, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(sve2_umlsl_zzzw_d, TCG_CALL_NO_RWG,
                    void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_cmla_zzzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_b, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(sve2_sqrdcmlah_zzzz_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
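
These DEF_HELPER_FLAGS_5 lines register the new out-of-line vector helpers with TCG; TCG_CALL_NO_RWG tells the optimizer the helper neither reads nor writes TCG globals. As a rough sketch (approximate, not the literal preprocessor output), each declaration amounts to a prototype of the form:

    /* Approximate expansion of one DEF_HELPER_FLAGS_5 line above:
     * destination, two sources, addend, plus a 32-bit descriptor. */
    void helper_sve2_cmla_zzzz_b(void *vd, void *vn, void *vm,
                                 void *va, uint32_t desc);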

@@ -1362,3 +1362,8 @@ SMLSLB_zzzw     01000100 .. 0 ..... 010 100 ..... .....  @rda_rn_rm
 SMLSLT_zzzw     01000100 .. 0 ..... 010 101 ..... .....  @rda_rn_rm
 UMLSLB_zzzw     01000100 .. 0 ..... 010 110 ..... .....  @rda_rn_rm
 UMLSLT_zzzw     01000100 .. 0 ..... 010 111 ..... .....  @rda_rn_rm
+
+## SVE2 complex integer multiply-add
+
+CMLA_zzzz       01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5  ra=%reg_movprfx
+SQRDCMLAH_zzzz  01000100 esz:2 0 rm:5 0011 rot:2 rn:5 rd:5  ra=%reg_movprfx
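
The bit positions follow directly from the CMLA_zzzz pattern above; ra=%reg_movprfx takes the addend from the same bits as rd, so the instruction is destructive but may be preceded by MOVPRFX. A sketch of the field extraction (a hypothetical helper, not QEMU's generated decoder):

    /* Hypothetical decoder sketch for the CMLA_zzzz pattern:
     *   01000100 esz:2 0 rm:5 0010 rot:2 rn:5 rd:5
     */
    #include <stdbool.h>
    #include <stdint.h>

    struct cmla_args {
        int esz, rm, rot, rn, rd, ra;
    };

    static bool decode_cmla_zzzz(uint32_t insn, struct cmla_args *a)
    {
        /* Fixed bits: [31:24] = 0100 0100, [21] = 0, [15:12] = 0010. */
        if ((insn & 0xff20f000) != 0x44002000) {
            return false;
        }
        a->esz = (insn >> 22) & 3;   /* element size */
        a->rm  = (insn >> 16) & 31;
        a->rot = (insn >> 10) & 3;   /* 0/90/180/270 degree rotation */
        a->rn  = (insn >> 5) & 31;
        a->rd  = insn & 31;
        a->ra  = a->rd;              /* ra=%reg_movprfx: addend aliases rd */
        return true;
    }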

@@ -1453,6 +1453,52 @@ DO_SQDMLAL(sve2_sqdmlsl_zzzw_d, int64_t, int32_t, , H1_4,
 
 #undef DO_SQDMLAL
 
+#define DO_CMLA_FUNC(NAME, TYPE, H, OP) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc) \
+{                                                               \
+    intptr_t i, opr_sz = simd_oprsz(desc) / sizeof(TYPE);       \
+    int rot = simd_data(desc);                                  \
+    int sel_a = rot & 1, sel_b = sel_a ^ 1;                     \
+    bool sub_r = rot == 1 || rot == 2;                          \
+    bool sub_i = rot >= 2;                                      \
+    TYPE *d = vd, *n = vn, *m = vm, *a = va;                    \
+    for (i = 0; i < opr_sz; i += 2) {                           \
+        TYPE elt1_a = n[H(i + sel_a)];                          \
+        TYPE elt2_a = m[H(i + sel_a)];                          \
+        TYPE elt2_b = m[H(i + sel_b)];                          \
+        d[H(i)] = OP(elt1_a, elt2_a, a[H(i)], sub_r);           \
+        d[H(i + 1)] = OP(elt1_a, elt2_b, a[H(i + 1)], sub_i);   \
+    }                                                           \
+}
+
+#define DO_CMLA(N, M, A, S) (A + (N * M) * (S ? -1 : 1))
+
+DO_CMLA_FUNC(sve2_cmla_zzzz_b, uint8_t, H1, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_h, uint16_t, H2, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_s, uint32_t, H4, DO_CMLA)
+DO_CMLA_FUNC(sve2_cmla_zzzz_d, uint64_t,   , DO_CMLA)
+
+#define DO_SQRDMLAH_B(N, M, A, S) \
+    do_sqrdmlah_b(N, M, A, S, true)
+#define DO_SQRDMLAH_H(N, M, A, S) \
+    ({ uint32_t discard; do_sqrdmlah_h(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_S(N, M, A, S) \
+    ({ uint32_t discard; do_sqrdmlah_s(N, M, A, S, true, &discard); })
+#define DO_SQRDMLAH_D(N, M, A, S) \
+    do_sqrdmlah_d(N, M, A, S, true)
+
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_b, int8_t, H1, DO_SQRDMLAH_B)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_h, int16_t, H2, DO_SQRDMLAH_H)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_s, int32_t, H4, DO_SQRDMLAH_S)
+DO_CMLA_FUNC(sve2_sqrdcmlah_zzzz_d, int64_t,   , DO_SQRDMLAH_D)
+
+#undef DO_CMLA
+#undef DO_CMLA_FUNC
+#undef DO_SQRDMLAH_B
+#undef DO_SQRDMLAH_H
+#undef DO_SQRDMLAH_S
+#undef DO_SQRDMLAH_D
+
 #define DO_BITPERM(NAME, TYPE, OP) \
 void HELPER(NAME)(void *vd, void *vn, void *vm, uint32_t desc) \
 {
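
In DO_CMLA the S flag simply flips the sign of the product, so the same DO_CMLA_FUNC skeleton serves both the plain and the saturating forms. The DO_SQRDMLAH_H/S wrappers rely on the GNU C statement-expression extension to yield a plain value while discarding the saturation flag that the shared scalar helpers report through their uint32_t *sat out-parameter. A toy example of that idiom (hypothetical names, GCC/Clang only):

    /* Toy illustration of the ({ ... }) wrapper pattern used above. */
    #include <stdint.h>
    #include <limits.h>

    /* Saturating add that reports saturation via *sat, like do_sqrdmlah_h. */
    static int16_t sat_add16(int16_t a, int16_t b, uint32_t *sat)
    {
        int32_t r = (int32_t)a + b;
        if (r != (int16_t)r) {
            *sat = 1;
            r = r < 0 ? INT16_MIN : INT16_MAX;
        }
        return (int16_t)r;
    }

    /* Value-only wrapper in the style of DO_SQRDMLAH_H: the flag is lost.
     * Usage: SAT_ADD16_VALUE(30000, 10000) evaluates to 32767. */
    #define SAT_ADD16_VALUE(A, B) \
        ({ uint32_t discard_; sat_add16(A, B, &discard_); })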

@@ -7656,3 +7656,35 @@ static bool trans_UMLSLT_zzzw(DisasContext *s, arg_rrrr_esz *a)
 {
     return do_umlsl_zzzw(s, a, true);
 }
+
+static bool trans_CMLA_zzzz(DisasContext *s, arg_CMLA_zzzz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_cmla_zzzz_b, gen_helper_sve2_cmla_zzzz_h,
+        gen_helper_sve2_cmla_zzzz_s, gen_helper_sve2_cmla_zzzz_d,
+    };
+
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+    }
+    return true;
+}
+
+static bool trans_SQRDCMLAH_zzzz(DisasContext *s, arg_SQRDCMLAH_zzzz *a)
+{
+    static gen_helper_gvec_4 * const fns[] = {
+        gen_helper_sve2_sqrdcmlah_zzzz_b, gen_helper_sve2_sqrdcmlah_zzzz_h,
+        gen_helper_sve2_sqrdcmlah_zzzz_s, gen_helper_sve2_sqrdcmlah_zzzz_d,
+    };
+
+    if (!dc_isar_feature(aa64_sve2, s)) {
+        return false;
+    }
+    if (sve_access_check(s)) {
+        gen_gvec_ool_zzzz(s, fns[a->esz], a->rd, a->rn, a->rm, a->ra, a->rot);
+    }
+    return true;
+}
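
Both trans functions pass a->rot as the final argument of gen_gvec_ool_zzzz, a pre-existing wrapper not shown in this diff; that value travels in the 32-bit descriptor the helper later unpacks with simd_data(), alongside the vector length returned by simd_oprsz(). A toy pack/unpack illustration (hypothetical encoding; the real layout in QEMU's gvec descriptor differs):

    /* Toy descriptor pack/unpack, mimicking how "rot" reaches the helper. */
    #include <assert.h>
    #include <stdint.h>

    static uint32_t toy_simd_desc(uint32_t oprsz, int32_t data)
    {
        /* low 16 bits: operation size in bytes; high 16 bits: data */
        return (oprsz & 0xffff) | ((uint32_t)data << 16);
    }

    static uint32_t toy_simd_oprsz(uint32_t desc) { return desc & 0xffff; }
    static int32_t  toy_simd_data(uint32_t desc)  { return (int32_t)desc >> 16; }

    int main(void)
    {
        uint32_t desc = toy_simd_desc(32, 3);   /* 32-byte vector, rot = 3 */
        assert(toy_simd_oprsz(desc) == 32);
        assert(toy_simd_data(desc) == 3);
        return 0;
    }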

@@ -38,8 +38,8 @@
 #endif
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 8-bit */
-static int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
-                            bool neg, bool round)
+int8_t do_sqrdmlah_b(int8_t src1, int8_t src2, int8_t src3,
+                     bool neg, bool round)
 {
     /*
      * Simplify:

@@ -82,8 +82,8 @@ void HELPER(sve2_sqrdmlsh_b)(void *vd, void *vn, void *vm,
 }
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 16-bit */
-static int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
-                             bool neg, bool round, uint32_t *sat)
+int16_t do_sqrdmlah_h(int16_t src1, int16_t src2, int16_t src3,
+                      bool neg, bool round, uint32_t *sat)
 {
     /* Simplify similarly to do_sqrdmlah_b above. */
     int32_t ret = (int32_t)src1 * src2;

@@ -199,8 +199,8 @@ void HELPER(sve2_sqrdmlsh_h)(void *vd, void *vn, void *vm,
 }
 
 /* Signed saturating rounding doubling multiply-accumulate high half, 32-bit */
-static int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
-                             bool neg, bool round, uint32_t *sat)
+int32_t do_sqrdmlah_s(int32_t src1, int32_t src2, int32_t src3,
+                      bool neg, bool round, uint32_t *sat)
 {
     /* Simplify similarly to do_sqrdmlah_b above. */
     int64_t ret = (int64_t)src1 * src2;

@@ -321,8 +321,7 @@ static int64_t do_sat128_d(Int128 r)
     return ls;
 }
 
-static int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a,
-                             bool neg, bool round)
+int64_t do_sqrdmlah_d(int64_t n, int64_t m, int64_t a, bool neg, bool round)
 {
     uint64_t l, h;
     Int128 r, t;

@@ -168,4 +168,9 @@ static inline int64_t do_suqrshl_d(int64_t src, int64_t shift,
     return do_uqrshl_d(src, shift, round, sat);
 }
 
+int8_t do_sqrdmlah_b(int8_t, int8_t, int8_t, bool, bool);
+int16_t do_sqrdmlah_h(int16_t, int16_t, int16_t, bool, bool, uint32_t *);
+int32_t do_sqrdmlah_s(int32_t, int32_t, int32_t, bool, bool, uint32_t *);
+int64_t do_sqrdmlah_d(int64_t, int64_t, int64_t, bool, bool);
+
 #endif /* TARGET_ARM_VEC_INTERNALS_H */
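
The hunks above only drop the static qualifiers so that the SVE2 helpers can reuse the scalar do_sqrdmlah_* routines through the new prototypes in the header guarded by TARGET_ARM_VEC_INTERNALS_H. For reference, a sketch of the 16-bit computation such a routine performs, written from the architectural definition of SQRDMLAH rather than copied from the QEMU source (details may differ): the product is optionally negated, the addend is scaled up, a rounding constant is added, and the high half is extracted with saturation.

    /* Reference sketch: signed saturating rounding doubling multiply-
     * accumulate, high half, 16-bit.  Matches the do_sqrdmlah_h prototype
     * above; "neg" selects the multiply-subtract (sqrdmlsh) form and
     * *sat is set when the result saturates. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <limits.h>

    static int16_t sqrdmlah_h_ref(int16_t n, int16_t m, int16_t a,
                                  bool neg, bool round, uint32_t *sat)
    {
        int32_t p = (int32_t)n * m;              /* full-precision product */
        if (neg) {
            p = -p;                              /* multiply-subtract form */
        }
        /* ((a << 16) + 2*n*m + rounding) >> 16, with the doubling folded
         * into halved shift amounts. */
        int32_t r = ((int32_t)a << 15) + p + (round ? 1 << 14 : 0);
        r >>= 15;
        if (r != (int16_t)r) {                   /* saturate to 16 bits */
            *sat = 1;
            r = r < 0 ? INT16_MIN : INT16_MAX;
        }
        return (int16_t)r;
    }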