From 3607440c4df6498585a570cfc1041e4972b41b56 Mon Sep 17 00:00:00 2001
From: Richard Henderson
Date: Fri, 28 Aug 2020 10:02:50 +0100
Subject: [PATCH] target/arm: Convert integer multiply-add (indexed) to gvec for aa64 advsimd

Signed-off-by: Richard Henderson
Reviewed-by: Peter Maydell
Message-id: 20200815013145.539409-20-richard.henderson@linaro.org
Signed-off-by: Peter Maydell
---
 target/arm/helper.h        | 14 ++++++++++++++
 target/arm/translate-a64.c | 34 ++++++++++++++++++++++++++++++++++
 target/arm/vec_helper.c    | 25 +++++++++++++++++++++++++
 3 files changed, 73 insertions(+)

diff --git a/target/arm/helper.h b/target/arm/helper.h
index d0573a53c8..378bb1898b 100644
--- a/target/arm/helper.h
+++ b/target/arm/helper.h
@@ -762,6 +762,20 @@ DEF_HELPER_FLAGS_4(gvec_mul_idx_h, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_mul_idx_s, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_mul_idx_d, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
+DEF_HELPER_FLAGS_5(gvec_mla_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mla_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
+DEF_HELPER_FLAGS_5(gvec_mls_idx_h, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_s, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+DEF_HELPER_FLAGS_5(gvec_mls_idx_d, TCG_CALL_NO_RWG,
+                   void, ptr, ptr, ptr, ptr, i32)
+
 #ifdef TARGET_AARCH64
 #include "helper-a64.h"
 #include "helper-sve.h"
diff --git a/target/arm/translate-a64.c b/target/arm/translate-a64.c
index abbd6421a2..a50b2f0cb1 100644
--- a/target/arm/translate-a64.c
+++ b/target/arm/translate-a64.c
@@ -13504,6 +13504,40 @@ static void disas_simd_indexed(DisasContext *s, uint32_t insn)
             return;
         }
         break;
+
+    case 0x10: /* MLA */
+        if (!is_long && !is_scalar) {
+            static gen_helper_gvec_4 * const fns[3] = {
+                gen_helper_gvec_mla_idx_h,
+                gen_helper_gvec_mla_idx_s,
+                gen_helper_gvec_mla_idx_d,
+            };
+            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+                               vec_full_reg_offset(s, rn),
+                               vec_full_reg_offset(s, rm),
+                               vec_full_reg_offset(s, rd),
+                               is_q ? 16 : 8, vec_full_reg_size(s),
+                               index, fns[size - 1]);
+            return;
+        }
+        break;
+
+    case 0x14: /* MLS */
+        if (!is_long && !is_scalar) {
+            static gen_helper_gvec_4 * const fns[3] = {
+                gen_helper_gvec_mls_idx_h,
+                gen_helper_gvec_mls_idx_s,
+                gen_helper_gvec_mls_idx_d,
+            };
+            tcg_gen_gvec_4_ool(vec_full_reg_offset(s, rd),
+                               vec_full_reg_offset(s, rn),
+                               vec_full_reg_offset(s, rm),
+                               vec_full_reg_offset(s, rd),
+                               is_q ? 16 : 8, vec_full_reg_size(s),
+                               index, fns[size - 1]);
+            return;
+        }
+        break;
     }
 
     if (size == 3) {
diff --git a/target/arm/vec_helper.c b/target/arm/vec_helper.c
index 155b6368cb..9e93a8a154 100644
--- a/target/arm/vec_helper.c
+++ b/target/arm/vec_helper.c
@@ -731,6 +731,31 @@ DO_MUL_IDX(gvec_mul_idx_d, uint64_t, )
 
 #undef DO_MUL_IDX
 
+#define DO_MLA_IDX(NAME, TYPE, OP, H) \
+void HELPER(NAME)(void *vd, void *vn, void *vm, void *va, uint32_t desc)   \
+{                                                                          \
+    intptr_t i, j, oprsz = simd_oprsz(desc), segment = 16 / sizeof(TYPE);  \
+    intptr_t idx = simd_data(desc);                                        \
+    TYPE *d = vd, *n = vn, *m = vm, *a = va;                               \
+    for (i = 0; i < oprsz / sizeof(TYPE); i += segment) {                  \
+        TYPE mm = m[H(i + idx)];                                           \
+        for (j = 0; j < segment; j++) {                                    \
+            d[i + j] = a[i + j] OP n[i + j] * mm;                          \
+        }                                                                  \
+    }                                                                      \
+    clear_tail(d, oprsz, simd_maxsz(desc));                                \
+}
+
+DO_MLA_IDX(gvec_mla_idx_h, uint16_t, +, H2)
+DO_MLA_IDX(gvec_mla_idx_s, uint32_t, +, H4)
+DO_MLA_IDX(gvec_mla_idx_d, uint64_t, +, )
+
+DO_MLA_IDX(gvec_mls_idx_h, uint16_t, -, H2)
+DO_MLA_IDX(gvec_mls_idx_s, uint32_t, -, H4)
+DO_MLA_IDX(gvec_mls_idx_d, uint64_t, -, )
+
+#undef DO_MLA_IDX
+
 #define DO_FMUL_IDX(NAME, TYPE, H)                                         \
 void HELPER(NAME)(void *vd, void *vn, void *vm, void *stat, uint32_t desc) \
 {                                                                          \
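
Note (not part of the patch): below is a minimal standalone sketch of the arithmetic the new gvec_mla_idx_*/gvec_mls_idx_* helpers perform, shown for the 32-bit case only. It omits QEMU's simd_desc/simd_data encoding, the H() host-element swizzling macros and clear_tail(); the function name mla_idx_u32_ref and its signature are hypothetical, used purely for illustration of the per-16-byte-segment loop that DO_MLA_IDX generates.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * For each 16-byte segment, read one element of m at the given index,
 * broadcast it, and multiply-accumulate it with n into the addend a,
 * writing the result to d.  sub != 0 selects MLS (subtract) semantics.
 */
static void mla_idx_u32_ref(uint32_t *d, const uint32_t *n, const uint32_t *m,
                            const uint32_t *a, size_t elems, size_t idx,
                            int sub)
{
    const size_t segment = 16 / sizeof(uint32_t);    /* 4 elements per segment */

    for (size_t i = 0; i < elems; i += segment) {
        uint32_t mm = m[i + idx];                    /* indexed element */
        for (size_t j = 0; j < segment; j++) {
            uint32_t prod = n[i + j] * mm;
            d[i + j] = sub ? a[i + j] - prod         /* MLS */
                           : a[i + j] + prod;        /* MLA */
        }
    }
}

int main(void)
{
    uint32_t n[4] = { 1, 2, 3, 4 };
    uint32_t m[4] = { 10, 20, 30, 40 };
    uint32_t a[4] = { 100, 100, 100, 100 };
    uint32_t d[4];

    mla_idx_u32_ref(d, n, m, a, 4, 2, 0);            /* MLA by element m[2] == 30 */
    printf("%u %u %u %u\n", d[0], d[1], d[2], d[3]); /* prints: 130 160 190 220 */
    return 0;
}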