/*
 * ARM translation: AArch32 VFP instructions
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2005-2007 CodeSourcery
 * Copyright (c) 2007 OpenedHand, Ltd.
 * Copyright (c) 2019 Linaro, Ltd.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/exec-all.h"
#include "exec/gen-icount.h"
#include "translate.h"
#include "translate-a32.h"

/* Include the generated VFP decoder */
#include "decode-vfp.c.inc"
#include "decode-vfp-uncond.c.inc"

static inline void vfp_load_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_ld_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_store_reg64(TCGv_i64 var, int reg)
{
    tcg_gen_st_i64(var, cpu_env, vfp_reg_offset(true, reg));
}

static inline void vfp_load_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_ld_i32(var, cpu_env, vfp_reg_offset(false, reg));
}

static inline void vfp_store_reg32(TCGv_i32 var, int reg)
{
    tcg_gen_st_i32(var, cpu_env, vfp_reg_offset(false, reg));
}
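
/*
 * Illustrative usage note: with these wrappers, vfp_load_reg32(tmp, 5)
 * reads S5 and vfp_load_reg64(tmp, 5) reads D5, since vfp_reg_offset()
 * interprets the index as a single- or double-precision register number
 * depending on its first argument.
 */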

/*
 * The imm8 encodes the sign bit, enough bits to represent an exponent in
 * the range 01....1xx to 10....0xx, and the most significant 4 bits of
 * the mantissa; see VFPExpandImm() in the v8 ARM ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8)
{
    uint64_t imm;

    switch (size) {
    case MO_64:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3fc0 : 0x4000) |
            extract32(imm8, 0, 6);
        imm <<= 48;
        break;
    case MO_32:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3e00 : 0x4000) |
            (extract32(imm8, 0, 6) << 3);
        imm <<= 16;
        break;
    case MO_16:
        imm = (extract32(imm8, 7, 1) ? 0x8000 : 0) |
            (extract32(imm8, 6, 1) ? 0x3000 : 0x4000) |
            (extract32(imm8, 0, 6) << 6);
        break;
    default:
        g_assert_not_reached();
    }
    return imm;
}
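
/*
 * Worked example: for size == MO_32 and imm8 == 0x70, bit 7 is 0, bit 6
 * is 1 and bits [5:0] are 0x30, so the code above computes
 * 0x3e00 | (0x30 << 3) = 0x3f80, shifted left by 16 to give 0x3f800000,
 * i.e. single-precision 1.0. The same imm8 for MO_64 expands to
 * 0x3ff0000000000000, double-precision 1.0.
 */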

/*
 * Return the offset of a 16-bit half of the specified VFP single-precision
 * register. If top is true, returns the top 16 bits; otherwise the bottom
 * 16 bits.
 */
static inline long vfp_f16_offset(unsigned reg, bool top)
{
    long offs = vfp_reg_offset(false, reg);
#ifdef HOST_WORDS_BIGENDIAN
    if (!top) {
        offs += 2;
    }
#else
    if (top) {
        offs += 2;
    }
#endif
    return offs;
}
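
/*
 * For example, on a little-endian host vfp_f16_offset(n, false) is just
 * vfp_reg_offset(false, n) and vfp_f16_offset(n, true) adds 2 bytes to
 * address the upper half of Sn; a big-endian host mirrors the adjustment,
 * as the #ifdef above shows.
 */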

/*
 * Generate code for M-profile lazy FP state preservation if needed;
 * this corresponds to the pseudocode PreserveFPState() function.
 */
static void gen_preserve_fp_state(DisasContext *s)
{
    if (s->v7m_lspact) {
        /*
         * Lazy state saving affects external memory and also the NVIC,
         * so we must mark it as an IO operation for icount (and cause
         * this to be the last insn in the TB).
         */
        if (tb_cflags(s->base.tb) & CF_USE_ICOUNT) {
            s->base.is_jmp = DISAS_UPDATE_EXIT;
            gen_io_start();
        }
        gen_helper_v7m_preserve_fp_state(cpu_env);
        /*
         * If the preserve_fp_state helper doesn't throw an exception
         * then it will clear LSPACT; we don't need to repeat this for
         * any further FP insns in this TB.
         */
        s->v7m_lspact = false;
    }
}

/*
 * Check that VFP access is enabled. If it is, do the necessary
 * M-profile lazy-FP handling and then return true.
 * If not, emit code to generate an appropriate exception and
 * return false.
 * The ignore_vfp_enabled argument specifies that we should ignore
 * whether VFP is enabled via FPEXC[EN]: this should be true for FMXR/FMRX
 * accesses to FPSID, FPEXC, MVFR0, MVFR1, MVFR2, and false for all other insns.
 */
static bool full_vfp_access_check(DisasContext *s, bool ignore_vfp_enabled)
{
    if (s->fp_excp_el) {
        if (arm_dc_feature(s, ARM_FEATURE_M)) {
            /*
             * M-profile mostly catches the "FPU disabled" case early, in
             * disas_m_nocp(), but a few insns (eg LCTP, WLSTP, DLSTP)
             * which do coprocessor-checks are outside the large ranges of
             * the encoding space handled by the patterns in m-nocp.decode,
             * and for them we may need to raise NOCP here.
             */
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
        } else {
            gen_exception_insn(s, s->pc_curr, EXCP_UDEF,
                               syn_fp_access_trap(1, 0xe, false),
                               s->fp_excp_el);
        }
        return false;
    }

    if (!s->vfp_enabled && !ignore_vfp_enabled) {
        assert(!arm_dc_feature(s, ARM_FEATURE_M));
        unallocated_encoding(s);
        return false;
    }

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /* Handle M-profile lazy FP state mechanics */

        /* Trigger lazy-state preservation if necessary */
        gen_preserve_fp_state(s);

        /* Update ownership of FP context: set FPCCR.S to match current state */
        if (s->v8m_fpccr_s_wrong) {
            TCGv_i32 tmp;

            tmp = load_cpu_field(v7m.fpccr[M_REG_S]);
            if (s->v8m_secure) {
                tcg_gen_ori_i32(tmp, tmp, R_V7M_FPCCR_S_MASK);
            } else {
                tcg_gen_andi_i32(tmp, tmp, ~R_V7M_FPCCR_S_MASK);
            }
            store_cpu_field(tmp, v7m.fpccr[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v8m_fpccr_s_wrong = false;
        }

        if (s->v7m_new_fp_ctxt_needed) {
            /*
             * Create new FP context by updating CONTROL.FPCA, CONTROL.SFPA,
             * the FPSCR, and VPR.
             */
            TCGv_i32 control, fpscr;
            uint32_t bits = R_V7M_CONTROL_FPCA_MASK;

            fpscr = load_cpu_field(v7m.fpdscr[s->v8m_secure]);
            gen_helper_vfp_set_fpscr(cpu_env, fpscr);
            tcg_temp_free_i32(fpscr);
            if (dc_isar_feature(aa32_mve, s)) {
                TCGv_i32 z32 = tcg_const_i32(0);
                store_cpu_field(z32, v7m.vpr);
            }

            /*
             * We don't need to arrange to end the TB, because the only
             * parts of FPSCR which we cache in the TB flags are the VECLEN
             * and VECSTRIDE, and those don't exist for M-profile.
             */

            if (s->v8m_secure) {
                bits |= R_V7M_CONTROL_SFPA_MASK;
            }
            control = load_cpu_field(v7m.control[M_REG_S]);
            tcg_gen_ori_i32(control, control, bits);
            store_cpu_field(control, v7m.control[M_REG_S]);
            /* Don't need to do this for any further FP insns in this TB */
            s->v7m_new_fp_ctxt_needed = false;
        }
    }

    return true;
}

/*
 * The most usual kind of VFP access check, for everything except
 * FMXR/FMRX to the always-available special registers.
 */
bool vfp_access_check(DisasContext *s)
{
    return full_vfp_access_check(s, false);
}
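
/*
 * For example, trans_VMSR_VMRS() below passes ignore_vfp_enabled == true
 * for FMRX/FMXR of FPSID, FPEXC and the MVFRn registers, so those accesses
 * work even when FPEXC.EN is 0; all other insns use vfp_access_check()
 * and are trapped in that case.
 */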

static bool trans_VSEL(DisasContext *s, arg_VSEL *a)
{
    uint32_t rd, rn, rm;
    int sz = a->sz;

    if (!dc_isar_feature(aa32_vsel, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vn | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rn = a->vn;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 3) {
        TCGv_i64 frn, frm, dest;
        TCGv_i64 tmp, zero, zf, nf, vf;

        zero = tcg_const_i64(0);

        frn = tcg_temp_new_i64();
        frm = tcg_temp_new_i64();
        dest = tcg_temp_new_i64();

        zf = tcg_temp_new_i64();
        nf = tcg_temp_new_i64();
        vf = tcg_temp_new_i64();

        tcg_gen_extu_i32_i64(zf, cpu_ZF);
        tcg_gen_ext_i32_i64(nf, cpu_NF);
        tcg_gen_ext_i32_i64(vf, cpu_VF);

        vfp_load_reg64(frn, rn);
        vfp_load_reg64(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i64(TCG_COND_EQ, dest, zf, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i64(TCG_COND_LT, dest, vf, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i64(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i64(TCG_COND_NE, dest, zf, zero,
                                frn, frm);
            tmp = tcg_temp_new_i64();
            tcg_gen_xor_i64(tmp, vf, nf);
            tcg_gen_movcond_i64(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i64(tmp);
            break;
        }
        vfp_store_reg64(dest, rd);
        tcg_temp_free_i64(frn);
        tcg_temp_free_i64(frm);
        tcg_temp_free_i64(dest);

        tcg_temp_free_i64(zf);
        tcg_temp_free_i64(nf);
        tcg_temp_free_i64(vf);

        tcg_temp_free_i64(zero);
    } else {
        TCGv_i32 frn, frm, dest;
        TCGv_i32 tmp, zero;

        zero = tcg_const_i32(0);

        frn = tcg_temp_new_i32();
        frm = tcg_temp_new_i32();
        dest = tcg_temp_new_i32();
        vfp_load_reg32(frn, rn);
        vfp_load_reg32(frm, rm);
        switch (a->cc) {
        case 0: /* eq: Z */
            tcg_gen_movcond_i32(TCG_COND_EQ, dest, cpu_ZF, zero,
                                frn, frm);
            break;
        case 1: /* vs: V */
            tcg_gen_movcond_i32(TCG_COND_LT, dest, cpu_VF, zero,
                                frn, frm);
            break;
        case 2: /* ge: N == V -> N ^ V == 0 */
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                frn, frm);
            tcg_temp_free_i32(tmp);
            break;
        case 3: /* gt: !Z && N == V */
            tcg_gen_movcond_i32(TCG_COND_NE, dest, cpu_ZF, zero,
                                frn, frm);
            tmp = tcg_temp_new_i32();
            tcg_gen_xor_i32(tmp, cpu_VF, cpu_NF);
            tcg_gen_movcond_i32(TCG_COND_GE, dest, tmp, zero,
                                dest, frm);
            tcg_temp_free_i32(tmp);
            break;
        }
        /* For fp16 the top half is always zeroes */
        if (sz == 1) {
            tcg_gen_andi_i32(dest, dest, 0xffff);
        }
        vfp_store_reg32(dest, rd);
        tcg_temp_free_i32(frn);
        tcg_temp_free_i32(frm);
        tcg_temp_free_i32(dest);

        tcg_temp_free_i32(zero);
    }

    return true;
}

/*
 * Table for converting the most common AArch32 encoding of
 * rounding mode to arm_fprounding order (which matches the
 * common AArch64 order); see ARM ARM pseudocode FPDecodeRM().
 */
static const uint8_t fp_decode_rm[] = {
    FPROUNDING_TIEAWAY,
    FPROUNDING_TIEEVEN,
    FPROUNDING_POSINF,
    FPROUNDING_NEGINF,
};
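
/*
 * Illustration (per the FPDecodeRM() pseudocode): RM encoding 0b00, as used
 * by VRINTA/VCVTA, indexes FPROUNDING_TIEAWAY here; 0b01 (VRINTN/VCVTN)
 * selects ties-to-even, 0b10 (VRINTP/VCVTP) rounds towards +infinity and
 * 0b11 (VRINTM/VCVTM) towards -infinity.
 */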

static bool trans_VRINT(DisasContext *s, arg_VRINT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode;
    int rounding = fp_decode_rm[a->rm];

    if (!dc_isar_feature(aa32_vrint, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) &&
        ((a->vm | a->vd) & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_op;
        TCGv_i64 tcg_res;
        tcg_op = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        vfp_load_reg64(tcg_op, rm);
        gen_helper_rintd(tcg_res, tcg_op, fpst);
        vfp_store_reg64(tcg_res, rd);
        tcg_temp_free_i64(tcg_op);
        tcg_temp_free_i64(tcg_res);
    } else {
        TCGv_i32 tcg_op;
        TCGv_i32 tcg_res;
        tcg_op = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_op, rm);
        if (sz == 1) {
            gen_helper_rinth(tcg_res, tcg_op, fpst);
        } else {
            gen_helper_rints(tcg_res, tcg_op, fpst);
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_op);
        tcg_temp_free_i32(tcg_res);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_ptr(fpst);
    return true;
}

static bool trans_VCVT(DisasContext *s, arg_VCVT *a)
{
    uint32_t rd, rm;
    int sz = a->sz;
    TCGv_ptr fpst;
    TCGv_i32 tcg_rmode, tcg_shift;
    int rounding = fp_decode_rm[a->rm];
    bool is_signed = a->op;

    if (!dc_isar_feature(aa32_vcvt_dr, s)) {
        return false;
    }

    if (sz == 3 && !dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (sz == 1 && !dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (sz == 3 && !dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    rd = a->vd;
    rm = a->vm;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (sz == 1) {
        fpst = fpstatus_ptr(FPST_FPCR_F16);
    } else {
        fpst = fpstatus_ptr(FPST_FPCR);
    }

    tcg_shift = tcg_const_i32(0);

    tcg_rmode = tcg_const_i32(arm_rmode_to_sf(rounding));
    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);

    if (sz == 3) {
        TCGv_i64 tcg_double, tcg_res;
        TCGv_i32 tcg_tmp;
        tcg_double = tcg_temp_new_i64();
        tcg_res = tcg_temp_new_i64();
        tcg_tmp = tcg_temp_new_i32();
        vfp_load_reg64(tcg_double, rm);
        if (is_signed) {
            gen_helper_vfp_tosld(tcg_res, tcg_double, tcg_shift, fpst);
        } else {
            gen_helper_vfp_tould(tcg_res, tcg_double, tcg_shift, fpst);
        }
        tcg_gen_extrl_i64_i32(tcg_tmp, tcg_res);
        vfp_store_reg32(tcg_tmp, rd);
        tcg_temp_free_i32(tcg_tmp);
        tcg_temp_free_i64(tcg_res);
        tcg_temp_free_i64(tcg_double);
    } else {
        TCGv_i32 tcg_single, tcg_res;
        tcg_single = tcg_temp_new_i32();
        tcg_res = tcg_temp_new_i32();
        vfp_load_reg32(tcg_single, rm);
        if (sz == 1) {
            if (is_signed) {
                gen_helper_vfp_toslh(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_toulh(tcg_res, tcg_single, tcg_shift, fpst);
            }
        } else {
            if (is_signed) {
                gen_helper_vfp_tosls(tcg_res, tcg_single, tcg_shift, fpst);
            } else {
                gen_helper_vfp_touls(tcg_res, tcg_single, tcg_shift, fpst);
            }
        }
        vfp_store_reg32(tcg_res, rd);
        tcg_temp_free_i32(tcg_res);
        tcg_temp_free_i32(tcg_single);
    }

    gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
    tcg_temp_free_i32(tcg_rmode);

    tcg_temp_free_i32(tcg_shift);

    tcg_temp_free_ptr(fpst);

    return true;
}

static bool trans_VMOV_to_gp(DisasContext *s, arg_VMOV_to_gp *a)
{
    /* VMOV scalar to general purpose register */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = tcg_temp_new_i32();
    read_neon_element32(tmp, a->vn, a->index, a->size | (a->u ? 0 : MO_SIGN));
    store_reg(s, a->rt, tmp);

    return true;
}

static bool trans_VMOV_from_gp(DisasContext *s, arg_VMOV_from_gp *a)
{
    /* VMOV general purpose register to scalar */
    TCGv_i32 tmp;

    /*
     * SIZE == MO_32 is a VFP instruction; otherwise NEON. MVE has
     * all sizes, whether the CPU has fp or not.
     */
    if (!dc_isar_feature(aa32_mve, s)) {
        if (a->size == MO_32
            ? !dc_isar_feature(aa32_fpsp_v2, s)
            : !arm_dc_feature(s, ARM_FEATURE_NEON)) {
            return false;
        }
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    write_neon_element32(tmp, a->vn, a->index, a->size);
    tcg_temp_free_i32(tmp);

    return true;
}

static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
{
    /* VDUP (general purpose register) */
    TCGv_i32 tmp;
    int size, vec_size;

    if (!arm_dc_feature(s, ARM_FEATURE_NEON)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vn & 0x10)) {
        return false;
    }

    if (a->b && a->e) {
        return false;
    }

    if (a->q && (a->vn & 1)) {
        return false;
    }

    vec_size = a->q ? 16 : 8;
    if (a->b) {
        size = 0;
    } else if (a->e) {
        size = 1;
    } else {
        size = 2;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    tmp = load_reg(s, a->rt);
    tcg_gen_gvec_dup_i32(size, neon_full_reg_offset(a->vn),
                         vec_size, vec_size, tmp);
    tcg_temp_free_i32(tmp);

    return true;
}

/*
 * M-profile provides two different sets of instructions that can
 * access floating point system registers: VMSR/VMRS (which move
 * to/from a general purpose register) and VLDR/VSTR sysreg (which
 * move directly to/from memory). In some cases there are also side
 * effects which must happen after any write to memory (which could
 * cause an exception). So we implement the common logic for the
 * sysreg access in gen_M_fp_sysreg_write() and gen_M_fp_sysreg_read(),
 * which take pointers to callback functions which will perform the
 * actual "read/write general purpose register" and "read/write
 * memory" operations.
 */

/*
 * Emit code to store the sysreg to its final destination; frees the
 * TCG temp 'value' it is passed.
 */
typedef void fp_sysreg_storefn(DisasContext *s, void *opaque, TCGv_i32 value);
/*
 * Emit code to load the value to be copied to the sysreg; returns
 * a new TCG temporary
 */
typedef TCGv_i32 fp_sysreg_loadfn(DisasContext *s, void *opaque);
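
/*
 * The concrete callback pairs used in this file are fp_sysreg_to_gpr() /
 * gpr_to_fp_sysreg() for VMRS/VMSR (see gen_M_VMSR_VMRS() below) and
 * fp_sysreg_to_memory() / memory_to_fp_sysreg() for the VSTR/VLDR sysreg
 * forms (see trans_VSTR_sysreg() and trans_VLDR_sysreg() below).
 */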

/* Common decode/access checks for fp sysreg read/write */
typedef enum FPSysRegCheckResult {
    FPSysRegCheckFailed, /* caller should return false */
    FPSysRegCheckDone, /* caller should return true */
    FPSysRegCheckContinue, /* caller should continue generating code */
} FPSysRegCheckResult;

static FPSysRegCheckResult fp_sysreg_checks(DisasContext *s, int regno)
{
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return FPSysRegCheckFailed;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
    case QEMU_VFP_FPSCR_NZCV:
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_FPCXT_S:
    case ARM_VFP_FPCXT_NS:
        if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
            return FPSysRegCheckFailed;
        }
        if (!s->v8m_secure) {
            return FPSysRegCheckFailed;
        }
        break;
    case ARM_VFP_VPR:
    case ARM_VFP_P0:
        if (!dc_isar_feature(aa32_mve, s)) {
            return FPSysRegCheckFailed;
        }
        break;
    default:
        return FPSysRegCheckFailed;
    }

    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * So we don't call vfp_access_check() and the callers must handle this.
     */
    if (regno != ARM_VFP_FPCXT_NS && !vfp_access_check(s)) {
        return FPSysRegCheckDone;
    }
    return FPSysRegCheckContinue;
}

static void gen_branch_fpInactive(DisasContext *s, TCGCond cond,
                                  TCGLabel *label)
{
    /*
     * FPCXT_NS is a special case: it has specific handling for
     * "current FP state is inactive", and must do the PreserveFPState()
     * but not the usual full set of actions done by ExecuteFPCheck().
     * We don't have a TB flag that matches the fpInactive check, so we
     * do it at runtime as we don't expect FPCXT_NS accesses to be frequent.
     *
     * Emit code that checks fpInactive and does a conditional
     * branch to label based on it:
     * if cond is TCG_COND_NE then branch if fpInactive != 0 (ie if inactive)
     * if cond is TCG_COND_EQ then branch if fpInactive == 0 (ie if active)
     */
    assert(cond == TCG_COND_EQ || cond == TCG_COND_NE);

    /* fpInactive = FPCCR_NS.ASPEN == 1 && CONTROL.FPCA == 0 */
    TCGv_i32 aspen, fpca;
    aspen = load_cpu_field(v7m.fpccr[M_REG_NS]);
    fpca = load_cpu_field(v7m.control[M_REG_S]);
    tcg_gen_andi_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_xori_i32(aspen, aspen, R_V7M_FPCCR_ASPEN_MASK);
    tcg_gen_andi_i32(fpca, fpca, R_V7M_CONTROL_FPCA_MASK);
    tcg_gen_or_i32(fpca, fpca, aspen);
    tcg_gen_brcondi_i32(tcg_invert_cond(cond), fpca, 0, label);
    tcg_temp_free_i32(aspen);
    tcg_temp_free_i32(fpca);
}

static bool gen_M_fp_sysreg_write(DisasContext *s, int regno,
                                  fp_sysreg_loadfn *loadfn,
                                  void *opaque)
{
    /* Do a write to an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = loadfn(s, opaque);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        gen_lookup_tb(s);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
    {
        TCGv_i32 fpscr;
        tmp = loadfn(s, opaque);
        if (dc_isar_feature(aa32_mve, s)) {
            /* QC is only present for MVE; otherwise RES0 */
            TCGv_i32 qc = tcg_temp_new_i32();
            tcg_gen_andi_i32(qc, tmp, FPCR_QC);
            /*
             * The 4 vfp.qc[] fields need only be "zero" vs "non-zero";
             * here writing the same value into all elements is simplest.
             */
            tcg_gen_gvec_dup_i32(MO_32, offsetof(CPUARMState, vfp.qc),
                                 16, 16, qc);
        }
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        fpscr = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(fpscr, fpscr, ~FPCR_NZCV_MASK);
        tcg_gen_or_i32(fpscr, fpscr, tmp);
        store_cpu_field(fpscr, vfp.xregs[ARM_VFP_FPSCR]);
        tcg_temp_free_i32(tmp);
        break;
    }
    case ARM_VFP_FPCXT_NS:
        lab_end = gen_new_label();
        /* fpInactive case: write is a NOP, so branch to end */
        gen_branch_fpInactive(s, TCG_COND_NE, lab_end);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS writes
         * behave the same as FPCXT_S writes.
         */
        if (s->fp_excp_el) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        gen_preserve_fp_state(s);
        /* fall through */
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 sfpa, control;
        /*
         * Set FPSCR and CONTROL.SFPA from value; the new FPSCR takes
         * bits [27:0] from value and zeroes bits [31:28].
         */
        tmp = loadfn(s, opaque);
        sfpa = tcg_temp_new_i32();
        tcg_gen_shri_i32(sfpa, tmp, 31);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_deposit_i32(control, control, sfpa,
                            R_V7M_CONTROL_SFPA_SHIFT, 1);
        store_cpu_field(control, v7m.control[M_REG_S]);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        gen_helper_vfp_set_fpscr(cpu_env, tmp);
        tcg_temp_free_i32(tmp);
        tcg_temp_free_i32(sfpa);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = loadfn(s, opaque);
        store_cpu_field(tmp, v7m.vpr);
        break;
    case ARM_VFP_P0:
    {
        TCGv_i32 vpr;
        tmp = loadfn(s, opaque);
        vpr = load_cpu_field(v7m.vpr);
        tcg_gen_deposit_i32(vpr, vpr, tmp,
                            R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        store_cpu_field(vpr, v7m.vpr);
        tcg_temp_free_i32(tmp);
        break;
    }
    default:
        g_assert_not_reached();
    }
    if (lab_end) {
        gen_set_label(lab_end);
    }
    return true;
}

static bool gen_M_fp_sysreg_read(DisasContext *s, int regno,
                                 fp_sysreg_storefn *storefn,
                                 void *opaque)
{
    /* Do a read from an M-profile floating point system register */
    TCGv_i32 tmp;
    TCGLabel *lab_end = NULL;
    bool lookup_tb = false;

    switch (fp_sysreg_checks(s, regno)) {
    case FPSysRegCheckFailed:
        return false;
    case FPSysRegCheckDone:
        return true;
    case FPSysRegCheckContinue:
        break;
    }

    if (regno == ARM_VFP_FPSCR_NZCVQC && !dc_isar_feature(aa32_mve, s)) {
        /* QC is RES0 without MVE, so NZCVQC simplifies to NZCV */
        regno = QEMU_VFP_FPSCR_NZCV;
    }

    switch (regno) {
    case ARM_VFP_FPSCR:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPSCR_NZCVQC:
        tmp = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCVQC_MASK);
        storefn(s, opaque, tmp);
        break;
    case QEMU_VFP_FPSCR_NZCV:
        /*
         * Read just NZCV; this is a special case to avoid the
         * helper call for the "VMRS to CPSR.NZCV" insn.
         */
        tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
        tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_FPCXT_S:
    {
        TCGv_i32 control, sfpa, fpscr;
        /* Bits [27:0] from FPSCR, bit [31] from CONTROL.SFPA */
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(tmp, cpu_env);
        tcg_gen_andi_i32(tmp, tmp, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(sfpa);
        /*
         * Store result before updating FPSCR etc, in case
         * it is a memory write which causes an exception.
         */
        storefn(s, opaque, tmp);
        /*
         * Now we must reset FPSCR from FPDSCR_NS, and clear
         * CONTROL.SFPA; so we'll end the TB here.
         */
        tcg_gen_andi_i32(control, control, ~R_V7M_CONTROL_SFPA_MASK);
        store_cpu_field(control, v7m.control[M_REG_S]);
        fpscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(fpscr);
        lookup_tb = true;
        break;
    }
    case ARM_VFP_FPCXT_NS:
    {
        TCGv_i32 control, sfpa, fpscr, fpdscr, zero;
        TCGLabel *lab_active = gen_new_label();

        lookup_tb = true;

        gen_branch_fpInactive(s, TCG_COND_EQ, lab_active);
        /* fpInactive case: reads as FPDSCR_NS */
        TCGv_i32 tmp = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        storefn(s, opaque, tmp);
        lab_end = gen_new_label();
        tcg_gen_br(lab_end);

        gen_set_label(lab_active);
        /*
         * !fpInactive: if FPU disabled, take NOCP exception;
         * otherwise PreserveFPState(), and then FPCXT_NS
         * reads the same as FPCXT_S.
         */
        if (s->fp_excp_el) {
            gen_exception_insn(s, s->pc_curr, EXCP_NOCP,
                               syn_uncategorized(), s->fp_excp_el);
            /*
             * This was only a conditional exception, so override
             * gen_exception_insn()'s default to DISAS_NORETURN
             */
            s->base.is_jmp = DISAS_NEXT;
            break;
        }
        gen_preserve_fp_state(s);
        tmp = tcg_temp_new_i32();
        sfpa = tcg_temp_new_i32();
        fpscr = tcg_temp_new_i32();
        gen_helper_vfp_get_fpscr(fpscr, cpu_env);
        tcg_gen_andi_i32(tmp, fpscr, ~FPCR_NZCV_MASK);
        control = load_cpu_field(v7m.control[M_REG_S]);
        tcg_gen_andi_i32(sfpa, control, R_V7M_CONTROL_SFPA_MASK);
        tcg_gen_shli_i32(sfpa, sfpa, 31 - R_V7M_CONTROL_SFPA_SHIFT);
        tcg_gen_or_i32(tmp, tmp, sfpa);
        tcg_temp_free_i32(control);
        /* Store result before updating FPSCR, in case it faults */
        storefn(s, opaque, tmp);
        /* If SFPA is zero then set FPSCR from FPDSCR_NS */
        fpdscr = load_cpu_field(v7m.fpdscr[M_REG_NS]);
        zero = tcg_const_i32(0);
        tcg_gen_movcond_i32(TCG_COND_EQ, fpscr, sfpa, zero, fpdscr, fpscr);
        gen_helper_vfp_set_fpscr(cpu_env, fpscr);
        tcg_temp_free_i32(zero);
        tcg_temp_free_i32(sfpa);
        tcg_temp_free_i32(fpdscr);
        tcg_temp_free_i32(fpscr);
        break;
    }
    case ARM_VFP_VPR:
        /* Behaves as NOP if not privileged */
        if (IS_USER(s)) {
            break;
        }
        tmp = load_cpu_field(v7m.vpr);
        storefn(s, opaque, tmp);
        break;
    case ARM_VFP_P0:
        tmp = load_cpu_field(v7m.vpr);
        tcg_gen_extract_i32(tmp, tmp, R_V7M_VPR_P0_SHIFT, R_V7M_VPR_P0_LENGTH);
        storefn(s, opaque, tmp);
        break;
    default:
        g_assert_not_reached();
    }

    if (lab_end) {
        gen_set_label(lab_end);
    }
    if (lookup_tb) {
        gen_lookup_tb(s);
    }
    return true;
}

static void fp_sysreg_to_gpr(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_VMSR_VMRS *a = opaque;

    if (a->rt == 15) {
        /* Set the 4 flag bits in the CPSR */
        gen_set_nzcv(value);
        tcg_temp_free_i32(value);
    } else {
        store_reg(s, a->rt, value);
    }
}

static TCGv_i32 gpr_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_VMSR_VMRS *a = opaque;

    return load_reg(s, a->rt);
}

static bool gen_M_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    /*
     * Accesses to R15 are UNPREDICTABLE; we choose to undef.
     * FPSCR -> r15 is a special case which writes to the PSR flags;
     * set a->reg to a special value to tell gen_M_fp_sysreg_read()
     * we only care about the top 4 bits of FPSCR there.
     */
    if (a->rt == 15) {
        if (a->l && a->reg == ARM_VFP_FPSCR) {
            a->reg = QEMU_VFP_FPSCR_NZCV;
        } else {
            return false;
        }
    }

    if (a->l) {
        /* VMRS, move FP system register to gp register */
        return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_gpr, a);
    } else {
        /* VMSR, move gp register to FP system register */
        return gen_M_fp_sysreg_write(s, a->reg, gpr_to_fp_sysreg, a);
    }
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        return gen_M_VMSR_VMRS(s, a);
    }

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || dc_isar_feature(aa32_fpsp_v3, s)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
        case ARM_VFP_FPSID:
            if (s->current_el == 1) {
                TCGv_i32 tcg_reg, tcg_rt;

                gen_set_condexec(s);
                gen_set_pc_im(s, s->pc_curr);
                tcg_reg = tcg_const_i32(a->reg);
                tcg_rt = tcg_const_i32(a->rt);
                gen_helper_check_hcr_el2_trap(cpu_env, tcg_rt, tcg_reg);
                tcg_temp_free_i32(tcg_reg);
                tcg_temp_free_i32(tcg_rt);
            }
            /* fall through */
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, FPCR_NZCV_MASK);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void fp_sysreg_to_memory(DisasContext *s, void *opaque, TCGv_i32 value)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_st_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);
    tcg_temp_free_i32(value);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
}

static TCGv_i32 memory_to_fp_sysreg(DisasContext *s, void *opaque)
{
    arg_vldr_sysreg *a = opaque;
    uint32_t offset = a->imm;
    TCGv_i32 addr;
    TCGv_i32 value = tcg_temp_new_i32();

    if (!a->a) {
        offset = -offset;
    }

    addr = load_reg(s, a->rn);
    if (a->p) {
        tcg_gen_addi_i32(addr, addr, offset);
    }

    if (s->v8m_stackcheck && a->rn == 13 && a->w) {
        gen_helper_v8m_stackcheck(cpu_env, addr);
    }

    gen_aa32_ld_i32(s, value, addr, get_mem_index(s),
                    MO_UL | MO_ALIGN | s->be_data);

    if (a->w) {
        /* writeback */
        if (!a->p) {
            tcg_gen_addi_i32(addr, addr, offset);
        }
        store_reg(s, a->rn, addr);
    } else {
        tcg_temp_free_i32(addr);
    }
    return value;
}

static bool trans_VLDR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_write(s, a->reg, memory_to_fp_sysreg, a);
}

static bool trans_VSTR_sysreg(DisasContext *s, arg_vldr_sysreg *a)
{
    if (!arm_dc_feature(s, ARM_FEATURE_V8_1M)) {
        return false;
    }
    if (a->rn == 15) {
        return false;
    }
    return gen_M_fp_sysreg_read(s, a->reg, fp_sysreg_to_memory, a);
}

static bool trans_VMOV_half(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (a->rt == 15) {
        /* UNPREDICTABLE; we choose to UNDEF */
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        store_reg(s, a->rt, tmp);
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        tcg_gen_andi_i32(tmp, tmp, 0xffff);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_sp(DisasContext *s, arg_VMOV_64_sp *a)
{
    TCGv_i32 tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /*
     * VMOV between two general-purpose registers and two single precision
     * floating point registers
     */
    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VMOV_64_dp(DisasContext *s, arg_VMOV_64_dp *a)
{
    TCGv_i32 tmp;

    /*
     * VMOV between two general-purpose registers and one double precision
     * floating point register. Note that this does not require support
     * for double precision arithmetic.
     */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->op) {
        /* fpreg to gpreg */
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2);
        store_reg(s, a->rt, tmp);
        tmp = tcg_temp_new_i32();
        vfp_load_reg32(tmp, a->vm * 2 + 1);
        store_reg(s, a->rt2, tmp);
    } else {
        /* gpreg to fpreg */
        tmp = load_reg(s, a->rt);
        vfp_store_reg32(tmp, a->vm * 2);
        tcg_temp_free_i32(tmp);
        tmp = load_reg(s, a->rt2);
        vfp_store_reg32(tmp, a->vm * 2 + 1);
        tcg_temp_free_i32(tmp);
    }

    return true;
}

static bool trans_VLDR_VSTR_hp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* imm8 field is offset/2 for fp16, unlike fp32 and fp64 */
    offset = a->imm << 1;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UW | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
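
/*
 * Note on immediate scaling: the fp16 form above multiplies imm8 by 2,
 * whereas the fp32 and fp64 forms below multiply it by 4, matching the
 * "imm8 field is offset/2 for fp16" comment in trans_VLDR_VSTR_hp().
 */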

static bool trans_VLDR_VSTR_sp(DisasContext *s, arg_VLDR_VSTR_sp *a)
{
    uint32_t offset;
    TCGv_i32 addr, tmp;

    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i32();
    if (a->l) {
        gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
        vfp_store_reg32(tmp, a->vd);
    } else {
        vfp_load_reg32(tmp, a->vd);
        gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
    }
    tcg_temp_free_i32(tmp);
    tcg_temp_free_i32(addr);

    return true;
}

static bool trans_VLDR_VSTR_dp(DisasContext *s, arg_VLDR_VSTR_dp *a)
{
    uint32_t offset;
    TCGv_i32 addr;
    TCGv_i64 tmp;

    /* Note that this does not require support for double arithmetic. */
    if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    offset = a->imm << 2;
    if (!a->u) {
        offset = -offset;
    }

    /* For thumb, use of PC is UNPREDICTABLE. */
    addr = add_reg_for_lit(s, a->rn, offset);
    tmp = tcg_temp_new_i64();
    if (a->l) {
        gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
        vfp_store_reg64(tmp, a->vd);
    } else {
        vfp_load_reg64(tmp, a->vd);
        gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
    }
    tcg_temp_free_i64(tmp);
    tcg_temp_free_i32(addr);

    return true;
}
|
|
|
|
static bool trans_VLDM_VSTM_sp(DisasContext *s, arg_VLDM_VSTM_sp *a)
|
|
|
|
{
|
|
|
|
uint32_t offset;
|
2019-06-11 15:39:46 +00:00
|
|
|
TCGv_i32 addr, tmp;
|
2019-06-11 15:39:45 +00:00
|
|
|
int i, n;
|
|
|
|
|
2021-05-20 15:28:33 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
|
2020-02-24 22:22:23 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:45 +00:00
|
|
|
n = a->imm;
|
|
|
|
|
|
|
|
if (n == 0 || (a->vd + n) > 32) {
|
|
|
|
/*
|
|
|
|
* UNPREDICTABLE cases for bad immediates: we choose to
|
|
|
|
* UNDEF to avoid generating huge numbers of TCG ops
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rn == 15 && a->w) {
|
|
|
|
/* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-06-14 15:09:14 +00:00
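/*
 * Editor's note (summary comment, not from the original source): VLDM/VSTM
 * is one of the load/store multiple insns that use the M-profile ICI bits.
 * We take the architecture's IMPDEF option of always restarting the insn
 * from the beginning regardless of ICI state, so "handling" ECI/ICI here
 * just means recording that we did so, and clearing the state once the
 * insn completes (see clear_eci_state() below).
 */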
|
|
|
s->eci_handled = true;
|
|
|
|
|
2019-06-11 15:39:45 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-15 08:46:43 +00:00
|
|
|
/* For thumb, use of PC is UNPREDICTABLE. */
|
|
|
|
addr = add_reg_for_lit(s, a->rn, 0);
|
2019-06-11 15:39:45 +00:00
|
|
|
if (a->p) {
|
|
|
|
/* pre-decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
|
|
|
|
/*
|
|
|
|
* Here 'addr' is the lowest address we will store to,
|
|
|
|
* and is either the old SP (if post-increment) or
|
|
|
|
* the new SP (if pre-decrement). For post-increment
|
|
|
|
* where the old value is below the limit and the new
|
|
|
|
* value is above, it is UNKNOWN whether the limit check
|
|
|
|
* triggers; we choose to trigger.
|
|
|
|
*/
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
offset = 4;
|
2019-06-11 15:39:46 +00:00
|
|
|
tmp = tcg_temp_new_i32();
|
2019-06-11 15:39:45 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
if (a->l) {
|
|
|
|
/* load */
|
2021-04-19 20:22:46 +00:00
|
|
|
gen_aa32_ld_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd + i);
|
2019-06-11 15:39:45 +00:00
|
|
|
} else {
|
|
|
|
/* store */
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vd + i);
|
2021-04-19 20:22:46 +00:00
|
|
|
gen_aa32_st_i32(s, tmp, addr, get_mem_index(s), MO_UL | MO_ALIGN);
|
2019-06-11 15:39:45 +00:00
|
|
|
}
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
}
|
2019-06-11 15:39:46 +00:00
|
|
|
tcg_temp_free_i32(tmp);
|
2019-06-11 15:39:45 +00:00
|
|
|
if (a->w) {
|
|
|
|
/* writeback */
|
|
|
|
if (a->p) {
|
|
|
|
offset = -offset * n;
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
} else {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
|
|
|
|
2021-06-14 15:09:14 +00:00
|
|
|
clear_eci_state(s);
|
2019-06-11 15:39:45 +00:00
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VLDM_VSTM_dp(DisasContext *s, arg_VLDM_VSTM_dp *a)
|
|
|
|
{
|
|
|
|
uint32_t offset;
|
|
|
|
TCGv_i32 addr;
|
2019-06-11 15:39:46 +00:00
|
|
|
TCGv_i64 tmp;
|
2019-06-11 15:39:45 +00:00
|
|
|
int i, n;
|
|
|
|
|
2020-02-24 22:22:23 +00:00
|
|
|
/* Note that this does not require support for double arithmetic. */
|
2021-05-20 15:28:33 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v2, s) && !dc_isar_feature(aa32_mve, s)) {
|
2020-02-24 22:22:23 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:45 +00:00
|
|
|
n = a->imm >> 1;
|
|
|
|
|
|
|
|
if (n == 0 || (a->vd + n) > 32 || n > 16) {
|
|
|
|
/*
|
|
|
|
* UNPREDICTABLE cases for bad immediates: we choose to
|
|
|
|
* UNDEF to avoid generating huge numbers of TCG ops
|
|
|
|
*/
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (a->rn == 15 && a->w) {
|
|
|
|
/* writeback to PC is UNPREDICTABLE, we choose to UNDEF */
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist */
|
2020-02-14 18:15:30 +00:00
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd + n) > 16) {
|
2019-06-11 15:39:45 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2021-06-14 15:09:14 +00:00
|
|
|
s->eci_handled = true;
|
|
|
|
|
2019-06-11 15:39:45 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-08-15 08:46:43 +00:00
|
|
|
/* For thumb, use of PC is UNPREDICTABLE. */
|
|
|
|
addr = add_reg_for_lit(s, a->rn, 0);
|
2019-06-11 15:39:45 +00:00
|
|
|
if (a->p) {
|
|
|
|
/* pre-decrement */
|
|
|
|
tcg_gen_addi_i32(addr, addr, -(a->imm << 2));
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->v8m_stackcheck && a->rn == 13 && a->w) {
|
|
|
|
/*
|
|
|
|
* Here 'addr' is the lowest address we will store to,
|
|
|
|
* and is either the old SP (if post-increment) or
|
|
|
|
* the new SP (if pre-decrement). For post-increment
|
|
|
|
* where the old value is below the limit and the new
|
|
|
|
* value is above, it is UNKNOWN whether the limit check
|
|
|
|
* triggers; we choose to trigger.
|
|
|
|
*/
|
|
|
|
gen_helper_v8m_stackcheck(cpu_env, addr);
|
|
|
|
}
|
|
|
|
|
|
|
|
offset = 8;
|
2019-06-11 15:39:46 +00:00
|
|
|
tmp = tcg_temp_new_i64();
|
2019-06-11 15:39:45 +00:00
|
|
|
for (i = 0; i < n; i++) {
|
|
|
|
if (a->l) {
|
|
|
|
/* load */
|
2021-04-19 20:22:46 +00:00
|
|
|
gen_aa32_ld_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(tmp, a->vd + i);
|
2019-06-11 15:39:45 +00:00
|
|
|
} else {
|
|
|
|
/* store */
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(tmp, a->vd + i);
|
2021-04-19 20:22:46 +00:00
|
|
|
gen_aa32_st_i64(s, tmp, addr, get_mem_index(s), MO_Q | MO_ALIGN_4);
|
2019-06-11 15:39:45 +00:00
|
|
|
}
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
}
|
2019-06-11 15:39:46 +00:00
|
|
|
tcg_temp_free_i64(tmp);
|
2019-06-11 15:39:45 +00:00
|
|
|
if (a->w) {
|
|
|
|
/* writeback */
|
|
|
|
if (a->p) {
|
|
|
|
offset = -offset * n;
|
|
|
|
} else if (a->imm & 1) {
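/*
 * Explanatory comment (added, hedged): an odd imm here indicates the
 * FLDMX/FSTMX form, whose writeback is believed to cover one extra
 * word beyond the registers actually transferred, hence the extra 4.
 */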
|
|
|
|
offset = 4;
|
|
|
|
} else {
|
|
|
|
offset = 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (offset != 0) {
|
|
|
|
tcg_gen_addi_i32(addr, addr, offset);
|
|
|
|
}
|
|
|
|
store_reg(s, a->rn, addr);
|
|
|
|
} else {
|
|
|
|
tcg_temp_free_i32(addr);
|
|
|
|
}
|
|
|
|
|
2021-06-14 15:09:14 +00:00
|
|
|
clear_eci_state(s);
|
2019-06-11 15:39:45 +00:00
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:46 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* Types for callbacks for do_vfp_3op_sp() and do_vfp_3op_dp().
|
|
|
|
* The callback should emit code to write a value to vd. If
|
|
|
|
* do_vfp_3op_{sp,dp}() was passed reads_vd then the TCGv vd
|
|
|
|
* will contain the old value of the relevant VFP register;
|
|
|
|
* otherwise it must be written to only.
|
|
|
|
*/
|
|
|
|
typedef void VFPGen3OpSPFn(TCGv_i32 vd,
|
|
|
|
TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst);
|
|
|
|
typedef void VFPGen3OpDPFn(TCGv_i64 vd,
|
|
|
|
TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst);
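/*
 * Illustrative sketch (not from the original source): a single-precision
 * 3-op callback simply forwards to a helper or TCG op with a matching
 * signature, for example (assuming such a helper exists):
 *
 *     static void gen_VADD_sp_example(TCGv_i32 vd, TCGv_i32 vn,
 *                                     TCGv_i32 vm, TCGv_ptr fpst)
 *     {
 *         gen_helper_vfp_adds(vd, vn, vm, fpst);
 *     }
 */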
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
/*
|
|
|
|
* Types for callbacks for do_vfp_2op_sp() and do_vfp_2op_dp().
|
|
|
|
* The callback should emit code to write a value to vd (which
|
|
|
|
* should be written to only).
|
|
|
|
*/
|
|
|
|
typedef void VFPGen2OpSPFn(TCGv_i32 vd, TCGv_i32 vm);
|
|
|
|
typedef void VFPGen2OpDPFn(TCGv_i64 vd, TCGv_i64 vm);
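/*
 * Illustrative sketch (not from the original source): a 2-op callback
 * need not touch float status at all; for instance a negation could be
 * emitted as a pure sign-bit flip:
 *
 *     static void gen_VNEG_sp_example(TCGv_i32 vd, TCGv_i32 vm)
 *     {
 *         tcg_gen_xori_i32(vd, vm, 1u << 31);
 *     }
 */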
|
|
|
|
|
2019-06-11 15:39:53 +00:00
|
|
|
/*
|
|
|
|
* Return true if the specified S reg is in a scalar bank
|
|
|
|
* (ie if it is s0..s7)
|
|
|
|
*/
|
|
|
|
static inline bool vfp_sreg_is_scalar(int reg)
|
|
|
|
{
|
|
|
|
return (reg & 0x18) == 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if the specified D reg is in a scalar bank
|
|
|
|
* (ie if it is d0..d3 or d16..d19)
|
|
|
|
*/
|
|
|
|
static inline bool vfp_dreg_is_scalar(int reg)
|
|
|
|
{
|
|
|
|
return (reg & 0xc) == 0;
|
|
|
|
}
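/*
 * Examples (illustrative): s5 (5 & 0x18 == 0) and d18 (18 & 0xc == 0)
 * are in scalar banks, whereas s9 (9 & 0x18 == 8) and d8 (8 & 0xc == 8)
 * are not.
 */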
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Advance the S reg number forwards by delta within its bank
|
|
|
|
* (ie increment the low 3 bits but leave the rest the same)
|
|
|
|
*/
|
|
|
|
static inline int vfp_advance_sreg(int reg, int delta)
|
|
|
|
{
|
|
|
|
return ((reg + delta) & 0x7) | (reg & ~0x7);
|
|
|
|
}
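/*
 * Example (illustrative): with delta 2, vfp_advance_sreg(14, 2) is
 * ((16 & 0x7) | (14 & ~0x7)) == 8, i.e. s14 wraps around to s8 and
 * stays within the s8..s15 bank.
 */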
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Advance the D reg number forwards by delta within its bank
|
|
|
|
* (ie increment the low 2 bits but leave the rest the same)
|
|
|
|
*/
|
|
|
|
static inline int vfp_advance_dreg(int reg, int delta)
|
|
|
|
{
|
|
|
|
return ((reg + delta) & 0x3) | (reg & ~0x3);
|
|
|
|
}
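/*
 * Example (illustrative): vfp_advance_dreg(6, 2) is
 * ((8 & 0x3) | (6 & ~0x3)) == 4, i.e. d6 wraps around to d4 and stays
 * within the d4..d7 bank (the case the old bank_mask expression got wrong).
 */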
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
/*
|
|
|
|
* Perform a 3-operand VFP data processing instruction. fn is the
|
|
|
|
* callback to do the actual operation; this function deals with the
|
|
|
|
* code to handle looping around for VFP vector processing.
|
|
|
|
*/
|
|
|
|
static bool do_vfp_3op_sp(DisasContext *s, VFPGen3OpSPFn *fn,
|
|
|
|
int vd, int vn, int vm, bool reads_vd)
|
|
|
|
{
|
|
|
|
uint32_t delta_m = 0;
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i32 f0, f1, fd;
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
|
2020-02-24 22:22:23 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_sreg_is_scalar(vd)) {
|
2019-06-11 15:39:46 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = s->vec_stride + 1;
|
|
|
|
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_sreg_is_scalar(vm)) {
|
2019-06-11 15:39:46 +00:00
|
|
|
/* mixed scalar/vector */
|
|
|
|
delta_m = 0;
|
|
|
|
} else {
|
|
|
|
/* vector */
|
|
|
|
delta_m = delta_d;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i32();
|
|
|
|
f1 = tcg_temp_new_i32();
|
|
|
|
fd = tcg_temp_new_i32();
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vn);
|
|
|
|
vfp_load_reg32(f1, vm);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
if (reads_vd) {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(fd, vd);
|
2019-06-11 15:39:46 +00:00
|
|
|
}
|
|
|
|
fn(fd, f0, f1, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, vd);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_sreg(vd, delta_d);
|
|
|
|
vn = vfp_advance_sreg(vn, delta_d);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vn);
|
2019-06-11 15:39:46 +00:00
|
|
|
if (delta_m) {
|
2019-06-11 15:39:53 +00:00
|
|
|
vm = vfp_advance_sreg(vm, delta_m);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f1, vm);
|
2019-06-11 15:39:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i32(f0);
|
|
|
|
tcg_temp_free_i32(f1);
|
|
|
|
tcg_temp_free_i32(fd);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
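/*
 * Usage sketch (illustrative, not from the original source): a decodetree
 * trans function for a 3-operand single-precision insn typically wraps
 * this helper, e.g.
 *
 *     static bool trans_VMUL_sp_example(DisasContext *s, arg_VMUL_sp *a)
 *     {
 *         return do_vfp_3op_sp(s, gen_helper_vfp_muls,
 *                              a->vd, a->vn, a->vm, false);
 *     }
 *
 * Insns that accumulate into vd (VMLA, VMLS, ...) pass true for reads_vd
 * so the old value of vd is loaded before the callback runs.
 */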
|
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool do_vfp_3op_hp(DisasContext *s, VFPGen3OpSPFn *fn,
|
|
|
|
int vd, int vn, int vm, bool reads_vd)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Do a half-precision operation. Functionally this is
|
|
|
|
* the same as do_vfp_3op_sp(), except:
|
|
|
|
* - it uses the FPST_FPCR_F16
|
|
|
|
* - it doesn't need the VFP vector handling (fp16 is a
|
|
|
|
* v8 feature, and in v8 VFP vectors don't exist)
|
|
|
|
* - it does the aa32_fp16_arith feature test
|
|
|
|
*/
|
|
|
|
TCGv_i32 f0, f1, fd;
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i32();
|
|
|
|
f1 = tcg_temp_new_i32();
|
|
|
|
fd = tcg_temp_new_i32();
|
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vn);
|
|
|
|
vfp_load_reg32(f1, vm);
|
2020-08-28 18:33:12 +00:00
|
|
|
|
|
|
|
if (reads_vd) {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(fd, vd);
|
2020-08-28 18:33:12 +00:00
|
|
|
}
|
|
|
|
fn(fd, f0, f1, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, vd);
|
2020-08-28 18:33:12 +00:00
|
|
|
|
|
|
|
tcg_temp_free_i32(f0);
|
|
|
|
tcg_temp_free_i32(f1);
|
|
|
|
tcg_temp_free_i32(fd);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
static bool do_vfp_3op_dp(DisasContext *s, VFPGen3OpDPFn *fn,
|
|
|
|
int vd, int vn, int vm, bool reads_vd)
|
|
|
|
{
|
|
|
|
uint32_t delta_m = 0;
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i64 f0, f1, fd;
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:46 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vn | vm) & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
target/arm: Fix short-vector increment behaviour
For VFP short vectors, the VFP registers are divided into a
series of banks: for single-precision these are s0-s7, s8-s15,
s16-s23 and s24-s31; for double-precision they are d0-d3,
d4-d7, ... d28-d31. Some banks are "scalar" meaning that
use of a register within them triggers a pure-scalar or
mixed vector-scalar operation rather than a full vector
operation. The scalar banks are s0-s7, d0-d3 and d16-d19.
When using a bank as part of a vector operation, we
iterate through it, increasing the register number by
the specified stride each time, and wrapping around to
the beginning of the bank.
Unfortunately our calculation of the "increment" part of this
was incorrect:
vd = ((vd + delta_d) & (bank_mask - 1)) | (vd & bank_mask)
will only do the intended thing if bank_mask has exactly
one set high bit. For instance for doubles (bank_mask = 0xc),
if we start with vd = 6 and delta_d = 2 then vd is updated
to 12 rather than the intended 4.
This only causes problems in the unlikely case that the
starting register is not the first in its bank: if the
register number doesn't have to wrap around then the
expression happens to give the right answer.
Fix this bug by abstracting out the "check whether register
is in a scalar bank" and "advance register within bank"
operations to utility functions which use the right
bit masking operations.
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_dreg_is_scalar(vd)) {
|
2019-06-11 15:39:46 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = (s->vec_stride >> 1) + 1;
|
|
|
|
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_dreg_is_scalar(vm)) {
|
2019-06-11 15:39:46 +00:00
|
|
|
/* mixed scalar/vector */
|
|
|
|
delta_m = 0;
|
|
|
|
} else {
|
|
|
|
/* vector */
|
|
|
|
delta_m = delta_d;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i64();
|
|
|
|
f1 = tcg_temp_new_i64();
|
|
|
|
fd = tcg_temp_new_i64();
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(f0, vn);
|
|
|
|
vfp_load_reg64(f1, vm);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
if (reads_vd) {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(fd, vd);
|
2019-06-11 15:39:46 +00:00
|
|
|
}
|
|
|
|
fn(fd, f0, f1, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(fd, vd);
|
2019-06-11 15:39:46 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_dreg(vd, delta_d);
|
|
|
|
vn = vfp_advance_dreg(vn, delta_d);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(f0, vn);
|
2019-06-11 15:39:46 +00:00
|
|
|
if (delta_m) {
|
2019-06-11 15:39:53 +00:00
|
|
|
vm = vfp_advance_dreg(vm, delta_m);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(f1, vm);
|
2019-06-11 15:39:46 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i64(f0);
|
|
|
|
tcg_temp_free_i64(f1);
|
|
|
|
tcg_temp_free_i64(fd);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
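/*
 * Likewise, the double-precision bank helpers are assumed to follow the
 * same pattern (sketch only; a bank is 4 consecutive D registers and the
 * scalar banks are d0-d3 and d16-d19):
 *
 *   static inline bool vfp_dreg_is_scalar(int reg)
 *   {
 *       return (reg & 0xc) == 0;    // d0-d3 and d16-d19: bits [3:2] clear
 *   }
 *
 *   static inline int vfp_advance_dreg(int reg, int delta)
 *   {
 *       // wrap the low 2 bits within the bank, keep the remaining bits
 *       return ((reg + delta) & 0x3) | (reg & ~0x3);
 *   }
 *
 * Worked example from the commit message: vd = 6 (bank d4-d7), delta_d = 2.
 * The old expression gave ((6 + 2) & (0xc - 1)) | (6 & 0xc) = 8 | 4 = 12,
 * escaping the bank; the masked form above gives (8 & 0x3) | (6 & ~0x3)
 * = 0 | 4 = 4, staying within d4-d7 as intended.
 */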
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
static bool do_vfp_2op_sp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
|
|
|
|
{
|
|
|
|
uint32_t delta_m = 0;
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i32 f0, fd;
|
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
/* Note that the caller must check the aa32_fpsp_v2 feature. */
|
2020-02-24 22:22:23 +00:00
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_sreg_is_scalar(vd)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = s->vec_stride + 1;
|
|
|
|
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_sreg_is_scalar(vm)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* mixed scalar/vector */
|
|
|
|
delta_m = 0;
|
|
|
|
} else {
|
|
|
|
/* vector */
|
|
|
|
delta_m = delta_d;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i32();
|
|
|
|
fd = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vm);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
fn(fd, f0);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (delta_m == 0) {
|
|
|
|
/* single source one-many */
|
|
|
|
while (veclen--) {
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_sreg(vd, delta_d);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_sreg(vd, delta_d);
|
|
|
|
vm = vfp_advance_sreg(vm, delta_m);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vm);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i32(f0);
|
|
|
|
tcg_temp_free_i32(fd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
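/*
 * Usage sketch (hypothetical names, for illustration only): a 2-operand
 * single-precision insn is typically wired up by pairing a small generator
 * with a trans_ function that delegates to do_vfp_2op_sp(). Per the note
 * above, the caller is expected to perform the aa32_fpsp_v2 check:
 *
 *   static void gen_VFOO_sp(TCGv_i32 vd, TCGv_i32 vm)
 *   {
 *       // e.g. a sign-bit flip, which needs no fp_status pointer
 *       tcg_gen_xori_i32(vd, vm, 0x80000000);
 *   }
 *
 *   static bool trans_VFOO_sp(DisasContext *s, arg_VFOO_sp *a)
 *   {
 *       if (!dc_isar_feature(aa32_fpsp_v2, s)) {
 *           return false;
 *       }
 *       return do_vfp_2op_sp(s, gen_VFOO_sp, a->vd, a->vm);
 *   }
 */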
|
|
|
|
|
2020-08-28 18:33:17 +00:00
|
|
|
static bool do_vfp_2op_hp(DisasContext *s, VFPGen2OpSPFn *fn, int vd, int vm)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Do a half-precision operation. Functionally this is
|
|
|
|
* the same as do_vfp_2op_sp(), except:
|
|
|
|
* - it doesn't need the VFP vector handling (fp16 is a
|
|
|
|
* v8 feature, and in v8 VFP vectors don't exist)
|
|
|
|
* - it does the aa32_fp16_arith feature test
|
|
|
|
*/
|
|
|
|
TCGv_i32 f0;
|
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
/* Note that the caller must check the aa32_fp16_arith feature */
|
|
|
|
|
2020-08-28 18:33:17 +00:00
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(f0, vm);
|
2020-08-28 18:33:17 +00:00
|
|
|
fn(f0, f0);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(f0, vd);
|
2020-08-28 18:33:17 +00:00
|
|
|
tcg_temp_free_i32(f0);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
static bool do_vfp_2op_dp(DisasContext *s, VFPGen2OpDPFn *fn, int vd, int vm)
|
|
|
|
{
|
|
|
|
uint32_t delta_m = 0;
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i64 f0, fd;
|
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
/* Note that the caller must check the aa32_fpdp_v2 feature. */
|
2019-06-11 15:39:49 +00:00
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((vd | vm) & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_dreg_is_scalar(vd)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = (s->vec_stride >> 1) + 1;
|
|
|
|
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_dreg_is_scalar(vm)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* mixed scalar/vector */
|
|
|
|
delta_m = 0;
|
|
|
|
} else {
|
|
|
|
/* vector */
|
|
|
|
delta_m = delta_d;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
f0 = tcg_temp_new_i64();
|
|
|
|
fd = tcg_temp_new_i64();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(f0, vm);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
for (;;) {
|
|
|
|
fn(fd, f0);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (delta_m == 0) {
|
|
|
|
/* single source one-many */
|
|
|
|
while (veclen--) {
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_dreg(vd, delta_d);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_dreg(vd, delta_d);
|
|
|
|
vm = vfp_advance_dreg(vm, delta_m);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(f0, vm);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i64(f0);
|
|
|
|
tcg_temp_free_i64(fd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-28 18:33:13 +00:00
|
|
|
static void gen_VMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* Note that order of inputs to the add matters for NaNs */
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_mulh(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_addh(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMLA_hp(DisasContext *s, arg_VMLA_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_VMLA_hp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
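Purely as an illustration of the temporary-clobbering hazard described in the note above (plain C rather than TCG; the names and values are made up): if the intermediate product is written into the location that still holds the scalar second operand, every iteration after the first multiplies by a stale product instead of the scalar.

/* demo_mixed_scalar_vector.c -- illustrative sketch, not translator code */
#include <stdio.h>

/* Buggy: the product overwrites the slot holding the scalar operand. */
static void mla_buggy(double acc[2], const double n[2], double *m)
{
    for (int i = 0; i < 2; i++) {
        *m = n[i] * *m;          /* scalar operand is gone after iteration 0 */
        acc[i] += *m;
    }
}

/* Fixed: the product lives in a private temporary, the scalar survives. */
static void mla_fixed(double acc[2], const double n[2], double m)
{
    for (int i = 0; i < 2; i++) {
        double tmp = n[i] * m;
        acc[i] += tmp;
    }
}

int main(void)
{
    double a1[2] = { 0, 0 }, a2[2] = { 0, 0 };
    double n[2] = { 2.0, 3.0 };
    double m = 10.0;

    mla_buggy(a1, n, &m);        /* a1[1] = 3 * 20 = 60, not the intended 30 */
    m = 10.0;
    mla_fixed(a2, n, m);         /* a2 = { 20, 30 } */

    printf("buggy: %g %g\nfixed: %g %g\n", a1[0], a1[1], a2[0], a2[1]);
    return 0;
}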
|
|
|
static void gen_VMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* Note that order of inputs to the add matters for NaNs */
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_muls(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_adds(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMLA_sp(DisasContext *s, arg_VMLA_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_VMLA_sp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* Note that order of inputs to the add matters for NaNs */
|
|
|
|
TCGv_i64 tmp = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_helper_vfp_muld(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_addd(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VMLA_dp(DisasContext *s, arg_VMLA_dp *a)
|
2019-06-11 15:39:46 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_VMLA_dp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
2019-06-11 15:39:46 +00:00
|
|
|
|
2020-08-28 18:33:13 +00:00
|
|
|
static void gen_VMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VMLS: vd = vd + -(vn * vm)
|
|
|
|
* Note that order of inputs to the add matters for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_mulh(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negh(tmp, tmp);
|
|
|
|
gen_helper_vfp_addh(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMLS_hp(DisasContext *s, arg_VMLS_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_VMLS_hp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
static void gen_VMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VMLS: vd = vd + -(vn * vm)
|
|
|
|
* Note that order of inputs to the add matters for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_muls(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negs(tmp, tmp);
|
|
|
|
gen_helper_vfp_adds(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMLS_sp(DisasContext *s, arg_VMLS_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_VMLS_sp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VMLS: vd = vd + -(vn * vm)
|
|
|
|
* Note that order of inputs to the add matters for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i64 tmp = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_helper_vfp_muld(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negd(tmp, tmp);
|
|
|
|
gen_helper_vfp_addd(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VMLS_dp(DisasContext *s, arg_VMLS_dp *a)
|
2019-06-11 15:39:46 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_VMLS_dp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
2019-06-11 15:39:46 +00:00
|
|
|
|
2020-08-28 18:33:13 +00:00
|
|
|
static void gen_VNMLS_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VNMLS: -fd + (fn * fm)
|
|
|
|
* Note that it isn't valid to replace (-A + B) with (B - A) or similar
|
|
|
|
* plausible looking simplifications because this will give wrong results
|
|
|
|
* for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_mulh(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negh(vd, vd);
|
|
|
|
gen_helper_vfp_addh(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMLS_hp(DisasContext *s, arg_VNMLS_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_VNMLS_hp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:46 +00:00
|
|
|
static void gen_VNMLS_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VNMLS: -fd + (fn * fm)
|
|
|
|
* Note that it isn't valid to replace (-A + B) with (B - A) or similar
|
|
|
|
* plausible looking simplifications because this will give wrong results
|
|
|
|
* for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_muls(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negs(vd, vd);
|
|
|
|
gen_helper_vfp_adds(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMLS_sp(DisasContext *s, arg_VNMLS_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_VNMLS_sp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VNMLS_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VNMLS: -fd + (fn * fm)
|
|
|
|
* Note that it isn't valid to replace (-A + B) with (B - A) or similar
|
|
|
|
* plausible looking simplifications because this will give wrong results
|
|
|
|
* for NaNs.
|
|
|
|
*/
|
|
|
|
TCGv_i64 tmp = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_helper_vfp_muld(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negd(vd, vd);
|
|
|
|
gen_helper_vfp_addd(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VNMLS_dp(DisasContext *s, arg_VNMLS_dp *a)
|
2019-06-11 15:39:46 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_VNMLS_dp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
2019-06-11 15:39:47 +00:00
|
|
|
|
2020-08-28 18:33:13 +00:00
|
|
|
static void gen_VNMLA_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMLA: -fd + -(fn * fm) */
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_mulh(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negh(tmp, tmp);
|
|
|
|
gen_helper_vfp_negh(vd, vd);
|
|
|
|
gen_helper_vfp_addh(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMLA_hp(DisasContext *s, arg_VNMLA_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_VNMLA_hp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:47 +00:00
|
|
|
static void gen_VNMLA_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMLA: -fd + -(fn * fm) */
|
|
|
|
TCGv_i32 tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
gen_helper_vfp_muls(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negs(tmp, tmp);
|
|
|
|
gen_helper_vfp_negs(vd, vd);
|
|
|
|
gen_helper_vfp_adds(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMLA_sp(DisasContext *s, arg_VNMLA_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_VNMLA_sp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VNMLA_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMLA: -fd + -(fn * fm) */
|
|
|
|
TCGv_i64 tmp = tcg_temp_new_i64();
|
|
|
|
|
|
|
|
gen_helper_vfp_muld(tmp, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negd(tmp, tmp);
|
|
|
|
gen_helper_vfp_negd(vd, vd);
|
|
|
|
gen_helper_vfp_addd(vd, vd, tmp, fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VNMLA_dp(DisasContext *s, arg_VNMLA_dp *a)
|
2019-06-11 15:39:47 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_VNMLA_dp, a->vd, a->vn, a->vm, true);
|
|
|
|
}
|
2019-06-11 15:39:47 +00:00
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool trans_VMUL_hp(DisasContext *s, arg_VMUL_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_mulh, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:47 +00:00
|
|
|
static bool trans_VMUL_sp(DisasContext *s, arg_VMUL_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_muls, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VMUL_dp(DisasContext *s, arg_VMUL_dp *a)
|
2019-06-11 15:39:47 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_muld, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
2019-06-11 15:39:47 +00:00
|
|
|
|
2020-08-28 18:33:13 +00:00
|
|
|
static void gen_VNMUL_hp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMUL: -(fn * fm) */
|
|
|
|
gen_helper_vfp_mulh(vd, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negh(vd, vd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMUL_hp(DisasContext *s, arg_VNMUL_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_VNMUL_hp, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:47 +00:00
|
|
|
static void gen_VNMUL_sp(TCGv_i32 vd, TCGv_i32 vn, TCGv_i32 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMUL: -(fn * fm) */
|
|
|
|
gen_helper_vfp_muls(vd, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negs(vd, vd);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VNMUL_sp(DisasContext *s, arg_VNMUL_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_VNMUL_sp, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VNMUL_dp(TCGv_i64 vd, TCGv_i64 vn, TCGv_i64 vm, TCGv_ptr fpst)
|
|
|
|
{
|
|
|
|
/* VNMUL: -(fn * fm) */
|
|
|
|
gen_helper_vfp_muld(vd, vn, vm, fpst);
|
|
|
|
gen_helper_vfp_negd(vd, vd);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VNMUL_dp(DisasContext *s, arg_VNMUL_dp *a)
|
2019-06-11 15:39:47 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_VNMUL_dp, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
2019-06-11 15:39:48 +00:00
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool trans_VADD_hp(DisasContext *s, arg_VADD_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_addh, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:48 +00:00
|
|
|
static bool trans_VADD_sp(DisasContext *s, arg_VADD_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_adds, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VADD_dp(DisasContext *s, arg_VADD_dp *a)
|
2019-06-11 15:39:48 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_addd, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
2019-06-11 15:39:48 +00:00
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool trans_VSUB_hp(DisasContext *s, arg_VSUB_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_subh, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:48 +00:00
|
|
|
static bool trans_VSUB_sp(DisasContext *s, arg_VSUB_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_subs, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VSUB_dp(DisasContext *s, arg_VSUB_dp *a)
|
2019-06-11 15:39:48 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_subd, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
2019-06-11 15:39:48 +00:00
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool trans_VDIV_hp(DisasContext *s, arg_VDIV_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_divh, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:48 +00:00
|
|
|
static bool trans_VDIV_sp(DisasContext *s, arg_VDIV_sp *a)
|
|
|
|
{
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_divs, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VDIV_dp(DisasContext *s, arg_VDIV_dp *a)
|
2019-06-11 15:39:48 +00:00
|
|
|
{
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_divd, a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
2019-06-11 15:39:49 +00:00
|
|
|
|
2020-08-28 18:33:12 +00:00
|
|
|
static bool trans_VMINNM_hp(DisasContext *s, arg_VMINNM_sp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_minnumh,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMAXNM_hp(DisasContext *s, arg_VMAXNM_sp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_hp(s, gen_helper_vfp_maxnumh,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:32 +00:00
|
|
|
static bool trans_VMINNM_sp(DisasContext *s, arg_VMINNM_sp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_minnums,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMAXNM_sp(DisasContext *s, arg_VMAXNM_sp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_sp(s, gen_helper_vfp_maxnums,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMINNM_dp(DisasContext *s, arg_VMINNM_dp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_minnumd,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMAXNM_dp(DisasContext *s, arg_VMAXNM_dp *a)
|
|
|
|
{
|
|
|
|
if (!dc_isar_feature(aa32_vminmaxnm, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
return do_vfp_3op_dp(s, gen_helper_vfp_maxnumd,
|
|
|
|
a->vd, a->vn, a->vm, false);
|
|
|
|
}
|
|
|
|
|
2020-08-28 18:33:15 +00:00
|
|
|
static bool do_vfm_hp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VFNMA : fd = muladd(-fd, fn, fm)
|
|
|
|
* VFNMS : fd = muladd(-fd, -fn, fm)
|
|
|
|
* VFMA : fd = muladd( fd, fn, fm)
|
|
|
|
* VFMS : fd = muladd( fd, -fn, fm)
|
|
|
|
*
|
|
|
|
* These are fused multiply-add, and must be done as one floating
|
|
|
|
* point operation with no rounding between the multiplication and
|
|
|
|
* addition steps. NB that doing the negations here as separate
|
|
|
|
* steps is correct : an input NaN should come out with its sign
|
|
|
|
* bit flipped if it is a negated-input.
|
|
|
|
*/
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 vn, vm, vd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Present in VFPv4 only, and only with the FP16 extension.
|
|
|
|
* Note that we can't rely on the SIMDFMAC check alone, because
|
|
|
|
* in a Neon-no-VFP core that ID register field will be non-zero.
|
|
|
|
*/
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s) ||
|
|
|
|
!dc_isar_feature(aa32_simdfmac, s) ||
|
|
|
|
!dc_isar_feature(aa32_fpsp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vn = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i32();
|
|
|
|
vd = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vn, a->vn);
|
|
|
|
vfp_load_reg32(vm, a->vm);
|
2020-08-28 18:33:15 +00:00
|
|
|
if (neg_n) {
|
|
|
|
/* VFNMS, VFMS */
|
|
|
|
gen_helper_vfp_negh(vn, vn);
|
|
|
|
}
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vd, a->vd);
|
2020-08-28 18:33:15 +00:00
|
|
|
if (neg_d) {
|
|
|
|
/* VFNMA, VFNMS */
|
|
|
|
gen_helper_vfp_negh(vd, vd);
|
|
|
|
}
|
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
gen_helper_vfp_muladdh(vd, vn, vm, vd, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(vd, a->vd);
|
2020-08-28 18:33:15 +00:00
|
|
|
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(vn);
|
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
tcg_temp_free_i32(vd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
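As a side note on the "one floating point operation with no rounding between the multiplication and addition steps" comment above, here is a small standalone C illustration (not translator code) of how a fused multiply-add can differ from a separate multiply and add; compile with contraction disabled (for example -ffp-contract=off) so the compiler does not fuse a * b + c on its own:

/* demo_fma_rounding.c -- illustrative only */
#include <math.h>
#include <stdio.h>

int main(void)
{
    /* The exact product 1 + 2^-26 + 2^-54 does not fit in a double. */
    double a = 1.0 + 0x1p-27;
    double b = 1.0 + 0x1p-27;
    double c = -(1.0 + 0x1p-26);

    double unfused = a * b + c;     /* product rounded, then the add rounds */
    double fused   = fma(a, b, c);  /* one rounding at the very end */

    /* Expect 0x0p+0 for the unfused result and 0x1p-54 for the fused one. */
    printf("unfused = %a\nfused   = %a\n", unfused, fused);
    return 0;
}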
|
|
|
|
|
2020-02-24 22:22:31 +00:00
|
|
|
static bool do_vfm_sp(DisasContext *s, arg_VFMA_sp *a, bool neg_n, bool neg_d)
|
2019-06-11 15:39:49 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VFNMA : fd = muladd(-fd, fn, fm)
|
|
|
|
* VFNMS : fd = muladd(-fd, -fn, fm)
|
|
|
|
* VFMA : fd = muladd( fd, fn, fm)
|
|
|
|
* VFMS : fd = muladd( fd, -fn, fm)
|
|
|
|
*
|
|
|
|
* These are fused multiply-add, and must be done as one floating
|
|
|
|
* point operation with no rounding between the multiplication and
|
|
|
|
* addition steps. NB that doing the negations here as separate
|
|
|
|
* steps is correct : an input NaN should come out with its sign
|
|
|
|
* bit flipped if it is a negated-input.
|
|
|
|
*/
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 vn, vm, vd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Present in VFPv4 only.
|
2020-02-24 22:22:24 +00:00
|
|
|
* Note that we can't rely on the SIMDFMAC check alone, because
|
|
|
|
* in a Neon-no-VFP core that ID register field will be non-zero.
|
|
|
|
*/
|
|
|
|
if (!dc_isar_feature(aa32_simdfmac, s) ||
|
|
|
|
!dc_isar_feature(aa32_fpsp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/*
|
2019-06-11 15:39:49 +00:00
|
|
|
* In v7A, UNPREDICTABLE with non-zero vector length/stride; from
|
|
|
|
* v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
|
|
|
|
*/
|
2020-02-24 22:22:24 +00:00
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vn = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i32();
|
|
|
|
vd = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vn, a->vn);
|
|
|
|
vfp_load_reg32(vm, a->vm);
|
2020-02-24 22:22:31 +00:00
|
|
|
if (neg_n) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* VFNMS, VFMS */
|
|
|
|
gen_helper_vfp_negs(vn, vn);
|
|
|
|
}
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vd, a->vd);
|
2020-02-24 22:22:31 +00:00
|
|
|
if (neg_d) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* VFNMA, VFNMS */
|
|
|
|
gen_helper_vfp_negs(vd, vd);
|
|
|
|
}
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:49 +00:00
|
|
|
gen_helper_vfp_muladds(vd, vn, vm, vd, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(vd, a->vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(vn);
|
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
tcg_temp_free_i32(vd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:31 +00:00
|
|
|
static bool do_vfm_dp(DisasContext *s, arg_VFMA_dp *a, bool neg_n, bool neg_d)
|
2019-06-11 15:39:49 +00:00
|
|
|
{
|
|
|
|
/*
|
|
|
|
* VFNMA : fd = muladd(-fd, fn, fm)
|
|
|
|
* VFNMS : fd = muladd(-fd, -fn, fm)
|
|
|
|
* VFMA : fd = muladd( fd, fn, fm)
|
|
|
|
* VFMS : fd = muladd( fd, -fn, fm)
|
|
|
|
*
|
|
|
|
* These are fused multiply-add, and must be done as one floating
|
|
|
|
* point operation with no rounding between the multiplication and
|
|
|
|
* addition steps. NB that doing the negations here as separate
|
|
|
|
* steps is correct : an input NaN should come out with its sign
|
|
|
|
* bit flipped if it is a negated-input.
|
|
|
|
*/
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i64 vn, vm, vd;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Present in VFPv4 only.
|
2020-02-24 22:22:24 +00:00
|
|
|
* Note that we can't rely on the SIMDFMAC check alone, because
|
|
|
|
* in a Neon-no-VFP core that ID register field will be non-zero.
|
|
|
|
*/
|
|
|
|
if (!dc_isar_feature(aa32_simdfmac, s) ||
|
|
|
|
!dc_isar_feature(aa32_fpdp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
/*
|
2019-06-11 15:39:49 +00:00
|
|
|
* In v7A, UNPREDICTABLE with non-zero vector length/stride; from
|
|
|
|
* v8A, must UNDEF. We choose to UNDEF for both v7A and v8A.
|
|
|
|
*/
|
2020-02-24 22:22:24 +00:00
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
2020-02-14 18:15:30 +00:00
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) &&
|
|
|
|
((a->vd | a->vn | a->vm) & 0x10)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vn = tcg_temp_new_i64();
|
|
|
|
vm = tcg_temp_new_i64();
|
|
|
|
vd = tcg_temp_new_i64();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vn, a->vn);
|
|
|
|
vfp_load_reg64(vm, a->vm);
|
2020-02-24 22:22:31 +00:00
|
|
|
if (neg_n) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* VFNMS, VFMS */
|
|
|
|
gen_helper_vfp_negd(vn, vn);
|
|
|
|
}
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vd, a->vd);
|
2020-02-24 22:22:31 +00:00
|
|
|
if (neg_d) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* VFNMA, VFNMS */
|
|
|
|
gen_helper_vfp_negd(vd, vd);
|
|
|
|
}
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:49 +00:00
|
|
|
gen_helper_vfp_muladdd(vd, vn, vm, vd, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(vd, a->vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i64(vn);
|
|
|
|
tcg_temp_free_i64(vm);
|
|
|
|
tcg_temp_free_i64(vd);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:49 +00:00
|
|
|
|
2020-08-28 18:33:14 +00:00
|
|
|
#define MAKE_ONE_VFM_TRANS_FN(INSN, PREC, NEGN, NEGD) \
|
|
|
|
static bool trans_##INSN##_##PREC(DisasContext *s, \
|
|
|
|
arg_##INSN##_##PREC *a) \
|
|
|
|
{ \
|
|
|
|
return do_vfm_##PREC(s, a, NEGN, NEGD); \
|
|
|
|
}
|
2020-02-24 22:22:31 +00:00
|
|
|
|
2020-08-28 18:33:14 +00:00
|
|
|
#define MAKE_VFM_TRANS_FNS(PREC) \
|
|
|
|
MAKE_ONE_VFM_TRANS_FN(VFMA, PREC, false, false) \
|
|
|
|
MAKE_ONE_VFM_TRANS_FN(VFMS, PREC, true, false) \
|
|
|
|
MAKE_ONE_VFM_TRANS_FN(VFNMA, PREC, false, true) \
|
|
|
|
MAKE_ONE_VFM_TRANS_FN(VFNMS, PREC, true, true)
|
2020-02-24 22:22:31 +00:00
|
|
|
|
2020-08-28 18:33:15 +00:00
|
|
|
MAKE_VFM_TRANS_FNS(hp)
|
2020-08-28 18:33:14 +00:00
|
|
|
MAKE_VFM_TRANS_FNS(sp)
|
|
|
|
MAKE_VFM_TRANS_FNS(dp)
|
2020-02-24 22:22:31 +00:00
|
|
|
|
2020-08-28 18:33:18 +00:00
|
|
|
static bool trans_VMOV_imm_hp(DisasContext *s, arg_VMOV_imm_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 fd;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->vec_len != 0 || s->vec_stride != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
fd = tcg_const_i32(vfp_expand_imm(MO_16, a->imm));
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, a->vd);
|
2020-08-28 18:33:18 +00:00
|
|
|
tcg_temp_free_i32(fd);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
static bool trans_VMOV_imm_sp(DisasContext *s, arg_VMOV_imm_sp *a)
|
|
|
|
{
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i32 fd;
|
2019-06-13 16:39:07 +00:00
|
|
|
uint32_t vd;
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
vd = a->vd;
|
|
|
|
|
2020-02-24 22:22:22 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v3, s)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:22 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_sreg_is_scalar(vd)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = s->vec_stride + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-13 16:39:07 +00:00
|
|
|
fd = tcg_const_i32(vfp_expand_imm(MO_32, a->imm));
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
for (;;) {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-06-11 15:39:53 +00:00
|
|
|
vd = vfp_advance_sreg(vd, delta_d);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i32(fd);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VMOV_imm_dp(DisasContext *s, arg_VMOV_imm_dp *a)
|
|
|
|
{
|
|
|
|
uint32_t delta_d = 0;
|
|
|
|
int veclen = s->vec_len;
|
|
|
|
TCGv_i64 fd;
|
2019-06-13 16:39:07 +00:00
|
|
|
uint32_t vd;
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
vd = a->vd;
|
|
|
|
|
2020-02-24 22:22:22 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v3, s)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (vd & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:49 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpshvec, s) &&
|
|
|
|
(veclen != 0 || s->vec_stride != 0)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (veclen > 0) {
|
|
|
|
/* Figure out what type of vector operation this is. */
|
2019-06-11 15:39:53 +00:00
|
|
|
if (vfp_dreg_is_scalar(vd)) {
|
2019-06-11 15:39:49 +00:00
|
|
|
/* scalar */
|
|
|
|
veclen = 0;
|
|
|
|
} else {
|
|
|
|
delta_d = (s->vec_stride >> 1) + 1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-06-13 16:39:07 +00:00
|
|
|
fd = tcg_const_i64(vfp_expand_imm(MO_64, a->imm));
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
for (;;) {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(fd, vd);
|
2019-06-11 15:39:49 +00:00
|
|
|
|
|
|
|
if (veclen == 0) {
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Set up the operands for the next iteration */
|
|
|
|
veclen--;
|
2019-07-04 16:14:44 +00:00
|
|
|
vd = vfp_advance_dreg(vd, delta_d);
|
2019-06-11 15:39:49 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i64(fd);
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:49 +00:00
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
#define DO_VFP_2OP(INSN, PREC, FN, CHECK) \
|
2020-08-28 18:33:16 +00:00
|
|
|
static bool trans_##INSN##_##PREC(DisasContext *s, \
|
|
|
|
arg_##INSN##_##PREC *a) \
|
|
|
|
{ \
|
2021-05-20 15:28:34 +00:00
|
|
|
if (!dc_isar_feature(CHECK, s)) { \
|
|
|
|
return false; \
|
|
|
|
} \
|
2020-08-28 18:33:16 +00:00
|
|
|
return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
|
|
|
|
}
|
2019-06-11 15:39:49 +00:00
|
|
|
|
2021-05-20 15:28:35 +00:00
|
|
|
#define DO_VFP_VMOV(INSN, PREC, FN) \
|
|
|
|
static bool trans_##INSN##_##PREC(DisasContext *s, \
|
|
|
|
arg_##INSN##_##PREC *a) \
|
|
|
|
{ \
|
|
|
|
if (!dc_isar_feature(aa32_fp##PREC##_v2, s) && \
|
|
|
|
!dc_isar_feature(aa32_mve, s)) { \
|
|
|
|
return false; \
|
|
|
|
} \
|
|
|
|
return do_vfp_2op_##PREC(s, FN, a->vd, a->vm); \
|
|
|
|
}
|
|
|
|
|
|
|
|
DO_VFP_VMOV(VMOV_reg, sp, tcg_gen_mov_i32)
|
|
|
|
DO_VFP_VMOV(VMOV_reg, dp, tcg_gen_mov_i64)
|
2019-06-11 15:39:50 +00:00
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
DO_VFP_2OP(VABS, hp, gen_helper_vfp_absh, aa32_fp16_arith)
|
|
|
|
DO_VFP_2OP(VABS, sp, gen_helper_vfp_abss, aa32_fpsp_v2)
|
|
|
|
DO_VFP_2OP(VABS, dp, gen_helper_vfp_absd, aa32_fpdp_v2)
|
2019-06-11 15:39:50 +00:00
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
DO_VFP_2OP(VNEG, hp, gen_helper_vfp_negh, aa32_fp16_arith)
|
|
|
|
DO_VFP_2OP(VNEG, sp, gen_helper_vfp_negs, aa32_fpsp_v2)
|
|
|
|
DO_VFP_2OP(VNEG, dp, gen_helper_vfp_negd, aa32_fpdp_v2)
|
2019-06-11 15:39:50 +00:00
|
|
|
|
2020-08-28 18:33:17 +00:00
|
|
|
static void gen_VSQRT_hp(TCGv_i32 vd, TCGv_i32 vm)
|
|
|
|
{
|
|
|
|
gen_helper_vfp_sqrth(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:50 +00:00
|
|
|
static void gen_VSQRT_sp(TCGv_i32 vd, TCGv_i32 vm)
|
|
|
|
{
|
|
|
|
gen_helper_vfp_sqrts(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void gen_VSQRT_dp(TCGv_i64 vd, TCGv_i64 vm)
|
|
|
|
{
|
|
|
|
gen_helper_vfp_sqrtd(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
2021-05-20 15:28:34 +00:00
|
|
|
DO_VFP_2OP(VSQRT, hp, gen_VSQRT_hp, aa32_fp16_arith)
|
|
|
|
DO_VFP_2OP(VSQRT, sp, gen_VSQRT_sp, aa32_fpsp_v2)
|
|
|
|
DO_VFP_2OP(VSQRT, dp, gen_VSQRT_dp, aa32_fpdp_v2)
|
2019-06-11 15:39:51 +00:00
|
|
|
|
2020-08-28 18:33:19 +00:00
|
|
|
static bool trans_VCMP_hp(DisasContext *s, arg_VCMP_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 vd, vm;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Vm/M bits must be zero for the Z variant */
|
|
|
|
if (a->z && a->vm != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vd, a->vd);
|
2020-08-28 18:33:19 +00:00
|
|
|
if (a->z) {
|
|
|
|
tcg_gen_movi_i32(vm, 0);
|
|
|
|
} else {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vm, a->vm);
|
2020-08-28 18:33:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (a->e) {
|
|
|
|
gen_helper_vfp_cmpeh(vd, vm, cpu_env);
|
|
|
|
} else {
|
|
|
|
gen_helper_vfp_cmph(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i32(vd);
|
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
static bool trans_VCMP_sp(DisasContext *s, arg_VCMP_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 vd, vm;
|
|
|
|
|
2020-02-24 22:22:23 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
/* Vm/M bits must be zero for the Z variant */
|
|
|
|
if (a->z && a->vm != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vd, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
if (a->z) {
|
|
|
|
tcg_gen_movi_i32(vm, 0);
|
|
|
|
} else {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vm, a->vm);
|
2019-06-11 15:39:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (a->e) {
|
|
|
|
gen_helper_vfp_cmpes(vd, vm, cpu_env);
|
|
|
|
} else {
|
|
|
|
gen_helper_vfp_cmps(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i32(vd);
|
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VCMP_dp(DisasContext *s, arg_VCMP_dp *a)
|
|
|
|
{
|
|
|
|
TCGv_i64 vd, vm;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
/* Vm/M bits must be zero for the Z variant */
|
|
|
|
if (a->z && a->vm != 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
2020-02-14 18:15:30 +00:00
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = tcg_temp_new_i64();
|
|
|
|
vm = tcg_temp_new_i64();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vd, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
if (a->z) {
|
|
|
|
tcg_gen_movi_i64(vm, 0);
|
|
|
|
} else {
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vm, a->vm);
|
2019-06-11 15:39:51 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (a->e) {
|
|
|
|
gen_helper_vfp_cmped(vd, vm, cpu_env);
|
|
|
|
} else {
|
|
|
|
gen_helper_vfp_cmpd(vd, vm, cpu_env);
|
|
|
|
}
|
|
|
|
|
|
|
|
tcg_temp_free_i64(vd);
|
|
|
|
tcg_temp_free_i64(vm);
|
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:51 +00:00
|
|
|
|
|
|
|
static bool trans_VCVT_f32_f16(DisasContext *s, arg_VCVT_f32_f16 *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 ahp_mode;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_spconv, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
ahp_mode = get_ahp_flag();
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
/* The T bit tells us if we want the low or high 16 bits of Vm */
|
|
|
|
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
|
|
|
|
gen_helper_vfp_fcvt_f16_to_f32(tmp, tmp, fpst, ahp_mode);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_i32(ahp_mode);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VCVT_f64_f16(DisasContext *s, arg_VCVT_f64_f16 *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 ahp_mode;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
TCGv_i64 vd;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
ahp_mode = get_ahp_flag();
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
/* The T bit tells us if we want the low or high 16 bits of Vm */
|
|
|
|
tcg_gen_ld16u_i32(tmp, cpu_env, vfp_f16_offset(a->vm, a->t));
|
|
|
|
vd = tcg_temp_new_i64();
|
|
|
|
gen_helper_vfp_fcvt_f16_to_f64(vd, tmp, fpst, ahp_mode);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(vd, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_i32(ahp_mode);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
tcg_temp_free_i64(vd);
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:51 +00:00
|
|
|
|
2021-05-25 22:58:08 +00:00
|
|
|
static bool trans_VCVT_b16_f32(DisasContext *s, arg_VCVT_b16_f32 *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_bf16, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
|
|
|
|
vfp_load_reg32(tmp, a->vm);
|
|
|
|
gen_helper_bfcvt(tmp, tmp, fpst);
|
|
|
|
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
static bool trans_VCVT_f16_f32(DisasContext *s, arg_VCVT_f16_f32 *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 ahp_mode;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_spconv, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
ahp_mode = get_ahp_flag();
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_vfp_fcvt_f32_to_f16(tmp, tmp, fpst, ahp_mode);
|
|
|
|
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
|
|
|
|
tcg_temp_free_i32(ahp_mode);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VCVT_f16_f64(DisasContext *s, arg_VCVT_f16_f64 *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 ahp_mode;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
TCGv_i64 vm;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fp16_dpconv, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
ahp_mode = get_ahp_flag();
|
|
|
|
tmp = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i64();
|
|
|
|
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vm, a->vm);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_vfp_fcvt_f64_to_f16(tmp, vm, fpst, ahp_mode);
|
|
|
|
tcg_temp_free_i64(vm);
|
|
|
|
tcg_gen_st16_i32(tmp, cpu_env, vfp_f16_offset(a->vd, a->t));
|
|
|
|
tcg_temp_free_i32(ahp_mode);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:51 +00:00
|
|
|
|
2020-08-28 18:33:27 +00:00
|
|
|
static bool trans_VRINTR_hp(DisasContext *s, arg_VRINTR_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-28 18:33:27 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
gen_helper_rinth(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2020-08-28 18:33:27 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
static bool trans_VRINTR_sp(DisasContext *s, arg_VRINTR_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_rints(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VRINTR_dp(DisasContext *s, arg_VRINTR_dp *a)
|
2019-06-11 15:39:51 +00:00
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i64 tmp;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i64();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_rintd(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-28 18:33:27 +00:00
|
|
|
static bool trans_VRINTZ_hp(DisasContext *s, arg_VRINTZ_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
TCGv_i32 tcg_rmode;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-28 18:33:27 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
tcg_rmode = tcg_const_i32(float_round_to_zero);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
|
|
|
gen_helper_rinth(tmp, tmp, fpst);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2020-08-28 18:33:27 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tcg_rmode);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
static bool trans_VRINTZ_sp(DisasContext *s, arg_VRINTZ_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
TCGv_i32 tcg_rmode;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_rmode = tcg_const_i32(float_round_to_zero);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
|
|
|
gen_helper_rints(tmp, tmp, fpst);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tcg_rmode);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-14 10:44:56 +00:00
|
|
|
static bool trans_VRINTZ_dp(DisasContext *s, arg_VRINTZ_dp *a)
|
2019-06-11 15:39:51 +00:00
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i64 tmp;
|
|
|
|
TCGv_i32 tcg_rmode;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i64();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_rmode = tcg_const_i32(float_round_to_zero);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
|
|
|
gen_helper_rintd(tmp, tmp, fpst);
|
|
|
|
gen_helper_set_rmode(tcg_rmode, tcg_rmode, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
tcg_temp_free_i32(tcg_rmode);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2020-08-28 18:33:27 +00:00
|
|
|
static bool trans_VRINTX_hp(DisasContext *s, arg_VRINTX_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-28 18:33:27 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
gen_helper_rinth_exact(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2020-08-28 18:33:27 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
static bool trans_VRINTX_sp(DisasContext *s, arg_VRINTX_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i32 tmp;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_rints_exact(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i32(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VRINTX_dp(DisasContext *s, arg_VRINTX_dp *a)
|
|
|
|
{
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
TCGv_i64 tmp;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_vrint, s)) {
|
2019-06-11 15:39:51 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && ((a->vd | a->vm) & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:51 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
tmp = tcg_temp_new_i64();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(tmp, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:51 +00:00
|
|
|
gen_helper_rintd_exact(tmp, tmp, fpst);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(tmp, a->vd);
|
2019-06-11 15:39:51 +00:00
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
tcg_temp_free_i64(tmp);
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:52 +00:00
|
|
|
|
|
|
|
static bool trans_VCVT_sp(DisasContext *s, arg_VCVT_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i64 vd;
|
|
|
|
TCGv_i32 vm;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:52 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vm = tcg_temp_new_i32();
|
|
|
|
vd = tcg_temp_new_i64();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vm, a->vm);
|
2019-06-11 15:39:52 +00:00
|
|
|
gen_helper_vfp_fcvtds(vd, vm, cpu_env);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg64(vd, a->vd);
|
2019-06-11 15:39:52 +00:00
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
tcg_temp_free_i64(vd);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VCVT_dp(DisasContext *s, arg_VCVT_dp *a)
|
|
|
|
{
|
|
|
|
TCGv_i64 vm;
|
|
|
|
TCGv_i32 vd;
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpdp_v2, s)) {
|
2019-06-11 15:39:52 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-02-24 22:22:21 +00:00
|
|
|
/* UNDEF accesses to D16-D31 if they don't exist. */
|
|
|
|
if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
|
2019-06-14 10:44:57 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:52 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vd = tcg_temp_new_i32();
|
|
|
|
vm = tcg_temp_new_i64();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg64(vm, a->vm);
|
2019-06-11 15:39:52 +00:00
|
|
|
gen_helper_vfp_fcvtsd(vd, vm, cpu_env);
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(vd, a->vd);
|
2019-06-11 15:39:52 +00:00
|
|
|
tcg_temp_free_i32(vd);
|
|
|
|
tcg_temp_free_i64(vm);
|
|
|
|
return true;
|
|
|
|
}
|
2019-06-11 15:39:52 +00:00
|
|
|
|
2020-08-28 18:33:21 +00:00
|
|
|
static bool trans_VCVT_int_hp(DisasContext *s, arg_VCVT_int_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 vm;
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
|
|
|
|
if (!dc_isar_feature(aa32_fp16_arith, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vm = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vm, a->vm);
|
2020-08-28 18:33:21 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR_F16);
|
|
|
|
if (a->s) {
|
|
|
|
/* i32 -> f16 */
|
|
|
|
gen_helper_vfp_sitoh(vm, vm, fpst);
|
|
|
|
} else {
|
|
|
|
/* u32 -> f16 */
|
|
|
|
gen_helper_vfp_uitoh(vm, vm, fpst);
|
|
|
|
}
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(vm, a->vd);
|
2020-08-28 18:33:21 +00:00
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:52 +00:00
|
|
|
static bool trans_VCVT_int_sp(DisasContext *s, arg_VCVT_int_sp *a)
|
|
|
|
{
|
|
|
|
TCGv_i32 vm;
|
|
|
|
TCGv_ptr fpst;
|
|
|
|
|
2020-02-24 22:22:23 +00:00
|
|
|
if (!dc_isar_feature(aa32_fpsp_v2, s)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-06-11 15:39:52 +00:00
|
|
|
if (!vfp_access_check(s)) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
vm = tcg_temp_new_i32();
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_load_reg32(vm, a->vm);
|
2020-08-06 10:44:51 +00:00
|
|
|
fpst = fpstatus_ptr(FPST_FPCR);
|
2019-06-11 15:39:52 +00:00
|
|
|
if (a->s) {
|
|
|
|
/* i32 -> f32 */
|
|
|
|
gen_helper_vfp_sitos(vm, vm, fpst);
|
|
|
|
} else {
|
|
|
|
/* u32 -> f32 */
|
|
|
|
gen_helper_vfp_uitos(vm, vm, fpst);
|
|
|
|
}
|
2020-11-02 16:52:14 +00:00
|
|
|
vfp_store_reg32(vm, a->vd);
|
2019-06-11 15:39:52 +00:00
|
|
|
tcg_temp_free_i32(vm);
|
|
|
|
tcg_temp_free_ptr(fpst);
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool trans_VCVT_int_dp(DisasContext *s, arg_VCVT_int_dp *a)
{
    TCGv_i32 vm;
    TCGv_i64 vd;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i32();
    vd = tcg_temp_new_i64();
    vfp_load_reg32(vm, a->vm);
    fpst = fpstatus_ptr(FPST_FPCR);
    if (a->s) {
        /* i32 -> f64 */
        gen_helper_vfp_sitod(vd, vm, fpst);
    } else {
        /* u32 -> f64 */
        gen_helper_vfp_uitod(vd, vm, fpst);
    }
    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_i64(vd);
    tcg_temp_free_ptr(fpst);
    return true;
}

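/*
 * VJCVT (v8.3 JavaScript conversion): convert the double-precision
 * value in Dm to a 32-bit signed integer in Sd, using the
 * round-towards-zero JavaScript conversion semantics implemented
 * by the vjcvt helper.
 */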
static bool trans_VJCVT(DisasContext *s, arg_VJCVT *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    if (!dc_isar_feature(aa32_jscvt, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);
    gen_helper_vjcvt(vd, vm, cpu_env);
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_i32(vd);
    return true;
}

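/*
 * VCVT between half precision and fixed point, in place in Vd.
 * a->opc encodes the op:U:sx bits: cases 0-3 convert from 16- or
 * 32-bit signed/unsigned fixed point to half precision with
 * round-to-nearest, cases 4-7 convert back with round-to-zero.
 */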
static bool trans_VCVT_fix_hp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultoh_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_toslh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhh_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_toulh_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

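/*
 * VCVT between single precision and fixed point, in place in Vd;
 * same op:U:sx decoding as the half-precision variant above.
 */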
static bool trans_VCVT_fix_sp(DisasContext *s, arg_VCVT_fix_sp *a)
{
    TCGv_i32 vd, shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpsp_v3, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i32();
    vfp_load_reg32(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultos_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosls_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhs_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_touls_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

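/*
 * VCVT between double precision and fixed point, in place in Vd;
 * same op:U:sx decoding as the single-precision variant above.
 */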
static bool trans_VCVT_fix_dp(DisasContext *s, arg_VCVT_fix_dp *a)
{
    TCGv_i64 vd;
    TCGv_i32 shift;
    TCGv_ptr fpst;
    int frac_bits;

    if (!dc_isar_feature(aa32_fpdp_v3, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vd & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    frac_bits = (a->opc & 1) ? (32 - a->imm) : (16 - a->imm);

    vd = tcg_temp_new_i64();
    vfp_load_reg64(vd, a->vd);

    fpst = fpstatus_ptr(FPST_FPCR);
    shift = tcg_const_i32(frac_bits);

    /* Switch on op:U:sx bits */
    switch (a->opc) {
    case 0:
        gen_helper_vfp_shtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 1:
        gen_helper_vfp_sltod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 2:
        gen_helper_vfp_uhtod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 3:
        gen_helper_vfp_ultod_round_to_nearest(vd, vd, shift, fpst);
        break;
    case 4:
        gen_helper_vfp_toshd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 5:
        gen_helper_vfp_tosld_round_to_zero(vd, vd, shift, fpst);
        break;
    case 6:
        gen_helper_vfp_touhd_round_to_zero(vd, vd, shift, fpst);
        break;
    case 7:
        gen_helper_vfp_tould_round_to_zero(vd, vd, shift, fpst);
        break;
    default:
        g_assert_not_reached();
    }

    vfp_store_reg64(vd, a->vd);
    tcg_temp_free_i64(vd);
    tcg_temp_free_i32(shift);
    tcg_temp_free_ptr(fpst);
    return true;
}

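/*
 * VCVT from half precision in Sm to a 32-bit integer in Sd;
 * a->s selects a signed result, a->rz forces round-to-zero instead
 * of the rounding mode from FPSCR.
 */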
static bool trans_VCVT_hp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR_F16);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosih(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizh(vm, vm, fpst);
        } else {
            gen_helper_vfp_touih(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

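/*
 * VCVT from single precision in Sm to a 32-bit integer in Sd;
 * a->s selects a signed result, a->rz forces round-to-zero.
 */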
static bool trans_VCVT_sp_int(DisasContext *s, arg_VCVT_sp_int *a)
{
    TCGv_i32 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpsp_v2, s)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i32();
    vfp_load_reg32(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_tosis(vm, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizs(vm, vm, fpst);
        } else {
            gen_helper_vfp_touis(vm, vm, fpst);
        }
    }
    vfp_store_reg32(vm, a->vd);
    tcg_temp_free_i32(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

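/*
 * VCVT from double precision in Dm to a 32-bit integer in Sd;
 * a->s selects a signed result, a->rz forces round-to-zero.
 */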
static bool trans_VCVT_dp_int(DisasContext *s, arg_VCVT_dp_int *a)
{
    TCGv_i32 vd;
    TCGv_i64 vm;
    TCGv_ptr fpst;

    if (!dc_isar_feature(aa32_fpdp_v2, s)) {
        return false;
    }

    /* UNDEF accesses to D16-D31 if they don't exist. */
    if (!dc_isar_feature(aa32_simd_r32, s) && (a->vm & 0x10)) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    fpst = fpstatus_ptr(FPST_FPCR);
    vm = tcg_temp_new_i64();
    vd = tcg_temp_new_i32();
    vfp_load_reg64(vm, a->vm);

    if (a->s) {
        if (a->rz) {
            gen_helper_vfp_tosizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_tosid(vd, vm, fpst);
        }
    } else {
        if (a->rz) {
            gen_helper_vfp_touizd(vd, vm, fpst);
        } else {
            gen_helper_vfp_touid(vd, vm, fpst);
        }
    }
    vfp_store_reg32(vd, a->vd);
    tcg_temp_free_i32(vd);
    tcg_temp_free_i64(vm);
    tcg_temp_free_ptr(fpst);
    return true;
}

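/*
 * VINS: insert the low 16 bits of Sm into the top 16 bits of Sd,
 * leaving the bottom half of Sd unchanged.
 */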
static bool trans_VINS(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rd, rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Insert low half of Vm into high half of Vd */
    rm = tcg_temp_new_i32();
    rd = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    vfp_load_reg32(rd, a->vd);
    tcg_gen_deposit_i32(rd, rd, rm, 16, 16);
    vfp_store_reg32(rd, a->vd);
    tcg_temp_free_i32(rm);
    tcg_temp_free_i32(rd);
    return true;
}

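/*
 * VMOVX: copy the top 16 bits of Sm into the bottom 16 bits of Sd,
 * zeroing the top half of Sd.
 */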
static bool trans_VMOVX(DisasContext *s, arg_VINS *a)
{
    TCGv_i32 rm;

    if (!dc_isar_feature(aa32_fp16_arith, s)) {
        return false;
    }

    if (s->vec_len != 0 || s->vec_stride != 0) {
        return false;
    }

    if (!vfp_access_check(s)) {
        return true;
    }

    /* Set Vd to high half of Vm */
    rm = tcg_temp_new_i32();
    vfp_load_reg32(rm, a->vm);
    tcg_gen_shri_i32(rm, rm, 16);
    vfp_store_reg32(rm, a->vd);
    tcg_temp_free_i32(rm);
    return true;
}