#ifndef TARGET_ARM_TRANSLATE_H
#define TARGET_ARM_TRANSLATE_H

#include "exec/translator.h"
#include "internals.h"


/* internal defines */
typedef struct DisasContext {
    DisasContextBase base;
    const ARMISARegisters *isar;

    /* The address of the current instruction being translated. */
    target_ulong pc_curr;
    target_ulong page_start;
    uint32_t insn;
    /* Nonzero if this instruction has been conditionally skipped. */
    int condjmp;
    /* The label that will be jumped to when the instruction is skipped. */
    TCGLabel *condlabel;
    /* Thumb-2 conditional execution bits. */
    int condexec_mask;
    int condexec_cond;
    int thumb;
    int sctlr_b;
    MemOp be_data;
#if !defined(CONFIG_USER_ONLY)
    int user;
#endif
    ARMMMUIdx mmu_idx;      /* MMU index to use for normal loads/stores */
    uint8_t tbii;           /* TBI1|TBI0 for insns */
    uint8_t tbid;           /* TBI1|TBI0 for data */
    uint8_t tcma;           /* TCMA1|TCMA0 for MTE */
    bool ns;                /* Use non-secure CPREG bank on access */
    int fp_excp_el;         /* FP exception EL or 0 if enabled */
    int sve_excp_el;        /* SVE exception EL or 0 if enabled */
    int sve_len;            /* SVE vector length in bytes */
    /* Flag indicating that exceptions from secure mode are routed to EL3. */
    bool secure_routed_to_el3;
    bool vfp_enabled;       /* FP enabled via FPSCR.EN */
    int vec_len;
    int vec_stride;
    bool v7m_handler_mode;
    bool v8m_secure;        /* true if v8M and we're in Secure mode */
    bool v8m_stackcheck;    /* true if we need to perform v8M stack limit checks */
    bool v8m_fpccr_s_wrong; /* true if v8M FPCCR.S != v8m_secure */
    bool v7m_new_fp_ctxt_needed; /* ASPEN set but no active FP context */
    bool v7m_lspact;        /* FPCCR.LSPACT set */
    /*
     * Immediate value in AArch32 SVC insn; must be set if is_jmp == DISAS_SWI
     * so that the top level loop can generate correct syndrome information.
     */
    uint32_t svc_imm;
    int aarch64;
    int current_el;
    /* Debug target exception level for single-step exceptions */
    int debug_target_el;
    GHashTable *cp_regs;
    uint64_t features;      /* CPU feature bits */
    /*
     * Because unallocated encodings generate different exception syndrome
     * information from traps due to FP being disabled, we can't do a single
     * "is fp access disabled" check at a high level in the decode tree.
     * To help in catching bugs where the access check was forgotten in some
     * code path, we set this flag when the access check is done, and assert
     * that it is set at the point where we actually touch the FP regs.
     */
    bool fp_access_checked;
    /*
     * ARMv8 single-step state (this is distinct from the QEMU gdbstub
     * single-step support).
     */
    bool ss_active;
    bool pstate_ss;
    /*
     * True if the insn just emitted was a load-exclusive instruction
     * (necessary for syndrome information for single step exceptions),
     * ie A64 LDX*, LDAX*, A32/T32 LDREX*, LDAEX*.
     */
    bool is_ldex;
    /* True if AccType_UNPRIV should be used for LDTR et al */
    bool unpriv;
    /* True if v8.3-PAuth is active. */
    bool pauth_active;
    /* True if v8.5-MTE access to tags is enabled. */
    bool ata;
    /* True if v8.5-MTE tag checks affect the PE; index with is_unpriv. */
    bool mte_active[2];
    /* True with v8.5-BTI and SCTLR_ELx.BT* set. */
    bool bt;
    /* True if any CP15 access is trapped by HSTR_EL2 */
    bool hstr_active;
    /*
     * >= 0, a copy of PSTATE.BTYPE, which will be 0 without v8.5-BTI.
     *  < 0, set by the current instruction.
     */
    int8_t btype;
    /* A copy of cpu->dcz_blocksize. */
    uint8_t dcz_blocksize;
    /* True if this page is guarded. */
    bool guarded_page;
    /* Bottom two bits of XScale c15_cpar coprocessor access control reg */
    int c15_cpar;
    /* TCG op of the current insn_start. */
    TCGOp *insn_start;
#define TMP_A64_MAX 16
    int tmp_a64_count;
    TCGv_i64 tmp_a64[TMP_A64_MAX];
} DisasContext;

typedef struct DisasCompare {
    TCGCond cond;
    TCGv_i32 value;
    bool value_global;
} DisasCompare;

/* Share the TCG temporaries common between 32 and 64 bit modes. */
extern TCGv_i32 cpu_NF, cpu_ZF, cpu_CF, cpu_VF;
extern TCGv_i64 cpu_exclusive_addr;
extern TCGv_i64 cpu_exclusive_val;

static inline int arm_dc_feature(DisasContext *dc, int feature)
{
    return (dc->features & (1ULL << feature)) != 0;
}
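
/*
 * Usage sketch (hypothetical decode path; ARM_FEATURE_V8 is a real feature
 * bit, but the surrounding check is illustrative only):
 *
 *     if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
 *         unallocated_encoding(s);
 *         return;
 *     }
 */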

static inline int get_mem_index(DisasContext *s)
{
    return arm_to_core_mmu_idx(s->mmu_idx);
}

/*
 * Function used to determine the target exception EL when otherwise not
 * known or default.
 */
static inline int default_exception_el(DisasContext *s)
{
    /*
     * If we are coming from secure EL0 in a system with a 32-bit EL3, then
     * there is no secure EL1, so we route exceptions to EL3. Otherwise,
     * exceptions can only be routed to ELs above 1, so we target the higher
     * of 1 or the current EL.
     */
    return (s->mmu_idx == ARMMMUIdx_SE10_0 && s->secure_routed_to_el3)
            ? 3 : MAX(1, s->current_el);
}
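
/*
 * Worked example: at EL0 with exceptions not routed to EL3 this returns
 * MAX(1, 0) == 1, i.e. EL1; at EL2 it returns MAX(1, 2) == 2, so an
 * exception taken at EL2 targets EL2 by default.
 */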

static inline void disas_set_insn_syndrome(DisasContext *s, uint32_t syn)
{
    /*
     * We don't need to save all of the syndrome so we mask and shift
     * out unneeded bits to help the sleb128 encoder do a better job.
     */
    syn &= ARM_INSN_START_WORD2_MASK;
    syn >>= ARM_INSN_START_WORD2_SHIFT;

    /* We check and clear insn_start to catch multiple updates. */
    assert(s->insn_start != NULL);
    tcg_set_insn_start_param(s->insn_start, 2, syn);
    s->insn_start = NULL;
}
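
/*
 * Usage sketch (hypothetical call site): a load/store decoder builds a
 * syndrome word with one of the real syn_*() helpers from internals.h and
 * records it before emitting the memory operation, so a fault taken on
 * that insn can report ISS information:
 *
 *     uint32_t syn = syn_data_abort_with_iss(...);
 *     disas_set_insn_syndrome(s, syn);
 */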

/* is_jmp field values */
#define DISAS_JUMP      DISAS_TARGET_0 /* only pc was modified dynamically */
/* CPU state was modified dynamically; exit to main loop for interrupts. */
#define DISAS_UPDATE_EXIT  DISAS_TARGET_1
/*
 * These instructions trap after executing, so the A32/T32 decoder must
 * defer them until after the conditional execution state has been updated.
 * WFI also needs special handling when single-stepping.
 */
#define DISAS_WFI       DISAS_TARGET_2
#define DISAS_SWI       DISAS_TARGET_3
/* WFE */
#define DISAS_WFE       DISAS_TARGET_4
#define DISAS_HVC       DISAS_TARGET_5
#define DISAS_SMC       DISAS_TARGET_6
#define DISAS_YIELD     DISAS_TARGET_7
/*
 * M profile branch which might be an exception return (and so needs
 * custom end-of-TB code)
 */
#define DISAS_BX_EXCRET DISAS_TARGET_8
/*
 * For instructions which want an immediate exit to the main loop, as opposed
 * to attempting to use lookup_and_goto_ptr. Unlike DISAS_UPDATE_EXIT, this
 * doesn't write the PC on exiting the translation loop, so you need to
 * ensure that something (gen_a64_set_pc_im or a runtime helper) has done so
 * before we reach the return from cpu_tb_exec.
 */
#define DISAS_EXIT      DISAS_TARGET_9
/* CPU state was modified dynamically; no need to exit, but do not chain. */
#define DISAS_UPDATE_NOCHAIN  DISAS_TARGET_10

#ifdef TARGET_AARCH64
void a64_translate_init(void);
void gen_a64_set_pc_im(uint64_t val);
extern const TranslatorOps aarch64_translator_ops;
#else
static inline void a64_translate_init(void)
{
}

static inline void gen_a64_set_pc_im(uint64_t val)
{
}
#endif

void arm_test_cc(DisasCompare *cmp, int cc);
void arm_free_cc(DisasCompare *cmp);
void arm_jump_cc(DisasCompare *cmp, TCGLabel *label);
void arm_gen_test_cc(int cc, TCGLabel *label);
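
/*
 * Usage sketch (hypothetical caller): materialize an ARM condition-code
 * test once, branch on it, and release any temporary afterwards. The
 * condition holds when cmp->cond is true of (cmp->value, 0):
 *
 *     DisasCompare c;
 *     arm_test_cc(&c, cond);
 *     tcg_gen_brcondi_i32(tcg_invert_cond(c.cond), c.value, 0, skip_label);
 *     arm_free_cc(&c);
 */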

/* Return state of Alternate Half-precision flag, caller frees result */
static inline TCGv_i32 get_ahp_flag(void)
{
    TCGv_i32 ret = tcg_temp_new_i32();

    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPSCR]));
    /* FPSCR.AHP is bit 26 */
    tcg_gen_extract_i32(ret, ret, 26, 1);

    return ret;
}

/* Set bits within PSTATE. */
static inline void set_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_ori_i32(p, p, bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* Clear bits within PSTATE. */
static inline void clear_pstate_bits(uint32_t bits)
{
    TCGv_i32 p = tcg_temp_new_i32();

    tcg_debug_assert(!(bits & CACHED_PSTATE_BITS));

    tcg_gen_ld_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_gen_andi_i32(p, p, ~bits);
    tcg_gen_st_i32(p, cpu_env, offsetof(CPUARMState, pstate));
    tcg_temp_free_i32(p);
}

/* If the singlestep state is Active-not-pending, advance to Active-pending. */
static inline void gen_ss_advance(DisasContext *s)
{
    if (s->ss_active) {
        s->pstate_ss = 0;
        clear_pstate_bits(PSTATE_SS);
    }
}

static inline void gen_exception(int excp, uint32_t syndrome,
                                 uint32_t target_el)
{
    TCGv_i32 tcg_excp = tcg_const_i32(excp);
    TCGv_i32 tcg_syn = tcg_const_i32(syndrome);
    TCGv_i32 tcg_el = tcg_const_i32(target_el);

    gen_helper_exception_with_syndrome(cpu_env, tcg_excp,
                                       tcg_syn, tcg_el);

    tcg_temp_free_i32(tcg_el);
    tcg_temp_free_i32(tcg_syn);
    tcg_temp_free_i32(tcg_excp);
}
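
/*
 * Usage sketch: reporting an unallocated encoding, roughly as the decoders
 * do it (syn_uncategorized() is the real syndrome helper from internals.h):
 *
 *     gen_exception(EXCP_UDEF, syn_uncategorized(), default_exception_el(s));
 */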

/* Generate an architectural singlestep exception */
static inline void gen_swstep_exception(DisasContext *s, int isv, int ex)
{
    bool same_el = (s->debug_target_el == s->current_el);

    /*
     * If singlestep is targeting a lower EL than the current one,
     * then s->ss_active must be false and we can never get here.
     */
    assert(s->debug_target_el >= s->current_el);

    gen_exception(EXCP_UDEF, syn_swstep(same_el, isv, ex), s->debug_target_el);
}

/*
 * Given a VFP floating point constant encoded into an 8 bit immediate in an
 * instruction, expand it to the actual constant value of the specified
 * size, as per the VFPExpandImm() pseudocode in the Arm ARM.
 */
uint64_t vfp_expand_imm(int size, uint8_t imm8);
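
/*
 * Worked example (assuming 'size' takes MemOp values as in the callers):
 * imm8 == 0x70 encodes +1.0, so vfp_expand_imm(MO_32, 0x70) returns
 * 0x3f800000 and vfp_expand_imm(MO_64, 0x70) returns 0x3ff0000000000000,
 * the IEEE-754 single and double encodings of 1.0.
 */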

/* Vector operations shared between ARM and AArch64. */
void gen_gvec_ceq0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_clt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cgt0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cle0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_cge0(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   uint32_t opr_sz, uint32_t max_sz);
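
/*
 * Usage sketch (hypothetical A64 call site; vec_full_reg_offset() and
 * vec_full_reg_size() are assumed from the A64 translator): compare each
 * element of Vn against zero, writing all-ones/all-zeros lanes to Vd:
 *
 *     gen_gvec_ceq0(size, vec_full_reg_offset(s, rd),
 *                   vec_full_reg_offset(s, rn),
 *                   is_q ? 16 : 8, vec_full_reg_size(s));
 */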

void gen_gvec_mla(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_mls(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                  uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_cmtst(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                    uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sshl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ushl(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_cmtst_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_ushl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_sshl_i32(TCGv_i32 d, TCGv_i32 a, TCGv_i32 b);
void gen_ushl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);
void gen_sshl_i64(TCGv_i64 d, TCGv_i64 a, TCGv_i64 b);

void gen_gvec_uqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqadd_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqsub_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                       uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_ssra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_usra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                   int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_srshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_urshr(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_srsra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_ursra(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                    int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sri(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sli(unsigned vece, uint32_t rd_ofs, uint32_t rm_ofs,
                  int64_t shift, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sqrdmlah_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_sqrdmlsh_qc(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                          uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_sabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uabd(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

void gen_gvec_saba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);
void gen_gvec_uaba(unsigned vece, uint32_t rd_ofs, uint32_t rn_ofs,
                   uint32_t rm_ofs, uint32_t opr_sz, uint32_t max_sz);

/*
 * Forward to the isar_feature_* tests given a DisasContext pointer.
 */
#define dc_isar_feature(name, ctx) \
    ({ DisasContext *ctx_ = (ctx); isar_feature_##name(ctx_->isar); })
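
/*
 * Usage sketch (isar_feature_aa32_fp16_arith is a real test from cpu.h;
 * the surrounding decode function is illustrative):
 *
 *     if (!dc_isar_feature(aa32_fp16_arith, s)) {
 *         return false;
 *     }
 */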

/* Note that the gvec expanders operate on offsets + sizes. */
typedef void GVecGen2Fn(unsigned, uint32_t, uint32_t, uint32_t, uint32_t);
typedef void GVecGen2iFn(unsigned, uint32_t, uint32_t, int64_t,
                         uint32_t, uint32_t);
typedef void GVecGen3Fn(unsigned, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
typedef void GVecGen4Fn(unsigned, uint32_t, uint32_t, uint32_t,
                        uint32_t, uint32_t, uint32_t);
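
/*
 * Usage sketch: the three-operand expanders declared above match
 * GVecGen3Fn, so a decoder can dispatch through a table (a hypothetical
 * one, indexed by an 'is accumulate-subtract' flag):
 *
 *     static GVecGen3Fn * const fns[2] = { gen_gvec_mla, gen_gvec_mls };
 *     fns[u](vece, rd_ofs, rn_ofs, rm_ofs, opr_sz, max_sz);
 */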

/* Function prototype for gen_ functions for calling Neon helpers */
typedef void NeonGenOneOpFn(TCGv_i32, TCGv_i32);
typedef void NeonGenOneOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32);
typedef void NeonGenTwoOpFn(TCGv_i32, TCGv_i32, TCGv_i32);
typedef void NeonGenTwoOpEnvFn(TCGv_i32, TCGv_ptr, TCGv_i32, TCGv_i32);
typedef void NeonGenTwo64OpFn(TCGv_i64, TCGv_i64, TCGv_i64);
typedef void NeonGenTwo64OpEnvFn(TCGv_i64, TCGv_ptr, TCGv_i64, TCGv_i64);
typedef void NeonGenNarrowFn(TCGv_i32, TCGv_i64);
typedef void NeonGenNarrowEnvFn(TCGv_i32, TCGv_ptr, TCGv_i64);
typedef void NeonGenWidenFn(TCGv_i64, TCGv_i32);
typedef void NeonGenTwoOpWidenFn(TCGv_i64, TCGv_i32, TCGv_i32);
typedef void NeonGenOneSingleOpFn(TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoSingleOpFn(TCGv_i32, TCGv_i32, TCGv_i32, TCGv_ptr);
typedef void NeonGenTwoDoubleOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGv_ptr);
typedef void NeonGenOne64OpFn(TCGv_i64, TCGv_i64);
typedef void CryptoTwoOpFn(TCGv_ptr, TCGv_ptr);
typedef void CryptoThreeOpIntFn(TCGv_ptr, TCGv_ptr, TCGv_i32);
typedef void CryptoThreeOpFn(TCGv_ptr, TCGv_ptr, TCGv_ptr);
typedef void AtomicThreeOpFn(TCGv_i64, TCGv_i64, TCGv_i64, TCGArg, MemOp);
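
/*
 * Usage sketch (hypothetical; gen_helper_neon_qabs_s32 is assumed to have
 * the NeonGenOneOpEnvFn shape, taking cpu_env for saturation state):
 *
 *     NeonGenOneOpEnvFn *fn = gen_helper_neon_qabs_s32;
 *     fn(dest, cpu_env, src);
 */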

#endif /* TARGET_ARM_TRANSLATE_H */