target/arm: Convert "single-precision" register moves to decodetree
Convert the "single-precision" register moves to decodetree:
 * VMSR
 * VMRS
 * VMOV between general purpose register and single precision

Note that the VMSR/VMRS conversions make our handling of the
"should this UNDEF?" checks consistent between the two instructions:
 * VMSR to MVFR0, MVFR1, MVFR2 now UNDEF from EL0
   (previously was a nop)
 * VMSR to FPSID now UNDEFs from EL0 or if VFPv3 or better
   (previously was a nop)
 * VMSR to FPINST and FPINST2 now UNDEF if VFPv3 or better
   (previously would write to the register, which had no
   guest-visible effect because we always UNDEF reads)

We also tighten up the decode: we were previously underdecoding
some SBZ or SBO bits.

The conversion of VMOV_single includes the expansion out of the
gen_mov_F0_vreg()/gen_vfp_mrs() and gen_mov_vreg_F0()/gen_vfp_msr()
sequences into the simpler direct load/store of the TCG temp via
neon_{load,store}_reg32(): we know in the new function that we're
always single-precision, we don't need to use the old-and-deprecated
cpu_F0* TCG globals, and we don't happen to have the declaration of
gen_vfp_msr() and gen_vfp_mrs() at the point in the file where the
new function is.

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
This commit is contained in:
parent 9851ed9269
commit a9ab50011a
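For readers coming to this series cold: decodetree turns each pattern in the .decode file (last hunk below) into field extraction plus a call to a hand-written trans_<NAME>() hook, so the translator only sees already-decoded operands, and a false return from the hook is treated as an UNDEF/illegal encoding. A minimal sketch of the interface the new code relies on, assuming the usual generated naming (the real decoder is generated at build time and differs in detail):

    /* Sketch only: roughly what decodetree generates for the VMSR_VMRS pattern. */
    typedef struct arg_VMSR_VMRS {
        int l;     /* bit 20: 1 = VMRS (read), 0 = VMSR (write) */
        int reg;   /* bits 19:16: VFP system register number */
        int rt;    /* bits 15:12: general-purpose register */
    } arg_VMSR_VMRS;

    /* Hand-written hook in the translator; returning false makes the insn UNDEF. */
    static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a);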
@@ -622,3 +622,164 @@ static bool trans_VDUP(DisasContext *s, arg_VDUP *a)
    return true;
}

static bool trans_VMSR_VMRS(DisasContext *s, arg_VMSR_VMRS *a)
{
    TCGv_i32 tmp;
    bool ignore_vfp_enabled = false;

    if (arm_dc_feature(s, ARM_FEATURE_M)) {
        /*
         * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
         * Writes to R15 are UNPREDICTABLE; we choose to undef.
         */
        if (a->rt == 15 || a->reg != ARM_VFP_FPSCR) {
            return false;
        }
    }

    switch (a->reg) {
    case ARM_VFP_FPSID:
        /*
         * VFPv2 allows access to FPSID from userspace; VFPv3 restricts
         * all ID registers to privileged access only.
         */
        if (IS_USER(s) && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR0:
    case ARM_VFP_MVFR1:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_MVFR2:
        if (IS_USER(s) || !arm_dc_feature(s, ARM_FEATURE_V8)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPSCR:
        break;
    case ARM_VFP_FPEXC:
        if (IS_USER(s)) {
            return false;
        }
        ignore_vfp_enabled = true;
        break;
    case ARM_VFP_FPINST:
    case ARM_VFP_FPINST2:
        /* Not present in VFPv3 */
        if (IS_USER(s) || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
            return false;
        }
        break;
    default:
        return false;
    }

    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
        return true;
    }

    if (a->l) {
        /* VMRS, move VFP special register to gp register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_FPEXC:
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            tmp = load_cpu_field(vfp.xregs[a->reg]);
            break;
        case ARM_VFP_FPSCR:
            if (a->rt == 15) {
                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
            } else {
                tmp = tcg_temp_new_i32();
                gen_helper_vfp_get_fpscr(tmp, cpu_env);
            }
            break;
        default:
            g_assert_not_reached();
        }

        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* VMSR, move gp register to VFP special register */
        switch (a->reg) {
        case ARM_VFP_FPSID:
        case ARM_VFP_MVFR0:
        case ARM_VFP_MVFR1:
        case ARM_VFP_MVFR2:
            /* Writes are ignored. */
            break;
        case ARM_VFP_FPSCR:
            tmp = load_reg(s, a->rt);
            gen_helper_vfp_set_fpscr(cpu_env, tmp);
            tcg_temp_free_i32(tmp);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPEXC:
            /*
             * TODO: VFP subarchitecture support.
             * For now, keep the EN bit only
             */
            tmp = load_reg(s, a->rt);
            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            gen_lookup_tb(s);
            break;
        case ARM_VFP_FPINST:
        case ARM_VFP_FPINST2:
            tmp = load_reg(s, a->rt);
            store_cpu_field(tmp, vfp.xregs[a->reg]);
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static bool trans_VMOV_single(DisasContext *s, arg_VMOV_single *a)
{
    TCGv_i32 tmp;

    if (!vfp_access_check(s)) {
        return true;
    }

    if (a->l) {
        /* VFP to general purpose register */
        tmp = tcg_temp_new_i32();
        neon_load_reg32(tmp, a->vn);
        if (a->rt == 15) {
            /* Set the 4 flag bits in the CPSR. */
            gen_set_nzcv(tmp);
            tcg_temp_free_i32(tmp);
        } else {
            store_reg(s, a->rt, tmp);
        }
    } else {
        /* general purpose register to VFP */
        tmp = load_reg(s, a->rt);
        neon_store_reg32(tmp, a->vn);
        tcg_temp_free_i32(tmp);
    }

    return true;
}
@@ -3097,7 +3097,6 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
    TCGv_i32 addr;
    TCGv_i32 tmp;
    TCGv_i32 tmp2;
    bool ignore_vfp_enabled = false;

    if (!arm_dc_feature(s, ARM_FEATURE_VFP)) {
        return 1;
@@ -3133,14 +3132,7 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
     * for invalid encodings; we will generate incorrect syndrome information
     * for attempts to execute invalid vfp/neon encodings with FP disabled.
     */
    if ((insn & 0x0fe00fff) == 0x0ee00a10) {
        rn = (insn >> 16) & 0xf;
        if (rn == ARM_VFP_FPSID || rn == ARM_VFP_FPEXC || rn == ARM_VFP_MVFR2
            || rn == ARM_VFP_MVFR1 || rn == ARM_VFP_MVFR0) {
            ignore_vfp_enabled = true;
        }
    }
    if (!full_vfp_access_check(s, ignore_vfp_enabled)) {
    if (!vfp_access_check(s)) {
        return 0;
    }
@@ -3148,142 +3140,8 @@ static int disas_vfp_insn(DisasContext *s, uint32_t insn)
    switch ((insn >> 24) & 0xf) {
    case 0xe:
        if (insn & (1 << 4)) {
            /* single register transfer */
            rd = (insn >> 12) & 0xf;
            if (dp) {
                /* already handled by decodetree */
                return 1;
            } else { /* !dp */
                bool is_sysreg;

                if ((insn & 0x6f) != 0x00)
                    return 1;
                rn = VFP_SREG_N(insn);

                is_sysreg = extract32(insn, 21, 1);

                if (arm_dc_feature(s, ARM_FEATURE_M)) {
                    /*
                     * The only M-profile VFP vmrs/vmsr sysreg is FPSCR.
                     * Writes to R15 are UNPREDICTABLE; we choose to undef.
                     */
                    if (is_sysreg && (rd == 15 || (rn >> 1) != ARM_VFP_FPSCR)) {
                        return 1;
                    }
                }

                if (insn & ARM_CP_RW_BIT) {
                    /* vfp->arm */
                    if (is_sysreg) {
                        /* system register */
                        rn >>= 1;

                        switch (rn) {
                        case ARM_VFP_FPSID:
                            /* VFP2 allows access to FSID from userspace.
                               VFP3 restricts all id registers to privileged
                               accesses. */
                            if (IS_USER(s)
                                && arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            /* Not present in VFP3. */
                            if (IS_USER(s)
                                || arm_dc_feature(s, ARM_FEATURE_VFP3)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        case ARM_VFP_FPSCR:
                            if (rd == 15) {
                                tmp = load_cpu_field(vfp.xregs[ARM_VFP_FPSCR]);
                                tcg_gen_andi_i32(tmp, tmp, 0xf0000000);
                            } else {
                                tmp = tcg_temp_new_i32();
                                gen_helper_vfp_get_fpscr(tmp, cpu_env);
                            }
                            break;
                        case ARM_VFP_MVFR2:
                            if (!arm_dc_feature(s, ARM_FEATURE_V8)) {
                                return 1;
                            }
                            /* fall through */
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            if (IS_USER(s)
                                || !arm_dc_feature(s, ARM_FEATURE_MVFR)) {
                                return 1;
                            }
                            tmp = load_cpu_field(vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        gen_mov_F0_vreg(0, rn);
                        tmp = gen_vfp_mrs();
                    }
                    if (rd == 15) {
                        /* Set the 4 flag bits in the CPSR. */
                        gen_set_nzcv(tmp);
                        tcg_temp_free_i32(tmp);
                    } else {
                        store_reg(s, rd, tmp);
                    }
                } else {
                    /* arm->vfp */
                    if (is_sysreg) {
                        rn >>= 1;
                        /* system register */
                        switch (rn) {
                        case ARM_VFP_FPSID:
                        case ARM_VFP_MVFR0:
                        case ARM_VFP_MVFR1:
                            /* Writes are ignored. */
                            break;
                        case ARM_VFP_FPSCR:
                            tmp = load_reg(s, rd);
                            gen_helper_vfp_set_fpscr(cpu_env, tmp);
                            tcg_temp_free_i32(tmp);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPEXC:
                            if (IS_USER(s))
                                return 1;
                            /* TODO: VFP subarchitecture support.
                             * For now, keep the EN bit only */
                            tmp = load_reg(s, rd);
                            tcg_gen_andi_i32(tmp, tmp, 1 << 30);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            gen_lookup_tb(s);
                            break;
                        case ARM_VFP_FPINST:
                        case ARM_VFP_FPINST2:
                            if (IS_USER(s)) {
                                return 1;
                            }
                            tmp = load_reg(s, rd);
                            store_cpu_field(tmp, vfp.xregs[rn]);
                            break;
                        default:
                            return 1;
                        }
                    } else {
                        tmp = load_reg(s, rd);
                        gen_vfp_msr(tmp);
                        gen_mov_vreg_F0(0, rn);
                    }
                }
            }
            /* already handled by decodetree */
            return 1;
        } else {
            /* data processing */
            bool rd_is_dp = dp;
@@ -62,3 +62,7 @@ VMOV_from_gp ---- 1110 0 0 index:1 0 .... rt:4 1011 .00 1 0000 \

VDUP         ---- 1110 1 b:1 q:1 0 .... rt:4 1011 . 0 e:1 1 0000 \
             vn=%vn_dp

VMSR_VMRS    ---- 1110 111 l:1 reg:4 rt:4 1010 0001 0000
VMOV_single  ---- 1110 000 l:1 .... rt:4 1010 . 001 0000 \
             vn=%vn_sp
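The decode hunk above is where the tightened decode lives: every literal 0/1 position in the VMSR_VMRS and VMOV_single lines is a bit that must match, so encodings that set the previously-underdecoded SBZ/SBO bits no longer reach the trans functions at all. For VMSR_VMRS the fixed bits work out to the same mask/value pair the old hand-written pre-check in disas_vfp_insn() used (0x0fe00fff / 0x0ee00a10, visible in an earlier hunk). A rough hand-written equivalent of what the generated decoder does for that one pattern (a sketch for illustration only, not the generated code):

    /* Match the fixed bits, then hand the variable fields to the hook. */
    if ((insn & 0x0fe00fff) == 0x0ee00a10) {
        arg_VMSR_VMRS a = {
            .l   = extract32(insn, 20, 1),
            .reg = extract32(insn, 16, 4),
            .rt  = extract32(insn, 12, 4),
        };
        /* a false return from the hook means the insn should UNDEF */
        return trans_VMSR_VMRS(s, &a);
    }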