Merge tag 'pull-target-arm-20220519' of https://git.linaro.org/people/pmaydell/qemu-arm into staging

target-arm queue:
 * Implement FEAT_S2FWB
 * Implement FEAT_IDST
 * Drop unsupported_encoding() macro
 * hw/intc/arm_gicv3: Use correct number of priority bits for the CPU
 * Fix aarch64 debug register names
 * hw/adc/zynq-xadc: Use qemu_irq typedef
 * target/arm/helper.c: Delete stray obsolete comment
 * Make number of counters in PMCR follow the CPU
 * hw/arm/virt: Fix dtb nits
 * ptimer: Rename PTIMER_POLICY_DEFAULT to PTIMER_POLICY_LEGACY
 * target/arm: Fix PAuth keys access checks for disabled SEL2
 * Enable FEAT_HCX for -cpu max
 * Use FIELD definitions for CPACR, CPTR_ELx

# -----BEGIN PGP SIGNATURE-----
#
# iQJNBAABCAA3FiEE4aXFk81BneKOgxXPPCUl7RQ2DN4FAmKGf/kZHHBldGVyLm1h
# eWRlbGxAbGluYXJvLm9yZwAKCRA8JSXtFDYM3h5fD/9+2ymBkwX9jIaaOooejZrJ
# CUVCPL2w206eZbPihgE4snZcZdkGjteDhS30kipyJxFNaCE0d2qQLTMrPXQMKyGz
# aqCcvSSdb+VraVOP7RpfqYrkzFsj/+K/F5NHn3Vf/T7ULLQJFk0JZ4UWnt7/rkhL
# oaXGDm79JrFfMTWUu2AKtCGj132YXyH2YP7GmvYyIty3l+hR9a8mwx3EcFTewuEn
# U7AZGZL2GfixGLZU/nNRcTu/BOlkdU6PM1BZoprs7HwbMXR1+pBCX6bwIXK35Q6/
# vCu8e/4+Hi4cd7nxRXqQ7+KuIdcEB+LwupN61othVUl2lnfQgNcvj2hhiWJkxdRI
# gpRktnnf6QNWsQ7HWShB5o8HhDN9v82wMf8VSiN1XncN6oYWZSEAMa4NbV/1ditY
# yolJww/onnre8A07xTzBqIXqKRViwUMltXSamUpuWx4UsSMOql/ktJYsXqSnJWP+
# cpTqQ+VQKj8cTotvDTnxKsOiI/RraSAzW1amle0x2Ff8I6AN45j1S3GuT9EyJJ3w
# CabegyYloJChI2Gnqf5Pc+B108E/GwHlcsmgaN5FEjlGzluKa7Ii0D2f1Cey1tAl
# iTqJ3dRFjhkll6a88TN47QRtDJadXp+PjQzzQxtEM1wXhJLWXTxocvBn0cGg2OnY
# 4eMY1YLtD9neCnKvdZdw9g==
# =F3ow
# -----END PGP SIGNATURE-----
# gpg: Signature made Thu 19 May 2022 10:35:53 AM PDT
# gpg:                using RSA key E1A5C593CD419DE28E8315CF3C2525ED14360CDE
# gpg:                issuer "peter.maydell@linaro.org"
# gpg: Good signature from "Peter Maydell <peter.maydell@linaro.org>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@gmail.com>" [full]
# gpg:                 aka "Peter Maydell <pmaydell@chiark.greenend.org.uk>" [full]

* tag 'pull-target-arm-20220519' of https://git.linaro.org/people/pmaydell/qemu-arm: (22 commits)
  target/arm: Use FIELD definitions for CPACR, CPTR_ELx
  target/arm: Enable FEAT_HCX for -cpu max
  target/arm: Fix PAuth keys access checks for disabled SEL2
  ptimer: Rename PTIMER_POLICY_DEFAULT to PTIMER_POLICY_LEGACY
  hw/arm/virt: Drop #size-cells and #address-cells from gpio-keys dtb node
  hw/arm/virt: Fix incorrect non-secure flash dtb node name
  target/arm: Make number of counters in PMCR follow the CPU
  target/arm/helper.c: Delete stray obsolete comment
  hw/adc/zynq-xadc: Use qemu_irq typedef
  Fix aarch64 debug register names.
  hw/intc/arm_gicv3: Provide ich_num_aprs()
  hw/intc/arm_gicv3: Use correct number of priority bits for the CPU
  hw/intc/arm_gicv3: Support configurable number of physical priority bits
  hw/intc/arm_gicv3_kvm.c: Stop using GIC_MIN_BPR constant
  hw/intc/arm_gicv3: report correct PRIbits field in ICV_CTLR_EL1
  hw/intc/arm_gicv3_cpuif: Handle CPUs that don't specify GICv3 parameters
  target/arm: Drop unsupported_encoding() macro
  target/arm: Implement FEAT_IDST
  target/arm: Enable FEAT_S2FWB for -cpu max
  target/arm: Implement FEAT_S2FWB
  ...

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
commit 3a650ac995
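One recurring theme in this queue is replacing hand-rolled CPACR/CPTR_ELx bit masks with FIELD definitions from QEMU's include/hw/registerfields.h. As a rough standalone sketch (editorial illustration, not from this series), the snippet below mimics what FIELD(CPACR_EL1, FPEN, 20, 2) together with FIELD_EX64()/FIELD_DP64() provide; the cpacr_-prefixed helper names are made up for this example.

#include <inttypes.h>
#include <stdio.h>

/*
 * Hand-expanded equivalent of FIELD(CPACR_EL1, FPEN, 20, 2): the real macro
 * also generates R_CPACR_EL1_FPEN_SHIFT/_LENGTH and other width variants.
 */
#define R_CPACR_EL1_FPEN_SHIFT 20
#define R_CPACR_EL1_FPEN_MASK  (3ULL << R_CPACR_EL1_FPEN_SHIFT)

/* What FIELD_EX64(reg, CPACR_EL1, FPEN) computes: extract the 2-bit field. */
static uint64_t cpacr_get_fpen(uint64_t reg)
{
    return (reg & R_CPACR_EL1_FPEN_MASK) >> R_CPACR_EL1_FPEN_SHIFT;
}

/* What FIELD_DP64(reg, CPACR_EL1, FPEN, val) computes: deposit a new value. */
static uint64_t cpacr_set_fpen(uint64_t reg, uint64_t val)
{
    return (reg & ~R_CPACR_EL1_FPEN_MASK) |
           ((val << R_CPACR_EL1_FPEN_SHIFT) & R_CPACR_EL1_FPEN_MASK);
}

int main(void)
{
    uint64_t cpacr = cpacr_set_fpen(0, 3); /* FPEN = 3: no FP/SIMD trapping */
    printf("CPACR_EL1 = 0x%" PRIx64 ", FPEN = %" PRIu64 "\n",
           cpacr, cpacr_get_fpen(cpacr));
    return 0;
}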
@@ -31,6 +31,7 @@ the following architecture extensions:
 - FEAT_FlagM2 (Enhancements to flag manipulation instructions)
 - FEAT_HPDS (Hierarchical permission disables)
 - FEAT_I8MM (AArch64 Int8 matrix multiplication instructions)
+- FEAT_IDST (ID space trap handling)
 - FEAT_IESB (Implicit error synchronization event)
 - FEAT_JSCVT (JavaScript conversion instructions)
 - FEAT_LOR (Limited ordering regions)
@@ -52,6 +53,7 @@ the following architecture extensions:
 - FEAT_RAS (Reliability, availability, and serviceability)
 - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
 - FEAT_RNG (Random number generator)
+- FEAT_S2FWB (Stage 2 forced Write-Back)
 - FEAT_SB (Speculation Barrier)
 - FEAT_SEL2 (Secure EL2)
 - FEAT_SHA1 (SHA1 instructions)
@@ -86,7 +86,7 @@ static void zynq_xadc_update_ints(ZynqXADCState *s)
         s->regs[INT_STS] |= INT_DFIFO_GTH;
     }
 
-    qemu_set_irq(s->qemu_irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK]));
+    qemu_set_irq(s->irq, !!(s->regs[INT_STS] & ~s->regs[INT_MASK]));
 }
 
 static void zynq_xadc_reset(DeviceState *d)
@@ -262,7 +262,7 @@ static void zynq_xadc_init(Object *obj)
     memory_region_init_io(&s->iomem, obj, &xadc_ops, s, "zynq-xadc",
                           ZYNQ_XADC_MMIO_SIZE);
     sysbus_init_mmio(sbd, &s->iomem);
-    sysbus_init_irq(sbd, &s->qemu_irq);
+    sysbus_init_irq(sbd, &s->irq);
 }
 
 static const VMStateDescription vmstate_zynq_xadc = {
@@ -761,7 +761,7 @@ static void do_cpu_reset(void *opaque)
                     env->cp15.scr_el3 |= SCR_ATA;
                 }
                 if (cpu_isar_feature(aa64_sve, cpu)) {
-                    env->cp15.cptr_el[3] |= CPTR_EZ;
+                    env->cp15.cptr_el[3] |= R_CPTR_EL3_EZ_MASK;
                 }
                 /* AArch64 kernels never boot in secure mode */
                 assert(!info->secure_boot);
@@ -464,7 +464,7 @@ static void mv88w8618_timer_init(SysBusDevice *dev, mv88w8618_timer_state *s,
     sysbus_init_irq(dev, &s->irq);
     s->freq = freq;
 
-    s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_DEFAULT);
+    s->ptimer = ptimer_init(mv88w8618_timer_tick, s, PTIMER_POLICY_LEGACY);
 }
 
 static uint64_t mv88w8618_pit_read(void *opaque, hwaddr offset,
@@ -925,8 +925,6 @@ static void create_gpio_keys(char *fdt, DeviceState *pl061_dev,
 
     qemu_fdt_add_subnode(fdt, "/gpio-keys");
     qemu_fdt_setprop_string(fdt, "/gpio-keys", "compatible", "gpio-keys");
-    qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#size-cells", 0);
-    qemu_fdt_setprop_cell(fdt, "/gpio-keys", "#address-cells", 1);
 
     qemu_fdt_add_subnode(fdt, "/gpio-keys/poweroff");
     qemu_fdt_setprop_string(fdt, "/gpio-keys/poweroff",
@@ -1195,7 +1193,7 @@ static void virt_flash_fdt(VirtMachineState *vms,
     qemu_fdt_setprop_string(ms->fdt, nodename, "secure-status", "okay");
     g_free(nodename);
 
-    nodename = g_strdup_printf("/flash@%" PRIx64, flashbase);
+    nodename = g_strdup_printf("/flash@%" PRIx64, flashbase + flashsize);
     qemu_fdt_add_subnode(ms->fdt, nodename);
     qemu_fdt_setprop_string(ms->fdt, nodename, "compatible", "cfi-flash");
     qemu_fdt_setprop_sized_cells(ms->fdt, nodename, "reg",
@@ -41,7 +41,9 @@
 #include "hw/virtio/virtio-pci.h"
 #include "qom/object_interfaces.h"
 
-GlobalProperty hw_compat_7_0[] = {};
+GlobalProperty hw_compat_7_0[] = {
+    { "arm-gicv3-common", "force-8-bit-prio", "on" },
+};
 const size_t hw_compat_7_0_len = G_N_ELEMENTS(hw_compat_7_0);
 
 GlobalProperty hw_compat_6_2[] = {
@@ -552,7 +552,7 @@ static void xilinx_axidma_realize(DeviceState *dev, Error **errp)
 
         st->dma = s;
         st->nr = i;
-        st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
+        st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY);
         ptimer_transaction_begin(st->ptimer);
         ptimer_set_freq(st->ptimer, s->freqhz);
         ptimer_transaction_commit(st->ptimer);
@@ -666,7 +666,7 @@ static void xlnx_csu_dma_realize(DeviceState *dev, Error **errp)
     sysbus_init_irq(SYS_BUS_DEVICE(dev), &s->irq);
 
     s->src_timer = ptimer_init(xlnx_csu_dma_src_timeout_hit,
-                               s, PTIMER_POLICY_DEFAULT);
+                               s, PTIMER_POLICY_LEGACY);
 
     s->attr = MEMTXATTRS_UNSPECIFIED;
 
@@ -563,6 +563,11 @@ static Property arm_gicv3_common_properties[] = {
     DEFINE_PROP_UINT32("revision", GICv3State, revision, 3),
     DEFINE_PROP_BOOL("has-lpi", GICv3State, lpi_enable, 0),
     DEFINE_PROP_BOOL("has-security-extensions", GICv3State, security_extn, 0),
+    /*
+     * Compatibility property: force 8 bits of physical priority, even
+     * if the CPU being emulated should have fewer.
+     */
+    DEFINE_PROP_BOOL("force-8-bit-prio", GICv3State, force_8bit_prio, 0),
     DEFINE_PROP_ARRAY("redist-region-count", GICv3State, nb_redist_regions,
                       redist_region_count, qdev_prop_uint32, uint32_t),
     DEFINE_PROP_LINK("sysmem", GICv3State, dma, TYPE_MEMORY_REGION,
@@ -49,6 +49,14 @@ static inline int icv_min_vbpr(GICv3CPUState *cs)
     return 7 - cs->vprebits;
 }
 
+static inline int ich_num_aprs(GICv3CPUState *cs)
+{
+    /* Return the number of virtual APR registers (1, 2, or 4) */
+    int aprmax = 1 << (cs->vprebits - 5);
+    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+    return aprmax;
+}
+
 /* Simple accessor functions for LR fields */
 static uint32_t ich_lr_vintid(uint64_t lr)
 {
@@ -145,9 +153,7 @@ static int ich_highest_active_virt_prio(GICv3CPUState *cs)
      * in the ICH Active Priority Registers.
      */
     int i;
-    int aprmax = 1 << (cs->vprebits - 5);
-
-    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+    int aprmax = ich_num_aprs(cs);
 
     for (i = 0; i < aprmax; i++) {
         uint32_t apr = cs->ich_apr[GICV3_G0][i] |
@@ -657,7 +663,7 @@ static uint64_t icv_ctlr_read(CPUARMState *env, const ARMCPRegInfo *ri)
      * should match the ones reported in ich_vtr_read().
      */
     value = ICC_CTLR_EL1_A3V | (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
-        (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+        ((cs->vpribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
 
     if (cs->ich_vmcr_el2 & ICH_VMCR_EL2_VEOIM) {
         value |= ICC_CTLR_EL1_EOIMODE;
@@ -787,6 +793,36 @@ static uint64_t icv_iar_read(CPUARMState *env, const ARMCPRegInfo *ri)
     return intid;
 }
 
+static uint32_t icc_fullprio_mask(GICv3CPUState *cs)
+{
+    /*
+     * Return a mask word which clears the unimplemented priority bits
+     * from a priority value for a physical interrupt. (Not to be confused
+     * with the group priority, whose mask depends on the value of BPR
+     * for the interrupt group.)
+     */
+    return ~0U << (8 - cs->pribits);
+}
+
+static inline int icc_min_bpr(GICv3CPUState *cs)
+{
+    /* The minimum BPR for the physical interface. */
+    return 7 - cs->prebits;
+}
+
+static inline int icc_min_bpr_ns(GICv3CPUState *cs)
+{
+    return icc_min_bpr(cs) + 1;
+}
+
+static inline int icc_num_aprs(GICv3CPUState *cs)
+{
+    /* Return the number of APR registers (1, 2, or 4) */
+    int aprmax = 1 << MAX(cs->prebits - 5, 0);
+    assert(aprmax <= ARRAY_SIZE(cs->icc_apr[0]));
+    return aprmax;
+}
+
 static int icc_highest_active_prio(GICv3CPUState *cs)
 {
     /* Calculate the current running priority based on the set bits
@@ -794,14 +830,14 @@ static int icc_highest_active_prio(GICv3CPUState *cs)
      */
     int i;
 
-    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[0]); i++) {
+    for (i = 0; i < icc_num_aprs(cs); i++) {
         uint32_t apr = cs->icc_apr[GICV3_G0][i] |
                        cs->icc_apr[GICV3_G1][i] | cs->icc_apr[GICV3_G1NS][i];
 
         if (!apr) {
             continue;
         }
-        return (i * 32 + ctz32(apr)) << (GIC_MIN_BPR + 1);
+        return (i * 32 + ctz32(apr)) << (icc_min_bpr(cs) + 1);
     }
     /* No current active interrupts: return idle priority */
     return 0xff;
@@ -980,7 +1016,7 @@ static void icc_pmr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 
     trace_gicv3_icc_pmr_write(gicv3_redist_affid(cs), value);
 
-    value &= 0xff;
+    value &= icc_fullprio_mask(cs);
 
     if (arm_feature(env, ARM_FEATURE_EL3) && !arm_is_secure(env) &&
         (env->cp15.scr_el3 & SCR_FIQ)) {
@@ -1004,7 +1040,7 @@ static void icc_activate_irq(GICv3CPUState *cs, int irq)
      */
     uint32_t mask = icc_gprio_mask(cs, cs->hppi.grp);
     int prio = cs->hppi.prio & mask;
-    int aprbit = prio >> 1;
+    int aprbit = prio >> (8 - cs->prebits);
     int regno = aprbit / 32;
     int regbit = aprbit % 32;
 
@@ -1162,7 +1198,7 @@ static void icc_drop_prio(GICv3CPUState *cs, int grp)
      */
     int i;
 
-    for (i = 0; i < ARRAY_SIZE(cs->icc_apr[grp]); i++) {
+    for (i = 0; i < icc_num_aprs(cs); i++) {
         uint64_t *papr = &cs->icc_apr[grp][i];
 
         if (!*papr) {
@@ -1303,9 +1339,7 @@ static int icv_drop_prio(GICv3CPUState *cs)
      * 32 bits are actually relevant.
      */
     int i;
-    int aprmax = 1 << (cs->vprebits - 5);
-
-    assert(aprmax <= ARRAY_SIZE(cs->ich_apr[0]));
+    int aprmax = ich_num_aprs(cs);
 
     for (i = 0; i < aprmax; i++) {
         uint64_t *papr0 = &cs->ich_apr[GICV3_G0][i];
@@ -1590,7 +1624,7 @@ static void icc_bpr_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    minval = (grp == GICV3_G1NS) ? GIC_MIN_BPR_NS : GIC_MIN_BPR;
+    minval = (grp == GICV3_G1NS) ? icc_min_bpr_ns(cs) : icc_min_bpr(cs);
     if (value < minval) {
         value = minval;
     }
@@ -2171,19 +2205,19 @@ static void icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
 
     cs->icc_ctlr_el1[GICV3_S] = ICC_CTLR_EL1_A3V |
                                 (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
-                                (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+                                ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
     cs->icc_ctlr_el1[GICV3_NS] = ICC_CTLR_EL1_A3V |
                                  (1 << ICC_CTLR_EL1_IDBITS_SHIFT) |
-                                 (7 << ICC_CTLR_EL1_PRIBITS_SHIFT);
+                                 ((cs->pribits - 1) << ICC_CTLR_EL1_PRIBITS_SHIFT);
     cs->icc_pmr_el1 = 0;
-    cs->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
-    cs->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
-    cs->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR_NS;
+    cs->icc_bpr[GICV3_G0] = icc_min_bpr(cs);
+    cs->icc_bpr[GICV3_G1] = icc_min_bpr(cs);
+    cs->icc_bpr[GICV3_G1NS] = icc_min_bpr_ns(cs);
     memset(cs->icc_apr, 0, sizeof(cs->icc_apr));
     memset(cs->icc_igrpen, 0, sizeof(cs->icc_igrpen));
     cs->icc_ctlr_el3 = ICC_CTLR_EL3_NDS | ICC_CTLR_EL3_A3V |
                        (1 << ICC_CTLR_EL3_IDBITS_SHIFT) |
-                       (7 << ICC_CTLR_EL3_PRIBITS_SHIFT);
+                       ((cs->pribits - 1) << ICC_CTLR_EL3_PRIBITS_SHIFT);
 
     memset(cs->ich_apr, 0, sizeof(cs->ich_apr));
     cs->ich_hcr_el2 = 0;
@ -2238,27 +2272,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
/* All the ICC_AP1R*_EL1 registers are banked */
|
||||
{ .name = "ICC_AP1R0_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 0,
|
||||
@ -2267,27 +2280,6 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_DIR_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 11, .opc2 = 1,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
@ -2430,6 +2422,54 @@ static const ARMCPRegInfo gicv3_cpuif_reginfo[] = {
|
||||
},
|
||||
};
|
||||
|
||||
static const ARMCPRegInfo gicv3_cpuif_icc_apxr1_reginfo[] = {
|
||||
{ .name = "ICC_AP0R1_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 5,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R1_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 1,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
};
|
||||
|
||||
static const ARMCPRegInfo gicv3_cpuif_icc_apxr23_reginfo[] = {
|
||||
{ .name = "ICC_AP0R2_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 6,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP0R3_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 8, .opc2 = 7,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_fiq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R2_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 2,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
{ .name = "ICC_AP1R3_EL1", .state = ARM_CP_STATE_BOTH,
|
||||
.opc0 = 3, .opc1 = 0, .crn = 12, .crm = 9, .opc2 = 3,
|
||||
.type = ARM_CP_IO | ARM_CP_NO_RAW,
|
||||
.access = PL1_RW, .accessfn = gicv3_irq_access,
|
||||
.readfn = icc_ap_read,
|
||||
.writefn = icc_ap_write,
|
||||
},
|
||||
};
|
||||
|
||||
static uint64_t ich_ap_read(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
{
|
||||
GICv3CPUState *cs = icc_cs_from_env(env);
|
||||
@ -2755,6 +2795,16 @@ void gicv3_init_cpuif(GICv3State *s)
|
||||
ARMCPU *cpu = ARM_CPU(qemu_get_cpu(i));
|
||||
GICv3CPUState *cs = &s->cpu[i];
|
||||
|
||||
/*
|
||||
* If the CPU doesn't define a GICv3 configuration, probably because
|
||||
* in real hardware it doesn't have one, then we use default values
|
||||
* matching the one used by most Arm CPUs. This applies to:
|
||||
* cpu->gic_num_lrs
|
||||
* cpu->gic_vpribits
|
||||
* cpu->gic_vprebits
|
||||
* cpu->gic_pribits
|
||||
*/
|
||||
|
||||
/* Note that we can't just use the GICv3CPUState as an opaque pointer
|
||||
* in define_arm_cp_regs_with_opaque(), because when we're called back
|
||||
* it might be with code translated by CPU 0 but run by CPU 1, in
|
||||
@ -2763,13 +2813,56 @@ void gicv3_init_cpuif(GICv3State *s)
|
||||
* get back to the GICv3CPUState from the CPUARMState.
|
||||
*/
|
||||
define_arm_cp_regs(cpu, gicv3_cpuif_reginfo);
|
||||
if (arm_feature(&cpu->env, ARM_FEATURE_EL2)
|
||||
&& cpu->gic_num_lrs) {
|
||||
|
||||
/*
|
||||
* The CPU implementation specifies the number of supported
|
||||
* bits of physical priority. For backwards compatibility
|
||||
* of migration, we have a compat property that forces use
|
||||
* of 8 priority bits regardless of what the CPU really has.
|
||||
*/
|
||||
if (s->force_8bit_prio) {
|
||||
cs->pribits = 8;
|
||||
} else {
|
||||
cs->pribits = cpu->gic_pribits ?: 5;
|
||||
}
|
||||
|
||||
/*
|
||||
* The GICv3 has separate ID register fields for virtual priority
|
||||
* and preemption bit values, but only a single ID register field
|
||||
* for the physical priority bits. The preemption bit count is
|
||||
* always the same as the priority bit count, except that 8 bits
|
||||
* of priority means 7 preemption bits. We precalculate the
|
||||
* preemption bits because it simplifies the code and makes the
|
||||
* parallels between the virtual and physical bits of the GIC
|
||||
* a bit clearer.
|
||||
*/
|
||||
cs->prebits = cs->pribits;
|
||||
if (cs->prebits == 8) {
|
||||
cs->prebits--;
|
||||
}
|
||||
/*
|
||||
* Check that CPU code defining pribits didn't violate
|
||||
* architectural constraints our implementation relies on.
|
||||
*/
|
||||
g_assert(cs->pribits >= 4 && cs->pribits <= 8);
|
||||
|
||||
/*
|
||||
* gicv3_cpuif_reginfo[] defines ICC_AP*R0_EL1; add definitions
|
||||
* for ICC_AP*R{1,2,3}_EL1 if the prebits value requires them.
|
||||
*/
|
||||
if (cs->prebits >= 6) {
|
||||
define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr1_reginfo);
|
||||
}
|
||||
if (cs->prebits == 7) {
|
||||
define_arm_cp_regs(cpu, gicv3_cpuif_icc_apxr23_reginfo);
|
||||
}
|
||||
|
||||
if (arm_feature(&cpu->env, ARM_FEATURE_EL2)) {
|
||||
int j;
|
||||
|
||||
cs->num_list_regs = cpu->gic_num_lrs;
|
||||
cs->vpribits = cpu->gic_vpribits;
|
||||
cs->vprebits = cpu->gic_vprebits;
|
||||
cs->num_list_regs = cpu->gic_num_lrs ?: 4;
|
||||
cs->vpribits = cpu->gic_vpribits ?: 5;
|
||||
cs->vprebits = cpu->gic_vprebits ?: 5;
|
||||
|
||||
/* Check against architectural constraints: getting these
|
||||
* wrong would be a bug in the CPU code defining these,
|
||||
|
@ -673,9 +673,19 @@ static void arm_gicv3_icc_reset(CPUARMState *env, const ARMCPRegInfo *ri)
|
||||
s = c->gic;
|
||||
|
||||
c->icc_pmr_el1 = 0;
|
||||
c->icc_bpr[GICV3_G0] = GIC_MIN_BPR;
|
||||
c->icc_bpr[GICV3_G1] = GIC_MIN_BPR;
|
||||
c->icc_bpr[GICV3_G1NS] = GIC_MIN_BPR;
|
||||
/*
|
||||
* Architecturally the reset value of the ICC_BPR registers
|
||||
* is UNKNOWN. We set them all to 0 here; when the kernel
|
||||
* uses these values to program the ICH_VMCR_EL2 fields that
|
||||
* determine the guest-visible ICC_BPR register values, the
|
||||
* hardware's "writing a value less than the minimum sets
|
||||
* the field to the minimum value" behaviour will result in
|
||||
* them effectively resetting to the correct minimum value
|
||||
* for the host GIC.
|
||||
*/
|
||||
c->icc_bpr[GICV3_G0] = 0;
|
||||
c->icc_bpr[GICV3_G1] = 0;
|
||||
c->icc_bpr[GICV3_G1NS] = 0;
|
||||
|
||||
c->icc_sre_el1 = 0x7;
|
||||
memset(c->icc_apr, 0, sizeof(c->icc_apr));
|
||||
|
@ -152,7 +152,7 @@ static m5206_timer_state *m5206_timer_init(qemu_irq irq)
|
||||
m5206_timer_state *s;
|
||||
|
||||
s = g_new0(m5206_timer_state, 1);
|
||||
s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(m5206_timer_trigger, s, PTIMER_POLICY_LEGACY);
|
||||
s->irq = irq;
|
||||
m5206_timer_reset(s);
|
||||
return s;
|
||||
|
@ -197,7 +197,7 @@ static void mcf5208_sys_init(MemoryRegion *address_space, qemu_irq *pic)
|
||||
/* Timers. */
|
||||
for (i = 0; i < 2; i++) {
|
||||
s = g_new0(m5208_timer_state, 1);
|
||||
s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(m5208_timer_trigger, s, PTIMER_POLICY_LEGACY);
|
||||
memory_region_init_io(&s->iomem, NULL, &m5208_timer_ops, s,
|
||||
"m5208-timer", 0x00004000);
|
||||
memory_region_add_subregion(address_space, 0xfc080000 + 0x4000 * i,
|
||||
|
@ -1079,7 +1079,7 @@ static void xlnx_zynqmp_can_realize(DeviceState *dev, Error **errp)
|
||||
|
||||
/* Allocate a new timer. */
|
||||
s->can_timer = ptimer_init(xlnx_zynqmp_can_ptimer_cb, s,
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
|
||||
ptimer_transaction_begin(s->can_timer);
|
||||
|
||||
|
@ -393,7 +393,7 @@ static void etsec_realize(DeviceState *dev, Error **errp)
|
||||
object_get_typename(OBJECT(dev)), dev->id, etsec);
|
||||
qemu_format_nic_info_str(qemu_get_queue(etsec->nic), etsec->conf.macaddr.a);
|
||||
|
||||
etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_DEFAULT);
|
||||
etsec->ptimer = ptimer_init(etsec_timer_hit, etsec, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(etsec->ptimer);
|
||||
ptimer_set_freq(etsec->ptimer, 100);
|
||||
ptimer_transaction_commit(etsec->ptimer);
|
||||
|
@ -1363,7 +1363,7 @@ static void lan9118_realize(DeviceState *dev, Error **errp)
|
||||
s->pmt_ctrl = 1;
|
||||
s->txp = &s->tx_packet;
|
||||
|
||||
s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(lan9118_tick, s, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(s->timer);
|
||||
ptimer_set_freq(s->timer, 10000);
|
||||
ptimer_set_limit(s->timer, 0xffff, 1);
|
||||
|
@ -564,14 +564,14 @@ static void exynos4210_rtc_init(Object *obj)
|
||||
Exynos4210RTCState *s = EXYNOS4210_RTC(obj);
|
||||
SysBusDevice *dev = SYS_BUS_DEVICE(obj);
|
||||
|
||||
s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_DEFAULT);
|
||||
s->ptimer = ptimer_init(exynos4210_rtc_tick, s, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(s->ptimer);
|
||||
ptimer_set_freq(s->ptimer, RTC_BASE_FREQ);
|
||||
exynos4210_rtc_update_freq(s, 0);
|
||||
ptimer_transaction_commit(s->ptimer);
|
||||
|
||||
s->ptimer_1Hz = ptimer_init(exynos4210_rtc_1Hz_tick,
|
||||
s, PTIMER_POLICY_DEFAULT);
|
||||
s, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(s->ptimer_1Hz);
|
||||
ptimer_set_freq(s->ptimer_1Hz, RTC_BASE_FREQ);
|
||||
ptimer_transaction_commit(s->ptimer_1Hz);
|
||||
|
@ -275,7 +275,7 @@ static void a10_pit_init(Object *obj)
|
||||
|
||||
tc->container = s;
|
||||
tc->index = i;
|
||||
s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_DEFAULT);
|
||||
s->timer[i] = ptimer_init(a10_pit_timer_cb, tc, PTIMER_POLICY_LEGACY);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -185,7 +185,7 @@ static void altera_timer_realize(DeviceState *dev, Error **errp)
|
||||
return;
|
||||
}
|
||||
|
||||
t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_DEFAULT);
|
||||
t->ptimer = ptimer_init(timer_hit, t, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(t->ptimer);
|
||||
ptimer_set_freq(t->ptimer, t->freq_hz);
|
||||
ptimer_transaction_commit(t->ptimer);
|
||||
|
@ -180,7 +180,7 @@ static arm_timer_state *arm_timer_init(uint32_t freq)
|
||||
s->freq = freq;
|
||||
s->control = TIMER_CTRL_IE;
|
||||
|
||||
s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(arm_timer_tick, s, PTIMER_POLICY_LEGACY);
|
||||
vmstate_register(NULL, VMSTATE_INSTANCE_ID_ANY, &vmstate_arm_timer, s);
|
||||
return s;
|
||||
}
|
||||
|
@ -139,7 +139,7 @@ static void digic_timer_init(Object *obj)
|
||||
{
|
||||
DigicTimerState *s = DIGIC_TIMER(obj);
|
||||
|
||||
s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_DEFAULT);
|
||||
s->ptimer = ptimer_init(digic_timer_tick, NULL, PTIMER_POLICY_LEGACY);
|
||||
|
||||
/*
|
||||
* FIXME: there is no documentation on Digic timer
|
||||
|
@ -370,9 +370,9 @@ static void etraxfs_timer_realize(DeviceState *dev, Error **errp)
|
||||
ETRAXTimerState *t = ETRAX_TIMER(dev);
|
||||
SysBusDevice *sbd = SYS_BUS_DEVICE(dev);
|
||||
|
||||
t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_DEFAULT);
|
||||
t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_DEFAULT);
|
||||
t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_DEFAULT);
|
||||
t->ptimer_t0 = ptimer_init(timer0_hit, t, PTIMER_POLICY_LEGACY);
|
||||
t->ptimer_t1 = ptimer_init(timer1_hit, t, PTIMER_POLICY_LEGACY);
|
||||
t->ptimer_wd = ptimer_init(watchdog_hit, t, PTIMER_POLICY_LEGACY);
|
||||
|
||||
sysbus_init_irq(sbd, &t->irq);
|
||||
sysbus_init_irq(sbd, &t->nmi);
|
||||
|
@ -1503,17 +1503,17 @@ static void exynos4210_mct_init(Object *obj)
|
||||
|
||||
/* Global timer */
|
||||
s->g_timer.ptimer_frc = ptimer_init(exynos4210_gfrc_event, s,
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
memset(&s->g_timer.reg, 0, sizeof(struct gregs));
|
||||
|
||||
/* Local timers */
|
||||
for (i = 0; i < 2; i++) {
|
||||
s->l_timer[i].tick_timer.ptimer_tick =
|
||||
ptimer_init(exynos4210_ltick_event, &s->l_timer[i],
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
s->l_timer[i].ptimer_frc =
|
||||
ptimer_init(exynos4210_lfrc_event, &s->l_timer[i],
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
s->l_timer[i].id = i;
|
||||
}
|
||||
|
||||
|
@ -400,7 +400,7 @@ static void exynos4210_pwm_init(Object *obj)
|
||||
sysbus_init_irq(dev, &s->timer[i].irq);
|
||||
s->timer[i].ptimer = ptimer_init(exynos4210_pwm_tick,
|
||||
&s->timer[i],
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
s->timer[i].id = i;
|
||||
s->timer[i].parent = s;
|
||||
}
|
||||
|
@ -383,7 +383,7 @@ static void grlib_gptimer_realize(DeviceState *dev, Error **errp)
|
||||
|
||||
timer->unit = unit;
|
||||
timer->ptimer = ptimer_init(grlib_gptimer_hit, timer,
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
timer->id = i;
|
||||
|
||||
/* One IRQ line for each timer */
|
||||
|
@ -347,9 +347,9 @@ static void imx_epit_realize(DeviceState *dev, Error **errp)
|
||||
0x00001000);
|
||||
sysbus_init_mmio(sbd, &s->iomem);
|
||||
|
||||
s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer_reload = ptimer_init(imx_epit_reload, s, PTIMER_POLICY_LEGACY);
|
||||
|
||||
s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer_cmp = ptimer_init(imx_epit_cmp, s, PTIMER_POLICY_LEGACY);
|
||||
}
|
||||
|
||||
static void imx_epit_class_init(ObjectClass *klass, void *data)
|
||||
|
@ -505,7 +505,7 @@ static void imx_gpt_realize(DeviceState *dev, Error **errp)
|
||||
0x00001000);
|
||||
sysbus_init_mmio(sbd, &s->iomem);
|
||||
|
||||
s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(imx_gpt_timeout, s, PTIMER_POLICY_LEGACY);
|
||||
}
|
||||
|
||||
static void imx_gpt_class_init(ObjectClass *klass, void *data)
|
||||
|
@ -232,7 +232,7 @@ static void mss_timer_init(Object *obj)
|
||||
for (i = 0; i < NUM_TIMERS; i++) {
|
||||
struct Msf2Timer *st = &t->timers[i];
|
||||
|
||||
st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_DEFAULT);
|
||||
st->ptimer = ptimer_init(timer_hit, st, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(st->ptimer);
|
||||
ptimer_set_freq(st->ptimer, t->freq_hz);
|
||||
ptimer_transaction_commit(st->ptimer);
|
||||
|
@ -239,7 +239,7 @@ static void *sh_timer_init(uint32_t freq, int feat, qemu_irq irq)
|
||||
s->enabled = 0;
|
||||
s->irq = irq;
|
||||
|
||||
s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_DEFAULT);
|
||||
s->timer = ptimer_init(sh_timer_tick, s, PTIMER_POLICY_LEGACY);
|
||||
|
||||
sh_timer_write(s, OFFSET_TCOR >> 2, s->tcor);
|
||||
sh_timer_write(s, OFFSET_TCNT >> 2, s->tcnt);
|
||||
|
@ -405,7 +405,7 @@ static void slavio_timer_init(Object *obj)
|
||||
tc->timer_index = i;
|
||||
|
||||
s->cputimer[i].timer = ptimer_init(slavio_timer_irq, tc,
|
||||
PTIMER_POLICY_DEFAULT);
|
||||
PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(s->cputimer[i].timer);
|
||||
ptimer_set_period(s->cputimer[i].timer, TIMER_PERIOD);
|
||||
ptimer_transaction_commit(s->cputimer[i].timer);
|
||||
|
@ -223,7 +223,7 @@ static void xilinx_timer_realize(DeviceState *dev, Error **errp)
|
||||
|
||||
xt->parent = t;
|
||||
xt->nr = i;
|
||||
xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_DEFAULT);
|
||||
xt->ptimer = ptimer_init(timer_hit, xt, PTIMER_POLICY_LEGACY);
|
||||
ptimer_transaction_begin(xt->ptimer);
|
||||
ptimer_set_freq(xt->ptimer, t->freq_hz);
|
||||
ptimer_transaction_commit(xt->ptimer);
|
||||
|
@@ -39,8 +39,7 @@ struct ZynqXADCState {
     uint16_t xadc_dfifo[ZYNQ_XADC_FIFO_DEPTH];
     uint16_t xadc_dfifo_entries;
 
-    struct IRQState *qemu_irq;
-
+    qemu_irq irq;
 };
 
 #endif /* ZYNQ_XADC_H */
@@ -51,11 +51,6 @@
 /* Maximum number of list registers (architectural limit) */
 #define GICV3_LR_MAX 16
 
-/* Minimum BPR for Secure, or when security not enabled */
-#define GIC_MIN_BPR 0
-/* Minimum BPR for Nonsecure when security is enabled */
-#define GIC_MIN_BPR_NS (GIC_MIN_BPR + 1)
-
 /* For some distributor fields we want to model the array of 32-bit
  * register values which hold various bitmaps corresponding to enabled,
  * pending, etc bits. These macros and functions facilitate that; the
@@ -206,6 +201,8 @@ struct GICv3CPUState {
     int num_list_regs;
     int vpribits; /* number of virtual priority bits */
     int vprebits; /* number of virtual preemption bits */
+    int pribits; /* number of physical priority bits */
+    int prebits; /* number of physical preemption bits */
 
     /* Current highest priority pending interrupt for this CPU.
      * This is cached information that can be recalculated from the
@@ -251,6 +248,7 @@ struct GICv3State {
     uint32_t revision;
     bool lpi_enable;
     bool security_extn;
+    bool force_8bit_prio;
     bool irq_reset_nonsecure;
     bool gicd_no_migration_shift_bug;
 
@@ -33,9 +33,17 @@
  * to stderr when the guest attempts to enable the timer.
  */
 
-/* The default ptimer policy retains backward compatibility with the legacy
- * timers. Custom policies are adjusting the default one. Consider providing
- * a correct policy for your timer.
+/*
+ * The 'legacy' ptimer policy retains backward compatibility with the
+ * traditional ptimer behaviour from before policy flags were introduced.
+ * It has several weird behaviours which don't match typical hardware
+ * timer behaviour. For a new device using ptimers, you should not
+ * use PTIMER_POLICY_LEGACY, but instead check the actual behaviour
+ * that you need and specify the right set of policy flags to get that.
+ *
+ * If you are overhauling an existing device that uses PTIMER_POLICY_LEGACY
+ * and are in a position to check or test the real hardware behaviour,
+ * consider updating it to specify the right policy flags.
  *
  * The rough edges of the default policy:
  * - Starting to run with a period = 0 emits error message and stops the
@@ -54,7 +62,7 @@
  * since the last period, effectively restarting the timer with a
  * counter = counter value at the moment of change (.i.e. one less).
  */
-#define PTIMER_POLICY_DEFAULT 0
+#define PTIMER_POLICY_LEGACY 0
 
 /* Periodic timer counter stays with "0" for a one period before wrapping
  * around. */
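The comment above says new devices should ask for the behaviours they actually want rather than inheriting PTIMER_POLICY_LEGACY. As a rough, hedged sketch (not taken from this series, and not a complete QEMU device), a hypothetical device's realize code might pick explicit policy flags like this; MyTimerState, my_timer_tick() and the 1 MHz frequency are made up for the example, while the policy flag names are the ones defined later in ptimer.h.

#include "qemu/osdep.h"
#include "hw/ptimer.h"

/* Hypothetical device state and tick callback, for illustration only. */
typedef struct MyTimerState {
    ptimer_state *ptimer;
} MyTimerState;

static void my_timer_tick(void *opaque)
{
    /* Raise an interrupt, reload the counter, etc. */
}

static void my_timer_realize(MyTimerState *s)
{
    /*
     * Request the specific behaviours this (imaginary) hardware has,
     * instead of the legacy quirks bundled into PTIMER_POLICY_LEGACY.
     */
    s->ptimer = ptimer_init(my_timer_tick, s,
                            PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD |
                            PTIMER_POLICY_NO_IMMEDIATE_TRIGGER |
                            PTIMER_POLICY_NO_COUNTER_ROUND_DOWN);

    ptimer_transaction_begin(s->ptimer);
    ptimer_set_freq(s->ptimer, 1000000);   /* 1 MHz, made-up value */
    ptimer_transaction_commit(s->ptimer);
}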
@@ -461,4 +461,28 @@ static inline bool cp_access_ok(int current_el,
 /* Raw read of a coprocessor register (as needed for migration, etc) */
 uint64_t read_raw_cp_reg(CPUARMState *env, const ARMCPRegInfo *ri);
 
+/*
+ * Return true if the cp register encoding is in the "feature ID space" as
+ * defined by FEAT_IDST (and thus should be reported with ER_ELx.EC
+ * as EC_SYSTEMREGISTERTRAP rather than EC_UNCATEGORIZED).
+ */
+static inline bool arm_cpreg_encoding_in_idspace(uint8_t opc0, uint8_t opc1,
+                                                 uint8_t opc2,
+                                                 uint8_t crn, uint8_t crm)
+{
+    return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) &&
+        crn == 0 && crm < 8;
+}
+
+/*
+ * As arm_cpreg_encoding_in_idspace(), but take the encoding from an
+ * ARMCPRegInfo.
+ */
+static inline bool arm_cpreg_in_idspace(const ARMCPRegInfo *ri)
+{
+    return ri->state == ARM_CP_STATE_AA64 &&
+           arm_cpreg_encoding_in_idspace(ri->opc0, ri->opc1, ri->opc2,
+                                         ri->crn, ri->crm);
+}
+
 #endif /* TARGET_ARM_CPREGS_H */
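For a concrete sense of which encodings the ID-space check above matches (an editorial example, not part of the patch): ID_AA64MMFR2_EL1 is encoded as op0=3, op1=0, CRn=0, CRm=7, op2=2, so it is in the ID space; SCTLR_EL1 (op0=3, op1=0, CRn=1, CRm=0, op2=0) is not. The standalone program below re-states the same predicate for easy experimentation.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same predicate as arm_cpreg_encoding_in_idspace(), copied for standalone use. */
static bool in_idspace(uint8_t opc0, uint8_t opc1, uint8_t opc2,
                       uint8_t crn, uint8_t crm)
{
    return opc0 == 3 && (opc1 == 0 || opc1 == 1 || opc1 == 3) &&
        crn == 0 && crm < 8;
}

int main(void)
{
    /* ID_AA64MMFR2_EL1: op0=3, op1=0, CRn=0, CRm=7, op2=2 -> in the ID space */
    printf("ID_AA64MMFR2_EL1: %d\n", in_idspace(3, 0, 2, 0, 7));
    /* SCTLR_EL1: op0=3, op1=0, CRn=1, CRm=0, op2=0 -> not in the ID space */
    printf("SCTLR_EL1: %d\n", in_idspace(3, 0, 0, 1, 0));
    return 0;
}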
@ -201,9 +201,11 @@ static void arm_cpu_reset(DeviceState *dev)
|
||||
/* Trap on btype=3 for PACIxSP. */
|
||||
env->cp15.sctlr_el[1] |= SCTLR_BT0;
|
||||
/* and to the FP/Neon instructions */
|
||||
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 2, 3);
|
||||
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
|
||||
CPACR_EL1, FPEN, 3);
|
||||
/* and to the SVE instructions */
|
||||
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 16, 2, 3);
|
||||
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
|
||||
CPACR_EL1, ZEN, 3);
|
||||
/* with reasonable vector length */
|
||||
if (cpu_isar_feature(aa64_sve, cpu)) {
|
||||
env->vfp.zcr_el[1] =
|
||||
@ -252,7 +254,10 @@ static void arm_cpu_reset(DeviceState *dev)
|
||||
} else {
|
||||
#if defined(CONFIG_USER_ONLY)
|
||||
/* Userspace expects access to cp10 and cp11 for FP/Neon */
|
||||
env->cp15.cpacr_el1 = deposit64(env->cp15.cpacr_el1, 20, 4, 0xf);
|
||||
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
|
||||
CPACR, CP10, 3);
|
||||
env->cp15.cpacr_el1 = FIELD_DP64(env->cp15.cpacr_el1,
|
||||
CPACR, CP11, 3);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
@ -362,6 +362,7 @@ typedef struct CPUArchState {
|
||||
uint32_t pmsav5_data_ap; /* PMSAv5 MPU data access permissions */
|
||||
uint32_t pmsav5_insn_ap; /* PMSAv5 MPU insn access permissions */
|
||||
uint64_t hcr_el2; /* Hypervisor configuration register */
|
||||
uint64_t hcrx_el2; /* Extended Hypervisor configuration register */
|
||||
uint64_t scr_el3; /* Secure configuration register. */
|
||||
union { /* Fault status registers. */
|
||||
struct {
|
||||
@ -965,6 +966,7 @@ struct ArchCPU {
|
||||
uint64_t id_aa64dfr0;
|
||||
uint64_t id_aa64dfr1;
|
||||
uint64_t id_aa64zfr0;
|
||||
uint64_t reset_pmcr_el0;
|
||||
} isar;
|
||||
uint64_t midr;
|
||||
uint32_t revidr;
|
||||
@ -1002,6 +1004,7 @@ struct ArchCPU {
|
||||
int gic_num_lrs; /* number of list registers */
|
||||
int gic_vpribits; /* number of virtual priority bits */
|
||||
int gic_vprebits; /* number of virtual preemption bits */
|
||||
int gic_pribits; /* number of physical priority bits */
|
||||
|
||||
/* Whether the cfgend input is high (i.e. this CPU should reset into
|
||||
* big-endian mode). This setting isn't used directly: instead it modifies
|
||||
@ -1258,11 +1261,45 @@ void pmu_init(ARMCPU *cpu);
|
||||
#define SCTLR_SPINTMASK (1ULL << 62) /* FEAT_NMI */
|
||||
#define SCTLR_TIDCP (1ULL << 63) /* FEAT_TIDCP1 */
|
||||
|
||||
#define CPTR_TCPAC (1U << 31)
|
||||
#define CPTR_TTA (1U << 20)
|
||||
#define CPTR_TFP (1U << 10)
|
||||
#define CPTR_TZ (1U << 8) /* CPTR_EL2 */
|
||||
#define CPTR_EZ (1U << 8) /* CPTR_EL3 */
|
||||
/* Bit definitions for CPACR (AArch32 only) */
|
||||
FIELD(CPACR, CP10, 20, 2)
|
||||
FIELD(CPACR, CP11, 22, 2)
|
||||
FIELD(CPACR, TRCDIS, 28, 1) /* matches CPACR_EL1.TTA */
|
||||
FIELD(CPACR, D32DIS, 30, 1) /* up to v7; RAZ in v8 */
|
||||
FIELD(CPACR, ASEDIS, 31, 1)
|
||||
|
||||
/* Bit definitions for CPACR_EL1 (AArch64 only) */
|
||||
FIELD(CPACR_EL1, ZEN, 16, 2)
|
||||
FIELD(CPACR_EL1, FPEN, 20, 2)
|
||||
FIELD(CPACR_EL1, SMEN, 24, 2)
|
||||
FIELD(CPACR_EL1, TTA, 28, 1) /* matches CPACR.TRCDIS */
|
||||
|
||||
/* Bit definitions for HCPTR (AArch32 only) */
|
||||
FIELD(HCPTR, TCP10, 10, 1)
|
||||
FIELD(HCPTR, TCP11, 11, 1)
|
||||
FIELD(HCPTR, TASE, 15, 1)
|
||||
FIELD(HCPTR, TTA, 20, 1)
|
||||
FIELD(HCPTR, TAM, 30, 1) /* matches CPTR_EL2.TAM */
|
||||
FIELD(HCPTR, TCPAC, 31, 1) /* matches CPTR_EL2.TCPAC */
|
||||
|
||||
/* Bit definitions for CPTR_EL2 (AArch64 only) */
|
||||
FIELD(CPTR_EL2, TZ, 8, 1) /* !E2H */
|
||||
FIELD(CPTR_EL2, TFP, 10, 1) /* !E2H, matches HCPTR.TCP10 */
|
||||
FIELD(CPTR_EL2, TSM, 12, 1) /* !E2H */
|
||||
FIELD(CPTR_EL2, ZEN, 16, 2) /* E2H */
|
||||
FIELD(CPTR_EL2, FPEN, 20, 2) /* E2H */
|
||||
FIELD(CPTR_EL2, SMEN, 24, 2) /* E2H */
|
||||
FIELD(CPTR_EL2, TTA, 28, 1)
|
||||
FIELD(CPTR_EL2, TAM, 30, 1) /* matches HCPTR.TAM */
|
||||
FIELD(CPTR_EL2, TCPAC, 31, 1) /* matches HCPTR.TCPAC */
|
||||
|
||||
/* Bit definitions for CPTR_EL3 (AArch64 only) */
|
||||
FIELD(CPTR_EL3, EZ, 8, 1)
|
||||
FIELD(CPTR_EL3, TFP, 10, 1)
|
||||
FIELD(CPTR_EL3, ESM, 12, 1)
|
||||
FIELD(CPTR_EL3, TTA, 20, 1)
|
||||
FIELD(CPTR_EL3, TAM, 30, 1)
|
||||
FIELD(CPTR_EL3, TCPAC, 31, 1)
|
||||
|
||||
#define MDCR_EPMAD (1U << 21)
|
||||
#define MDCR_EDAD (1U << 20)
|
||||
@ -1543,6 +1580,19 @@ static inline void xpsr_write(CPUARMState *env, uint32_t val, uint32_t mask)
|
||||
#define HCR_TWEDEN (1ULL << 59)
|
||||
#define HCR_TWEDEL MAKE_64BIT_MASK(60, 4)
|
||||
|
||||
#define HCRX_ENAS0 (1ULL << 0)
|
||||
#define HCRX_ENALS (1ULL << 1)
|
||||
#define HCRX_ENASR (1ULL << 2)
|
||||
#define HCRX_FNXS (1ULL << 3)
|
||||
#define HCRX_FGTNXS (1ULL << 4)
|
||||
#define HCRX_SMPME (1ULL << 5)
|
||||
#define HCRX_TALLINT (1ULL << 6)
|
||||
#define HCRX_VINMI (1ULL << 7)
|
||||
#define HCRX_VFNMI (1ULL << 8)
|
||||
#define HCRX_CMOW (1ULL << 9)
|
||||
#define HCRX_MCE2 (1ULL << 10)
|
||||
#define HCRX_MSCEN (1ULL << 11)
|
||||
|
||||
#define HPFAR_NS (1ULL << 63)
|
||||
|
||||
#define SCR_NS (1U << 0)
|
||||
@ -2310,6 +2360,7 @@ static inline bool arm_is_el2_enabled(CPUARMState *env)
|
||||
* Not included here is HCR_RW.
|
||||
*/
|
||||
uint64_t arm_hcr_el2_eff(CPUARMState *env);
|
||||
uint64_t arm_hcrx_el2_eff(CPUARMState *env);
|
||||
|
||||
/* Return true if the specified exception level is running in AArch64 state. */
|
||||
static inline bool arm_el_is_aa64(CPUARMState *env, int el)
|
||||
@ -3931,6 +3982,11 @@ static inline bool isar_feature_aa64_ats1e1(const ARMISARegisters *id)
|
||||
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, PAN) >= 2;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_hcx(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64(id->id_aa64mmfr1, ID_AA64MMFR1, HCX) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_uao(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, UAO) != 0;
|
||||
@ -3941,6 +3997,16 @@ static inline bool isar_feature_aa64_st(const ARMISARegisters *id)
|
||||
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, ST) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_fwb(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, FWB) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_ids(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64(id->id_aa64mmfr2, ID_AA64MMFR2, IDS) != 0;
|
||||
}
|
||||
|
||||
static inline bool isar_feature_aa64_bti(const ARMISARegisters *id)
|
||||
{
|
||||
return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, BT) != 0;
|
||||
|
@ -79,6 +79,7 @@ static void aarch64_a57_initfn(Object *obj)
|
||||
cpu->isar.id_aa64isar0 = 0x00011120;
|
||||
cpu->isar.id_aa64mmfr0 = 0x00001124;
|
||||
cpu->isar.dbgdidr = 0x3516d000;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41013000;
|
||||
cpu->clidr = 0x0a200023;
|
||||
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
|
||||
@ -87,6 +88,7 @@ static void aarch64_a57_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
define_cortex_a72_a57_a53_cp_reginfo(cpu);
|
||||
}
|
||||
|
||||
@ -132,6 +134,7 @@ static void aarch64_a53_initfn(Object *obj)
|
||||
cpu->isar.id_aa64isar0 = 0x00011120;
|
||||
cpu->isar.id_aa64mmfr0 = 0x00001122; /* 40 bit physical addr */
|
||||
cpu->isar.dbgdidr = 0x3516d000;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41033000;
|
||||
cpu->clidr = 0x0a200023;
|
||||
cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
|
||||
@ -140,6 +143,7 @@ static void aarch64_a53_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
define_cortex_a72_a57_a53_cp_reginfo(cpu);
|
||||
}
|
||||
|
||||
@ -183,6 +187,7 @@ static void aarch64_a72_initfn(Object *obj)
|
||||
cpu->isar.id_aa64isar0 = 0x00011120;
|
||||
cpu->isar.id_aa64mmfr0 = 0x00001124;
|
||||
cpu->isar.dbgdidr = 0x3516d000;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41023000;
|
||||
cpu->clidr = 0x0a200023;
|
||||
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
|
||||
@ -191,6 +196,7 @@ static void aarch64_a72_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
define_cortex_a72_a57_a53_cp_reginfo(cpu);
|
||||
}
|
||||
|
||||
@ -252,11 +258,15 @@ static void aarch64_a76_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
|
||||
/* From B5.1 AdvSIMD AArch64 register summary */
|
||||
cpu->isar.mvfr0 = 0x10110222;
|
||||
cpu->isar.mvfr1 = 0x13211111;
|
||||
cpu->isar.mvfr2 = 0x00000043;
|
||||
|
||||
/* From D5.1 AArch64 PMU register summary */
|
||||
cpu->isar.reset_pmcr_el0 = 0x410b3000;
|
||||
}
|
||||
|
||||
static void aarch64_neoverse_n1_initfn(Object *obj)
|
||||
@ -317,11 +327,15 @@ static void aarch64_neoverse_n1_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
|
||||
/* From B5.1 AdvSIMD AArch64 register summary */
|
||||
cpu->isar.mvfr0 = 0x10110222;
|
||||
cpu->isar.mvfr1 = 0x13211111;
|
||||
cpu->isar.mvfr2 = 0x00000043;
|
||||
|
||||
/* From D5.1 AArch64 PMU register summary */
|
||||
cpu->isar.reset_pmcr_el0 = 0x410c3000;
|
||||
}
|
||||
|
||||
void arm_cpu_sve_finalize(ARMCPU *cpu, Error **errp)
|
||||
@ -812,6 +826,7 @@ static void aarch64_max_initfn(Object *obj)
|
||||
{
|
||||
ARMCPU *cpu = ARM_CPU(obj);
|
||||
uint64_t t;
|
||||
uint32_t u;
|
||||
|
||||
if (kvm_enabled() || hvf_enabled()) {
|
||||
/* With KVM or HVF, '-cpu max' is identical to '-cpu host' */
|
||||
@ -842,6 +857,15 @@ static void aarch64_max_initfn(Object *obj)
|
||||
t = FIELD_DP64(t, MIDR_EL1, REVISION, 0);
|
||||
cpu->midr = t;
|
||||
|
||||
/*
|
||||
* We're going to set FEAT_S2FWB, which mandates that CLIDR_EL1.{LoUU,LoUIS}
|
||||
* are zero.
|
||||
*/
|
||||
u = cpu->clidr;
|
||||
u = FIELD_DP32(u, CLIDR_EL1, LOUIS, 0);
|
||||
u = FIELD_DP32(u, CLIDR_EL1, LOUU, 0);
|
||||
cpu->clidr = u;
|
||||
|
||||
t = cpu->isar.id_aa64isar0;
|
||||
t = FIELD_DP64(t, ID_AA64ISAR0, AES, 2); /* FEAT_PMULL */
|
||||
t = FIELD_DP64(t, ID_AA64ISAR0, SHA1, 1); /* FEAT_SHA1 */
|
||||
@ -910,6 +934,7 @@ static void aarch64_max_initfn(Object *obj)
|
||||
t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
|
||||
cpu->isar.id_aa64mmfr1 = t;
|
||||
|
||||
t = cpu->isar.id_aa64mmfr2;
|
||||
@ -918,6 +943,8 @@ static void aarch64_max_initfn(Object *obj)
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, IESB, 1); /* FEAT_IESB */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, VARANGE, 1); /* FEAT_LVA */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, ST, 1); /* FEAT_TTST */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, IDS, 1); /* FEAT_IDST */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, FWB, 1); /* FEAT_S2FWB */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, TTL, 1); /* FEAT_TTL */
|
||||
t = FIELD_DP64(t, ID_AA64MMFR2, BBM, 2); /* FEAT_BBM at level 2 */
|
||||
cpu->isar.id_aa64mmfr2 = t;
|
||||
@ -996,6 +1023,7 @@ static void aarch64_a64fx_initfn(Object *obj)
|
||||
cpu->gic_num_lrs = 4;
|
||||
cpu->gic_vpribits = 5;
|
||||
cpu->gic_vprebits = 5;
|
||||
cpu->gic_pribits = 5;
|
||||
|
||||
/* Suppport of A64FX's vector length are 128,256 and 512bit only */
|
||||
aarch64_add_sve_properties(obj);
|
||||
@ -1004,6 +1032,8 @@ static void aarch64_a64fx_initfn(Object *obj)
|
||||
set_bit(1, cpu->sve_vq_supported); /* 256bit */
|
||||
set_bit(3, cpu->sve_vq_supported); /* 512bit */
|
||||
|
||||
cpu->isar.reset_pmcr_el0 = 0x46014040;
|
||||
|
||||
/* TODO: Add A64FX specific HPC extension registers */
|
||||
}
|
||||
|
||||
|
@ -425,6 +425,7 @@ static void cortex_a8_initfn(Object *obj)
|
||||
cpu->ccsidr[1] = 0x2007e01a; /* 16k L1 icache. */
|
||||
cpu->ccsidr[2] = 0xf0000000; /* No L2 icache. */
|
||||
cpu->reset_auxcr = 2;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41002000;
|
||||
define_arm_cp_regs(cpu, cortexa8_cp_reginfo);
|
||||
}
|
||||
|
||||
@ -496,6 +497,7 @@ static void cortex_a9_initfn(Object *obj)
|
||||
cpu->clidr = (1 << 27) | (1 << 24) | 3;
|
||||
cpu->ccsidr[0] = 0xe00fe019; /* 16k L1 dcache. */
|
||||
cpu->ccsidr[1] = 0x200fe019; /* 16k L1 icache. */
|
||||
cpu->isar.reset_pmcr_el0 = 0x41093000;
|
||||
define_arm_cp_regs(cpu, cortexa9_cp_reginfo);
|
||||
}
|
||||
|
||||
@ -565,6 +567,7 @@ static void cortex_a7_initfn(Object *obj)
|
||||
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
|
||||
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
|
||||
cpu->isar.reset_pmcr_el0 = 0x41072000;
|
||||
define_arm_cp_regs(cpu, cortexa15_cp_reginfo); /* Same as A15 */
|
||||
}
|
||||
|
||||
@ -607,6 +610,7 @@ static void cortex_a15_initfn(Object *obj)
|
||||
cpu->ccsidr[0] = 0x701fe00a; /* 32K L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe00a; /* 32K L1 icache */
|
||||
cpu->ccsidr[2] = 0x711fe07a; /* 4096K L2 unified cache */
|
||||
cpu->isar.reset_pmcr_el0 = 0x410F3000;
|
||||
define_arm_cp_regs(cpu, cortexa15_cp_reginfo);
|
||||
}
|
||||
|
||||
@ -835,6 +839,7 @@ static void cortex_r5_initfn(Object *obj)
|
||||
cpu->isar.id_isar6 = 0x0;
|
||||
cpu->mp_is_up = true;
|
||||
cpu->pmsav7_dregion = 16;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41151800;
|
||||
define_arm_cp_regs(cpu, cortexr5_cp_reginfo);
|
||||
}
|
||||
|
||||
@ -1093,6 +1098,7 @@ static void arm_max_initfn(Object *obj)
|
||||
cpu->isar.id_isar5 = 0x00011121;
|
||||
cpu->isar.id_isar6 = 0;
|
||||
cpu->isar.dbgdidr = 0x3516d000;
|
||||
cpu->isar.reset_pmcr_el0 = 0x41013000;
|
||||
cpu->clidr = 0x0a200023;
|
||||
cpu->ccsidr[0] = 0x701fe00a; /* 32KB L1 dcache */
|
||||
cpu->ccsidr[1] = 0x201fe012; /* 48KB L1 icache */
|
||||
|
@ -39,7 +39,6 @@
|
||||
#include "cpregs.h"
|
||||
|
||||
#define ARM_CPU_FREQ 1000000000 /* FIXME: 1 GHz, should be configurable */
|
||||
#define PMCR_NUM_COUNTERS 4 /* QEMU IMPDEF choice */
|
||||
|
||||
#ifndef CONFIG_USER_ONLY
|
||||
|
||||
@ -767,11 +766,14 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
*/
|
||||
if (cpu_isar_feature(aa32_vfp_simd, env_archcpu(env))) {
|
||||
/* VFP coprocessor: cp10 & cp11 [23:20] */
|
||||
mask |= (1 << 31) | (1 << 30) | (0xf << 20);
|
||||
mask |= R_CPACR_ASEDIS_MASK |
|
||||
R_CPACR_D32DIS_MASK |
|
||||
R_CPACR_CP11_MASK |
|
||||
R_CPACR_CP10_MASK;
|
||||
|
||||
if (!arm_feature(env, ARM_FEATURE_NEON)) {
|
||||
/* ASEDIS [31] bit is RAO/WI */
|
||||
value |= (1 << 31);
|
||||
value |= R_CPACR_ASEDIS_MASK;
|
||||
}
|
||||
|
||||
/* VFPv3 and upwards with NEON implement 32 double precision
|
||||
@ -779,7 +781,7 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
|
||||
*/
|
||||
if (!cpu_isar_feature(aa32_simd_r32, env_archcpu(env))) {
|
||||
/* D32DIS [30] is RAO/WI if D16-31 are not implemented. */
value |= (1 << 30);
value |= R_CPACR_D32DIS_MASK;
}
}
value &= mask;
@@ -791,8 +793,8 @@ static void cpacr_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
value &= ~(0xf << 20);
value |= env->cp15.cpacr_el1 & (0xf << 20);
mask = R_CPACR_CP11_MASK | R_CPACR_CP10_MASK;
value = (value & ~mask) | (env->cp15.cpacr_el1 & mask);
}

env->cp15.cpacr_el1 = value;
@@ -808,7 +810,7 @@ static uint64_t cpacr_read(CPUARMState *env, const ARMCPRegInfo *ri)

if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
value &= ~(0xf << 20);
value = ~(R_CPACR_CP11_MASK | R_CPACR_CP10_MASK);
}
return value;
}
@@ -828,11 +830,11 @@ static CPAccessResult cpacr_access(CPUARMState *env, const ARMCPRegInfo *ri,
if (arm_feature(env, ARM_FEATURE_V8)) {
/* Check if CPACR accesses are to be trapped to EL2 */
if (arm_current_el(env) == 1 && arm_is_el2_enabled(env) &&
(env->cp15.cptr_el[2] & CPTR_TCPAC)) {
FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TCPAC)) {
return CP_ACCESS_TRAP_EL2;
/* Check if CPACR accesses are to be trapped to EL3 */
} else if (arm_current_el(env) < 3 &&
(env->cp15.cptr_el[3] & CPTR_TCPAC)) {
FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
return CP_ACCESS_TRAP_EL3;
}
}
@@ -844,7 +846,8 @@ static CPAccessResult cptr_access(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
/* Check if CPTR accesses are set to trap to EL3 */
if (arm_current_el(env) == 2 && (env->cp15.cptr_el[3] & CPTR_TCPAC)) {
if (arm_current_el(env) == 2 &&
FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TCPAC)) {
return CP_ACCESS_TRAP_EL3;
}

@@ -3187,6 +3190,12 @@ static uint64_t do_ats_write(CPUARMState *env, uint64_t value,
ret = get_phys_addr(env, value, access_type, mmu_idx, &phys_addr, &attrs,
&prot, &page_size, &fi, &cacheattrs);

/*
* ATS operations only do S1 or S1+S2 translations, so we never
* have to deal with the ARMCacheAttrs format for S2 only.
*/
assert(!cacheattrs.is_s2_format);

if (ret) {
/*
* Some kinds of translation fault must cause exceptions rather
@@ -5155,6 +5164,9 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
if (cpu_isar_feature(aa64_scxtnum, cpu)) {
valid_mask |= HCR_ENSCXT;
}
if (cpu_isar_feature(aa64_fwb, cpu)) {
valid_mask |= HCR_FWB;
}
}

/* Clear RES0 bits. */
@@ -5166,8 +5178,10 @@ static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
* HCR_PTW forbids certain page-table setups
* HCR_DC disables stage1 and enables stage2 translation
* HCR_DCT enables tagging on (disabled) stage1 translation
* HCR_FWB changes the interpretation of stage2 descriptor bits
*/
if ((env->cp15.hcr_el2 ^ value) & (HCR_VM | HCR_PTW | HCR_DC | HCR_DCT)) {
if ((env->cp15.hcr_el2 ^ value) &
(HCR_VM | HCR_PTW | HCR_DC | HCR_DCT | HCR_FWB)) {
tlb_flush(CPU(cpu));
}
env->cp15.hcr_el2 = value;
@@ -5278,6 +5292,52 @@ uint64_t arm_hcr_el2_eff(CPUARMState *env)
return ret;
}

static void hcrx_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
uint64_t valid_mask = 0;

/* No features adding bits to HCRX are implemented. */

/* Clear RES0 bits. */
env->cp15.hcrx_el2 = value & valid_mask;
}

static CPAccessResult access_hxen(CPUARMState *env, const ARMCPRegInfo *ri,
bool isread)
{
if (arm_current_el(env) < 3
&& arm_feature(env, ARM_FEATURE_EL3)
&& !(env->cp15.scr_el3 & SCR_HXEN)) {
return CP_ACCESS_TRAP_EL3;
}
return CP_ACCESS_OK;
}

static const ARMCPRegInfo hcrx_el2_reginfo = {
.name = "HCRX_EL2", .state = ARM_CP_STATE_AA64,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 2, .opc2 = 2,
.access = PL2_RW, .writefn = hcrx_write, .accessfn = access_hxen,
.fieldoffset = offsetof(CPUARMState, cp15.hcrx_el2),
};

/* Return the effective value of HCRX_EL2. */
uint64_t arm_hcrx_el2_eff(CPUARMState *env)
{
/*
* The bits in this register behave as 0 for all purposes other than
* direct reads of the register if:
* - EL2 is not enabled in the current security state,
* - SCR_EL3.HXEn is 0.
*/
if (!arm_is_el2_enabled(env)
|| (arm_feature(env, ARM_FEATURE_EL3)
&& !(env->cp15.scr_el3 & SCR_HXEN))) {
return 0;
}
return env->cp15.hcrx_el2;
}

static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
@@ -5287,8 +5347,8 @@ static void cptr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
*/
if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
value &= ~(0x3 << 10);
value |= env->cp15.cptr_el[2] & (0x3 << 10);
uint64_t mask = R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
value = (value & ~mask) | (env->cp15.cptr_el[2] & mask);
}
env->cp15.cptr_el[2] = value;
}
@@ -5303,7 +5363,7 @@ static uint64_t cptr_el2_read(CPUARMState *env, const ARMCPRegInfo *ri)

if (arm_feature(env, ARM_FEATURE_EL3) && !arm_el_is_aa64(env, 3) &&
!arm_is_secure(env) && !extract32(env->cp15.nsacr, 10, 1)) {
value |= 0x3 << 10;
value |= R_HCPTR_TCP11_MASK | R_HCPTR_TCP10_MASK;
}
return value;
}
@@ -5533,13 +5593,6 @@ static const ARMCPRegInfo el2_cp_reginfo[] = {
.resetvalue = 0,
.writefn = gt_hyp_ctl_write, .raw_writefn = raw_write },
#endif
/* The only field of MDCR_EL2 that has a defined architectural reset value
* is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
*/
{ .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
.access = PL2_RW, .resetvalue = PMCR_NUM_COUNTERS,
.fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2), },
{ .name = "HPFAR", .state = ARM_CP_STATE_AA32,
.cp = 15, .opc1 = 4, .crn = 6, .crm = 0, .opc2 = 4,
.access = PL2_RW, .accessfn = access_el3_aa32ns,
@@ -6098,8 +6151,7 @@ int sve_exception_el(CPUARMState *env, int el)
uint64_t hcr_el2 = arm_hcr_el2_eff(env);

if (el <= 1 && (hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
/* Check CPACR.ZEN. */
switch (extract32(env->cp15.cpacr_el1, 16, 2)) {
switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, ZEN)) {
case 1:
if (el != 0) {
break;
@@ -6112,7 +6164,7 @@ int sve_exception_el(CPUARMState *env, int el)
}

/* Check CPACR.FPEN. */
switch (extract32(env->cp15.cpacr_el1, 20, 2)) {
switch (FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN)) {
case 1:
if (el != 0) {
break;
@@ -6129,8 +6181,7 @@ int sve_exception_el(CPUARMState *env, int el)
*/
if (el <= 2) {
if (hcr_el2 & HCR_E2H) {
/* Check CPTR_EL2.ZEN. */
switch (extract32(env->cp15.cptr_el[2], 16, 2)) {
switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, ZEN)) {
case 1:
if (el != 0 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -6141,8 +6192,7 @@ int sve_exception_el(CPUARMState *env, int el)
return 2;
}

/* Check CPTR_EL2.FPEN. */
switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
switch (FIELD_EX32(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
case 1:
if (el == 2 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -6153,10 +6203,10 @@ int sve_exception_el(CPUARMState *env, int el)
return 0;
}
} else if (arm_is_el2_enabled(env)) {
if (env->cp15.cptr_el[2] & CPTR_TZ) {
if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TZ)) {
return 2;
}
if (env->cp15.cptr_el[2] & CPTR_TFP) {
if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
return 0;
}
}
@@ -6164,7 +6214,7 @@ int sve_exception_el(CPUARMState *env, int el)

/* CPTR_EL3. Since EZ is negative we must check for EL3. */
if (arm_feature(env, ARM_FEATURE_EL3)
&& !(env->cp15.cptr_el[3] & CPTR_EZ)) {
&& !FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, EZ)) {
return 3;
}
#endif
@@ -6529,7 +6579,6 @@ static void define_debug_regs(ARMCPU *cpu)
define_one_arm_cp_reg(cpu, &dbgdidr);
}

/* Note that all these register fields hold "number of Xs minus 1". */
brps = arm_num_brps(cpu);
wrps = arm_num_wrps(cpu);
ctx_cmps = arm_num_ctx_cmps(cpu);
@@ -6543,14 +6592,16 @@ static void define_debug_regs(ARMCPU *cpu)
}

for (i = 0; i < brps; i++) {
char *dbgbvr_el1_name = g_strdup_printf("DBGBVR%d_EL1", i);
char *dbgbcr_el1_name = g_strdup_printf("DBGBCR%d_EL1", i);
ARMCPRegInfo dbgregs[] = {
{ .name = "DBGBVR", .state = ARM_CP_STATE_BOTH,
{ .name = dbgbvr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 4,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbvr[i]),
.writefn = dbgbvr_write, .raw_writefn = raw_write
},
{ .name = "DBGBCR", .state = ARM_CP_STATE_BOTH,
{ .name = dbgbcr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 5,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgbcr[i]),
@@ -6558,17 +6609,21 @@ static void define_debug_regs(ARMCPU *cpu)
},
};
define_arm_cp_regs(cpu, dbgregs);
g_free(dbgbvr_el1_name);
g_free(dbgbcr_el1_name);
}

for (i = 0; i < wrps; i++) {
char *dbgwvr_el1_name = g_strdup_printf("DBGWVR%d_EL1", i);
char *dbgwcr_el1_name = g_strdup_printf("DBGWCR%d_EL1", i);
ARMCPRegInfo dbgregs[] = {
{ .name = "DBGWVR", .state = ARM_CP_STATE_BOTH,
{ .name = dbgwvr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 6,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwvr[i]),
.writefn = dbgwvr_write, .raw_writefn = raw_write
},
{ .name = "DBGWCR", .state = ARM_CP_STATE_BOTH,
{ .name = dbgwcr_el1_name, .state = ARM_CP_STATE_BOTH,
.cp = 14, .opc0 = 2, .opc1 = 0, .crn = 0, .crm = i, .opc2 = 7,
.access = PL1_RW, .accessfn = access_tda,
.fieldoffset = offsetof(CPUARMState, cp15.dbgwcr[i]),
@@ -6576,6 +6631,8 @@ static void define_debug_regs(ARMCPU *cpu)
},
};
define_arm_cp_regs(cpu, dbgregs);
g_free(dbgwvr_el1_name);
g_free(dbgwcr_el1_name);
}
}

@@ -6586,7 +6643,7 @@ static void define_pmu_regs(ARMCPU *cpu)
* field as main ID register, and we implement four counters in
* addition to the cycle count register.
*/
unsigned int i, pmcrn = PMCR_NUM_COUNTERS;
unsigned int i, pmcrn = pmu_num_counters(&cpu->env);
ARMCPRegInfo pmcr = {
.name = "PMCR", .cp = 15, .crn = 9, .crm = 12, .opc1 = 0, .opc2 = 0,
.access = PL0_RW,
@@ -6601,10 +6658,10 @@ static void define_pmu_regs(ARMCPU *cpu)
.access = PL0_RW, .accessfn = pmreg_access,
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
.resetvalue = (cpu->midr & 0xff000000) | (pmcrn << PMCRN_SHIFT) |
PMCRLC,
.resetvalue = cpu->isar.reset_pmcr_el0,
.writefn = pmcr_write, .raw_writefn = raw_write,
};

define_one_arm_cp_reg(cpu, &pmcr);
define_one_arm_cp_reg(cpu, &pmcr64);
for (i = 0; i < pmcrn; i++) {
@@ -6758,7 +6815,7 @@ static CPAccessResult access_pauth(CPUARMState *env, const ARMCPRegInfo *ri,
int el = arm_current_el(env);

if (el < 2 &&
arm_feature(env, ARM_FEATURE_EL2) &&
arm_is_el2_enabled(env) &&
!(arm_hcr_el2_eff(env) & HCR_APK)) {
return CP_ACCESS_TRAP_EL2;
}
@@ -7961,6 +8018,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
.type = ARM_CP_EL3_NO_EL2_C_NZ,
.fieldoffset = offsetof(CPUARMState, cp15.vmpidr_el2) },
};
/*
* The only field of MDCR_EL2 that has a defined architectural reset
* value is MDCR_EL2.HPMN which should reset to the value of PMCR_EL0.N.
*/
ARMCPRegInfo mdcr_el2 = {
.name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
.opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
.access = PL2_RW, .resetvalue = pmu_num_counters(env),
.fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
};
define_one_arm_cp_reg(cpu, &mdcr_el2);
define_arm_cp_regs(cpu, vpidr_regs);
define_arm_cp_regs(cpu, el2_cp_reginfo);
if (arm_feature(env, ARM_FEATURE_V8)) {
@@ -8384,6 +8452,10 @@ void register_cp_regs_for_features(ARMCPU *cpu)
define_arm_cp_regs(cpu, zcr_reginfo);
}

if (cpu_isar_feature(aa64_hcx, cpu)) {
define_one_arm_cp_reg(cpu, &hcrx_el2_reginfo);
}

#ifdef TARGET_AARCH64
if (cpu_isar_feature(aa64_pauth, cpu)) {
define_arm_cp_regs(cpu, pauth_reginfo);
@@ -10717,6 +10789,25 @@ static bool get_level1_table_address(CPUARMState *env, ARMMMUIdx mmu_idx,
return true;
}

static bool ptw_attrs_are_device(CPUARMState *env, ARMCacheAttrs cacheattrs)
{
/*
* For an S1 page table walk, the stage 1 attributes are always
* some form of "this is Normal memory". The combined S1+S2
* attributes are therefore only Device if stage 2 specifies Device.
* With HCR_EL2.FWB == 0 this is when descriptor bits [5:4] are 0b00,
* ie when cacheattrs.attrs bits [3:2] are 0b00.
* With HCR_EL2.FWB == 1 this is when descriptor bit [4] is 0, ie
* when cacheattrs.attrs bit [2] is 0.
*/
assert(cacheattrs.is_s2_format);
if (arm_hcr_el2_eff(env) & HCR_FWB) {
return (cacheattrs.attrs & 0x4) == 0;
} else {
return (cacheattrs.attrs & 0xc) == 0;
}
}

/* Translate a S1 pagetable walk through S2 if needed. */
static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
hwaddr addr, bool *is_secure,
@@ -10745,7 +10836,7 @@ static hwaddr S1_ptw_translate(CPUARMState *env, ARMMMUIdx mmu_idx,
return ~0;
}
if ((arm_hcr_el2_eff(env) & HCR_PTW) &&
(cacheattrs.attrs & 0xf0) == 0) {
ptw_attrs_are_device(env, cacheattrs)) {
/*
* PTW set and S1 walk touched S2 Device memory:
* generate Permission fault.
@@ -11817,12 +11908,14 @@ static bool get_phys_addr_lpae(CPUARMState *env, uint64_t address,
}

if (mmu_idx == ARMMMUIdx_Stage2 || mmu_idx == ARMMMUIdx_Stage2_S) {
cacheattrs->attrs = convert_stage2_attrs(env, extract32(attrs, 0, 4));
cacheattrs->is_s2_format = true;
cacheattrs->attrs = extract32(attrs, 0, 4);
} else {
/* Index into MAIR registers for cache attributes */
uint8_t attrindx = extract32(attrs, 0, 3);
uint64_t mair = env->cp15.mair_el[regime_el(env, mmu_idx)];
assert(attrindx <= 7);
cacheattrs->is_s2_format = false;
cacheattrs->attrs = extract64(mair, attrindx * 8, 8);
}

@@ -12557,28 +12650,130 @@ static uint8_t combine_cacheattr_nibble(uint8_t s1, uint8_t s2)
}
}

/*
* Combine the memory type and cacheability attributes of
* s1 and s2 for the HCR_EL2.FWB == 0 case, returning the
* combined attributes in MAIR_EL1 format.
*/
static uint8_t combined_attrs_nofwb(CPUARMState *env,
ARMCacheAttrs s1, ARMCacheAttrs s2)
{
uint8_t s1lo, s2lo, s1hi, s2hi, s2_mair_attrs, ret_attrs;

s2_mair_attrs = convert_stage2_attrs(env, s2.attrs);

s1lo = extract32(s1.attrs, 0, 4);
s2lo = extract32(s2_mair_attrs, 0, 4);
s1hi = extract32(s1.attrs, 4, 4);
s2hi = extract32(s2_mair_attrs, 4, 4);

/* Combine memory type and cacheability attributes */
if (s1hi == 0 || s2hi == 0) {
/* Device has precedence over normal */
if (s1lo == 0 || s2lo == 0) {
/* nGnRnE has precedence over anything */
ret_attrs = 0;
} else if (s1lo == 4 || s2lo == 4) {
/* non-Reordering has precedence over Reordering */
ret_attrs = 4; /* nGnRE */
} else if (s1lo == 8 || s2lo == 8) {
/* non-Gathering has precedence over Gathering */
ret_attrs = 8; /* nGRE */
} else {
ret_attrs = 0xc; /* GRE */
}
} else { /* Normal memory */
/* Outer/inner cacheability combine independently */
ret_attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
| combine_cacheattr_nibble(s1lo, s2lo);
}
return ret_attrs;
}

static uint8_t force_cacheattr_nibble_wb(uint8_t attr)
{
/*
* Given the 4 bits specifying the outer or inner cacheability
* in MAIR format, return a value specifying Normal Write-Back,
* with the allocation and transient hints taken from the input
* if the input specified some kind of cacheable attribute.
*/
if (attr == 0 || attr == 4) {
/*
* 0 == an UNPREDICTABLE encoding
* 4 == Non-cacheable
* Either way, force Write-Back RW allocate non-transient
*/
return 0xf;
}
/* Change WriteThrough to WriteBack, keep allocation and transient hints */
return attr | 4;
}

/*
* Combine the memory type and cacheability attributes of
* s1 and s2 for the HCR_EL2.FWB == 1 case, returning the
* combined attributes in MAIR_EL1 format.
*/
static uint8_t combined_attrs_fwb(CPUARMState *env,
ARMCacheAttrs s1, ARMCacheAttrs s2)
{
switch (s2.attrs) {
case 7:
/* Use stage 1 attributes */
return s1.attrs;
case 6:
/*
* Force Normal Write-Back. Note that if S1 is Normal cacheable
* then we take the allocation hints from it; otherwise it is
* RW allocate, non-transient.
*/
if ((s1.attrs & 0xf0) == 0) {
/* S1 is Device */
return 0xff;
}
/* Need to check the Inner and Outer nibbles separately */
return force_cacheattr_nibble_wb(s1.attrs & 0xf) |
force_cacheattr_nibble_wb(s1.attrs >> 4) << 4;
case 5:
/* If S1 attrs are Device, use them; otherwise Normal Non-cacheable */
if ((s1.attrs & 0xf0) == 0) {
return s1.attrs;
}
return 0x44;
case 0 ... 3:
/* Force Device, of subtype specified by S2 */
return s2.attrs << 2;
default:
/*
* RESERVED values (including RES0 descriptor bit [5] being nonzero);
* arbitrarily force Device.
*/
return 0;
}
}

/* Combine S1 and S2 cacheability/shareability attributes, per D4.5.4
* and CombineS1S2Desc()
*
* @env: CPUARMState
* @s1: Attributes from stage 1 walk
* @s2: Attributes from stage 2 walk
*/
static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
static ARMCacheAttrs combine_cacheattrs(CPUARMState *env,
ARMCacheAttrs s1, ARMCacheAttrs s2)
{
uint8_t s1lo, s2lo, s1hi, s2hi;
ARMCacheAttrs ret;
bool tagged = false;

assert(s2.is_s2_format && !s1.is_s2_format);
ret.is_s2_format = false;

if (s1.attrs == 0xf0) {
tagged = true;
s1.attrs = 0xff;
}

s1lo = extract32(s1.attrs, 0, 4);
s2lo = extract32(s2.attrs, 0, 4);
s1hi = extract32(s1.attrs, 4, 4);
s2hi = extract32(s2.attrs, 4, 4);

/* Combine shareability attributes (table D4-43) */
if (s1.shareability == 2 || s2.shareability == 2) {
/* if either are outer-shareable, the result is outer-shareable */
@@ -12592,37 +12787,22 @@ static ARMCacheAttrs combine_cacheattrs(ARMCacheAttrs s1, ARMCacheAttrs s2)
}

/* Combine memory type and cacheability attributes */
if (s1hi == 0 || s2hi == 0) {
/* Device has precedence over normal */
if (s1lo == 0 || s2lo == 0) {
/* nGnRnE has precedence over anything */
ret.attrs = 0;
} else if (s1lo == 4 || s2lo == 4) {
/* non-Reordering has precedence over Reordering */
ret.attrs = 4; /* nGnRE */
} else if (s1lo == 8 || s2lo == 8) {
/* non-Gathering has precedence over Gathering */
ret.attrs = 8; /* nGRE */
} else {
ret.attrs = 0xc; /* GRE */
}
if (arm_hcr_el2_eff(env) & HCR_FWB) {
ret.attrs = combined_attrs_fwb(env, s1, s2);
} else {
ret.attrs = combined_attrs_nofwb(env, s1, s2);
}

/* Any location for which the resultant memory type is any
* type of Device memory is always treated as Outer Shareable.
*/
/*
* Any location for which the resultant memory type is any
* type of Device memory is always treated as Outer Shareable.
* Any location for which the resultant memory type is Normal
* Inner Non-cacheable, Outer Non-cacheable is always treated
* as Outer Shareable.
* TODO: FEAT_XS adds another value (0x40) also meaning iNCoNC
*/
if ((ret.attrs & 0xf0) == 0 || ret.attrs == 0x44) {
ret.shareability = 2;
} else { /* Normal memory */
/* Outer/inner cacheability combine independently */
ret.attrs = combine_cacheattr_nibble(s1hi, s2hi) << 4
| combine_cacheattr_nibble(s1lo, s2lo);

if (ret.attrs == 0x44) {
/* Any location for which the resultant memory type is Normal
* Inner Non-cacheable, Outer Non-cacheable is always treated
* as Outer Shareable.
*/
ret.shareability = 2;
}
}

/* TODO: CombineS1S2Desc does not consider transient, only WB, RWA. */
@@ -12731,7 +12911,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
}
cacheattrs->shareability = 0;
}
*cacheattrs = combine_cacheattrs(*cacheattrs, cacheattrs2);
*cacheattrs = combine_cacheattrs(env, *cacheattrs, cacheattrs2);

/* Check if IPA translates to secure or non-secure PA space. */
if (arm_is_secure_below_el3(env)) {
@@ -12849,6 +13029,7 @@ bool get_phys_addr(CPUARMState *env, target_ulong address,
/* Fill in cacheattr a-la AArch64.TranslateAddressS1Off. */
hcr = arm_hcr_el2_eff(env);
cacheattrs->shareability = 0;
cacheattrs->is_s2_format = false;
if (hcr & HCR_DC) {
if (hcr & HCR_DCT) {
memattr = 0xf0; /* Tagged, Normal, WB, RWA */
@@ -13216,7 +13397,7 @@ int fp_exception_el(CPUARMState *env, int cur_el)
* This register is ignored if E2H+TGE are both set.
*/
if ((hcr_el2 & (HCR_E2H | HCR_TGE)) != (HCR_E2H | HCR_TGE)) {
int fpen = extract32(env->cp15.cpacr_el1, 20, 2);
int fpen = FIELD_EX64(env->cp15.cpacr_el1, CPACR_EL1, FPEN);

switch (fpen) {
case 0:
@@ -13262,8 +13443,7 @@ int fp_exception_el(CPUARMState *env, int cur_el)
*/
if (cur_el <= 2) {
if (hcr_el2 & HCR_E2H) {
/* Check CPTR_EL2.FPEN. */
switch (extract32(env->cp15.cptr_el[2], 20, 2)) {
switch (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, FPEN)) {
case 1:
if (cur_el != 0 || !(hcr_el2 & HCR_TGE)) {
break;
@@ -13274,14 +13454,14 @@ int fp_exception_el(CPUARMState *env, int cur_el)
return 2;
}
} else if (arm_is_el2_enabled(env)) {
if (env->cp15.cptr_el[2] & CPTR_TFP) {
if (FIELD_EX64(env->cp15.cptr_el[2], CPTR_EL2, TFP)) {
return 2;
}
}
}

/* CPTR_EL3 : present in v8 */
if (env->cp15.cptr_el[3] & CPTR_TFP) {
if (FIELD_EX64(env->cp15.cptr_el[3], CPTR_EL3, TFP)) {
/* Trap all FP ops to EL3 */
return 3;
}
@@ -1149,8 +1149,13 @@ bool pmsav8_mpu_lookup(CPUARMState *env, uint32_t address,

/* Cacheability and shareability attributes for a memory access */
typedef struct ARMCacheAttrs {
unsigned int attrs:8; /* as in the MAIR register encoding */
/*
* If is_s2_format is true, attrs is the S2 descriptor bits [5:2]
* Otherwise, attrs is the same as the MAIR_EL1 8-bit format
*/
unsigned int attrs:8;
unsigned int shareability:2; /* as in the SH field of the VMSAv8-64 PTEs */
bool is_s2_format:1;
} ARMCacheAttrs;

bool get_phys_addr(CPUARMState *env, target_ulong address,
@@ -1299,7 +1304,9 @@ enum MVEECIState {

static inline uint32_t pmu_num_counters(CPUARMState *env)
{
return (env->cp15.c9_pmcr & PMCRN_MASK) >> PMCRN_SHIFT;
ARMCPU *cpu = env_archcpu(env);

return (cpu->isar.reset_pmcr_el0 & PMCRN_MASK) >> PMCRN_SHIFT;
}

/* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */

@@ -505,6 +505,7 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
*/
int fdarray[3];
bool sve_supported;
bool pmu_supported = false;
uint64_t features = 0;
uint64_t t;
int err;
@@ -537,6 +538,11 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
}

if (kvm_arm_pmu_supported()) {
init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
pmu_supported = true;
}

if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
return false;
}
@@ -659,6 +665,12 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
dbgdidr |= (1 << 15); /* RES1 bit */
ahcf->isar.dbgdidr = dbgdidr;
}

if (pmu_supported) {
/* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
ARM64_SYS_REG(3, 3, 9, 12, 0));
}
}

sve_supported = ioctl(fdarray[0], KVM_CHECK_EXTENSION, KVM_CAP_ARM_SVE) > 0;
@@ -631,6 +631,7 @@ uint32_t HELPER(mrs_banked)(CPUARMState *env, uint32_t tgtmode, uint32_t regno)
void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
uint32_t isread)
{
ARMCPU *cpu = env_archcpu(env);
const ARMCPRegInfo *ri = rip;
CPAccessResult res = CP_ACCESS_OK;
int target_el;
@@ -674,6 +675,14 @@ void HELPER(access_check_cp_reg)(CPUARMState *env, void *rip, uint32_t syndrome,
case CP_ACCESS_TRAP:
break;
case CP_ACCESS_TRAP_UNCATEGORIZED:
if (cpu_isar_feature(aa64_ids, cpu) && isread &&
arm_cpreg_in_idspace(ri)) {
/*
* FEAT_IDST says this should be reported as EC_SYSTEMREGISTERTRAP,
* not EC_UNCATEGORIZED
*/
break;
}
syndrome = syn_uncategorized();
break;
default:
@@ -1795,6 +1795,30 @@ static void gen_set_nzcv(TCGv_i64 tcg_rt)
tcg_temp_free_i32(nzcv);
}

static void gen_sysreg_undef(DisasContext *s, bool isread,
uint8_t op0, uint8_t op1, uint8_t op2,
uint8_t crn, uint8_t crm, uint8_t rt)
{
/*
* Generate code to emit an UNDEF with correct syndrome
* information for a failed system register access.
* This is EC_UNCATEGORIZED (ie a standard UNDEF) in most cases,
* but if FEAT_IDST is implemented then read accesses to registers
* in the feature ID space are reported with the EC_SYSTEMREGISTERTRAP
* syndrome.
*/
uint32_t syndrome;

if (isread && dc_isar_feature(aa64_ids, s) &&
arm_cpreg_encoding_in_idspace(op0, op1, op2, crn, crm)) {
syndrome = syn_aa64_sysregtrap(op0, op1, op2, crn, crm, rt, isread);
} else {
syndrome = syn_uncategorized();
}
gen_exception_insn(s, s->pc_curr, EXCP_UDEF, syndrome,
default_exception_el(s));
}

/* MRS - move from system register
* MSR (register) - move to system register
* SYS
@@ -1820,13 +1844,13 @@ static void handle_sys(DisasContext *s, uint32_t insn, bool isread,
qemu_log_mask(LOG_UNIMP, "%s access to unsupported AArch64 "
"system register op0:%d op1:%d crn:%d crm:%d op2:%d\n",
isread ? "read" : "write", op0, op1, crn, crm, op2);
unallocated_encoding(s);
gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
return;
}

/* Check access permissions */
if (!cp_access_ok(s->current_el, ri, isread)) {
unallocated_encoding(s);
gen_sysreg_undef(s, isread, op0, op1, op2, crn, crm, rt);
return;
}

@@ -2103,13 +2127,13 @@ static void disas_exc(DisasContext *s, uint32_t insn)
* with our 32-bit semihosting).
*/
if (s->current_el == 0) {
unsupported_encoding(s, insn);
unallocated_encoding(s);
break;
}
#endif
gen_exception_internal_insn(s, s->pc_curr, EXCP_SEMIHOST);
} else {
unsupported_encoding(s, insn);
unallocated_encoding(s);
}
break;
case 5:
@@ -2118,7 +2142,7 @@ static void disas_exc(DisasContext *s, uint32_t insn)
break;
}
/* DCPS1, DCPS2, DCPS3 */
unsupported_encoding(s, insn);
unallocated_encoding(s);
break;
default:
unallocated_encoding(s);
@@ -2283,7 +2307,7 @@ static void disas_uncond_b_reg(DisasContext *s, uint32_t insn)
if (op3 != 0 || op4 != 0 || rn != 0x1f) {
goto do_unallocated;
} else {
unsupported_encoding(s, insn);
unallocated_encoding(s);
}
return;

@@ -18,15 +18,6 @@
#ifndef TARGET_ARM_TRANSLATE_A64_H
#define TARGET_ARM_TRANSLATE_A64_H

#define unsupported_encoding(s, insn) \
do { \
qemu_log_mask(LOG_UNIMP, \
"%s:%d: unsupported instruction encoding 0x%08x " \
"at pc=%016" PRIx64 "\n", \
__FILE__, __LINE__, insn, s->pc_curr); \
unallocated_encoding(s); \
} while (0)

TCGv_i64 new_tmp_a64(DisasContext *s);
TCGv_i64 new_tmp_a64_local(DisasContext *s);
TCGv_i64 new_tmp_a64_zero(DisasContext *s);
@@ -768,8 +768,8 @@ static void add_ptimer_tests(uint8_t policy)
char policy_name[256] = "";
char *tmp;

if (policy == PTIMER_POLICY_DEFAULT) {
g_sprintf(policy_name, "default");
if (policy == PTIMER_POLICY_LEGACY) {
g_sprintf(policy_name, "legacy");
}

if (policy & PTIMER_POLICY_WRAP_AFTER_ONE_PERIOD) {
@@ -862,7 +862,7 @@ static void add_ptimer_tests(uint8_t policy)
static void add_all_ptimer_policies_comb_tests(void)
{
int last_policy = PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT;
int policy = PTIMER_POLICY_DEFAULT;
int policy = PTIMER_POLICY_LEGACY;

for (; policy < (last_policy << 1); policy++) {
if ((policy & PTIMER_POLICY_TRIGGER_ONLY_ON_DECREMENT) &&