commit 50eac424c7

Merge tag 'pull-arm-20220914' of https://gitlab.com/rth7680/qemu into staging

Add cortex-a35.
Fix bcm2835 framebuffer for rpi firmware.
Add FEAT_ETS.
Add FEAT_PMUv3p5.
Cleanups to armv7m_load_kernel.

# gpg: Signature made Wed 14 Sep 2022 07:50:35 EDT
# gpg: using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg: issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [full]
# Primary key fingerprint: 7A48 1E78 868B 4DB6 A85A 05C0 64DF 38E8 AF7E 215F

* tag 'pull-arm-20220914' of https://gitlab.com/rth7680/qemu:
  target/arm: Make boards pass base address to armv7m_load_kernel()
  target/arm: Remove useless TARGET_BIG_ENDIAN check in armv7m_load_kernel()
  target/arm: Report FEAT_PMUv3p5 for TCG '-cpu max'
  target/arm: Support 64-bit event counters for FEAT_PMUv3p5
  target/arm: Implement FEAT_PMUv3p5 cycle counter disable bits
  target/arm: Rename pmu_8_n feature test functions
  target/arm: Detect overflow when calculating next PMU interrupt
  target/arm: Honour MDCR_EL2.HPMD in Secure EL2
  target/arm: Ignore PMCR.D when PMCR.LC is set
  target/arm: Don't mishandle count when enabling or disabling PMU counters
  target/arm: Correct value returned by pmu_counter_mask()
  target/arm: Don't corrupt high half of PMOVSR when cycle counter overflows
  target/arm: Add missing space in comment
  target/arm: Advertise FEAT_ETS for '-cpu max'
  target/arm: Implement ID_DFR1
  target/arm: Implement ID_MMFR5
  target/arm: Sort KVM reads of AArch32 ID registers into encoding order
  target/arm: Make cpregs 0, c0, c{3-15}, {0-7} correctly RAZ in v8
  hw/arm/bcm2835_property: Add support for RPI_FIRMWARE_FRAMEBUFFER_GET_NUM_DISPLAYS
  target/arm: Add cortex-a35

Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
@@ -24,6 +24,7 @@ the following architecture extensions:
 - FEAT_Debugv8p4 (Debug changes for v8.4)
 - FEAT_DotProd (Advanced SIMD dot product instructions)
 - FEAT_DoubleFault (Double Fault Extension)
+- FEAT_ETS (Enhanced Translation Synchronization)
 - FEAT_FCMA (Floating-point complex number instructions)
 - FEAT_FHM (Floating-point half-precision multiplication instructions)
 - FEAT_FP16 (Half-precision floating-point data processing)
@@ -52,6 +53,7 @@ the following architecture extensions:
 - FEAT_PMULL (PMULL, PMULL2 instructions)
 - FEAT_PMUv3p1 (PMU Extensions v3.1)
 - FEAT_PMUv3p4 (PMU Extensions v3.4)
+- FEAT_PMUv3p5 (PMU Extensions v3.5)
 - FEAT_RAS (Reliability, availability, and serviceability)
 - FEAT_RASv1p1 (RAS Extension v1.1)
 - FEAT_RDM (Advanced SIMD rounding double multiply accumulate instructions)
@@ -52,6 +52,7 @@ Supported guest CPU types:

 - ``cortex-a7`` (32-bit)
 - ``cortex-a15`` (32-bit; the default)
+- ``cortex-a35`` (64-bit)
 - ``cortex-a53`` (64-bit)
 - ``cortex-a57`` (64-bit)
 - ``cortex-a72`` (64-bit)
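As an illustrative usage note (not part of the patch): once the ``virt`` board accepts the new CPU model, a guest can be started with a command along the lines of

    qemu-system-aarch64 -machine virt -cpu cortex-a35 -m 1G -nographic -kernel <your-kernel>

where the kernel image path is a placeholder.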
@@ -568,21 +568,15 @@ static void armv7m_reset(void *opaque)
     cpu_reset(CPU(cpu));
 }

-void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size)
+void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
+                        hwaddr mem_base, int mem_size)
 {
     ssize_t image_size;
     uint64_t entry;
-    int big_endian;
     AddressSpace *as;
     int asidx;
     CPUState *cs = CPU(cpu);

-#if TARGET_BIG_ENDIAN
-    big_endian = 1;
-#else
-    big_endian = 0;
-#endif
-
     if (arm_feature(&cpu->env, ARM_FEATURE_EL3)) {
         asidx = ARMASIdx_S;
     } else {
@@ -593,9 +587,9 @@ void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size)
     if (kernel_filename) {
         image_size = load_elf_as(kernel_filename, NULL, NULL, NULL,
                                  &entry, NULL, NULL,
-                                 NULL, big_endian, EM_ARM, 1, 0, as);
+                                 NULL, 0, EM_ARM, 1, 0, as);
         if (image_size < 0) {
-            image_size = load_image_targphys_as(kernel_filename, 0,
+            image_size = load_image_targphys_as(kernel_filename, mem_base,
                                                 mem_size, as);
         }
         if (image_size < 0) {
@@ -1430,6 +1430,7 @@ static void aspeed_minibmc_machine_init(MachineState *machine)

     armv7m_load_kernel(ARM_CPU(first_cpu),
                        machine->kernel_filename,
+                       0,
                        AST1030_INTERNAL_FLASH_SIZE);
 }

@@ -57,7 +57,7 @@ static void microbit_init(MachineState *machine)
                                         mr, -1);

     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
-                       s->nrf51.flash_size);
+                       0, s->nrf51.flash_size);
 }

 static void microbit_machine_class_init(ObjectClass *oc, void *data)
@@ -1197,7 +1197,7 @@ static void mps2tz_common_init(MachineState *machine)
     }

     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
-                       boot_ram_size(mms));
+                       0, boot_ram_size(mms));
 }

 static void mps2_tz_idau_check(IDAUInterface *ii, uint32_t address,
@@ -450,7 +450,7 @@ static void mps2_common_init(MachineState *machine)
                                            mmc->fpga_type == FPGA_AN511 ? 47 : 13));

     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
-                       0x400000);
+                       0, 0x400000);
 }

 static void mps2_class_init(ObjectClass *oc, void *data)
@@ -98,7 +98,7 @@ static void emcraft_sf2_s2s010_init(MachineState *machine)
     sysbus_connect_irq(SYS_BUS_DEVICE(&soc->spi[0]), 1, cs_line);

     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
-                       soc->envm_size);
+                       0, soc->envm_size);
 }

 static void emcraft_sf2_machine_init(MachineClass *mc)
@@ -597,7 +597,8 @@ static void musca_init(MachineState *machine)
                                                      "cfg_sec_resp", 0));
     }

-    armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename, 0x2000000);
+    armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
+                       0, 0x2000000);
 }

 static void musca_class_init(ObjectClass *oc, void *data)
@@ -49,7 +49,7 @@ static void netduino2_init(MachineState *machine)
     sysbus_realize_and_unref(SYS_BUS_DEVICE(dev), &error_fatal);

     armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
-                       FLASH_SIZE);
+                       0, FLASH_SIZE);
 }

 static void netduino2_machine_init(MachineClass *mc)
@@ -50,7 +50,7 @@ static void netduinoplus2_init(MachineState *machine)

     armv7m_load_kernel(ARM_CPU(first_cpu),
                        machine->kernel_filename,
-                       FLASH_SIZE);
+                       0, FLASH_SIZE);
 }

 static void netduinoplus2_machine_init(MachineClass *mc)
@@ -1302,7 +1302,7 @@ static void stellaris_init(MachineState *ms, stellaris_board_info *board)
     create_unimplemented_device("hibernation", 0x400fc000, 0x1000);
     create_unimplemented_device("flash-control", 0x400fd000, 0x1000);

-    armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, flash_size);
+    armv7m_load_kernel(ARM_CPU(first_cpu), ms->kernel_filename, 0, flash_size);
 }

 /* FIXME: Figure out how to generate these from stellaris_boards. */
@@ -53,7 +53,7 @@ static void stm32vldiscovery_init(MachineState *machine)

     armv7m_load_kernel(ARM_CPU(first_cpu),
                        machine->kernel_filename,
-                       FLASH_SIZE);
+                       0, FLASH_SIZE);
 }

 static void stm32vldiscovery_machine_init(MachineClass *mc)
@@ -199,6 +199,7 @@ static const int a15irqmap[] = {
 static const char *valid_cpus[] = {
     ARM_CPU_TYPE_NAME("cortex-a7"),
     ARM_CPU_TYPE_NAME("cortex-a15"),
+    ARM_CPU_TYPE_NAME("cortex-a35"),
     ARM_CPU_TYPE_NAME("cortex-a53"),
     ARM_CPU_TYPE_NAME("cortex-a57"),
     ARM_CPU_TYPE_NAME("cortex-a72"),
@@ -270,6 +270,10 @@ static void bcm2835_property_mbox_push(BCM2835PropertyState *s, uint32_t value)
         stl_le_phys(&s->dma_as, value + 12, 0);
         resplen = 4;
         break;
+    case 0x00040013: /* Get number of displays */
+        stl_le_phys(&s->dma_as, value + 12, 1);
+        resplen = 4;
+        break;

     case 0x00060001: /* Get DMA channels */
         /* channels 2-5 */
@@ -25,13 +25,16 @@ typedef enum {
  * armv7m_load_kernel:
  * @cpu: CPU
  * @kernel_filename: file to load
+ * @mem_base: base address to load image at (should be where the
+ *            CPU expects to find its vector table on reset)
  * @mem_size: mem_size: maximum image size to load
  *
  * Load the guest image for an ARMv7M system. This must be called by
  * any ARMv7M board. (This is necessary to ensure that the CPU resets
  * correctly on system reset, as well as for kernel loading.)
  */
-void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename, int mem_size);
+void armv7m_load_kernel(ARMCPU *cpu, const char *kernel_filename,
+                        hwaddr mem_base, int mem_size);

 /* arm_boot.c */
 struct arm_boot_info {
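For orientation, here is a minimal sketch of a board-side caller of the new signature. The board name and the FLASH_BASE/FLASH_SIZE values are illustrative assumptions, not part of this series; the real callers are updated in the board hunks earlier in this diff.

    /* Illustrative only: a hypothetical ARMv7-M board init function. */
    #define MYBOARD_FLASH_BASE 0x08000000   /* assumed: where the CPU's vector table lives */
    #define MYBOARD_FLASH_SIZE 0x00100000   /* assumed: 1 MiB of flash */

    static void myboard_init(MachineState *machine)
    {
        /* ... create and realize the ARMv7-M SoC, map flash at MYBOARD_FLASH_BASE ... */
        armv7m_load_kernel(ARM_CPU(first_cpu), machine->kernel_filename,
                           MYBOARD_FLASH_BASE, MYBOARD_FLASH_SIZE);
    }

Passing 0 as mem_base, as the converted boards above do, keeps the previous behaviour of loading the image at address 0.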
@@ -975,6 +975,7 @@ struct ArchCPU {
         uint32_t id_mmfr2;
         uint32_t id_mmfr3;
         uint32_t id_mmfr4;
+        uint32_t id_mmfr5;
         uint32_t id_pfr0;
         uint32_t id_pfr1;
         uint32_t id_pfr2;
@@ -982,6 +983,7 @@ struct ArchCPU {
         uint32_t mvfr1;
         uint32_t mvfr2;
         uint32_t id_dfr0;
+        uint32_t id_dfr1;
         uint32_t dbgdidr;
         uint32_t dbgdevid;
         uint32_t dbgdevid1;
@@ -1332,6 +1334,9 @@ FIELD(CPTR_EL3, TTA, 20, 1)
 FIELD(CPTR_EL3, TAM, 30, 1)
 FIELD(CPTR_EL3, TCPAC, 31, 1)

+#define MDCR_HLP (1U << 26) /* MDCR_EL2 */
+#define MDCR_SCCD (1U << 23) /* MDCR_EL3 */
+#define MDCR_HCCD (1U << 23) /* MDCR_EL2 */
 #define MDCR_EPMAD (1U << 21)
 #define MDCR_EDAD (1U << 20)
 #define MDCR_SPME (1U << 17) /* MDCR_EL3 */
@@ -3710,20 +3715,27 @@ static inline bool isar_feature_aa32_ats1e1(const ARMISARegisters *id)
     return FIELD_EX32(id->id_mmfr3, ID_MMFR3, PAN) >= 2;
 }

-static inline bool isar_feature_aa32_pmu_8_1(const ARMISARegisters *id)
+static inline bool isar_feature_aa32_pmuv3p1(const ARMISARegisters *id)
 {
     /* 0xf means "non-standard IMPDEF PMU" */
     return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 4 &&
         FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
 }

-static inline bool isar_feature_aa32_pmu_8_4(const ARMISARegisters *id)
+static inline bool isar_feature_aa32_pmuv3p4(const ARMISARegisters *id)
 {
     /* 0xf means "non-standard IMPDEF PMU" */
     return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 5 &&
         FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
 }

+static inline bool isar_feature_aa32_pmuv3p5(const ARMISARegisters *id)
+{
+    /* 0xf means "non-standard IMPDEF PMU" */
+    return FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) >= 6 &&
+        FIELD_EX32(id->id_dfr0, ID_DFR0, PERFMON) != 0xf;
+}
+
 static inline bool isar_feature_aa32_hpd(const ARMISARegisters *id)
 {
     return FIELD_EX32(id->id_mmfr4, ID_MMFR4, HPDS) != 0;
@@ -4036,18 +4048,24 @@ static inline bool isar_feature_aa64_sme(const ARMISARegisters *id)
     return FIELD_EX64(id->id_aa64pfr1, ID_AA64PFR1, SME) != 0;
 }

-static inline bool isar_feature_aa64_pmu_8_1(const ARMISARegisters *id)
+static inline bool isar_feature_aa64_pmuv3p1(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 4 &&
         FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
 }

-static inline bool isar_feature_aa64_pmu_8_4(const ARMISARegisters *id)
+static inline bool isar_feature_aa64_pmuv3p4(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 5 &&
         FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
 }

+static inline bool isar_feature_aa64_pmuv3p5(const ARMISARegisters *id)
+{
+    return FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) >= 6 &&
+        FIELD_EX64(id->id_aa64dfr0, ID_AA64DFR0, PMUVER) != 0xf;
+}
+
 static inline bool isar_feature_aa64_rcpc_8_3(const ARMISARegisters *id)
 {
     return FIELD_EX64(id->id_aa64isar1, ID_AA64ISAR1, LRCPC) != 0;
@@ -4211,14 +4229,19 @@ static inline bool isar_feature_any_predinv(const ARMISARegisters *id)
     return isar_feature_aa64_predinv(id) || isar_feature_aa32_predinv(id);
 }

-static inline bool isar_feature_any_pmu_8_1(const ARMISARegisters *id)
+static inline bool isar_feature_any_pmuv3p1(const ARMISARegisters *id)
 {
-    return isar_feature_aa64_pmu_8_1(id) || isar_feature_aa32_pmu_8_1(id);
+    return isar_feature_aa64_pmuv3p1(id) || isar_feature_aa32_pmuv3p1(id);
 }

-static inline bool isar_feature_any_pmu_8_4(const ARMISARegisters *id)
+static inline bool isar_feature_any_pmuv3p4(const ARMISARegisters *id)
 {
-    return isar_feature_aa64_pmu_8_4(id) || isar_feature_aa32_pmu_8_4(id);
+    return isar_feature_aa64_pmuv3p4(id) || isar_feature_aa32_pmuv3p4(id);
 }

+static inline bool isar_feature_any_pmuv3p5(const ARMISARegisters *id)
+{
+    return isar_feature_aa64_pmuv3p5(id) || isar_feature_aa32_pmuv3p5(id);
+}
+
 static inline bool isar_feature_any_ccidx(const ARMISARegisters *id)
@@ -36,6 +36,85 @@
 #include "hw/qdev-properties.h"
 #include "internals.h"

+static void aarch64_a35_initfn(Object *obj)
+{
+    ARMCPU *cpu = ARM_CPU(obj);
+
+    cpu->dtb_compatible = "arm,cortex-a35";
+    set_feature(&cpu->env, ARM_FEATURE_V8);
+    set_feature(&cpu->env, ARM_FEATURE_NEON);
+    set_feature(&cpu->env, ARM_FEATURE_GENERIC_TIMER);
+    set_feature(&cpu->env, ARM_FEATURE_AARCH64);
+    set_feature(&cpu->env, ARM_FEATURE_CBAR_RO);
+    set_feature(&cpu->env, ARM_FEATURE_EL2);
+    set_feature(&cpu->env, ARM_FEATURE_EL3);
+    set_feature(&cpu->env, ARM_FEATURE_PMU);
+
+    /* From B2.2 AArch64 identification registers. */
+    cpu->midr = 0x411fd040;
+    cpu->revidr = 0;
+    cpu->ctr = 0x84448004;
+    cpu->isar.id_pfr0 = 0x00000131;
+    cpu->isar.id_pfr1 = 0x00011011;
+    cpu->isar.id_dfr0 = 0x03010066;
+    cpu->id_afr0 = 0;
+    cpu->isar.id_mmfr0 = 0x10201105;
+    cpu->isar.id_mmfr1 = 0x40000000;
+    cpu->isar.id_mmfr2 = 0x01260000;
+    cpu->isar.id_mmfr3 = 0x02102211;
+    cpu->isar.id_isar0 = 0x02101110;
+    cpu->isar.id_isar1 = 0x13112111;
+    cpu->isar.id_isar2 = 0x21232042;
+    cpu->isar.id_isar3 = 0x01112131;
+    cpu->isar.id_isar4 = 0x00011142;
+    cpu->isar.id_isar5 = 0x00011121;
+    cpu->isar.id_aa64pfr0 = 0x00002222;
+    cpu->isar.id_aa64pfr1 = 0;
+    cpu->isar.id_aa64dfr0 = 0x10305106;
+    cpu->isar.id_aa64dfr1 = 0;
+    cpu->isar.id_aa64isar0 = 0x00011120;
+    cpu->isar.id_aa64isar1 = 0;
+    cpu->isar.id_aa64mmfr0 = 0x00101122;
+    cpu->isar.id_aa64mmfr1 = 0;
+    cpu->clidr = 0x0a200023;
+    cpu->dcz_blocksize = 4;
+
+    /* From B2.4 AArch64 Virtual Memory control registers */
+    cpu->reset_sctlr = 0x00c50838;
+
+    /* From B2.10 AArch64 performance monitor registers */
+    cpu->isar.reset_pmcr_el0 = 0x410a3000;
+
+    /* From B2.29 Cache ID registers */
+    cpu->ccsidr[0] = 0x700fe01a; /* 32KB L1 dcache */
+    cpu->ccsidr[1] = 0x201fe00a; /* 32KB L1 icache */
+    cpu->ccsidr[2] = 0x703fe03a; /* 512KB L2 cache */
+
+    /* From B3.5 VGIC Type register */
+    cpu->gic_num_lrs = 4;
+    cpu->gic_vpribits = 5;
+    cpu->gic_vprebits = 5;
+    cpu->gic_pribits = 5;
+
+    /* From C6.4 Debug ID Register */
+    cpu->isar.dbgdidr = 0x3516d000;
+    /* From C6.5 Debug Device ID Register */
+    cpu->isar.dbgdevid = 0x00110f13;
+    /* From C6.6 Debug Device ID Register 1 */
+    cpu->isar.dbgdevid1 = 0x2;
+
+    /* From Cortex-A35 SIMD and Floating-point Support r1p0 */
+    /* From 3.2 AArch32 register summary */
+    cpu->reset_fpsid = 0x41034043;
+
+    /* From 2.2 AArch64 register summary */
+    cpu->isar.mvfr0 = 0x10110222;
+    cpu->isar.mvfr1 = 0x12111111;
+    cpu->isar.mvfr2 = 0x00000043;
+
+    /* These values are the same with A53/A57/A72. */
+    define_cortex_a72_a57_a53_cp_reginfo(cpu);
+}
+
 static void aarch64_a57_initfn(Object *obj)
 {
@@ -1043,6 +1122,7 @@ static void aarch64_max_initfn(Object *obj)
     t = FIELD_DP64(t, ID_AA64MMFR1, LO, 1); /* FEAT_LOR */
     t = FIELD_DP64(t, ID_AA64MMFR1, PAN, 2); /* FEAT_PAN2 */
     t = FIELD_DP64(t, ID_AA64MMFR1, XNX, 1); /* FEAT_XNX */
+    t = FIELD_DP64(t, ID_AA64MMFR1, ETS, 1); /* FEAT_ETS */
     t = FIELD_DP64(t, ID_AA64MMFR1, HCX, 1); /* FEAT_HCX */
     cpu->isar.id_aa64mmfr1 = t;

@@ -1072,7 +1152,7 @@ static void aarch64_max_initfn(Object *obj)

     t = cpu->isar.id_aa64dfr0;
     t = FIELD_DP64(t, ID_AA64DFR0, DEBUGVER, 9); /* FEAT_Debugv8p4 */
-    t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 5); /* FEAT_PMUv3p4 */
+    t = FIELD_DP64(t, ID_AA64DFR0, PMUVER, 6); /* FEAT_PMUv3p5 */
     cpu->isar.id_aa64dfr0 = t;

     t = cpu->isar.id_aa64smfr0;
@@ -1158,6 +1238,7 @@ static void aarch64_a64fx_initfn(Object *obj)
 }

 static const ARMCPUInfo aarch64_cpus[] = {
+    { .name = "cortex-a35", .initfn = aarch64_a35_initfn },
     { .name = "cortex-a57", .initfn = aarch64_a57_initfn },
     { .name = "cortex-a53", .initfn = aarch64_a53_initfn },
     { .name = "cortex-a72", .initfn = aarch64_a72_initfn },
@@ -64,9 +64,13 @@ void aa32_max_features(ARMCPU *cpu)
     t = FIELD_DP32(t, ID_MMFR4, HPDS, 1); /* FEAT_AA32HPD */
     t = FIELD_DP32(t, ID_MMFR4, AC2, 1); /* ACTLR2, HACTLR2 */
     t = FIELD_DP32(t, ID_MMFR4, CNP, 1); /* FEAT_TTCNP */
-    t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX*/
+    t = FIELD_DP32(t, ID_MMFR4, XNX, 1); /* FEAT_XNX */
     cpu->isar.id_mmfr4 = t;

+    t = cpu->isar.id_mmfr5;
+    t = FIELD_DP32(t, ID_MMFR5, ETS, 1); /* FEAT_ETS */
+    cpu->isar.id_mmfr5 = t;
+
     t = cpu->isar.id_pfr0;
     t = FIELD_DP32(t, ID_PFR0, CSV2, 2); /* FEAT_CVS2 */
     t = FIELD_DP32(t, ID_PFR0, DIT, 1); /* FEAT_DIT */
@@ -81,7 +85,7 @@ void aa32_max_features(ARMCPU *cpu)
     t = cpu->isar.id_dfr0;
     t = FIELD_DP32(t, ID_DFR0, COPDBG, 9); /* FEAT_Debugv8p4 */
     t = FIELD_DP32(t, ID_DFR0, COPSDBG, 9); /* FEAT_Debugv8p4 */
-    t = FIELD_DP32(t, ID_DFR0, PERFMON, 5); /* FEAT_PMUv3p4 */
+    t = FIELD_DP32(t, ID_DFR0, PERFMON, 6); /* FEAT_PMUv3p5 */
     cpu->isar.id_dfr0 = t;
 }

@@ -879,16 +879,16 @@ static int64_t instructions_ns_per(uint64_t icount)
 }
 #endif

-static bool pmu_8_1_events_supported(CPUARMState *env)
+static bool pmuv3p1_events_supported(CPUARMState *env)
 {
     /* For events which are supported in any v8.1 PMU */
-    return cpu_isar_feature(any_pmu_8_1, env_archcpu(env));
+    return cpu_isar_feature(any_pmuv3p1, env_archcpu(env));
 }

-static bool pmu_8_4_events_supported(CPUARMState *env)
+static bool pmuv3p4_events_supported(CPUARMState *env)
 {
     /* For events which are supported in any v8.1 PMU */
-    return cpu_isar_feature(any_pmu_8_4, env_archcpu(env));
+    return cpu_isar_feature(any_pmuv3p4, env_archcpu(env));
 }

 static uint64_t zero_event_get_count(CPUARMState *env)
@@ -922,17 +922,17 @@ static const pm_event pm_events[] = {
     },
 #endif
     { .number = 0x023, /* STALL_FRONTEND */
-      .supported = pmu_8_1_events_supported,
+      .supported = pmuv3p1_events_supported,
       .get_count = zero_event_get_count,
       .ns_per_count = zero_event_ns_per,
     },
     { .number = 0x024, /* STALL_BACKEND */
-      .supported = pmu_8_1_events_supported,
+      .supported = pmuv3p1_events_supported,
       .get_count = zero_event_get_count,
       .ns_per_count = zero_event_ns_per,
     },
     { .number = 0x03c, /* STALL */
-      .supported = pmu_8_4_events_supported,
+      .supported = pmuv3p4_events_supported,
       .get_count = zero_event_get_count,
       .ns_per_count = zero_event_ns_per,
     },
@@ -1079,6 +1079,15 @@ static CPAccessResult pmreg_access_ccntr(CPUARMState *env,
     return pmreg_access(env, ri, isread);
 }

+/*
+ * Bits in MDCR_EL2 and MDCR_EL3 which pmu_counter_enabled() looks at.
+ * We use these to decide whether we need to wrap a write to MDCR_EL2
+ * or MDCR_EL3 in pmu_op_start()/pmu_op_finish() calls.
+ */
+#define MDCR_EL2_PMU_ENABLE_BITS \
+    (MDCR_HPME | MDCR_HPMD | MDCR_HPMN | MDCR_HCCD | MDCR_HLP)
+#define MDCR_EL3_PMU_ENABLE_BITS (MDCR_SPME | MDCR_SCCD)
+
 /* Returns true if the counter (pass 31 for PMCCNTR) should count events using
  * the current EL, security state, and register configuration.
  */
@@ -1086,7 +1095,7 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
 {
     uint64_t filter;
     bool e, p, u, nsk, nsu, nsh, m;
-    bool enabled, prohibited, filtered;
+    bool enabled, prohibited = false, filtered;
     bool secure = arm_is_secure(env);
     int el = arm_current_el(env);
     uint64_t mdcr_el2 = arm_mdcr_el2_eff(env);
@@ -1104,19 +1113,29 @@ static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
     }
     enabled = e && (env->cp15.c9_pmcnten & (1 << counter));

-    if (!secure) {
-        if (el == 2 && (counter < hpmn || counter == 31)) {
-            prohibited = mdcr_el2 & MDCR_HPMD;
-        } else {
-            prohibited = false;
-        }
-    } else {
-        prohibited = arm_feature(env, ARM_FEATURE_EL3) &&
-            !(env->cp15.mdcr_el3 & MDCR_SPME);
+    /* Is event counting prohibited? */
+    if (el == 2 && (counter < hpmn || counter == 31)) {
+        prohibited = mdcr_el2 & MDCR_HPMD;
+    }
+    if (secure) {
+        prohibited = prohibited || !(env->cp15.mdcr_el3 & MDCR_SPME);
     }

-    if (prohibited && counter == 31) {
-        prohibited = env->cp15.c9_pmcr & PMCRDP;
+    if (counter == 31) {
+        /*
+         * The cycle counter defaults to running. PMCR.DP says "disable
+         * the cycle counter when event counting is prohibited".
+         * Some MDCR bits disable the cycle counter specifically.
+         */
+        prohibited = prohibited && env->cp15.c9_pmcr & PMCRDP;
+        if (cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            if (secure) {
+                prohibited = prohibited || (env->cp15.mdcr_el3 & MDCR_SCCD);
+            }
+            if (el == 2) {
+                prohibited = prohibited || (mdcr_el2 & MDCR_HCCD);
+            }
+        }
     }

     if (counter == 31) {
@@ -1164,6 +1183,43 @@ static void pmu_update_irq(CPUARMState *env)
             (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
 }

+static bool pmccntr_clockdiv_enabled(CPUARMState *env)
+{
+    /*
+     * Return true if the clock divider is enabled and the cycle counter
+     * is supposed to tick only once every 64 clock cycles. This is
+     * controlled by PMCR.D, but if PMCR.LC is set to enable the long
+     * (64-bit) cycle counter PMCR.D has no effect.
+     */
+    return (env->cp15.c9_pmcr & (PMCRD | PMCRLC)) == PMCRD;
+}
+
+static bool pmevcntr_is_64_bit(CPUARMState *env, int counter)
+{
+    /* Return true if the specified event counter is configured to be 64 bit */
+
+    /* This isn't intended to be used with the cycle counter */
+    assert(counter < 31);
+
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        return false;
+    }
+
+    if (arm_feature(env, ARM_FEATURE_EL2)) {
+        /*
+         * MDCR_EL2.HLP still applies even when EL2 is disabled in the
+         * current security state, so we don't use arm_mdcr_el2_eff() here.
+         */
+        bool hlp = env->cp15.mdcr_el2 & MDCR_HLP;
+        int hpmn = env->cp15.mdcr_el2 & MDCR_HPMN;
+
+        if (hpmn != 0 && counter >= hpmn) {
+            return hlp;
+        }
+    }
+    return env->cp15.c9_pmcr & PMCRLP;
+}
+
 /*
  * Ensure c15_ccnt is the guest-visible count so that operations such as
  * enabling/disabling the counter or filtering, modifying the count itself,
@@ -1176,8 +1232,7 @@ static void pmccntr_op_start(CPUARMState *env)

     if (pmu_counter_enabled(env, 31)) {
         uint64_t eff_cycles = cycles;
-        if (env->cp15.c9_pmcr & PMCRD) {
-            /* Increment once every 64 processor clock cycles */
+        if (pmccntr_clockdiv_enabled(env)) {
             eff_cycles /= 64;
         }

@@ -1186,7 +1241,7 @@ static void pmccntr_op_start(CPUARMState *env)
         uint64_t overflow_mask = env->cp15.c9_pmcr & PMCRLC ? \
             1ull << 63 : 1ull << 31;
         if (env->cp15.c15_ccnt & ~new_pmccntr & overflow_mask) {
-            env->cp15.c9_pmovsr |= (1 << 31);
+            env->cp15.c9_pmovsr |= (1ULL << 31);
             pmu_update_irq(env);
         }

@@ -1212,16 +1267,18 @@ static void pmccntr_op_finish(CPUARMState *env)
     int64_t overflow_in = cycles_ns_per(remaining_cycles);

     if (overflow_in > 0) {
-        int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-            overflow_in;
-        ARMCPU *cpu = env_archcpu(env);
-        timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+        int64_t overflow_at;
+
+        if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                             overflow_in, &overflow_at)) {
+            ARMCPU *cpu = env_archcpu(env);
+            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+        }
     }
 #endif

     uint64_t prev_cycles = env->cp15.c15_ccnt_delta;
-    if (env->cp15.c9_pmcr & PMCRD) {
-        /* Increment once every 64 processor clock cycles */
+    if (pmccntr_clockdiv_enabled(env)) {
         prev_cycles /= 64;
     }
     env->cp15.c15_ccnt_delta = prev_cycles - env->cp15.c15_ccnt;
@@ -1239,9 +1296,11 @@ static void pmevcntr_op_start(CPUARMState *env, uint8_t counter)
     }

     if (pmu_counter_enabled(env, counter)) {
-        uint32_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t new_pmevcntr = count - env->cp15.c14_pmevcntr_delta[counter];
+        uint64_t overflow_mask = pmevcntr_is_64_bit(env, counter) ?
+            1ULL << 63 : 1ULL << 31;

-        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & INT32_MIN) {
+        if (env->cp15.c14_pmevcntr[counter] & ~new_pmevcntr & overflow_mask) {
             env->cp15.c9_pmovsr |= (1 << counter);
             pmu_update_irq(env);
         }
@@ -1256,15 +1315,22 @@ static void pmevcntr_op_finish(CPUARMState *env, uint8_t counter)
 #ifndef CONFIG_USER_ONLY
         uint16_t event = env->cp15.c14_pmevtyper[counter] & PMXEVTYPER_EVTCOUNT;
         uint16_t event_idx = supported_event_map[event];
-        uint64_t delta = UINT32_MAX -
-            (uint32_t)env->cp15.c14_pmevcntr[counter] + 1;
-        int64_t overflow_in = pm_events[event_idx].ns_per_count(delta);
+        uint64_t delta = -(env->cp15.c14_pmevcntr[counter] + 1);
+        int64_t overflow_in;
+
+        if (!pmevcntr_is_64_bit(env, counter)) {
+            delta = (uint32_t)delta;
+        }
+        overflow_in = pm_events[event_idx].ns_per_count(delta);

         if (overflow_in > 0) {
-            int64_t overflow_at = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
-                overflow_in;
-            ARMCPU *cpu = env_archcpu(env);
-            timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+            int64_t overflow_at;
+
+            if (!sadd64_overflow(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL),
+                                 overflow_in, &overflow_at)) {
+                ARMCPU *cpu = env_archcpu(env);
+                timer_mod_anticipate_ns(cpu->pmu_timer, overflow_at);
+            }
         }
 #endif

@@ -1342,6 +1408,8 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
                           uint64_t value)
 {
     unsigned int i;
+    uint64_t overflow_mask, new_pmswinc;
+
     for (i = 0; i < pmu_num_counters(env); i++) {
         /* Increment a counter's count iff: */
         if ((value & (1 << i)) && /* counter's bit is set */
@@ -1355,9 +1423,12 @@ static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
-            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;
+            new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

-            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
+            overflow_mask = pmevcntr_is_64_bit(env, i) ?
+                1ULL << 63 : 1ULL << 31;
+
+            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & overflow_mask) {
                 env->cp15.c9_pmovsr |= (1 << i);
                 pmu_update_irq(env);
             }
@@ -1432,15 +1503,19 @@ static uint64_t pmccfiltr_read_a32(CPUARMState *env, const ARMCPRegInfo *ri)
 static void pmcntenset_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
+    pmu_op_start(env);
     value &= pmu_counter_mask(env);
     env->cp15.c9_pmcnten |= value;
+    pmu_op_finish(env);
 }

 static void pmcntenclr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                              uint64_t value)
 {
+    pmu_op_start(env);
     value &= pmu_counter_mask(env);
     env->cp15.c9_pmcnten &= ~value;
+    pmu_op_finish(env);
 }

 static void pmovsr_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -1560,6 +1635,10 @@ static uint64_t pmxevtyper_read(CPUARMState *env, const ARMCPRegInfo *ri)
 static void pmevcntr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                            uint64_t value, uint8_t counter)
 {
+    if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+        /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+        value &= MAKE_64BIT_MASK(0, 32);
+    }
     if (counter < pmu_num_counters(env)) {
         pmevcntr_op_start(env, counter);
         env->cp15.c14_pmevcntr[counter] = value;
@@ -1579,6 +1658,10 @@ static uint64_t pmevcntr_read(CPUARMState *env, const ARMCPRegInfo *ri,
         pmevcntr_op_start(env, counter);
         ret = env->cp15.c14_pmevcntr[counter];
         pmevcntr_op_finish(env, counter);
+        if (!cpu_isar_feature(any_pmuv3p5, env_archcpu(env))) {
+            /* Before FEAT_PMUv3p5, top 32 bits of event counters are RES0 */
+            ret &= MAKE_64BIT_MASK(0, 32);
+        }
         return ret;
     } else {
         /* We opt to behave as a RAZ/WI when attempts to access PM[X]EVCNTR
@@ -4681,7 +4764,39 @@ static void sctlr_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void sdcr_write(CPUARMState *env, const ARMCPRegInfo *ri,
                        uint64_t value)
 {
+    /*
+     * Some MDCR_EL3 bits affect whether PMU counters are running:
+     * if we are trying to change any of those then we must
+     * bracket this update with PMU start/finish calls.
+     */
+    bool pmu_op = (env->cp15.mdcr_el3 ^ value) & MDCR_EL3_PMU_ENABLE_BITS;
+
+    if (pmu_op) {
+        pmu_op_start(env);
+    }
     env->cp15.mdcr_el3 = value & SDCR_VALID_MASK;
+    if (pmu_op) {
+        pmu_op_finish(env);
+    }
 }

+static void mdcr_el2_write(CPUARMState *env, const ARMCPRegInfo *ri,
+                           uint64_t value)
+{
+    /*
+     * Some MDCR_EL2 bits affect whether PMU counters are running:
+     * if we are trying to change any of those then we must
+     * bracket this update with PMU start/finish calls.
+     */
+    bool pmu_op = (env->cp15.mdcr_el2 ^ value) & MDCR_EL2_PMU_ENABLE_BITS;
+
+    if (pmu_op) {
+        pmu_op_start(env);
+    }
+    env->cp15.mdcr_el2 = value;
+    if (pmu_op) {
+        pmu_op_finish(env);
+    }
+}
+
 static const ARMCPRegInfo v8_cp_reginfo[] = {
@@ -6344,7 +6459,7 @@ static void define_pmu_regs(ARMCPU *cpu)
         g_free(pmevtyper_name);
         g_free(pmevtyper_el0_name);
     }
-    if (cpu_isar_feature(aa32_pmu_8_1, cpu)) {
+    if (cpu_isar_feature(aa32_pmuv3p1, cpu)) {
         ARMCPRegInfo v81_pmu_regs[] = {
             { .name = "PMCEID2", .state = ARM_CP_STATE_AA32,
               .cp = 15, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 4,
@@ -6357,7 +6472,7 @@ static void define_pmu_regs(ARMCPU *cpu)
         };
         define_arm_cp_regs(cpu, v81_pmu_regs);
     }
-    if (cpu_isar_feature(any_pmu_8_4, cpu)) {
+    if (cpu_isar_feature(any_pmuv3p4, cpu)) {
         static const ARMCPRegInfo v84_pmmir = {
             .name = "PMMIR_EL1", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .opc1 = 0, .crn = 9, .crm = 14, .opc2 = 6,
@@ -7345,11 +7460,16 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         define_arm_cp_regs(cpu, not_v7_cp_reginfo);
     }
     if (arm_feature(env, ARM_FEATURE_V8)) {
-        /* AArch64 ID registers, which all have impdef reset values.
+        /*
+         * v8 ID registers, which all have impdef reset values.
          * Note that within the ID register ranges the unused slots
          * must all RAZ, not UNDEF; future architecture versions may
          * define new registers here.
+         * ID registers which are AArch64 views of the AArch32 ID registers
+         * which already existed in v6 and v7 are handled elsewhere,
+         * in v6_idregs[].
          */
+        int i;
         ARMCPRegInfo v8_idregs[] = {
             /*
              * ID_AA64PFR0_EL1 is not a plain ARM_CP_CONST in system
@@ -7539,7 +7659,34 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
               .resetvalue = cpu->isar.mvfr2 },
-            { .name = "MVFR3_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+            /*
+             * "0, c0, c3, {0,1,2}" are the encodings corresponding to
+             * AArch64 MVFR[012]_EL1. Define the STATE_AA32 encoding
+             * as RAZ, since it is in the "reserved for future ID
+             * registers, RAZ" part of the AArch32 encoding space.
+             */
+            { .name = "RES_0_C0_C3_0", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 0,
+              .access = PL1_R, .type = ARM_CP_CONST,
+              .accessfn = access_aa64_tid3,
+              .resetvalue = 0 },
+            { .name = "RES_0_C0_C3_1", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 1,
+              .access = PL1_R, .type = ARM_CP_CONST,
+              .accessfn = access_aa64_tid3,
+              .resetvalue = 0 },
+            { .name = "RES_0_C0_C3_2", .state = ARM_CP_STATE_AA32,
+              .cp = 15, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 2,
+              .access = PL1_R, .type = ARM_CP_CONST,
+              .accessfn = access_aa64_tid3,
+              .resetvalue = 0 },
+            /*
+             * Other encodings in "0, c0, c3, ..." are STATE_BOTH because
+             * they're also RAZ for AArch64, and in v8 are gradually
+             * being filled with AArch64-view-of-AArch32-ID-register
+             * for new ID registers.
+             */
+            { .name = "RES_0_C0_C3_3", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 3,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
@@ -7549,17 +7696,17 @@ void register_cp_regs_for_features(ARMCPU *cpu)
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
               .resetvalue = cpu->isar.id_pfr2 },
-            { .name = "MVFR5_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+            { .name = "ID_DFR1", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 5,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
-              .resetvalue = 0 },
-            { .name = "MVFR6_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+              .resetvalue = cpu->isar.id_dfr1 },
+            { .name = "ID_MMFR5", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 6,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
-              .resetvalue = 0 },
-            { .name = "MVFR7_EL1_RESERVED", .state = ARM_CP_STATE_AA64,
+              .resetvalue = cpu->isar.id_mmfr5 },
+            { .name = "RES_0_C0_C3_7", .state = ARM_CP_STATE_BOTH,
               .opc0 = 3, .opc1 = 0, .crn = 0, .crm = 3, .opc2 = 7,
               .access = PL1_R, .type = ARM_CP_CONST,
               .accessfn = access_aa64_tid3,
@@ -7625,6 +7772,29 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         }
         define_arm_cp_regs(cpu, v8_idregs);
         define_arm_cp_regs(cpu, v8_cp_reginfo);
+
+        for (i = 4; i < 16; i++) {
+            /*
+             * Encodings in "0, c0, {c4-c7}, {0-7}" are RAZ for AArch32.
+             * For pre-v8 cores there are RAZ patterns for these in
+             * id_pre_v8_midr_cp_reginfo[]; for v8 we do that here.
+             * v8 extends the "must RAZ" part of the ID register space
+             * to also cover c0, 0, c{8-15}, {0-7}.
+             * These are STATE_AA32 because in the AArch64 sysreg space
+             * c4-c7 is where the AArch64 ID registers live (and we've
+             * already defined those in v8_idregs[]), and c8-c15 are not
+             * "must RAZ" for AArch64.
+             */
+            g_autofree char *name = g_strdup_printf("RES_0_C0_C%d_X", i);
+            ARMCPRegInfo v8_aa32_raz_idregs = {
+                .name = name,
+                .state = ARM_CP_STATE_AA32,
+                .cp = 15, .opc1 = 0, .crn = 0, .crm = i, .opc2 = CP_ANY,
+                .access = PL1_R, .type = ARM_CP_CONST,
+                .accessfn = access_aa64_tid3,
+                .resetvalue = 0 };
+            define_one_arm_cp_reg(cpu, &v8_aa32_raz_idregs);
+        }
     }

     /*
@@ -7669,6 +7839,7 @@ void register_cp_regs_for_features(ARMCPU *cpu)
         ARMCPRegInfo mdcr_el2 = {
             .name = "MDCR_EL2", .state = ARM_CP_STATE_BOTH,
             .opc0 = 3, .opc1 = 4, .crn = 1, .crm = 1, .opc2 = 1,
+            .writefn = mdcr_el2_write,
             .access = PL2_RW, .resetvalue = pmu_num_counters(env),
             .fieldoffset = offsetof(CPUARMState, cp15.mdcr_el2),
         };
@@ -1256,6 +1256,7 @@ enum MVEECIState {
 /* Definitions for the PMU registers */
 #define PMCRN_MASK 0xf800
 #define PMCRN_SHIFT 11
+#define PMCRLP 0x80
 #define PMCRLC 0x40
 #define PMCRDP 0x20
 #define PMCRX 0x10
@@ -1267,7 +1268,7 @@ enum MVEECIState {
  * Mask of PMCR bits writable by guest (not including WO bits like C, P,
  * which can be written as 1 to trigger behaviour but which stay RAZ).
  */
-#define PMCR_WRITABLE_MASK (PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)
+#define PMCR_WRITABLE_MASK (PMCRLP | PMCRLC | PMCRDP | PMCRX | PMCRD | PMCRE)

 #define PMXEVTYPER_P 0x80000000
 #define PMXEVTYPER_U 0x40000000
@@ -1296,7 +1297,7 @@ static inline uint32_t pmu_num_counters(CPUARMState *env)
 /* Bits allowed to be set/cleared for PMCNTEN* and PMINTEN* */
 static inline uint64_t pmu_counter_mask(CPUARMState *env)
 {
-    return (1 << 31) | ((1 << pmu_num_counters(env)) - 1);
+    return (1ULL << 31) | ((1ULL << pmu_num_counters(env)) - 1);
 }

 #ifdef TARGET_AARCH64
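A standalone illustration of why the cast matters (not QEMU code; plain C, assuming a 32-bit int): `1 << 31` is a negative int, so widening it to the function's uint64_t return type sign-extends and wrongly sets bits 63:32 of the mask (formally, shifting into the sign bit is undefined behaviour, which is another reason to use 1ULL). Using 1ULL keeps the mask confined to bit 31 plus the low counter bits:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int n = 4;                               /* pretend pmu_num_counters() == 4 */
        uint64_t bad  = (1 << 31) | ((1 << n) - 1);       /* sign-extends: 0xffffffff8000000f */
        uint64_t good = (1ULL << 31) | ((1ULL << n) - 1); /* intended mask: 0x000000008000000f */
        printf("%016" PRIx64 "\n%016" PRIx64 "\n", bad, good);
        return 0;
    }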
@@ -608,8 +608,6 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
                               ARM64_SYS_REG(3, 0, 0, 1, 0));
         err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
                               ARM64_SYS_REG(3, 0, 0, 1, 1));
-        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
-                              ARM64_SYS_REG(3, 0, 0, 3, 4));
         err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
                               ARM64_SYS_REG(3, 0, 0, 1, 2));
         err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
@@ -643,6 +641,12 @@ bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
                               ARM64_SYS_REG(3, 0, 0, 3, 1));
         err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
                               ARM64_SYS_REG(3, 0, 0, 3, 2));
+        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
+                              ARM64_SYS_REG(3, 0, 0, 3, 4));
+        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
+                              ARM64_SYS_REG(3, 0, 0, 3, 5));
+        err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
+                              ARM64_SYS_REG(3, 0, 0, 3, 6));

         /*
          * DBGDIDR is a bit complicated because the kernel doesn't