target/ppc: Introduce ppc_xlate
Create one common dispatch for all of the ppc_*_xlate functions.
Use ppc64_v3_radix to directly dispatch between ppc_radix64_xlate
and ppc_hash64_xlate.

Remove the separate *_handle_mmu_fault and *_get_phys_page_debug
functions, using common code for ppc_cpu_tlb_fill and
ppc_cpu_get_phys_page_debug.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210621125115.67717-9-bruno.larsen@eldorado.org.br>
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
parent af44a14236
commit 51806b5458
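For orientation before the full diff, the toy program below is a condensed, hedged sketch of the pattern this commit introduces: a single boolean translate entry point with out-parameters, selected by MMU model, that both the TLB-fill path and the debug-translation path reuse. It is not QEMU code; every name in it (ToyCPU, MMU_HASH, MMU_RADIX, hash_xlate, radix_xlate, toy_xlate) is an invented stand-in, and the real dispatch (ppc_xlate) appears verbatim in the last hunk below.

/*
 * Toy model of the ppc_xlate-style dispatch introduced by this commit.
 * All identifiers here are hypothetical stand-ins, not QEMU APIs.
 */
#include <stdbool.h>
#include <inttypes.h>
#include <stdio.h>

typedef enum { MMU_HASH, MMU_RADIX } ToyMMUModel;
typedef struct { ToyMMUModel mmu_model; } ToyCPU;

/* Per-model translators: return success, write results via out-params. */
static bool hash_xlate(uint64_t eaddr, uint64_t *raddr, int *psize)
{
    *raddr = eaddr ^ 0x1000;    /* pretend translation */
    *psize = 12;
    return true;
}

static bool radix_xlate(uint64_t eaddr, uint64_t *raddr, int *psize)
{
    *raddr = eaddr + 0x4000;    /* pretend translation */
    *psize = 16;
    return true;
}

/* Single dispatch point, analogous in shape to the new ppc_xlate(). */
static bool toy_xlate(ToyCPU *cpu, uint64_t eaddr,
                      uint64_t *raddr, int *psize)
{
    switch (cpu->mmu_model) {
    case MMU_RADIX:
        return radix_xlate(eaddr, raddr, psize);
    case MMU_HASH:
    default:
        return hash_xlate(eaddr, raddr, psize);
    }
}

/* Both the "TLB fill" and the "debug" callers go through the same entry. */
int main(void)
{
    ToyCPU cpu = { .mmu_model = MMU_RADIX };
    uint64_t raddr;
    int psize;

    if (toy_xlate(&cpu, 0x2000, &raddr, &psize)) {
        printf("raddr=0x%" PRIx64 " page_bits=%d\n", raddr, psize);
    }
    return 0;
}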
@@ -23,25 +23,6 @@
 #include "mmu-book3s-v3.h"
 #include "mmu-radix64.h"
 
-int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
-                              int mmu_idx)
-{
-    if (ppc64_v3_radix(cpu)) { /* Guest uses radix */
-        return ppc_radix64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
-    } else { /* Guest uses hash */
-        return ppc_hash64_handle_mmu_fault(cpu, eaddr, rwx, mmu_idx);
-    }
-}
-
-hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr)
-{
-    if (ppc64_v3_radix(cpu)) {
-        return ppc_radix64_get_phys_page_debug(cpu, eaddr);
-    } else {
-        return ppc_hash64_get_phys_page_debug(cpu, eaddr);
-    }
-}
-
 bool ppc64_v3_get_pate(PowerPCCPU *cpu, target_ulong lpid, ppc_v3_pate_t *entry)
 {
     uint64_t patb = cpu->env.spr[SPR_PTCR] & PTCR_PATB;
@@ -67,11 +67,6 @@ static inline bool ppc64_v3_radix(PowerPCCPU *cpu)
     return !!(cpu->env.spr[SPR_LPCR] & LPCR_HR);
 }
 
-hwaddr ppc64_v3_get_phys_page_debug(PowerPCCPU *cpu, vaddr eaddr);
-
-int ppc64_v3_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
-                              int mmu_idx);
-
 static inline hwaddr ppc_hash64_hpt_base(PowerPCCPU *cpu)
 {
     uint64_t base;
@@ -424,10 +424,9 @@ static hwaddr ppc_hash32_pte_raddr(target_ulong sr, ppc_hash_pte32_t pte,
     return (rpn & ~mask) | (eaddr & mask);
 }
 
-static bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr,
-                             MMUAccessType access_type,
-                             hwaddr *raddrp, int *psizep, int *protp,
-                             bool guest_visible)
+bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                      hwaddr *raddrp, int *psizep, int *protp,
+                      bool guest_visible)
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
@@ -569,34 +568,3 @@ static bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr,
     *protp = prot;
     return true;
 }
-
-int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
-                                MMUAccessType access_type, int mmu_idx)
-{
-    CPUState *cs = CPU(cpu);
-    int page_size, prot;
-    hwaddr raddr;
-
-    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
-    if (!ppc_hash32_xlate(cpu, eaddr, access_type, &raddr,
-                          &page_size, &prot, true)) {
-        return 1;
-    }
-
-    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, 1UL << page_size);
-    return 0;
-}
-
-hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
-{
-    int psize, prot;
-    hwaddr raddr;
-
-    if (!ppc_hash32_xlate(cpu, eaddr, MMU_DATA_LOAD, &raddr,
-                          &psize, &prot, false)) {
-        return -1;
-    }
-
-    return raddr & TARGET_PAGE_MASK;
-}
@@ -4,9 +4,9 @@
 #ifndef CONFIG_USER_ONLY
 
 hwaddr get_pteg_offset32(PowerPCCPU *cpu, hwaddr hash);
-hwaddr ppc_hash32_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
-int ppc_hash32_handle_mmu_fault(PowerPCCPU *cpu, vaddr address,
-                                MMUAccessType access_type, int mmu_idx);
+bool ppc_hash32_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                      hwaddr *raddrp, int *psizep, int *protp,
+                      bool guest_visible);
 
 /*
  * Segment register definitions
@@ -873,10 +873,9 @@ static int build_vrma_slbe(PowerPCCPU *cpu, ppc_slb_t *slb)
     return -1;
 }
 
-static bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr,
-                             MMUAccessType access_type,
-                             hwaddr *raddrp, int *psizep, int *protp,
-                             bool guest_visible)
+bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                      hwaddr *raddrp, int *psizep, int *protp,
+                      bool guest_visible)
 {
     CPUState *cs = CPU(cpu);
     CPUPPCState *env = &cpu->env;
@@ -1094,36 +1093,6 @@ static bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr,
     return true;
 }
 
-int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
-                                MMUAccessType access_type, int mmu_idx)
-{
-    CPUState *cs = CPU(cpu);
-    int page_size, prot;
-    hwaddr raddr;
-
-    if (!ppc_hash64_xlate(cpu, eaddr, access_type, &raddr,
-                          &page_size, &prot, true)) {
-        return 1;
-    }
-
-    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, 1UL << page_size);
-    return 0;
-}
-
-hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
-{
-    int psize, prot;
-    hwaddr raddr;
-
-    if (!ppc_hash64_xlate(cpu, eaddr, MMU_DATA_LOAD, &raddr,
-                          &psize, &prot, false)) {
-        return -1;
-    }
-
-    return raddr & TARGET_PAGE_MASK;
-}
-
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                                target_ulong pte0, target_ulong pte1)
 {
@@ -7,9 +7,9 @@
 void dump_slb(PowerPCCPU *cpu);
 int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                   target_ulong esid, target_ulong vsid);
-hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
-int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr address,
-                                MMUAccessType access_type, int mmu_idx);
+bool ppc_hash64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                      hwaddr *raddrp, int *psizep, int *protp,
+                      bool guest_visible);
 void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu,
                                target_ulong pte_index,
                                target_ulong pte0, target_ulong pte1);
@@ -463,10 +463,9 @@ static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
  * | = On        | Process Scoped |    Scoped     |
  * +-------------+----------------+---------------+
  */
-static bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr,
-                              MMUAccessType access_type,
-                              hwaddr *raddr, int *psizep, int *protp,
-                              bool guest_visible)
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                       hwaddr *raddr, int *psizep, int *protp,
+                       bool guest_visible)
 {
     CPUPPCState *env = &cpu->env;
     uint64_t lpid, pid;
@@ -584,34 +583,3 @@ static bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr,
 
     return true;
 }
-
-int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
-                                 MMUAccessType access_type, int mmu_idx)
-{
-    CPUState *cs = CPU(cpu);
-    int page_size, prot;
-    hwaddr raddr;
-
-    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
-    if (!ppc_radix64_xlate(cpu, eaddr, access_type, &raddr,
-                           &page_size, &prot, true)) {
-        return 1;
-    }
-
-    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, 1UL << page_size);
-    return 0;
-}
-
-hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
-{
-    int psize, prot;
-    hwaddr raddr;
-
-    if (!ppc_radix64_xlate(cpu, eaddr, MMU_DATA_LOAD, &raddr,
-                           &psize, &prot, false)) {
-        return -1;
-    }
-
-    return raddr & TARGET_PAGE_MASK;
-}
@@ -44,9 +44,9 @@
 
 #ifdef TARGET_PPC64
 
-int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
-                                 MMUAccessType access_type, int mmu_idx);
-hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr);
+bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                       hwaddr *raddr, int *psizep, int *protp,
+                       bool guest_visible);
 
 static inline int ppc_radix64_get_prot_eaa(uint64_t pte)
 {
@@ -2899,98 +2899,72 @@ void helper_check_tlb_flush_global(CPUPPCState *env)
 
 /*****************************************************************************/
 
-static int cpu_ppc_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
-                                    MMUAccessType access_type, int mmu_idx)
+static bool ppc_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
+                      hwaddr *raddrp, int *psizep, int *protp,
+                      int mmu_idx, bool guest_visible)
 {
-    CPUState *cs = CPU(cpu);
-    int page_size, prot;
-    hwaddr raddr;
+    switch (cpu->env.mmu_model) {
+#if defined(TARGET_PPC64)
+    case POWERPC_MMU_3_00:
+        if (ppc64_v3_radix(cpu)) {
+            return ppc_radix64_xlate(cpu, eaddr, access_type,
+                                     raddrp, psizep, protp, guest_visible);
+        }
+        /* fall through */
+    case POWERPC_MMU_64B:
+    case POWERPC_MMU_2_03:
+    case POWERPC_MMU_2_06:
+    case POWERPC_MMU_2_07:
+        return ppc_hash64_xlate(cpu, eaddr, access_type,
+                                raddrp, psizep, protp, guest_visible);
+#endif
 
-    if (!ppc_jumbo_xlate(cpu, eaddr, access_type, &raddr,
-                         &page_size, &prot, mmu_idx, true)) {
-        return 1;
+    case POWERPC_MMU_32B:
+    case POWERPC_MMU_601:
+        return ppc_hash32_xlate(cpu, eaddr, access_type,
+                                raddrp, psizep, protp, guest_visible);
+
+    default:
+        return ppc_jumbo_xlate(cpu, eaddr, access_type, raddrp,
+                               psizep, protp, mmu_idx, guest_visible);
     }
-
-    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
-                 prot, mmu_idx, 1UL << page_size);
-    return 0;
 }
 
 hwaddr ppc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
-    CPUPPCState *env = &cpu->env;
     hwaddr raddr;
     int s, p;
 
-    switch (env->mmu_model) {
-#if defined(TARGET_PPC64)
-    case POWERPC_MMU_64B:
-    case POWERPC_MMU_2_03:
-    case POWERPC_MMU_2_06:
-    case POWERPC_MMU_2_07:
-        return ppc_hash64_get_phys_page_debug(cpu, addr);
-    case POWERPC_MMU_3_00:
-        return ppc64_v3_get_phys_page_debug(cpu, addr);
-#endif
-
-    case POWERPC_MMU_32B:
-    case POWERPC_MMU_601:
-        return ppc_hash32_get_phys_page_debug(cpu, addr);
-
-    default:
-        ;
-    }
-
     /*
      * Some MMUs have separate TLBs for code and data. If we only
      * try an MMU_DATA_LOAD, we may not be able to read instructions
      * mapped by code TLBs, so we also try a MMU_INST_FETCH.
      */
-    if (ppc_jumbo_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, 0, false) ||
-        ppc_jumbo_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, 0, false)) {
+    if (ppc_xlate(cpu, addr, MMU_DATA_LOAD, &raddr, &s, &p, 0, false) ||
+        ppc_xlate(cpu, addr, MMU_INST_FETCH, &raddr, &s, &p, 0, false)) {
         return raddr & TARGET_PAGE_MASK;
     }
     return -1;
 }
 
-
-bool ppc_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
+bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
 {
     PowerPCCPU *cpu = POWERPC_CPU(cs);
-    CPUPPCState *env = &cpu->env;
-    int ret;
+    hwaddr raddr;
+    int page_size, prot;
 
-    switch (env->mmu_model) {
-#if defined(TARGET_PPC64)
-    case POWERPC_MMU_64B:
-    case POWERPC_MMU_2_03:
-    case POWERPC_MMU_2_06:
-    case POWERPC_MMU_2_07:
-        ret = ppc_hash64_handle_mmu_fault(cpu, addr, access_type, mmu_idx);
-        break;
-    case POWERPC_MMU_3_00:
-        ret = ppc64_v3_handle_mmu_fault(cpu, addr, access_type, mmu_idx);
-        break;
-#endif
-
-    case POWERPC_MMU_32B:
-    case POWERPC_MMU_601:
-        ret = ppc_hash32_handle_mmu_fault(cpu, addr, access_type, mmu_idx);
-        break;
-
-    default:
-        ret = cpu_ppc_handle_mmu_fault(cpu, addr, access_type, mmu_idx);
-        break;
+    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
+                  &page_size, &prot, mmu_idx, !probe)) {
+        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
+                     prot, mmu_idx, 1UL << page_size);
+        return true;
     }
-    if (unlikely(ret != 0)) {
-        if (probe) {
-            return false;
-        }
-        raise_exception_err_ra(env, cs->exception_index, env->error_code,
-                               retaddr);
+    if (probe) {
+        return false;
     }
-    return true;
+    raise_exception_err_ra(&cpu->env, cs->exception_index,
+                           cpu->env.error_code, retaddr);
 }