tlb: Add "ifetch" argument to cpu_mmu_index()
This is set to true when the index is for an instruction fetch
translation.

The core get_page_addr_code() sets it, as do the SOFTMMU_CODE_ACCESS
accessors.

All targets ignore it for now, and all other callers pass "false".

This will allow targets that wish to split the mmu index between
instruction and data accesses to do so. A subsequent patch will do just
that for PowerPC.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Message-Id: <1439796853-4410-2-git-send-email-benh@kernel.crashing.org>
Signed-off-by: Richard Henderson <rth@twiddle.net>
commit 97ed5ccdee
parent ba9cef7b6e
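Before the diff, a minimal sketch of how a target could use the new flag once it stops ignoring it. This is illustrative only: CPUFooState, immu_idx, and dmmu_idx are invented names, not part of this commit; the real split lands in the follow-up PowerPC patch mentioned above.

#include <stdbool.h>

/* Hypothetical target state -- field names are assumptions for
 * illustration, not taken from any QEMU target. */
typedef struct CPUFooState {
    int immu_idx;   /* MMU index the target uses for instruction fetches */
    int dmmu_idx;   /* MMU index the target uses for data loads/stores   */
} CPUFooState;

/* With the new "ifetch" argument, a target can return a different
 * MMU index for code fetches than for data accesses. */
static inline int cpu_mmu_index(CPUFooState *env, bool ifetch)
{
    return ifetch ? env->immu_idx : env->dmmu_idx;
}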
--- a/cputlb.c
+++ b/cputlb.c
@@ -452,7 +452,7 @@ tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
     CPUState *cpu = ENV_GET_CPU(env1);

     page_index = (addr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
-    mmu_idx = cpu_mmu_index(env1);
+    mmu_idx = cpu_mmu_index(env1, true);
     if (unlikely(env1->tlb_table[mmu_idx][page_index].addr_code !=
                  (addr & TARGET_PAGE_MASK))) {
         cpu_ldub_code(env1, addr);
--- a/include/exec/cpu_ldst.h
+++ b/include/exec/cpu_ldst.h
@@ -363,7 +363,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #endif /* (NB_MMU_MODES > 12) */

 /* these access are slower, they must be as rare as possible */
-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, false))
 #define MEMSUFFIX _data
 #define DATA_SIZE 1
 #include "exec/cpu_ldst_template.h"
@@ -379,7 +379,7 @@ uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
 #undef CPU_MMU_INDEX
 #undef MEMSUFFIX

-#define CPU_MMU_INDEX (cpu_mmu_index(env))
+#define CPU_MMU_INDEX (cpu_mmu_index(env, true))
 #define MEMSUFFIX _code
 #define SOFTMMU_CODE_ACCESS

--- a/target-alpha/cpu.h
+++ b/target-alpha/cpu.h
@@ -376,7 +376,7 @@ enum {
     PS_USER_MODE = 8
 };

-static inline int cpu_mmu_index(CPUAlphaState *env)
+static inline int cpu_mmu_index(CPUAlphaState *env, bool ifetch)
 {
     if (env->pal_mode) {
         return MMU_KERNEL_IDX;
--- a/target-alpha/translate.c
+++ b/target-alpha/translate.c
@@ -2878,7 +2878,7 @@ static inline void gen_intermediate_code_internal(AlphaCPU *cpu,

     ctx.tb = tb;
     ctx.pc = pc_start;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);
     ctx.implver = env->implver;
     ctx.singlestep_enabled = cs->singlestep_enabled;

--- a/target-arm/cpu.h
+++ b/target-arm/cpu.h
@@ -1678,7 +1678,7 @@ static inline int arm_mmu_idx_to_el(ARMMMUIdx mmu_idx)
 }

 /* Determine the current mmu_idx to use for normal loads/stores */
-static inline int cpu_mmu_index(CPUARMState *env)
+static inline int cpu_mmu_index(CPUARMState *env, bool ifetch)
 {
     int el = arm_current_el(env);

@@ -1911,7 +1911,7 @@ static inline void cpu_get_tb_cpu_state(CPUARMState *env, target_ulong *pc,
                    << ARM_TBFLAG_XSCALE_CPAR_SHIFT);
     }

-    *flags |= (cpu_mmu_index(env) << ARM_TBFLAG_MMUIDX_SHIFT);
+    *flags |= (cpu_mmu_index(env, false) << ARM_TBFLAG_MMUIDX_SHIFT);
     /* The SS_ACTIVE and PSTATE_SS bits correspond to the state machine
      * states defined in the ARM ARM for software singlestep:
      *  SS_ACTIVE   PSTATE.SS   State
--- a/target-arm/helper.c
+++ b/target-arm/helper.c
@@ -6892,7 +6892,7 @@ hwaddr arm_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     uint32_t fsr;
     MemTxAttrs attrs = {};

-    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env), &phys_addr,
+    ret = get_phys_addr(env, addr, 0, cpu_mmu_index(env, false), &phys_addr,
                         &attrs, &prot, &page_size, &fsr);

     if (ret) {
@@ -7057,7 +7057,7 @@ void HELPER(dc_zva)(CPUARMState *env, uint64_t vaddr_in)
         int maxidx = DIV_ROUND_UP(blocklen, TARGET_PAGE_SIZE);
         void *hostaddr[maxidx];
         int try, i;
-        unsigned mmu_idx = cpu_mmu_index(env);
+        unsigned mmu_idx = cpu_mmu_index(env, false);
         TCGMemOpIdx oi = make_memop_idx(MO_UB, mmu_idx);

         for (try = 0; try < 2; try++) {
--- a/target-cris/cpu.h
+++ b/target-cris/cpu.h
@@ -233,7 +233,7 @@ enum {
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUCRISState *env)
+static inline int cpu_mmu_index (CPUCRISState *env, bool ifetch)
 {
     return !!(env->pregs[PR_CCS] & U_FLAG);
 }
--- a/target-cris/translate.c
+++ b/target-cris/translate.c
@@ -1083,7 +1083,7 @@ static inline void cris_prepare_jmp (DisasContext *dc, unsigned int type)

 static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
@@ -1097,7 +1097,7 @@ static void gen_load64(DisasContext *dc, TCGv_i64 dst, TCGv addr)
 static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
                      unsigned int size, int sign)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
@@ -1112,7 +1112,7 @@ static void gen_load(DisasContext *dc, TCGv dst, TCGv addr,
 static void gen_store (DisasContext *dc, TCGv addr, TCGv val,
                        unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
--- a/target-cris/translate_v10.c
+++ b/target-cris/translate_v10.c
@@ -96,7 +96,7 @@ static void gen_store_v10_conditional(DisasContext *dc, TCGv addr, TCGv val,
 static void gen_store_v10(DisasContext *dc, TCGv addr, TCGv val,
                           unsigned int size)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     /* If we get a fault on a delayslot we must keep the jmp state in
        the cpu-state to be able to re-execute the jmp. */
--- a/target-i386/cpu.h
+++ b/target-i386/cpu.h
@@ -1199,7 +1199,7 @@ uint64_t cpu_get_tsc(CPUX86State *env);
 #define MMU_KSMAP_IDX   0
 #define MMU_USER_IDX    1
 #define MMU_KNOSMAP_IDX 2
-static inline int cpu_mmu_index(CPUX86State *env)
+static inline int cpu_mmu_index(CPUX86State *env, bool ifetch)
 {
     return (env->hflags & HF_CPL_MASK) == 3 ? MMU_USER_IDX :
         (!(env->hflags & HF_SMAP_MASK) || (env->eflags & AC_MASK))
--- a/target-i386/translate.c
+++ b/target-i386/translate.c
@@ -7942,7 +7942,7 @@ static inline void gen_intermediate_code_internal(X86CPU *cpu,
     /* select memory access functions */
     dc->mem_index = 0;
     if (flags & HF_SOFTMMU_MASK) {
-        dc->mem_index = cpu_mmu_index(env);
+        dc->mem_index = cpu_mmu_index(env, false);
     }
     dc->cpuid_features = env->features[FEAT_1_EDX];
     dc->cpuid_ext_features = env->features[FEAT_1_ECX];
--- a/target-lm32/cpu.h
+++ b/target-lm32/cpu.h
@@ -34,7 +34,7 @@ typedef struct CPULM32State CPULM32State;

 #define NB_MMU_MODES 1
 #define TARGET_PAGE_BITS 12
-static inline int cpu_mmu_index(CPULM32State *env)
+static inline int cpu_mmu_index(CPULM32State *env, bool ifetch)
 {
     return 0;
 }
--- a/target-m68k/cpu.h
+++ b/target-m68k/cpu.h
@@ -223,7 +223,7 @@ void register_m68k_insns (CPUM68KState *env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUM68KState *env)
+static inline int cpu_mmu_index (CPUM68KState *env, bool ifetch)
 {
     return (env->sr & SR_S) == 0 ? 1 : 0;
 }
--- a/target-microblaze/cpu.h
+++ b/target-microblaze/cpu.h
@@ -309,7 +309,7 @@ int cpu_mb_signal_handler(int host_signum, void *pinfo,
 #define MMU_USER_IDX    2
 /* See NB_MMU_MODES further up the file. */

-static inline int cpu_mmu_index (CPUMBState *env)
+static inline int cpu_mmu_index (CPUMBState *env, bool ifetch)
 {
     /* Are we in nommu mode?. */
     if (!(env->sregs[SR_MSR] & MSR_VM))
--- a/target-microblaze/mmu.c
+++ b/target-microblaze/mmu.c
@@ -279,7 +279,7 @@ void mmu_write(CPUMBState *env, uint32_t rn, uint32_t v)
             }

             hit = mmu_translate(&env->mmu, &lu,
-                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env));
+                                v & TLB_EPN_MASK, 0, cpu_mmu_index(env, false));
             if (hit) {
                 env->mmu.regs[MMU_R_TLBX] = lu.idx;
             } else
--- a/target-microblaze/translate.c
+++ b/target-microblaze/translate.c
@@ -418,7 +418,7 @@ static void dec_msr(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0, t1;
     unsigned int sr, to, rn;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     sr = dc->imm & ((1 << 14) - 1);
     to = dc->imm & (1 << 14);
@@ -730,7 +730,7 @@ static void dec_bit(DisasContext *dc)
     CPUState *cs = CPU(dc->cpu);
     TCGv t0;
     unsigned int op;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     op = dc->ir & ((1 << 9) - 1);
     switch (op) {
@@ -994,7 +994,7 @@ static void dec_load(DisasContext *dc)
      * address and if that succeeds we write into the destination reg.
      */
     v = tcg_temp_new();
-    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_ld_tl(v, *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
         tcg_gen_movi_tl(cpu_SR[SR_PC], dc->pc);
@@ -1072,7 +1072,7 @@ static void dec_store(DisasContext *dc)
            this compare and the following write to be atomic. For user
           emulation we need to add atomicity between threads. */
         tval = tcg_temp_new();
-        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env),
+        tcg_gen_qemu_ld_tl(tval, swx_addr, cpu_mmu_index(&dc->cpu->env, false),
                            MO_TEUL);
         tcg_gen_brcond_tl(TCG_COND_NE, env_res_val, tval, swx_skip);
         write_carryi(dc, 0);
@@ -1123,7 +1123,7 @@ static void dec_store(DisasContext *dc)
             break;
         }
     }
-    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env), mop);
+    tcg_gen_qemu_st_tl(cpu_R[dc->rd], *addr, cpu_mmu_index(&dc->cpu->env, false), mop);

     /* Verify alignment if needed. */
     if ((dc->cpu->env.pvr.regs[2] & PVR2_UNALIGNED_EXC_MASK) && size > 1) {
@@ -1219,7 +1219,7 @@ static void dec_bcc(DisasContext *dc)
 static void dec_br(DisasContext *dc)
 {
     unsigned int dslot, link, abs, mbar;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     dslot = dc->ir & (1 << 20);
     abs = dc->ir & (1 << 19);
@@ -1351,7 +1351,7 @@ static inline void do_rte(DisasContext *dc)
 static void dec_rts(DisasContext *dc)
 {
     unsigned int b_bit, i_bit, e_bit;
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);

     i_bit = dc->ir & (1 << 21);
     b_bit = dc->ir & (1 << 22);
@@ -1523,7 +1523,7 @@ static void dec_null(DisasContext *dc)
 /* Insns connected to FSL or AXI stream attached devices. */
 static void dec_stream(DisasContext *dc)
 {
-    int mem_index = cpu_mmu_index(&dc->cpu->env);
+    int mem_index = cpu_mmu_index(&dc->cpu->env, false);
     TCGv_i32 t_id, t_ctrl;
     int ctrl;

--- a/target-mips/cpu.h
+++ b/target-mips/cpu.h
@@ -634,7 +634,7 @@ extern uint32_t cpu_rddsp(uint32_t mask_num, CPUMIPSState *env);
 #define MMU_MODE1_SUFFIX _super
 #define MMU_MODE2_SUFFIX _user
 #define MMU_USER_IDX 2
-static inline int cpu_mmu_index (CPUMIPSState *env)
+static inline int cpu_mmu_index (CPUMIPSState *env, bool ifetch)
 {
     return env->hflags & MIPS_HFLAG_KSU;
 }
--- a/target-mips/op_helper.c
+++ b/target-mips/op_helper.c
@@ -3629,7 +3629,7 @@ FOP_CONDN_S(sne, (float32_lt(fst1, fst0, &env->active_fpu.fp_status)
 #if !defined(CONFIG_USER_ONLY)
 #define MEMOP_IDX(DF)                                           \
         TCGMemOpIdx oi = make_memop_idx(MO_TE | DF | MO_UNALN,  \
-                                        cpu_mmu_index(env));
+                                        cpu_mmu_index(env, false));
 #else
 #define MEMOP_IDX(DF)
 #endif
@@ -3685,7 +3685,7 @@ void helper_msa_st_ ## TYPE(CPUMIPSState *env, uint32_t wd, \
                             target_ulong addr)              \
 {                                                           \
     wr_t *pwd = &(env->active_fpu.fpr[wd].wr);              \
-    int mmu_idx = cpu_mmu_index(env);                       \
+    int mmu_idx = cpu_mmu_index(env, false);                \
     int i;                                                  \
     MEMOP_IDX(DF)                                           \
     ensure_writable_pages(env, addr, mmu_idx, GETRA());     \
--- a/target-moxie/cpu.h
+++ b/target-moxie/cpu.h
@@ -127,7 +127,7 @@ int cpu_moxie_signal_handler(int host_signum, void *pinfo,
 #define cpu_gen_code cpu_moxie_gen_code
 #define cpu_signal_handler cpu_moxie_signal_handler

-static inline int cpu_mmu_index(CPUMoxieState *env)
+static inline int cpu_mmu_index(CPUMoxieState *env, bool ifetch)
 {
     return 0;
 }
--- a/target-openrisc/cpu.h
+++ b/target-openrisc/cpu.h
@@ -403,7 +403,7 @@ static inline void cpu_get_tb_cpu_state(CPUOpenRISCState *env,
     *flags = (env->flags & D_FLAG);
 }

-static inline int cpu_mmu_index(CPUOpenRISCState *env)
+static inline int cpu_mmu_index(CPUOpenRISCState *env, bool ifetch)
 {
     if (!(env->sr & SR_IME)) {
         return MMU_NOMMU_IDX;
--- a/target-openrisc/translate.c
+++ b/target-openrisc/translate.c
@@ -1653,7 +1653,7 @@ static inline void gen_intermediate_code_internal(OpenRISCCPU *cpu,
     dc->ppc = pc_start;
     dc->pc = pc_start;
     dc->flags = cpu->env.cpucfgr;
-    dc->mem_idx = cpu_mmu_index(&cpu->env);
+    dc->mem_idx = cpu_mmu_index(&cpu->env, false);
     dc->synced_flags = dc->tb_flags = tb->flags;
     dc->delayed_branch = !!(dc->tb_flags & D_FLAG);
     dc->singlestep_enabled = cs->singlestep_enabled;
--- a/target-ppc/cpu.h
+++ b/target-ppc/cpu.h
@@ -1250,7 +1250,7 @@ int ppc_dcr_write (ppc_dcr_t *dcr_env, int dcrn, uint32_t val);
 #define MMU_MODE1_SUFFIX _kernel
 #define MMU_MODE2_SUFFIX _hypv
 #define MMU_USER_IDX 0
-static inline int cpu_mmu_index (CPUPPCState *env)
+static inline int cpu_mmu_index (CPUPPCState *env, bool ifetch)
 {
     return env->mmu_idx;
 }
--- a/target-s390x/cpu.h
+++ b/target-s390x/cpu.h
@@ -308,7 +308,7 @@ static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
 #define MMU_SECONDARY_IDX 1
 #define MMU_HOME_IDX 2

-static inline int cpu_mmu_index (CPUS390XState *env)
+static inline int cpu_mmu_index (CPUS390XState *env, bool ifetch)
 {
     switch (env->psw.mask & PSW_MASK_ASC) {
     case PSW_ASC_PRIMARY:
--- a/target-s390x/mem_helper.c
+++ b/target-s390x/mem_helper.c
@@ -69,7 +69,7 @@ static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
 static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
                         uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);

     while (l > 0) {
         void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
@@ -92,7 +92,7 @@ static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
 static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
                          uint32_t l)
 {
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);

     while (l > 0) {
         void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
--- a/target-sh4/cpu.h
+++ b/target-sh4/cpu.h
@@ -235,7 +235,7 @@ void cpu_load_tlb(CPUSH4State * env);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index (CPUSH4State *env)
+static inline int cpu_mmu_index (CPUSH4State *env, bool ifetch)
 {
     return (env->sr & (1u << SR_MD)) == 0 ? 1 : 0;
 }
--- a/target-sparc/cpu.h
+++ b/target-sparc/cpu.h
@@ -642,7 +642,7 @@ static inline int cpu_supervisor_mode(CPUSPARCState *env1)
 }
 #endif

-static inline int cpu_mmu_index(CPUSPARCState *env1)
+static inline int cpu_mmu_index(CPUSPARCState *env1, bool ifetch)
 {
 #if defined(CONFIG_USER_ONLY)
     return MMU_USER_IDX;
--- a/target-sparc/mmu_helper.c
+++ b/target-sparc/mmu_helper.c
@@ -849,7 +849,7 @@ hwaddr sparc_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
     SPARCCPU *cpu = SPARC_CPU(cs);
     CPUSPARCState *env = &cpu->env;
     hwaddr phys_addr;
-    int mmu_idx = cpu_mmu_index(env);
+    int mmu_idx = cpu_mmu_index(env, false);
     MemoryRegionSection section;

     if (cpu_sparc_get_phys_page(env, &phys_addr, addr, 2, mmu_idx) != 0) {
--- a/target-sparc/translate.c
+++ b/target-sparc/translate.c
@@ -5234,7 +5234,7 @@ static inline void gen_intermediate_code_internal(SPARCCPU *cpu,
     last_pc = dc->pc;
     dc->npc = (target_ulong) tb->cs_base;
     dc->cc_op = CC_OP_DYNAMIC;
-    dc->mem_idx = cpu_mmu_index(env);
+    dc->mem_idx = cpu_mmu_index(env, false);
     dc->def = env->def;
     dc->fpu_enabled = tb_fpu_enabled(tb->flags);
     dc->address_mask_32bit = tb_am_enabled(tb->flags);
--- a/target-tricore/cpu.h
+++ b/target-tricore/cpu.h
@@ -350,7 +350,7 @@ void tricore_cpu_list(FILE *f, fprintf_function cpu_fprintf);
 #define cpu_signal_handler cpu_tricore_signal_handler
 #define cpu_list tricore_cpu_list

-static inline int cpu_mmu_index(CPUTriCoreState *env)
+static inline int cpu_mmu_index(CPUTriCoreState *env, bool ifetch)
 {
     return 0;
 }
--- a/target-tricore/translate.c
+++ b/target-tricore/translate.c
@@ -8287,7 +8287,7 @@ gen_intermediate_code_internal(TriCoreCPU *cpu, struct TranslationBlock *tb,
     ctx.tb = tb;
     ctx.singlestep_enabled = cs->singlestep_enabled;
     ctx.bstate = BS_NONE;
-    ctx.mem_idx = cpu_mmu_index(env);
+    ctx.mem_idx = cpu_mmu_index(env, false);

     tcg_clear_temp_count();
     gen_tb_start(tb);
--- a/target-unicore32/cpu.h
+++ b/target-unicore32/cpu.h
@@ -131,7 +131,7 @@ int uc32_cpu_signal_handler(int host_signum, void *pinfo, void *puc);
 #define MMU_MODE0_SUFFIX _kernel
 #define MMU_MODE1_SUFFIX _user
 #define MMU_USER_IDX 1
-static inline int cpu_mmu_index(CPUUniCore32State *env)
+static inline int cpu_mmu_index(CPUUniCore32State *env, bool ifetch)
 {
     return (env->uncached_asr & ASR_M) == ASR_MODE_USER ? 1 : 0;
 }
--- a/target-xtensa/cpu.h
+++ b/target-xtensa/cpu.h
@@ -492,7 +492,7 @@ static inline uint32_t xtensa_replicate_windowstart(CPUXtensaState *env)
 #define MMU_MODE2_SUFFIX _ring2
 #define MMU_MODE3_SUFFIX _ring3

-static inline int cpu_mmu_index(CPUXtensaState *env)
+static inline int cpu_mmu_index(CPUXtensaState *env, bool ifetch)
 {
     return xtensa_get_cring(env);
 }