target/s390x: Implement the MVPG condition-code-option bit

If the CCO bit is set, MVPG should not generate an exception when it
hits a page-translation fault, but instead report the fault via the
condition code.

Create a new helper, access_prepare_nf, which can use probe_access_flags
in non-faulting mode and then handle watchpoints (a sketch of this probe
pattern follows the commit metadata below).

Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
[thuth: Added logic to still inject protection exceptions]
Signed-off-by: Thomas Huth <thuth@redhat.com>
[david: Look at env->tlb_fill_exc to determine if there was an exception]
Signed-off-by: David Hildenbrand <david@redhat.com>
Tested-by: Thomas Huth <thuth@redhat.com>
Message-Id: <20210315085449.34676-2-david@redhat.com>
Signed-off-by: Cornelia Huck <cohuck@redhat.com>
commit e56552cf07 (parent d66a52b50f)
Author: Richard Henderson, 2021-03-15 09:54:48 +01:00
Committer: Cornelia Huck
3 changed files with 121 additions and 23 deletions
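Before the diff, a minimal sketch of the non-faulting probe pattern the commit message describes, assuming the probe_access_flags() API QEMU had at the time; the helper name below is invented and the logic is illustrative only. As the mem_helper.c hunk notes, s390x cannot rely on the returned flags alone and additionally checks env->tlb_fill_exc.

/*
 * Illustrative only, not part of the patch: the generic shape of a
 * non-faulting probe inside a TCG helper.  With nonfault=true,
 * probe_access_flags() reports a failed translation through its return
 * flags instead of raising the exception, so the helper can turn the
 * failure into a condition code (as MVPG does when the CCO bit is set).
 */
static bool probe_page_nonfault(CPUS390XState *env, target_ulong addr,
                                MMUAccessType access_type, int mmu_idx,
                                void **phost, uintptr_t ra)
{
    int flags = probe_access_flags(env, addr, access_type, mmu_idx,
                                   true /* nonfault */, phost, ra);

    /* TLB_INVALID_MASK set means the translation did not succeed. */
    return !(flags & TLB_INVALID_MASK);
}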

target/s390x/cpu.h

@@ -114,6 +114,11 @@ struct CPUS390XState {
uint64_t diag318_info;
#if !defined(CONFIG_USER_ONLY)
uint64_t tlb_fill_tec; /* translation exception code during tlb_fill */
int tlb_fill_exc; /* exception number seen during tlb_fill */
#endif
/* Fields up to this point are cleared by a CPU reset */
struct {} end_reset_fields;
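An orientation note on the two new fields, as a comment sketch; it only summarizes how the following hunks use them and is not part of the patch.

/*
 * Orientation (illustrative only): tlb_fill() records its outcome in the
 * new fields, and the mem_helper.c hunk below reads them back after a
 * non-faulting probe:
 *
 *     env->tlb_fill_exc = 0;                 clear before probing
 *     probe_access_flags(env, addr, ...);    may invoke tlb_fill()
 *     if (env->tlb_fill_exc) ...             fault; tlb_fill_tec has the TEC
 */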

target/s390x/excp_helper.c

@@ -164,6 +164,9 @@ bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
tec = 0; /* unused */
}
env->tlb_fill_exc = excp;
env->tlb_fill_tec = tec;
if (!excp) {
qemu_log_mask(CPU_LOG_MMU,
"%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",

target/s390x/mem_helper.c

@@ -130,28 +130,103 @@ typedef struct S390Access {
int mmu_idx;
} S390Access;
/*
* With nonfault=1, return the PGM_ exception that would have been injected
* into the guest; return 0 if no exception was detected.
*
* For !CONFIG_USER_ONLY, the TEC is stored to env->tlb_fill_tec.
* For CONFIG_USER_ONLY, the faulting address is stored to env->__excp_addr.
*/
static int s390_probe_access(CPUArchState *env, target_ulong addr, int size,
MMUAccessType access_type, int mmu_idx,
bool nonfault, void **phost, uintptr_t ra)
{
int flags;
#if defined(CONFIG_USER_ONLY)
flags = page_get_flags(addr);
if (!(flags & (access_type == MMU_DATA_LOAD ? PAGE_READ : PAGE_WRITE))) {
env->__excp_addr = addr;
flags = (flags & PAGE_VALID) ? PGM_PROTECTION : PGM_ADDRESSING;
if (nonfault) {
return flags;
}
tcg_s390_program_interrupt(env, flags, ra);
}
*phost = g2h(env_cpu(env), addr);
#else
/*
* For !CONFIG_USER_ONLY, we cannot rely on TLB_INVALID_MASK or haddr==NULL
* to detect if there was an exception during tlb_fill().
*/
env->tlb_fill_exc = 0;
flags = probe_access_flags(env, addr, access_type, mmu_idx, nonfault, phost,
ra);
if (env->tlb_fill_exc) {
return env->tlb_fill_exc;
}
if (unlikely(flags & TLB_WATCHPOINT)) {
/* S390 does not presently use transaction attributes. */
cpu_check_watchpoint(env_cpu(env), addr, size,
MEMTXATTRS_UNSPECIFIED,
(access_type == MMU_DATA_STORE
? BP_MEM_WRITE : BP_MEM_READ), ra);
}
#endif
return 0;
}
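A hypothetical caller of s390_probe_access(), shown only to illustrate how the returned PGM_ code is meant to be consumed; it is not part of the patch and roughly mirrors the first-operand handling in the MVPG hunk further down (which additionally stores the TEC into the lowcore before injecting). PGM_PAGE_TRANS, PGM_SEGMENT_TRANS and tcg_s390_program_interrupt() are existing s390x definitions.

/* Hypothetical caller, for illustration only (not part of the patch). */
static int probe_operand_nonfault(CPUS390XState *env, target_ulong addr,
                                  int mmu_idx, uintptr_t ra)
{
    void *host;
    int exc = s390_probe_access(env, addr, TARGET_PAGE_SIZE, MMU_DATA_LOAD,
                                mmu_idx, true /* nonfault */, &host, ra);

    if (exc == PGM_PAGE_TRANS || exc == PGM_SEGMENT_TRANS) {
        return exc;     /* suppressed: the caller reports it via the CC */
    }
    if (exc) {
        /* e.g. PGM_PROTECTION: not suppressed by the CCO bit. */
        tcg_s390_program_interrupt(env, exc, ra);
    }
    return 0;           /* host now addresses the guest page */
}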
static int access_prepare_nf(S390Access *access, CPUS390XState *env,
bool nonfault, vaddr vaddr1, int size,
MMUAccessType access_type,
int mmu_idx, uintptr_t ra)
{
void *haddr1, *haddr2 = NULL;
int size1, size2, exc;
vaddr vaddr2 = 0;
assert(size > 0 && size <= 4096);
size1 = MIN(size, -(vaddr1 | TARGET_PAGE_MASK)),
size2 = size - size1;
exc = s390_probe_access(env, vaddr1, size1, access_type, mmu_idx, nonfault,
&haddr1, ra);
if (exc) {
return exc;
}
if (unlikely(size2)) {
/* The access crosses page boundaries. */
vaddr2 = wrap_address(env, vaddr1 + size1);
exc = s390_probe_access(env, vaddr2, size2, access_type, mmu_idx,
nonfault, &haddr2, ra);
if (exc) {
return exc;
}
}
*access = (S390Access) {
.vaddr1 = vaddr1,
.vaddr2 = vaddr2,
.haddr1 = haddr1,
.haddr2 = haddr2,
.size1 = size1,
.size2 = size2,
.mmu_idx = mmu_idx
};
return 0;
}
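A worked example of the page-boundary split computed at the top of access_prepare_nf(), as a small standalone C program; the 4 KiB page size matches s390x and the address is made up.

#include <assert.h>
#include <stdint.h>

#define TARGET_PAGE_MASK (~UINT64_C(0xfff))    /* 4 KiB pages, as on s390x */
#define MIN(a, b)        ((a) < (b) ? (a) : (b))

int main(void)
{
    uint64_t vaddr1 = UINT64_C(0x1000ffe0);    /* arbitrary example address */
    int size = 0x100;

    /* -(vaddr1 | TARGET_PAGE_MASK) is the number of bytes left on the page. */
    int size1 = MIN(size, (int)-(vaddr1 | TARGET_PAGE_MASK));
    int size2 = size - size1;

    assert(size1 == 0x20);    /* up to the page boundary at 0x10010000 */
    assert(size2 == 0xe0);    /* handled as a second access on the next page */
    return 0;
}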
static S390Access access_prepare(CPUS390XState *env, vaddr vaddr, int size,
MMUAccessType access_type, int mmu_idx,
uintptr_t ra)
{
S390Access access = {
.vaddr1 = vaddr,
.size1 = MIN(size, -(vaddr | TARGET_PAGE_MASK)),
.mmu_idx = mmu_idx,
};
g_assert(size > 0 && size <= 4096);
access.haddr1 = probe_access(env, access.vaddr1, access.size1, access_type,
mmu_idx, ra);
if (unlikely(access.size1 != size)) {
/* The access crosses page boundaries. */
access.vaddr2 = wrap_address(env, vaddr + access.size1);
access.size2 = size - access.size1;
access.haddr2 = probe_access(env, access.vaddr2, access.size2,
access_type, mmu_idx, ra);
}
return access;
S390Access ret;
int exc = access_prepare_nf(&ret, env, false, vaddr, size,
access_type, mmu_idx, ra);
assert(!exc);
return ret;
}
/* Helper to handle memset on a single page. */
@@ -845,8 +920,10 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
const int mmu_idx = cpu_mmu_index(env, false);
const bool f = extract64(r0, 11, 1);
const bool s = extract64(r0, 10, 1);
const bool cco = extract64(r0, 8, 1);
uintptr_t ra = GETPC();
S390Access srca, desta;
int exc;
if ((f && s) || extract64(r0, 12, 4)) {
tcg_s390_program_interrupt(env, PGM_SPECIFICATION, GETPC());
@@ -858,13 +935,26 @@ uint32_t HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
/*
* TODO:
* - Access key handling
* - CC-option with suppression of page-translation exceptions
* - Store r1/r2 register identifiers at real location 162
*/
srca = access_prepare(env, r2, TARGET_PAGE_SIZE, MMU_DATA_LOAD, mmu_idx,
ra);
desta = access_prepare(env, r1, TARGET_PAGE_SIZE, MMU_DATA_STORE, mmu_idx,
ra);
exc = access_prepare_nf(&srca, env, cco, r2, TARGET_PAGE_SIZE,
MMU_DATA_LOAD, mmu_idx, ra);
if (exc) {
return 2;
}
exc = access_prepare_nf(&desta, env, cco, r1, TARGET_PAGE_SIZE,
MMU_DATA_STORE, mmu_idx, ra);
if (exc) {
if (exc == PGM_PROTECTION) {
#if !defined(CONFIG_USER_ONLY)
stq_phys(env_cpu(env)->as,
env->psa + offsetof(LowCore, trans_exc_code),
env->tlb_fill_tec);
#endif
tcg_s390_program_interrupt(env, PGM_PROTECTION, ra);
}
return 1;
}
access_memmove(env, &desta, &srca, ra);
return 0; /* data moved */
}
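For reference, the condition codes returned by the helper above, plus a minimal self-contained sketch of how a consumer might branch on them; the enum and function names are invented for this sketch and are not part of QEMU.

/*
 * Condition codes set by HELPER(mvpg) above:
 *   0 - data moved
 *   1 - first operand (r1, destination) page not accessible, CCO set
 *   2 - second operand (r2, source) page not accessible, CCO set
 */
enum mvpg_action { MVPG_DONE, MVPG_FIX_DST, MVPG_FIX_SRC };

static enum mvpg_action mvpg_cc_to_action(int cc)
{
    switch (cc) {
    case 1:
        return MVPG_FIX_DST;    /* resolve the destination page, then retry */
    case 2:
        return MVPG_FIX_SRC;    /* resolve the source page, then retry */
    default:
        return MVPG_DONE;       /* cc 0: the page was copied */
    }
}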