accel/tlb: Add tlb_flush_range_by_mmuidx_all_cpus_synced()
Forward tlb_flush_page_bits_by_mmuidx_all_cpus_synced to
tlb_flush_range_by_mmuidx_all_cpus_synced passing TARGET_PAGE_SIZE.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Message-id: 20210509151618.2331764-7-f4bug@amsat.org
Message-Id: <20210508201640.1045808-1-richard.henderson@linaro.org>
[PMD: Split from bigger patch]
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Parent: 600b819f23
Commit: c13b27d826
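For context, a minimal, hypothetical caller-side sketch of what the new ranged, synced flush allows: invalidating a multi-page guest-virtual region with a single call instead of one synced per-page flush. It is not part of this commit; the wrapper name example_invalidate_64k and the 64 KiB length are assumptions for illustration, and the includes follow QEMU's usual convention for target-aware code. The diff below shows the actual implementation and the header declarations.

    /*
     * Hypothetical illustration only; assumes compilation inside the
     * QEMU source tree from target-aware code.
     */
    #include "qemu/osdep.h"
    #include "cpu.h"
    #include "exec/exec-all.h"

    /*
     * Invalidate a 64 KiB guest-virtual region in the TLBs of all vCPUs,
     * restricted to the MMU indexes in idxmap, and wait until every vCPU
     * has completed the flush before continuing.
     */
    static void example_invalidate_64k(CPUState *cs, target_ulong base,
                                       uint16_t idxmap, unsigned bits)
    {
        target_ulong len = 64 * 1024;   /* assumed region size for the example */

        /* New in this commit: one ranged, synced flush across all vCPUs. */
        tlb_flush_range_by_mmuidx_all_cpus_synced(cs, base, len, idxmap, bits);
    }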
accel/tcg/cputlb.c
@@ -887,16 +887,20 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                             idxmap, bits);
 }
 
-void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
-                                                   target_ulong addr,
-                                                   uint16_t idxmap,
-                                                   unsigned bits)
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits)
 {
     TLBFlushRangeData d, *p;
     CPUState *dst_cpu;
 
-    /* If all bits are significant, this devolves to tlb_flush_page. */
-    if (bits >= TARGET_LONG_BITS) {
+    /*
+     * If all bits are significant, and len is small,
+     * this devolves to tlb_flush_page.
+     */
+    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
         tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
         return;
     }
@@ -908,7 +912,7 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
 
     /* This should already be page aligned */
     d.addr = addr & TARGET_PAGE_MASK;
-    d.len = TARGET_PAGE_SIZE;
+    d.len = len;
     d.idxmap = idxmap;
     d.bits = bits;
 
@@ -926,6 +930,15 @@ void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               RUN_ON_CPU_HOST_PTR(p));
 }
 
+void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
+                                                   target_ulong addr,
+                                                   uint16_t idxmap,
+                                                   unsigned bits)
+{
+    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
+                                              idxmap, bits);
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
include/exec/exec-all.h
@@ -281,6 +281,11 @@ void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
 void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                         target_ulong len, uint16_t idxmap,
                                         unsigned bits);
+void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                               target_ulong addr,
+                                               target_ulong len,
+                                               uint16_t idxmap,
+                                               unsigned bits);
 
 /**
  * tlb_set_page_with_attrs:
@@ -397,6 +402,13 @@ static inline void tlb_flush_range_by_mmuidx_all_cpus(CPUState *cpu,
                                                        unsigned bits)
 {
 }
+static inline void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *cpu,
+                                                             target_ulong addr,
+                                                             target_long len,
+                                                             uint16_t idxmap,
+                                                             unsigned bits)
+{
+}
 #endif
 /**
  * probe_access: