migration: stable ram block ordering
This makes ram block ordering under migration stable, ordered by offset.
This is especially useful for migration to exec, for debugging.

Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
Tested-by: Jason Wang <jasowang@redhat.com>
commit b2e0a138e7
parent c924f36a30
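The mechanism is small: right before RAM is saved, the RAM block list is counted, copied into an array, sorted with qsort() by block offset, and relinked in that order (sort_ram_list() in the arch_init.c hunk below). As a standalone illustration only, here is the same compare-by-offset idea applied to a plain array; the simplified Block struct and the names and offsets are invented for the example, not taken from this patch.

/* Standalone sketch, not QEMU code: order blocks by offset with qsort(),
 * the same rule block_compar()/sort_ram_list() apply to the RAMBlock list.
 * The Block struct, the names and the offsets below are invented. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct {
    const char *idstr;  /* block name */
    uint64_t offset;    /* start of the block in ram_addr_t space */
    uint64_t length;
} Block;

/* Same ordering rule as the patch's block_compar(): compare offsets only
 * (the patch compares RAMBlock pointers, this sketch compares structs). */
static int block_compar(const void *a, const void *b)
{
    const Block *ab = a;
    const Block *bb = b;
    if (ab->offset < bb->offset) {
        return -1;
    } else if (ab->offset > bb->offset) {
        return 1;
    }
    return 0;
}

int main(void)
{
    /* Deliberately listed out of offset order, the way earlier lookups
     * might have left a reorderable list. */
    Block blocks[] = {
        { "vga.vram", 0x8000000, 0x800000 },
        { "pc.bios",  0x8800000, 0x20000 },
        { "pc.ram",   0x0,       0x8000000 },
    };
    size_t n = sizeof(blocks) / sizeof(blocks[0]);

    qsort(blocks, n, sizeof(blocks[0]), block_compar);

    /* After the sort the walk order is deterministic:
     * pc.ram, vga.vram, pc.bios. */
    for (size_t i = 0; i < n; i++) {
        printf("%-10s offset 0x%08" PRIx64 " len 0x%" PRIx64 "\n",
               blocks[i].idstr, blocks[i].offset, blocks[i].length);
    }
    return 0;
}

Because ordinary lookups through qemu_get_ram_ptr() can reorder the real list as a side effect (which is why the patch also adds qemu_safe_ram_ptr() below), sorting by offset before saving makes the enumeration order deterministic instead of dependent on the preceding access pattern.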
arch_init.c (35 lines changed)
@@ -23,6 +23,7 @@
  */
 #include <stdint.h>
 #include <stdarg.h>
+#include <stdlib.h>
 #ifndef _WIN32
 #include <sys/types.h>
 #include <sys/mman.h>
@@ -212,6 +213,39 @@ uint64_t ram_bytes_total(void)
     return total;
 }
 
+static int block_compar(const void *a, const void *b)
+{
+    RAMBlock * const *ablock = a;
+    RAMBlock * const *bblock = b;
+    if ((*ablock)->offset < (*bblock)->offset) {
+        return -1;
+    } else if ((*ablock)->offset > (*bblock)->offset) {
+        return 1;
+    }
+    return 0;
+}
+
+static void sort_ram_list(void)
+{
+    RAMBlock *block, *nblock, **blocks;
+    int n;
+    n = 0;
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        ++n;
+    }
+    blocks = qemu_malloc(n * sizeof *blocks);
+    n = 0;
+    QLIST_FOREACH_SAFE(block, &ram_list.blocks, next, nblock) {
+        blocks[n++] = block;
+        QLIST_REMOVE(block, next);
+    }
+    qsort(blocks, n, sizeof *blocks, block_compar);
+    while (--n >= 0) {
+        QLIST_INSERT_HEAD(&ram_list.blocks, blocks[n], next);
+    }
+    qemu_free(blocks);
+}
+
 int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
 {
     ram_addr_t addr;
@@ -234,6 +268,7 @@ int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
         bytes_transferred = 0;
         last_block = NULL;
         last_offset = 0;
+        sort_ram_list();
 
         /* Make sure all dirty bits are set */
         QLIST_FOREACH(block, &ram_list.blocks, next) {
@@ -46,6 +46,9 @@ ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
 void qemu_ram_free(ram_addr_t addr);
 /* This should only be used for ram local to a device. */
 void *qemu_get_ram_ptr(ram_addr_t addr);
+/* Same but slower, to use for migration, where the order of
+ * RAMBlocks must not change. */
+void *qemu_safe_ram_ptr(ram_addr_t addr);
 /* This should not be used by devices. */
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
 ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
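The comment above gives the contract: qemu_safe_ram_ptr() returns the same mapping as qemu_get_ram_ptr(), just slower, and never reorders the RAMBlock list, which is what migration needs while it walks that list. The sketch below is an illustration only, not the real qemu_get_ram_ptr(); it assumes the reordering being avoided is a move-to-front optimization, and contrasts that with the plain order-preserving scan the new helper performs (its actual implementation is in the exec.c hunk further down).

/* Illustrative sketch only, not the actual QEMU helpers. It assumes the
 * reordering done by the fast lookup is a move-to-front optimization. */
#include <stddef.h>
#include <stdint.h>

typedef struct Block Block;
struct Block {
    uint64_t offset;
    uint64_t length;
    uint8_t *host;
    Block *next;
};

/* Order-preserving lookup: a plain scan that leaves the list untouched,
 * like the qemu_safe_ram_ptr() added by this patch. */
uint8_t *lookup_stable(Block *head, uint64_t addr)
{
    for (Block *b = head; b != NULL; b = b->next) {
        if (addr - b->offset < b->length) {
            return b->host + (addr - b->offset);
        }
    }
    return NULL;
}

/* Move-to-front lookup: repeated hits on the same block stay cheap, but
 * the list order now depends on the access pattern, so two walks of the
 * list taken at different times can disagree. */
uint8_t *lookup_mru(Block **head, uint64_t addr)
{
    for (Block **pp = head; *pp != NULL; pp = &(*pp)->next) {
        Block *b = *pp;
        if (addr - b->offset < b->length) {
            *pp = b->next;    /* unlink the hit */
            b->next = *head;  /* relink it at the head */
            *head = b;
            return b->host + (addr - b->offset);
        }
    }
    return NULL;
}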
exec.c (24 lines changed)
@@ -2030,10 +2030,10 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
 
     /* we modify the TLB cache so that the dirty bit will be set again
        when accessing the range */
-    start1 = (unsigned long)qemu_get_ram_ptr(start);
+    start1 = (unsigned long)qemu_safe_ram_ptr(start);
     /* Chek that we don't span multiple blocks - this breaks the
        address comparisons below. */
-    if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+    if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
             != (end - 1) - start) {
         abort();
     }
@@ -2858,6 +2858,7 @@ ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
     new_block->length = size;
 
     QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
+    fprintf(stderr, "alloc ram %s len 0x%x\n", new_block->idstr, (int)new_block->length);
 
     ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
                                        last_ram_offset() >> TARGET_PAGE_BITS);
@@ -2931,6 +2932,25 @@ void *qemu_get_ram_ptr(ram_addr_t addr)
     return NULL;
 }
 
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+ */
+void *qemu_safe_ram_ptr(ram_addr_t addr)
+{
+    RAMBlock *block;
+
+    QLIST_FOREACH(block, &ram_list.blocks, next) {
+        if (addr - block->offset < block->length) {
+            return block->host + (addr - block->offset);
+        }
+    }
+
+    fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+    abort();
+
+    return NULL;
+}
+
 int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
 {
     RAMBlock *block;
@@ -162,7 +162,7 @@ static int kvm_set_user_memory_region(KVMState *s, KVMSlot *slot)
     mem.slot = slot->slot;
     mem.guest_phys_addr = slot->start_addr;
     mem.memory_size = slot->memory_size;
-    mem.userspace_addr = (unsigned long)qemu_get_ram_ptr(slot->phys_offset);
+    mem.userspace_addr = (unsigned long)qemu_safe_ram_ptr(slot->phys_offset);
     mem.flags = slot->flags;
     if (s->migration_log) {
         mem.flags |= KVM_MEM_LOG_DIRTY_PAGES;