Migration pull 2017-07-10

-----BEGIN PGP SIGNATURE-----
 
 iQIcBAABAgAGBQJZY7O5AAoJEAUWMx68W/3nBvMP/Rhj/ldyo9xiKX47VtYElFwI
 RHnzGFYzGCeaEseowESlG0mLXoqVtz/g1k2DKvu6TNJLfZKvgUVHa4gG/+n95STL
 dyjQyRLI08FIezKU2iR95dCecbWJGytze/K53bdXLi82rKcIrLFhfgjhfOl9XSvm
 Law+6UjZB0mi4YBxNqJqAnyTCUSdKfHpqrdECmkkJb9iDGvgO+lZVap+kU04RcTH
 awu3dK68R+pcKXjoo3YvXzoReRJnaxWl/QkcpqSISuel4UT9vX3xfbZ8EnCmxY1t
 F5CiyjDl4yFzcuThzCaSr8xk/W22nCyUa4jojhxXV9UllXrCiqya18eW0p5BBaO7
 /ymuXlOEFOIkZBOmC/Iqh3gOc/wqGrpo6k0T+KJNFZeAPX500HghKgcpId697BoJ
 2AjNuY8t0ond3DLMOfYaIxCLt0q6deyFmPY+mqZzQoO4o5ZcOrebkdTE1feYj+Fg
 RZykuElVrcrW+e1sqnFZMrcTF7PJ14sIH1jq/Ln92q34sQEIm+JHb0rdgtBUTmPO
 Fi8umtX7Wim9hurcpRnGaWgm9kimVtWNUuJbgbP5gI7NHku9cOV4VujyqYpsx1rr
 d2Q8l1UvjUNGDSyG3K10KP2eNIc725dRI4GXhNw0X3gqQuJibGBjXP01VWyFkTvR
 Z8H2Dfb8/c7VFzoPjmm+
 =KZXr
 -----END PGP SIGNATURE-----

Merge remote-tracking branch 'remotes/dgilbert/tags/pull-migration-20170710a' into staging

Migration pull 2017-07-10

# gpg: Signature made Mon 10 Jul 2017 18:04:57 BST
# gpg:                using RSA key 0x0516331EBC5BFDE7
# gpg: Good signature from "Dr. David Alan Gilbert (RH2) <dgilbert@redhat.com>"
# gpg: WARNING: This key is not certified with a trusted signature!
# gpg:          There is no indication that the signature belongs to the owner.
# Primary key fingerprint: 45F5 C71B 4A0C B7FB 977A  9FA9 0516 331E BC5B FDE7

* remotes/dgilbert/tags/pull-migration-20170710a:
  migration: Make compression_threads use save/load_setup/cleanup()
  migration: Convert ram to use new load_setup()/load_cleanup()
  migration: Create load_setup()/cleanup() methods
  migration: Rename cleanup() to save_cleanup()
  migration: Rename save_live_setup() to save_setup()
  doc: update TYPE_MIGRATION documents
  doc: add item for "-M enforce-config-section"
  vl: move global property, migrate init earlier
  migration: fix handling for --only-migratable

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Committer: Peter Maydell <peter.maydell@linaro.org>
Date:      2017-07-10 18:13:03 +01:00
Commit:    3d0bf8dfdf
13 changed files with 138 additions and 77 deletions
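As a quick orientation before the per-file diffs (an illustrative sketch only, not code from this commit): after this series a device's handler table looks roughly as below. The field names are taken from the include/migration/register.h hunk further down; the my_dev_* callbacks are placeholders, not real QEMU functions.

/*
 * Sketch of a SaveVMHandlers table after the renames in this series.
 * my_dev_* are hypothetical callbacks used only for illustration.
 */
static SaveVMHandlers savevm_my_dev_handlers = {
    .save_setup = my_dev_save_setup,            /* was .save_live_setup */
    .save_live_iterate = my_dev_save_iterate,
    .save_live_complete_precopy = my_dev_save_complete,
    .save_cleanup = my_dev_save_cleanup,        /* was .cleanup */
    .load_setup = my_dev_load_setup,            /* new: incoming-side setup */
    .load_cleanup = my_dev_load_cleanup,        /* new: incoming-side cleanup */
    .load_state = my_dev_load,
};

On the outgoing side, qemu_savevm_state_setup() (formerly qemu_savevm_state_begin()) calls save_setup for each registered handler; on the incoming side, the new qemu_loadvm_state_setup() and qemu_loadvm_state_cleanup() call load_setup and load_cleanup, as the migration/savevm.c hunks below show.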


@@ -1945,7 +1945,7 @@ static int htab_load(QEMUFile *f, void *opaque, int version_id)
     return 0;
 }
 
-static void htab_cleanup(void *opaque)
+static void htab_save_cleanup(void *opaque)
 {
     sPAPRMachineState *spapr = opaque;
 
@@ -1953,10 +1953,10 @@ static void htab_cleanup(void *opaque)
 }
 
 static SaveVMHandlers savevm_htab_handlers = {
-    .save_live_setup = htab_save_setup,
+    .save_setup = htab_save_setup,
     .save_live_iterate = htab_save_iterate,
     .save_live_complete_precopy = htab_save_complete,
-    .cleanup = htab_cleanup,
+    .save_cleanup = htab_save_cleanup,
     .load_state = htab_load,
 };


@@ -53,7 +53,6 @@ bool migration_has_finished(MigrationState *);
 bool migration_has_failed(MigrationState *);
 /* ...and after the device transmission */
 bool migration_in_postcopy_after_devices(MigrationState *);
-void migration_only_migratable_set(void);
 void migration_global_dump(Monitor *mon);
 
 #endif


@@ -18,7 +18,7 @@ typedef struct SaveVMHandlers {
     /* This runs inside the iothread lock. */
     SaveStateHandler *save_state;
 
-    void (*cleanup)(void *opaque);
+    void (*save_cleanup)(void *opaque);
     int (*save_live_complete_postcopy)(QEMUFile *f, void *opaque);
     int (*save_live_complete_precopy)(QEMUFile *f, void *opaque);
@@ -33,12 +33,14 @@ typedef struct SaveVMHandlers {
     int (*save_live_iterate)(QEMUFile *f, void *opaque);
 
     /* This runs outside the iothread lock! */
-    int (*save_live_setup)(QEMUFile *f, void *opaque);
+    int (*save_setup)(QEMUFile *f, void *opaque);
     void (*save_live_pending)(QEMUFile *f, void *opaque,
                               uint64_t threshold_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending);
     LoadStateHandler *load_state;
+    int (*load_setup)(QEMUFile *f, void *opaque);
+    int (*load_cleanup)(void *opaque);
 } SaveVMHandlers;
 
 int register_savevm_live(DeviceState *dev,


@@ -1008,12 +1008,12 @@ static bool block_is_active(void *opaque)
 }
 
 static SaveVMHandlers savevm_block_handlers = {
-    .save_live_setup = block_save_setup,
+    .save_setup = block_save_setup,
     .save_live_iterate = block_save_iterate,
     .save_live_complete_precopy = block_save_complete,
     .save_live_pending = block_save_pending,
     .load_state = block_load,
-    .cleanup = block_migration_cleanup,
+    .save_cleanup = block_migration_cleanup,
     .is_active = block_is_active,
 };


@@ -350,7 +350,7 @@ static int colo_do_checkpoint_transaction(MigrationState *s,
     /* Disable block migration */
     migrate_set_block_enabled(false, &local_err);
     qemu_savevm_state_header(fb);
-    qemu_savevm_state_begin(fb);
+    qemu_savevm_state_setup(fb);
     qemu_mutex_lock_iothread();
     qemu_savevm_state_complete_precopy(fb, false, false);
     qemu_mutex_unlock_iothread();


@@ -128,11 +128,6 @@ MigrationState *migrate_get_current(void)
     return current_migration;
 }
 
-void migration_only_migratable_set(void)
-{
-    migrate_get_current()->only_migratable = true;
-}
-
 MigrationIncomingState *migration_incoming_get_current(void)
 {
     static bool once;
@@ -291,7 +286,6 @@ static void process_incoming_migration_bh(void *opaque)
     } else {
         runstate_set(global_state_get_runstate());
     }
-    migrate_decompress_threads_join();
     /*
      * This must happen after any state changes since as soon as an external
      * observer sees this event they might start to prod at the VM assuming
@@ -354,12 +348,8 @@ static void process_incoming_migration_co(void *opaque)
         migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                           MIGRATION_STATUS_FAILED);
         error_report("load of migration failed: %s", strerror(-ret));
-        migrate_decompress_threads_join();
         exit(EXIT_FAILURE);
     }
-
-    free_xbzrle_decoded_buf();
-
     mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
     qemu_bh_schedule(mis->bh);
 }
@@ -368,7 +358,6 @@ void migration_fd_process_incoming(QEMUFile *f)
 {
     Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, f);
 
-    migrate_decompress_threads_create();
     qemu_file_set_blocking(f, false);
     qemu_coroutine_enter(co);
 }
@@ -835,7 +824,6 @@ static void migrate_fd_cleanup(void *opaque)
         }
 
         qemu_mutex_lock_iothread();
-        migrate_compress_threads_join();
         qemu_fclose(s->to_dst_file);
         s->to_dst_file = NULL;
     }
@@ -1840,7 +1828,7 @@ static void *migration_thread(void *opaque)
         qemu_savevm_send_postcopy_advise(s->to_dst_file);
     }
 
-    qemu_savevm_state_begin(s->to_dst_file);
+    qemu_savevm_state_setup(s->to_dst_file);
 
     s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
     migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
@@ -1998,7 +1986,6 @@ void migrate_fd_connect(MigrationState *s)
         }
     }
 
-    migrate_compress_threads_create();
     qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
                        QEMU_THREAD_JOINABLE);
     s->migration_thread_running = true;
@@ -2057,12 +2044,12 @@ static void migration_instance_init(Object *obj)
 static const TypeInfo migration_type = {
     .name = TYPE_MIGRATION,
     /*
-     * NOTE: "migration" itself is not really a device. We used
-     * TYPE_DEVICE here only to leverage some existing QDev features
-     * like "-global" properties, and HW_COMPAT_* fields (which are
-     * finally applied as global properties as well). If one day the
-     * global property feature can be migrated from QDev to QObject in
-     * general, then we can switch to QObject as well.
+     * NOTE: TYPE_MIGRATION is not really a device, as the object is
+     * not created using qdev_create(), it is not attached to the qdev
+     * device tree, and it is never realized.
+     *
+     * TODO: Make this TYPE_OBJECT once QOM provides something like
+     * TYPE_DEVICE's "-global" properties.
      */
     .parent = TYPE_DEVICE,
     .class_init = migration_class_init,


@@ -85,11 +85,10 @@ static struct {
     QemuMutex lock;
     /* it will store a page full of zeros */
     uint8_t *zero_target_page;
+    /* buffer used for XBZRLE decoding */
+    uint8_t *decoded_buf;
 } XBZRLE;
 
-/* buffer used for XBZRLE decoding */
-static uint8_t *xbzrle_decoded_buf;
-
 static void XBZRLE_cache_lock(void)
 {
     if (migrate_use_xbzrle())
@@ -307,7 +306,7 @@ static inline void terminate_compression_threads(void)
     }
 }
 
-void migrate_compress_threads_join(void)
+static void compress_threads_save_cleanup(void)
 {
     int i, thread_count;
 
@@ -330,7 +329,7 @@ void migrate_compress_threads_join(void)
     comp_param = NULL;
 }
 
-void migrate_compress_threads_create(void)
+static void compress_threads_save_setup(void)
 {
     int i, thread_count;
 
@@ -1350,13 +1349,18 @@ uint64_t ram_bytes_total(void)
     return total;
 }
 
-void free_xbzrle_decoded_buf(void)
+static void xbzrle_load_setup(void)
 {
-    g_free(xbzrle_decoded_buf);
-    xbzrle_decoded_buf = NULL;
+    XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
 }
 
-static void ram_migration_cleanup(void *opaque)
+static void xbzrle_load_cleanup(void)
+{
+    g_free(XBZRLE.decoded_buf);
+    XBZRLE.decoded_buf = NULL;
+}
+
+static void ram_save_cleanup(void *opaque)
 {
     RAMState **rsp = opaque;
     RAMBlock *block;
@@ -1386,6 +1390,7 @@ static void ram_migration_cleanup(void *opaque)
     }
     XBZRLE_cache_unlock();
     migration_page_queue_free(*rsp);
+    compress_threads_save_cleanup();
     g_free(*rsp);
     *rsp = NULL;
 }
@@ -1919,6 +1924,7 @@ static int ram_save_setup(QEMUFile *f, void *opaque)
     }
 
     rcu_read_unlock();
+    compress_threads_save_setup();
 
     ram_control_before_iterate(f, RAM_CONTROL_SETUP);
     ram_control_after_iterate(f, RAM_CONTROL_SETUP);
@@ -2078,11 +2084,6 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
     int xh_flags;
     uint8_t *loaded_data;
 
-    if (!xbzrle_decoded_buf) {
-        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
-    }
-    loaded_data = xbzrle_decoded_buf;
-
     /* extract RLE header */
     xh_flags = qemu_get_byte(f);
     xh_len = qemu_get_be16(f);
@@ -2096,7 +2097,9 @@ static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
         error_report("Failed to load XBZRLE page - len overflow!");
         return -1;
     }
+    loaded_data = XBZRLE.decoded_buf;
     /* load data and decode */
+    /* it can change loaded_data to point to an internal buffer */
     qemu_get_buffer_in_place(f, &loaded_data, xh_len);
 
     /* decode RLE */
@@ -2230,7 +2233,7 @@ static void wait_for_decompress_done(void)
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
-void migrate_decompress_threads_create(void)
+static void compress_threads_load_setup(void)
 {
     int i, thread_count;
 
@@ -2254,7 +2257,7 @@ void migrate_decompress_threads_create(void)
     }
 }
 
-void migrate_decompress_threads_join(void)
+static void compress_threads_load_cleanup(void)
 {
     int i, thread_count;
 
@@ -2309,6 +2312,28 @@ static void decompress_data_with_multi_threads(QEMUFile *f,
     qemu_mutex_unlock(&decomp_done_lock);
 }
 
+/**
+ * ram_load_setup: Setup RAM for migration incoming side
+ *
+ * Returns zero to indicate success and negative for error
+ *
+ * @f: QEMUFile where to receive the data
+ * @opaque: RAMState pointer
+ */
+static int ram_load_setup(QEMUFile *f, void *opaque)
+{
+    xbzrle_load_setup();
+    compress_threads_load_setup();
+
+    return 0;
+}
+
+static int ram_load_cleanup(void *opaque)
+{
+    xbzrle_load_cleanup();
+    compress_threads_load_cleanup();
+
+    return 0;
+}
+
 /**
  * ram_postcopy_incoming_init: allocate postcopy data structures
  *
@@ -2623,13 +2648,15 @@ static int ram_load(QEMUFile *f, void *opaque, int version_id)
 }
 
 static SaveVMHandlers savevm_ram_handlers = {
-    .save_live_setup = ram_save_setup,
+    .save_setup = ram_save_setup,
     .save_live_iterate = ram_save_iterate,
     .save_live_complete_postcopy = ram_save_complete,
     .save_live_complete_precopy = ram_save_complete,
     .save_live_pending = ram_save_pending,
     .load_state = ram_load,
-    .cleanup = ram_migration_cleanup,
+    .save_cleanup = ram_save_cleanup,
+    .load_setup = ram_load_setup,
+    .load_cleanup = ram_load_cleanup,
 };
 
 void ram_mig_init(void)


@@ -39,15 +39,9 @@ int64_t xbzrle_cache_resize(int64_t new_size);
 uint64_t ram_bytes_remaining(void);
 uint64_t ram_bytes_total(void);
 
-void migrate_compress_threads_create(void);
-void migrate_compress_threads_join(void);
-void migrate_decompress_threads_create(void);
-void migrate_decompress_threads_join(void);
-
 uint64_t ram_pagesize_summary(void);
 int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len);
 void acct_update_position(QEMUFile *f, size_t size, bool zero);
-void free_xbzrle_decoded_buf(void);
 void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
                            unsigned long pages);
 void ram_postcopy_migrated_memory_release(MigrationState *ms);


@@ -596,7 +596,7 @@ int register_savevm_live(DeviceState *dev,
     se->opaque = opaque;
     se->vmsd = NULL;
     /* if this is a live_savem then set is_ram */
-    if (ops->save_live_setup != NULL) {
+    if (ops->save_setup != NULL) {
         se->is_ram = 1;
     }
@@ -955,14 +955,14 @@ void qemu_savevm_state_header(QEMUFile *f)
     }
 }
 
-void qemu_savevm_state_begin(QEMUFile *f)
+void qemu_savevm_state_setup(QEMUFile *f)
 {
     SaveStateEntry *se;
     int ret;
 
-    trace_savevm_state_begin();
+    trace_savevm_state_setup();
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
-        if (!se->ops || !se->ops->save_live_setup) {
+        if (!se->ops || !se->ops->save_setup) {
             continue;
         }
         if (se->ops && se->ops->is_active) {
@@ -972,7 +972,7 @@ void qemu_savevm_state_begin(QEMUFile *f)
         }
 
         save_section_header(f, se, QEMU_VM_SECTION_START);
-        ret = se->ops->save_live_setup(f, se->opaque);
+        ret = se->ops->save_setup(f, se->opaque);
         save_section_footer(f, se);
         if (ret < 0) {
             qemu_file_set_error(f, ret);
@@ -1215,8 +1215,8 @@ void qemu_savevm_state_cleanup(void)
 
     trace_savevm_state_cleanup();
     QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
-        if (se->ops && se->ops->cleanup) {
-            se->ops->cleanup(se->opaque);
+        if (se->ops && se->ops->save_cleanup) {
+            se->ops->save_cleanup(se->opaque);
         }
     }
 }
@@ -1241,7 +1241,7 @@ static int qemu_savevm_state(QEMUFile *f, Error **errp)
 
     qemu_mutex_unlock_iothread();
     qemu_savevm_state_header(f);
-    qemu_savevm_state_begin(f);
+    qemu_savevm_state_setup(f);
     qemu_mutex_lock_iothread();
 
     while (qemu_file_get_error(f) == 0) {
@@ -1541,7 +1541,7 @@ static void *postcopy_ram_listen_thread(void *opaque)
      * got a bad migration state).
      */
     migration_incoming_state_destroy();
-
+    qemu_loadvm_state_cleanup();
 
     return NULL;
 }
@@ -1901,6 +1901,44 @@ qemu_loadvm_section_part_end(QEMUFile *f, MigrationIncomingState *mis)
     return 0;
 }
 
+static int qemu_loadvm_state_setup(QEMUFile *f)
+{
+    SaveStateEntry *se;
+    int ret;
+
+    trace_loadvm_state_setup();
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+        if (!se->ops || !se->ops->load_setup) {
+            continue;
+        }
+        if (se->ops && se->ops->is_active) {
+            if (!se->ops->is_active(se->opaque)) {
+                continue;
+            }
+        }
+
+        ret = se->ops->load_setup(f, se->opaque);
+        if (ret < 0) {
+            qemu_file_set_error(f, ret);
+            error_report("Load state of device %s failed", se->idstr);
+            return ret;
+        }
+    }
+    return 0;
+}
+
+void qemu_loadvm_state_cleanup(void)
+{
+    SaveStateEntry *se;
+
+    trace_loadvm_state_cleanup();
+    QTAILQ_FOREACH(se, &savevm_state.handlers, entry) {
+        if (se->ops && se->ops->load_cleanup) {
+            se->ops->load_cleanup(se->opaque);
+        }
+    }
+}
+
 static int qemu_loadvm_state_main(QEMUFile *f, MigrationIncomingState *mis)
 {
     uint8_t section_type;
@@ -1973,6 +2011,10 @@ int qemu_loadvm_state(QEMUFile *f)
         return -ENOTSUP;
     }
 
+    if (qemu_loadvm_state_setup(f) != 0) {
+        return -EINVAL;
+    }
+
     if (migrate_get_current()->send_configuration) {
         if (qemu_get_byte(f) != QEMU_VM_CONFIGURATION) {
             error_report("Configuration section missing");
@@ -2036,6 +2078,7 @@ int qemu_loadvm_state(QEMUFile *f)
         }
     }
 
+    qemu_loadvm_state_cleanup();
     cpu_synchronize_all_post_init();
 
     return ret;


@@ -30,7 +30,7 @@
 #define QEMU_VM_SECTION_FOOTER 0x7e
 
 bool qemu_savevm_state_blocked(Error **errp);
-void qemu_savevm_state_begin(QEMUFile *f);
+void qemu_savevm_state_setup(QEMUFile *f);
 void qemu_savevm_state_header(QEMUFile *f);
 int qemu_savevm_state_iterate(QEMUFile *f, bool postcopy);
 void qemu_savevm_state_cleanup(void);
@@ -53,5 +53,6 @@ void qemu_savevm_send_postcopy_ram_discard(QEMUFile *f, const char *name,
                                            uint64_t *length_list);
 
 int qemu_loadvm_state(QEMUFile *f);
+void qemu_loadvm_state_cleanup(void);
 
 #endif


@@ -7,6 +7,8 @@ qemu_loadvm_state_section_partend(uint32_t section_id) "%u"
 qemu_loadvm_state_post_main(int ret) "%d"
 qemu_loadvm_state_section_startfull(uint32_t section_id, const char *idstr, uint32_t instance_id, uint32_t version_id) "%u(%s) %u %u"
 qemu_savevm_send_packaged(void) ""
+loadvm_state_setup(void) ""
+loadvm_state_cleanup(void) ""
 loadvm_handle_cmd_packaged(unsigned int length) "%u"
 loadvm_handle_cmd_packaged_main(int ret) "%d"
 loadvm_handle_cmd_packaged_received(int ret) "%d"
@@ -32,7 +34,7 @@ savevm_send_open_return_path(void) ""
 savevm_send_ping(uint32_t val) "%x"
 savevm_send_postcopy_listen(void) ""
 savevm_send_postcopy_run(void) ""
-savevm_state_begin(void) ""
+savevm_state_setup(void) ""
 savevm_state_header(void) ""
 savevm_state_iterate(void) ""
 savevm_state_cleanup(void) ""


@@ -85,6 +85,12 @@ Enables or disables NVDIMM support. The default is off.
 @item s390-squash-mcss=on|off
 Enables or disables squashing subchannels into the default css.
 The default is off.
+@item enforce-config-section=on|off
+If @option{enforce-config-section} is set to @var{on}, force migration
+code to send configuration section even if the machine-type sets the
+@option{migration.send-configuration} property to @var{off}.
+NOTE: this parameter is deprecated. Please use @option{-global}
+@option{migration.send-configuration}=@var{on|off} instead.
 @end table
 ETEXI
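As an illustrative example (not part of this commit): a command line that previously used the deprecated machine property, e.g. "-machine pc,enforce-config-section=on", would now be written with the documented global property instead, "-machine pc -global migration.send-configuration=on". The pc machine type is only a placeholder here.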

vl.c

@@ -3962,7 +3962,7 @@ int main(int argc, char **argv, char **envp)
              *
              * "-global migration.only-migratable=true"
              */
-            migration_only_migratable_set();
+            qemu_global_option("migration.only-migratable=true");
             break;
         case QEMU_OPTION_nodefaults:
             has_defaults = 0;
@@ -4418,6 +4418,18 @@ int main(int argc, char **argv, char **envp)
 
     configure_accelerator(current_machine);
 
+    /*
+     * Register all the global properties, including accel properties,
+     * machine properties, and user-specified ones.
+     */
+    register_global_properties(current_machine);
+
+    /*
+     * Migration object can only be created after global properties
+     * are applied correctly.
+     */
+    migration_object_init();
+
     if (qtest_chrdev) {
         qtest_init(qtest_chrdev, qtest_log, &error_fatal);
     }
@@ -4601,18 +4613,6 @@ int main(int argc, char **argv, char **envp)
         exit (i == 1 ? 1 : 0);
     }
 
-    /*
-     * Register all the global properties, including accel properties,
-     * machine properties, and user-specified ones.
-     */
-    register_global_properties(current_machine);
-
-    /*
-     * Migration object can only be created after global properties
-     * are applied correctly.
-     */
-    migration_object_init();
-
     /* This checkpoint is required by replay to separate prior clock
        reading from the other reads, because timer polling functions query
        clock values from the log. */