/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
#include "qemu/osdep.h"
|
2016-03-20 17:16:19 +00:00
|
|
|
#include "qemu/cutils.h"
|
2015-03-17 17:29:20 +00:00
|
|
|
#include "qemu/error-report.h"
|
Include qemu/main-loop.h less
In my "build everything" tree, changing qemu/main-loop.h triggers a
recompile of some 5600 out of 6600 objects (not counting tests and
objects that don't depend on qemu/osdep.h). It includes block/aio.h,
which in turn includes qemu/event_notifier.h, qemu/notify.h,
qemu/processor.h, qemu/qsp.h, qemu/queue.h, qemu/thread-posix.h,
qemu/thread.h, qemu/timer.h, and a few more.
Include qemu/main-loop.h only where it's needed. Touching it now
recompiles only some 1700 objects. For block/aio.h and
qemu/event_notifier.h, these numbers drop from 5600 to 2800. For the
others, they shrink only slightly.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20190812052359.30071-21-armbru@redhat.com>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Tested-by: Philippe Mathieu-Daudé <philmd@redhat.com>
2019-08-12 05:23:50 +00:00
|
|
|
#include "qemu/main-loop.h"
|
2017-04-06 10:00:28 +00:00
|
|
|
#include "migration/blocker.h"
|
2017-04-05 13:54:10 +00:00
|
|
|
#include "exec.h"
|
2017-04-05 13:58:29 +00:00
|
|
|
#include "fd.h"
|
2017-04-05 15:40:11 +00:00
|
|
|
#include "socket.h"
|
2019-08-12 05:23:59 +00:00
|
|
|
#include "sysemu/runstate.h"
|
2019-08-12 05:23:57 +00:00
|
|
|
#include "sysemu/sysemu.h"
|
2020-06-29 09:35:03 +00:00
|
|
|
#include "sysemu/cpu-throttle.h"
|
2017-04-17 18:32:36 +00:00
|
|
|
#include "rdma.h"
|
2017-04-17 18:26:27 +00:00
|
|
|
#include "ram.h"
|
2017-04-24 16:53:30 +00:00
|
|
|
#include "migration/global_state.h"
|
2017-04-24 17:02:44 +00:00
|
|
|
#include "migration/misc.h"
|
2017-04-24 18:07:27 +00:00
|
|
|
#include "migration.h"
|
2017-04-20 12:48:46 +00:00
|
|
|
#include "savevm.h"
|
2017-04-17 17:34:36 +00:00
|
|
|
#include "qemu-file-channel.h"
|
2017-04-20 16:52:18 +00:00
|
|
|
#include "qemu-file.h"
|
2017-04-17 17:02:59 +00:00
|
|
|
#include "migration/vmstate.h"
|
2012-12-17 17:19:44 +00:00
|
|
|
#include "block/block.h"
|
2018-02-01 11:18:31 +00:00
|
|
|
#include "qapi/error.h"
|
2019-02-27 10:51:27 +00:00
|
|
|
#include "qapi/clone-visitor.h"
|
2020-08-20 15:07:23 +00:00
|
|
|
#include "qapi/qapi-visit-migration.h"
|
2019-02-27 10:51:27 +00:00
|
|
|
#include "qapi/qapi-visit-sockets.h"
|
2018-02-11 09:36:01 +00:00
|
|
|
#include "qapi/qapi-commands-migration.h"
|
|
|
|
#include "qapi/qapi-events-migration.h"
|
2015-03-17 16:22:46 +00:00
|
|
|
#include "qapi/qmp/qerror.h"
|
2018-02-01 11:18:36 +00:00
|
|
|
#include "qapi/qmp/qnull.h"
|
2015-07-09 06:55:38 +00:00
|
|
|
#include "qemu/rcu.h"
|
2017-04-21 12:31:22 +00:00
|
|
|
#include "block.h"
|
2017-04-20 11:12:24 +00:00
|
|
|
#include "postcopy-ram.h"
|
2012-07-23 03:45:29 +00:00
|
|
|
#include "qemu/thread.h"
|
2013-02-22 16:36:19 +00:00
|
|
|
#include "trace.h"
|
2017-04-24 18:50:19 +00:00
|
|
|
#include "exec/target_page.h"
|
2016-04-27 10:05:01 +00:00
|
|
|
#include "io/channel-buffer.h"
|
2016-10-27 06:42:52 +00:00
|
|
|
#include "migration/colo.h"
|
2017-06-27 04:10:18 +00:00
|
|
|
#include "hw/boards.h"
|
2019-08-12 05:23:51 +00:00
|
|
|
#include "hw/qdev-properties.h"
|
2017-06-27 04:10:19 +00:00
|
|
|
#include "monitor/monitor.h"
|
2019-02-27 13:24:05 +00:00
|
|
|
#include "net/announce.h"
|
2019-10-29 11:49:02 +00:00
|
|
|
#include "qemu/queue.h"
|
2020-01-22 15:16:07 +00:00
|
|
|
#include "multifd.h"
|
2008-11-11 16:46:33 +00:00
|
|
|
|
2015-09-08 17:12:37 +00:00
|
|
|

#define MAX_THROTTLE  (32 << 20)      /* Migration transfer speed throttling */

/* Amount of time to allocate to each "chunk" of bandwidth-throttled
 * data. */
#define BUFFER_DELAY     100
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)

/* Time in milliseconds we are allowed to stop the source,
 * for sending the last part */
#define DEFAULT_MIGRATE_SET_DOWNTIME 300

/* Maximum migrate downtime set to 2000 seconds */
#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)

/* Default compression thread count */
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
/* Default decompression thread count, usually decompression is at
 * least 4 times as fast as compression.*/
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
/* Define default autoconverge cpu throttle migration parameters */
#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99

/* Migration XBZRLE default cache size */
#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)

/* The delay time (in ms) between two COLO checkpoints */
#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
/* 0: means nocompress, 1: best speed, ... 9: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
/* 0: means nocompress, 1: best speed, ... 20: best compress ratio */
#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1

/* Background transfer rate for postcopy, 0 means unlimited, note
 * that page requests can still exceed this limit.
 */
#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0

/*
 * Parameters for self_announce_delay giving a stream of RARP/ARP
 * packets after migration.
 */
#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL  50
#define DEFAULT_MIGRATE_ANNOUNCE_MAX     550
#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS    5
#define DEFAULT_MIGRATE_ANNOUNCE_STEP    100

static NotifierList migration_state_notifiers =
    NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);

static bool deferred_incoming;

/* Messages sent on the return path from destination to source */
enum mig_rp_message_type {
    MIG_RP_MSG_INVALID = 0,  /* Must be 0 */
    MIG_RP_MSG_SHUT,         /* sibling will not send any more RP messages */
    MIG_RP_MSG_PONG,         /* Response to a PING; data (seq: be32 ) */

    MIG_RP_MSG_REQ_PAGES_ID, /* data (start: be64, len: be32, id: string) */
    MIG_RP_MSG_REQ_PAGES,    /* data (start: be64, len: be32) */
    MIG_RP_MSG_RECV_BITMAP,  /* send recved_bitmap back to source */
    MIG_RP_MSG_RESUME_ACK,   /* tell source that we are ready to resume */

    MIG_RP_MSG_MAX
};
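
/*
 * Note: these enum values are transmitted on the wire by
 * migrate_send_rp_message() below, so their numeric order is part of the
 * return-path protocol; new message types should be appended before
 * MIG_RP_MSG_MAX rather than inserted in the middle.
 */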

/* When we add fault tolerance, we could have several
   migrations at once.  For now we don't need to add
   dynamic creation of migration */

static MigrationState *current_migration;
static MigrationIncomingState *current_incoming;

static bool migration_object_check(MigrationState *ms, Error **errp);
static int migration_maybe_pause(MigrationState *s,
                                 int *current_active_state,
                                 int new_state);
static void migrate_fd_cancel(MigrationState *s);

void migration_object_init(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    Error *err = NULL;

    /* This can only be called once. */
    assert(!current_migration);
    current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));

    /*
     * Init the migrate incoming object as well no matter whether
     * we'll use it or not.
     */
    assert(!current_incoming);
    current_incoming = g_new0(MigrationIncomingState, 1);
    current_incoming->state = MIGRATION_STATUS_NONE;
    current_incoming->postcopy_remote_fds =
        g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);

    if (!migration_object_check(current_migration, &err)) {
        error_report_err(err);
        exit(1);
    }

    /*
     * We cannot really do this in migration_instance_init() since at
     * that time global properties are not yet applied, so this value
     * would definitely be replaced by something else.
     */
    if (ms->enforce_config_section) {
        current_migration->send_configuration = true;
    }
}

void migration_shutdown(void)
{
    /*
     * Cancel the current migration - that will (eventually)
     * stop the migration using this structure
     */
    migrate_fd_cancel(current_migration);
    object_unref(OBJECT(current_migration));

    /*
     * Cancel outgoing migration of dirty bitmaps.  It should
     * at least unref used block nodes.
     */
    dirty_bitmap_mig_cancel_outgoing();

    /*
     * Cancel incoming migration of dirty bitmaps.  Dirty bitmaps
     * are non-critical data, and their loss is never considered
     * serious.
     */
    dirty_bitmap_mig_cancel_incoming();
}

/* For outgoing */
MigrationState *migrate_get_current(void)
{
    /* This can only be called after the object has been created. */
    assert(current_migration);
    return current_migration;
}

MigrationIncomingState *migration_incoming_get_current(void)
{
    assert(current_incoming);
    return current_incoming;
}

void migration_incoming_state_destroy(void)
{
    struct MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->to_src_file) {
        /* Tell source that we are done */
        migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
        qemu_fclose(mis->to_src_file);
        mis->to_src_file = NULL;
    }

    if (mis->from_src_file) {
        qemu_fclose(mis->from_src_file);
        mis->from_src_file = NULL;
    }
    if (mis->postcopy_remote_fds) {
        g_array_free(mis->postcopy_remote_fds, TRUE);
        mis->postcopy_remote_fds = NULL;
    }

    if (mis->socket_address_list) {
        qapi_free_SocketAddressList(mis->socket_address_list);
        mis->socket_address_list = NULL;
    }

    qemu_event_destroy(&mis->main_thread_load_event);
    qemu_sem_destroy(&mis->postcopy_pause_sem_dst);
    qemu_sem_destroy(&mis->postcopy_pause_sem_fault);
    qemu_mutex_destroy(&mis->rp_mutex);
}

static void migrate_generate_event(int new_state)
{
    if (migrate_use_events()) {
        qapi_event_send_migration(new_state);
    }
}

static bool migrate_late_block_activate(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[
        MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
}

/*
 * Called on -incoming with a defer: uri.
 * The migration can be started later after any parameters have been
 * changed.
 */
static void deferred_incoming_migration(Error **errp)
{
    if (deferred_incoming) {
        error_setg(errp, "Incoming migration already deferred");
    }
    deferred_incoming = true;
}

/*
 * Send a message on the return channel back to the source
 * of the migration.
 */
static int migrate_send_rp_message(MigrationIncomingState *mis,
                                   enum mig_rp_message_type message_type,
                                   uint16_t len, void *data)
{
    int ret = 0;

    trace_migrate_send_rp_message((int)message_type, len);
    qemu_mutex_lock(&mis->rp_mutex);

    /*
     * It's possible that the file handle got lost due to network
     * failures.
     */
    if (!mis->to_src_file) {
        ret = -EIO;
        goto error;
    }

    qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
    qemu_put_be16(mis->to_src_file, len);
    qemu_put_buffer(mis->to_src_file, data, len);
    qemu_fflush(mis->to_src_file);

    /* It's possible that qemu file got error during sending */
    ret = qemu_file_get_error(mis->to_src_file);

error:
    qemu_mutex_unlock(&mis->rp_mutex);
    return ret;
}
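
/*
 * Wire framing used by migrate_send_rp_message(): a big-endian 16-bit
 * message type, a big-endian 16-bit payload length, then 'len' payload
 * bytes.  For example, a MIG_RP_MSG_PONG carrying the be32 value 1 is
 * sent as the eight-byte sequence 00 02 | 00 04 | 00 00 00 01.
 */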

/* Request one page from the source VM at the given start address.
 *   rb: the RAMBlock to request the page in
 *   Start: Address offset within the RB
 *   Len: Length in bytes required - must be a multiple of pagesize
 */
int migrate_send_rp_req_pages(MigrationIncomingState *mis, RAMBlock *rb,
                              ram_addr_t start)
{
    uint8_t bufc[12 + 1 + 255]; /* start (8), len (4), rbname up to 256 */
    size_t msglen = 12; /* start + len */
    size_t len = qemu_ram_pagesize(rb);
    enum mig_rp_message_type msg_type;
    const char *rbname;
    int rbname_len;

    *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
    *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);

    /*
     * We maintain the last ramblock that we requested for page.  Note that we
     * don't need locking because this function will only be called within the
     * postcopy ram fault thread.
     */
    if (rb != mis->last_rb) {
        mis->last_rb = rb;

        rbname = qemu_ram_get_idstr(rb);
        rbname_len = strlen(rbname);

        assert(rbname_len < 256);

        bufc[msglen++] = rbname_len;
        memcpy(bufc + msglen, rbname, rbname_len);
        msglen += rbname_len;
        msg_type = MIG_RP_MSG_REQ_PAGES_ID;
    } else {
        msg_type = MIG_RP_MSG_REQ_PAGES;
    }

    return migrate_send_rp_message(mis, msg_type, msglen, bufc);
}
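
/*
 * The payload built above is therefore 12 bytes (be64 start + be32 len)
 * for MIG_RP_MSG_REQ_PAGES, or 12 bytes plus a one-byte idstr length and
 * the idstr itself for MIG_RP_MSG_REQ_PAGES_ID, matching the layouts
 * documented next to enum mig_rp_message_type.
 */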

static bool migration_colo_enabled;
bool migration_incoming_colo_enabled(void)
{
    return migration_colo_enabled;
}

void migration_incoming_disable_colo(void)
{
    ram_block_discard_disable(false);
    migration_colo_enabled = false;
}

int migration_incoming_enable_colo(void)
{
    if (ram_block_discard_disable(true)) {
        error_report("COLO: cannot disable RAM discard");
        return -EBUSY;
    }
    migration_colo_enabled = true;
    return 0;
}

void migrate_add_address(SocketAddress *address)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    SocketAddressList *addrs;

    addrs = g_new0(SocketAddressList, 1);
    addrs->next = mis->socket_address_list;
    mis->socket_address_list = addrs;
    addrs->value = QAPI_CLONE(SocketAddress, address);
}

void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p = NULL;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p) ||
               strstart(uri, "unix:", NULL) ||
               strstart(uri, "vsock:", NULL)) {
        socket_start_incoming_migration(p ? p : uri, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
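
/*
 * The URIs accepted above are the ones given to the -incoming command line
 * option or the 'migrate_incoming' monitor command, e.g. "-incoming defer",
 * "-incoming tcp:0:4444" or "-incoming unix:/tmp/migrate.sock" (the port and
 * socket path here are only illustrative).
 */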

static void process_incoming_migration_bh(void *opaque)
{
    Error *local_err = NULL;
    MigrationIncomingState *mis = opaque;

    /* If capability late_block_activate is set:
     * Only fire up the block code now if we're going to restart the
     * VM, else 'cont' will do it.
     * This causes file locking to happen; so we don't want it to happen
     * unless we really are starting the VM.
     */
    if (!migrate_late_block_activate() ||
         (autostart && (!global_state_received() ||
            global_state_get_runstate() == RUN_STATE_RUNNING))) {
        /* Make sure all file formats flush their mutable metadata.
         * If we get an error here, just don't restart the VM yet. */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            local_err = NULL;
            autostart = false;
        }
    }

    /*
     * This must happen after all error conditions are dealt with and
     * we're sure the VM is going to be running on this host.
     */
    qemu_announce_self(&mis->announce_timer, migrate_announce_params());

    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
        autostart = false;
    }
    /* If global state section was not received or we are in running
       state, we need to obey autostart.  Any other state is set with
       runstate_set. */

    dirty_bitmap_mig_before_vm_start();

    if (!global_state_received() ||
        global_state_get_runstate() == RUN_STATE_RUNNING) {
        if (autostart) {
            vm_start();
        } else {
            runstate_set(RUN_STATE_PAUSED);
        }
    } else if (migration_incoming_colo_enabled()) {
        migration_incoming_disable_colo();
        vm_start();
    } else {
        runstate_set(global_state_get_runstate());
    }
    /*
     * This must happen after any state changes since as soon as an external
     * observer sees this event they might start to prod at the VM assuming
     * it's ready to use.
     */
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_COMPLETED);
    qemu_bh_delete(mis->bh);
    migration_incoming_state_destroy();
}

static void process_incoming_migration_co(void *opaque)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyState ps;
    int ret;
    Error *local_err = NULL;

    assert(mis->from_src_file);
    mis->migration_incoming_co = qemu_coroutine_self();
    mis->largest_page_size = qemu_ram_pagesize_largest();
    postcopy_state_set(POSTCOPY_INCOMING_NONE);
    migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
                      MIGRATION_STATUS_ACTIVE);
    ret = qemu_loadvm_state(mis->from_src_file);

    ps = postcopy_state_get();
    trace_process_incoming_migration_co_end(ret, ps);
    if (ps != POSTCOPY_INCOMING_NONE) {
        if (ps == POSTCOPY_INCOMING_ADVISE) {
            /*
             * Where a migration had postcopy enabled (and thus went to advise)
             * but managed to complete within the precopy period, we can use
             * the normal exit.
             */
            postcopy_ram_incoming_cleanup(mis);
        } else if (ret >= 0) {
            /*
             * Postcopy was started, cleanup should happen at the end of the
             * postcopy thread.
             */
            trace_process_incoming_migration_co_postcopy_end_main();
            return;
        }
        /* Else if something went wrong then just fall out of the normal exit */
    }

    /* we get COLO info, and know if we are in COLO mode */
    if (!ret && migration_incoming_colo_enabled()) {
        /* Make sure all file formats flush their mutable metadata */
        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
            goto fail;
        }

        qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
                           colo_process_incoming_thread, mis,
                           QEMU_THREAD_JOINABLE);
        mis->have_colo_incoming_thread = true;
        qemu_coroutine_yield();

        /* Wait checkpoint incoming thread exit before free resource */
        qemu_thread_join(&mis->colo_incoming_thread);
        /* We hold the global iothread lock, so it is safe here */
        colo_release_ram_cache();
    }

    if (ret < 0) {
        error_report("load of migration failed: %s", strerror(-ret));
        goto fail;
    }
    mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
    qemu_bh_schedule(mis->bh);
    mis->migration_incoming_co = NULL;
    return;
fail:
    local_err = NULL;
    migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
                      MIGRATION_STATUS_FAILED);
    qemu_fclose(mis->from_src_file);
    if (multifd_load_cleanup(&local_err) != 0) {
        error_report_err(local_err);
    }
    exit(EXIT_FAILURE);
}

/**
 * @migration_incoming_setup: Setup incoming migration
 *
 * Returns 0 for no error or 1 for error
 *
 * @f: file for main migration channel
 * @errp: where to put errors
 */
static int migration_incoming_setup(QEMUFile *f, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;

    if (multifd_load_setup(&local_err) != 0) {
        /* We haven't been able to create multifd threads
           nothing better to do */
        error_report_err(local_err);
        exit(EXIT_FAILURE);
    }

    if (!mis->from_src_file) {
        mis->from_src_file = f;
    }
    qemu_file_set_blocking(f, false);
    return 0;
}

void migration_incoming_process(void)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
    qemu_coroutine_enter(co);
}

/* Returns true if recovered from a paused migration, otherwise false */
static bool postcopy_try_recover(QEMUFile *f)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
        /* Resumed from a paused postcopy migration */

        mis->from_src_file = f;
        /* Postcopy has standalone thread to do vm load */
        qemu_file_set_blocking(f, true);

        /* Re-configure the return path */
        mis->to_src_file = qemu_file_get_return_path(f);

        migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
                          MIGRATION_STATUS_POSTCOPY_RECOVER);

        /*
         * Here, we only wake up the main loading thread (while the
         * fault thread will still be waiting), so that we can receive
         * commands from source now, and answer it if needed. The
         * fault thread will be woken up afterwards until we are sure
         * that source is ready to reply to page requests.
         */
        qemu_sem_post(&mis->postcopy_pause_sem_dst);
        return true;
    }

    return false;
}

void migration_fd_process_incoming(QEMUFile *f, Error **errp)
{
    Error *local_err = NULL;

    if (postcopy_try_recover(f)) {
        return;
    }

    if (migration_incoming_setup(f, &local_err)) {
        if (local_err) {
            error_propagate(errp, local_err);
        }
        return;
    }
    migration_incoming_process();
}

void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    Error *local_err = NULL;
    bool start_migration;

    if (!mis->from_src_file) {
        /* The first connection (multifd may have multiple) */
        QEMUFile *f = qemu_fopen_channel_input(ioc);

        /* If it's a recovery, we're done */
        if (postcopy_try_recover(f)) {
            return;
        }

        if (migration_incoming_setup(f, &local_err)) {
            if (local_err) {
                error_propagate(errp, local_err);
            }
            return;
        }

        /*
         * Common migration only needs one channel, so we can start
         * right now.  Multifd needs more than one channel, we wait.
         */
        start_migration = !migrate_use_multifd();
    } else {
        /* Multiple connections */
        assert(migrate_use_multifd());
        start_migration = multifd_recv_new_channel(ioc, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return;
        }
    }

    if (start_migration) {
        migration_incoming_process();
    }
}
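
/*
 * With multifd enabled, this function runs once per incoming connection: the
 * main channel is set up first and the load is only kicked off once
 * multifd_recv_new_channel() reports that enough channels have been
 * established (see also migration_has_all_channels() below).
 */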

/**
 * @migration_has_all_channels: We have received all channels that we need
 *
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */
bool migration_has_all_channels(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    bool all_channels;

    all_channels = multifd_recv_all_channels_created();

    return all_channels && mis->from_src_file != NULL;
}

/*
 * Send a 'SHUT' message on the return channel with the given value
 * to indicate that we've finished with the RP.  Non-0 value indicates
 * error.
 */
void migrate_send_rp_shut(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
}

/*
 * Send a 'PONG' message on the return channel with the given value
 * (normally in response to a 'PING')
 */
void migrate_send_rp_pong(MigrationIncomingState *mis,
                          uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
}

void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
                                 char *block_name)
{
    char buf[512];
    int len;
    int64_t res;

    /*
     * First, we send the header part. It contains only the len of
     * idstr, and the idstr itself.
     */
    len = strlen(block_name);
    buf[0] = len;
    memcpy(buf + 1, block_name, len);

    if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
        error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
                     __func__);
        return;
    }

    migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);

    /*
     * Next, we dump the received bitmap to the stream.
     *
     * TODO: currently we are safe since we are the only one that is
     * using the to_src_file handle (fault thread is still paused),
     * and it's ok even not taking the mutex.  However the best way is
     * to take the lock before sending the message header, and release
     * the lock after sending the bitmap.
     */
    qemu_mutex_lock(&mis->rp_mutex);
    res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
    qemu_mutex_unlock(&mis->rp_mutex);

    trace_migrate_send_rp_recv_bitmap(block_name, res);
}

void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
{
    uint32_t buf;

    buf = cpu_to_be32(value);
    migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
}

MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
        if (i == MIGRATION_CAPABILITY_BLOCK) {
            continue;
        }
#endif
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}

MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationParameters *params;
    MigrationState *s = migrate_get_current();

    /* TODO use QAPI_CLONE() instead of duplicating it inline */
    params = g_malloc0(sizeof(*params));
    params->has_compress_level = true;
    params->compress_level = s->parameters.compress_level;
    params->has_compress_threads = true;
    params->compress_threads = s->parameters.compress_threads;
    params->has_compress_wait_thread = true;
    params->compress_wait_thread = s->parameters.compress_wait_thread;
    params->has_decompress_threads = true;
    params->decompress_threads = s->parameters.decompress_threads;
    params->has_throttle_trigger_threshold = true;
    params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
    params->has_cpu_throttle_initial = true;
    params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
    params->has_cpu_throttle_increment = true;
    params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
    params->has_cpu_throttle_tailslow = true;
    params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
    params->has_tls_creds = true;
    params->tls_creds = g_strdup(s->parameters.tls_creds);
    params->has_tls_hostname = true;
    params->tls_hostname = g_strdup(s->parameters.tls_hostname);
    params->has_tls_authz = true;
    params->tls_authz = g_strdup(s->parameters.tls_authz ?
                                 s->parameters.tls_authz : "");
    params->has_max_bandwidth = true;
    params->max_bandwidth = s->parameters.max_bandwidth;
    params->has_downtime_limit = true;
    params->downtime_limit = s->parameters.downtime_limit;
    params->has_x_checkpoint_delay = true;
    params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
    params->has_block_incremental = true;
    params->block_incremental = s->parameters.block_incremental;
    params->has_multifd_channels = true;
    params->multifd_channels = s->parameters.multifd_channels;
    params->has_multifd_compression = true;
    params->multifd_compression = s->parameters.multifd_compression;
    params->has_multifd_zlib_level = true;
    params->multifd_zlib_level = s->parameters.multifd_zlib_level;
    params->has_multifd_zstd_level = true;
    params->multifd_zstd_level = s->parameters.multifd_zstd_level;
    params->has_xbzrle_cache_size = true;
    params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
    params->has_max_postcopy_bandwidth = true;
    params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
    params->has_max_cpu_throttle = true;
    params->max_cpu_throttle = s->parameters.max_cpu_throttle;
    params->has_announce_initial = true;
    params->announce_initial = s->parameters.announce_initial;
    params->has_announce_max = true;
    params->announce_max = s->parameters.announce_max;
    params->has_announce_rounds = true;
    params->announce_rounds = s->parameters.announce_rounds;
    params->has_announce_step = true;
    params->announce_step = s->parameters.announce_step;

    if (s->parameters.has_block_bitmap_mapping) {
        params->has_block_bitmap_mapping = true;
        params->block_bitmap_mapping =
            QAPI_CLONE(BitmapMigrationNodeAliasList,
                       s->parameters.block_bitmap_mapping);
    }

    return params;
}
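
/*
 * This is the handler behind the QMP command 'query-migrate-parameters'
 * (HMP 'info migrate_parameters'); every field is copied out with its
 * 'has_*' flag set, so the reply always reports each parameter.
 */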

AnnounceParameters *migrate_announce_params(void)
{
    static AnnounceParameters ap;

    MigrationState *s = migrate_get_current();

    ap.initial = s->parameters.announce_initial;
    ap.max = s->parameters.announce_max;
    ap.rounds = s->parameters.announce_rounds;
    ap.step = s->parameters.announce_step;

    return &ap;
}
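
/*
 * The values returned here feed qemu_announce_self() (see
 * process_incoming_migration_bh() above), which sends the stream of
 * RARP/ARP announcements after the guest starts running on the destination.
 */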

/*
 * Return true if we're already in the middle of a migration
 * (i.e. any of the active or setup states)
 */
bool migration_is_setup_or_active(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_COLO:
        return true;

    default:
        return false;

    }
}

bool migration_is_running(int state)
{
    switch (state) {
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
    case MIGRATION_STATUS_CANCELLING:
        return true;

    default:
        return false;

    }
}
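
/*
 * migration_is_running() differs from migration_is_setup_or_active() in that
 * it also treats MIGRATION_STATUS_CANCELLING as "running", while it does not
 * include MIGRATION_STATUS_COLO.
 */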

static void populate_time_info(MigrationInfo *info, MigrationState *s)
{
    info->has_status = true;
    info->has_setup_time = true;
    info->setup_time = s->setup_time;
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        info->has_total_time = true;
        info->total_time = s->total_time;
        info->has_downtime = true;
        info->downtime = s->downtime;
    } else {
        info->has_total_time = true;
        info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                           s->start_time;
        info->has_expected_downtime = true;
        info->expected_downtime = s->expected_downtime;
    }
}

static void populate_ram_info(MigrationInfo *info, MigrationState *s)
{
    info->has_ram = true;
    info->ram = g_malloc0(sizeof(*info->ram));
    info->ram->transferred = ram_counters.transferred;
    info->ram->total = ram_bytes_total();
    info->ram->duplicate = ram_counters.duplicate;
    /* legacy value.  It is not used anymore */
    info->ram->skipped = 0;
    info->ram->normal = ram_counters.normal;
    info->ram->normal_bytes = ram_counters.normal *
        qemu_target_page_size();
    info->ram->mbps = s->mbps;
    info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
    info->ram->postcopy_requests = ram_counters.postcopy_requests;
    info->ram->page_size = qemu_target_page_size();
    info->ram->multifd_bytes = ram_counters.multifd_bytes;
    info->ram->pages_per_second = s->pages_per_second;

    if (migrate_use_xbzrle()) {
        info->has_xbzrle_cache = true;
        info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
        info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
        info->xbzrle_cache->bytes = xbzrle_counters.bytes;
        info->xbzrle_cache->pages = xbzrle_counters.pages;
        info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
        info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
        info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
        info->xbzrle_cache->overflow = xbzrle_counters.overflow;
    }

    if (migrate_use_compression()) {
        info->has_compression = true;
        info->compression = g_malloc0(sizeof(*info->compression));
        info->compression->pages = compression_counters.pages;
        info->compression->busy = compression_counters.busy;
        info->compression->busy_rate = compression_counters.busy_rate;
        info->compression->compressed_size =
                                    compression_counters.compressed_size;
        info->compression->compression_rate =
                                    compression_counters.compression_rate;
    }

    if (cpu_throttle_active()) {
        info->has_cpu_throttle_percentage = true;
        info->cpu_throttle_percentage = cpu_throttle_get_percentage();
    }

    if (s->state != MIGRATION_STATUS_COMPLETED) {
        info->ram->remaining = ram_bytes_remaining();
        info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
    }
}

static void populate_disk_info(MigrationInfo *info)
{
    if (blk_mig_active()) {
        info->has_disk = true;
        info->disk = g_malloc0(sizeof(*info->disk));
        info->disk->transferred = blk_mig_bytes_transferred();
        info->disk->remaining = blk_mig_bytes_remaining();
        info->disk->total = blk_mig_bytes_total();
    }
}

static void fill_source_migration_info(MigrationInfo *info)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
        /* no migration has happened ever */
        /* do not overwrite destination migration status */
        return;
    case MIGRATION_STATUS_SETUP:
        info->has_status = true;
        info->has_total_time = false;
        break;
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        /* TODO add some postcopy stats */
        populate_time_info(info, s);
        populate_ram_info(info, s);
        populate_disk_info(info);
        break;
    case MIGRATION_STATUS_COLO:
        info->has_status = true;
        /* TODO: display COLO specific information (checkpoint info etc.) */
        break;
    case MIGRATION_STATUS_COMPLETED:
        populate_time_info(info, s);
        populate_ram_info(info, s);
        break;
    case MIGRATION_STATUS_FAILED:
        info->has_status = true;
        if (s->error) {
            info->has_error_desc = true;
            info->error_desc = g_strdup(error_get_pretty(s->error));
        }
        break;
    case MIGRATION_STATUS_CANCELLED:
        info->has_status = true;
        break;
    case MIGRATION_STATUS_WAIT_UNPLUG:
        info->has_status = true;
        break;
    }
    info->status = s->state;
}
|
|
|
|
|
2017-07-18 03:39:08 +00:00
|
|
|
/**
|
|
|
|
* @migrate_caps_check - check capability validity
|
|
|
|
*
|
|
|
|
* @cap_list: old capability list, array of bool
|
|
|
|
* @params: new capabilities to be applied soon
|
|
|
|
* @errp: set *errp with the reason if the check fails
|
|
|
|
*
|
|
|
|
* Returns true if check passed, otherwise false.
|
|
|
|
*/
|
|
|
|
static bool migrate_caps_check(bool *cap_list,
|
|
|
|
MigrationCapabilityStatusList *params,
|
|
|
|
Error **errp)
|
2012-08-06 18:42:48 +00:00
|
|
|
{
|
|
|
|
MigrationCapabilityStatusList *cap;
|
2017-07-18 03:39:08 +00:00
|
|
|
bool old_postcopy_cap;
|
2017-09-19 16:47:56 +00:00
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
2012-08-06 18:42:48 +00:00
|
|
|
|
2017-07-18 03:39:08 +00:00
|
|
|
old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];
|
2012-08-06 18:42:48 +00:00
|
|
|
|
|
|
|
for (cap = params; cap; cap = cap->next) {
|
2017-07-18 03:39:08 +00:00
|
|
|
cap_list[cap->value->capability] = cap->value->state;
|
|
|
|
}
|
|
|
|
|
2017-05-15 14:05:29 +00:00
|
|
|
#ifndef CONFIG_LIVE_BLOCK_MIGRATION
|
2017-07-18 03:39:08 +00:00
|
|
|
if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
|
|
|
|
error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
|
|
|
|
"block migration");
|
|
|
|
error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
|
|
|
|
return false;
|
2012-08-06 18:42:48 +00:00
|
|
|
}
|
2017-07-18 03:39:08 +00:00
|
|
|
#endif
|
2015-11-05 18:10:51 +00:00
|
|
|
|
2018-11-14 19:09:12 +00:00
|
|
|
#ifndef CONFIG_REPLICATION
|
|
|
|
if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
|
|
|
|
error_setg(errp, "QEMU compiled without replication module"
|
|
|
|
" can't enable COLO");
|
|
|
|
error_append_hint(errp, "Please enable replication before COLO.\n");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-07-18 03:39:08 +00:00
|
|
|
if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
|
2016-06-13 11:16:45 +00:00
|
|
|
/* This check is reasonably expensive, so only run it when the capability
|
|
|
|
* is being set for the first time; also, it's only the destination that needs
|
|
|
|
* special support.
|
|
|
|
*/
|
|
|
|
if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
|
2017-09-19 16:47:56 +00:00
|
|
|
!postcopy_ram_supported_by_host(mis)) {
|
2016-06-13 11:16:45 +00:00
|
|
|
/* postcopy_ram_supported_by_host will have emitted a more
|
|
|
|
* detailed message
|
|
|
|
*/
|
2017-07-18 03:39:08 +00:00
|
|
|
error_setg(errp, "Postcopy is not supported");
|
|
|
|
return false;
|
2016-06-13 11:16:45 +00:00
|
|
|
}
|
2019-02-15 17:45:45 +00:00
|
|
|
|
|
|
|
if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
|
|
|
|
error_setg(errp, "Postcopy is not compatible with ignore-shared");
|
|
|
|
return false;
|
|
|
|
}
|
2015-11-05 18:10:51 +00:00
|
|
|
}
|
2017-07-18 03:39:08 +00:00
|
|
|
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2018-03-22 18:17:27 +00:00
|
|
|
static void fill_destination_migration_info(MigrationInfo *info)
|
|
|
|
{
|
|
|
|
MigrationIncomingState *mis = migration_incoming_get_current();
|
|
|
|
|
2019-02-27 10:51:27 +00:00
|
|
|
if (mis->socket_address_list) {
|
|
|
|
info->has_socket_address = true;
|
|
|
|
info->socket_address =
|
|
|
|
QAPI_CLONE(SocketAddressList, mis->socket_address_list);
|
|
|
|
}
|
|
|
|
|
2018-03-22 18:17:27 +00:00
|
|
|
switch (mis->state) {
|
|
|
|
case MIGRATION_STATUS_NONE:
|
|
|
|
return;
|
|
|
|
case MIGRATION_STATUS_SETUP:
|
|
|
|
case MIGRATION_STATUS_CANCELLING:
|
|
|
|
case MIGRATION_STATUS_CANCELLED:
|
|
|
|
case MIGRATION_STATUS_ACTIVE:
|
|
|
|
case MIGRATION_STATUS_POSTCOPY_ACTIVE:
|
2018-07-10 09:18:56 +00:00
|
|
|
case MIGRATION_STATUS_POSTCOPY_PAUSED:
|
|
|
|
case MIGRATION_STATUS_POSTCOPY_RECOVER:
|
2018-03-22 18:17:27 +00:00
|
|
|
case MIGRATION_STATUS_FAILED:
|
|
|
|
case MIGRATION_STATUS_COLO:
|
|
|
|
info->has_status = true;
|
|
|
|
break;
|
|
|
|
case MIGRATION_STATUS_COMPLETED:
|
|
|
|
info->has_status = true;
|
|
|
|
fill_destination_postcopy_migration_info(info);
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
info->status = mis->state;
|
|
|
|
}
|
|
|
|
|
|
|
|
MigrationInfo *qmp_query_migrate(Error **errp)
|
|
|
|
{
|
|
|
|
MigrationInfo *info = g_malloc0(sizeof(*info));
|
|
|
|
|
|
|
|
fill_destination_migration_info(info);
|
|
|
|
fill_source_migration_info(info);
|
|
|
|
|
|
|
|
return info;
|
|
|
|
}
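For illustration only, a sketch of how a caller in monitor context might consume the merged result; this is assumed usage, not code from this file, and relies on error-desc being populated only for failed migrations as described above.
    /* Illustrative caller, not part of migration.c. */
    MigrationInfo *info = qmp_query_migrate(NULL);

    if (info->has_status && info->status == MIGRATION_STATUS_FAILED &&
        info->has_error_desc) {
        error_report("migration failed: %s", info->error_desc);
    }
    qapi_free_MigrationInfo(info);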
|
|
|
|
|
2017-07-18 03:39:08 +00:00
|
|
|
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
MigrationCapabilityStatusList *cap;
|
2018-03-05 09:49:38 +00:00
|
|
|
bool cap_list[MIGRATION_CAPABILITY__MAX];
|
2017-07-18 03:39:08 +00:00
|
|
|
|
2020-01-21 14:39:23 +00:00
|
|
|
if (migration_is_running(s->state)) {
|
2017-07-18 03:39:08 +00:00
|
|
|
error_setg(errp, QERR_MIGRATION_ACTIVE);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-03-05 09:49:38 +00:00
|
|
|
memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list));
|
|
|
|
if (!migrate_caps_check(cap_list, params, errp)) {
|
2017-07-18 03:39:08 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (cap = params; cap; cap = cap->next) {
|
|
|
|
s->enabled_capabilities[cap->value->capability] = cap->value->state;
|
|
|
|
}
|
2012-08-06 18:42:48 +00:00
|
|
|
}
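A minimal usage sketch (assuming the caller holds the BQL, as QMP handlers do): it builds a one-element capability list with the migrate_cap_add() helper defined later in this file and applies it through the same check-then-commit path as above.
    /* Sketch only: enable postcopy-ram programmatically, mirroring
     * migrate_set_block_enabled() further down in this file. */
    MigrationCapabilityStatusList *caps;
    Error *local_err = NULL;

    caps = migrate_cap_add(NULL, MIGRATION_CAPABILITY_POSTCOPY_RAM, true);
    qmp_migrate_set_capabilities(caps, &local_err);
    qapi_free_MigrationCapabilityStatusList(caps);
    if (local_err) {
        error_report_err(local_err);
    }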
|
|
|
|
|
2017-07-18 03:39:04 +00:00
|
|
|
/*
|
|
|
|
* Check whether the parameters are valid. Error will be put into errp
|
|
|
|
* (if provided). Return true if valid, otherwise false.
|
|
|
|
*/
|
|
|
|
static bool migrate_params_check(MigrationParameters *params, Error **errp)
|
2015-03-23 08:32:28 +00:00
|
|
|
{
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_compress_level &&
|
2017-12-01 12:08:38 +00:00
|
|
|
(params->compress_level > 9)) {
|
2015-03-17 10:54:50 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
|
|
|
|
"is invalid, it should be in the range of 0 to 9");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2017-12-01 12:08:38 +00:00
|
|
|
if (params->has_compress_threads && (params->compress_threads < 1)) {
|
2015-03-17 10:54:50 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"compress_threads",
|
|
|
|
"is invalid, it should be in the range of 1 to 255");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2017-12-01 12:08:38 +00:00
|
|
|
if (params->has_decompress_threads && (params->decompress_threads < 1)) {
|
2015-03-17 10:54:50 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"decompress_threads",
|
|
|
|
"is invalid, it should be in the range of 1 to 255");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2020-02-24 02:31:42 +00:00
|
|
|
if (params->has_throttle_trigger_threshold &&
|
|
|
|
(params->throttle_trigger_threshold < 1 ||
|
|
|
|
params->throttle_trigger_threshold > 100)) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"throttle_trigger_threshold",
|
|
|
|
"an integer in the range of 1 to 100");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_cpu_throttle_initial &&
|
|
|
|
(params->cpu_throttle_initial < 1 ||
|
|
|
|
params->cpu_throttle_initial > 99)) {
|
2015-09-08 17:12:34 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
2016-04-21 18:07:18 +00:00
|
|
|
"cpu_throttle_initial",
|
2015-09-08 17:12:34 +00:00
|
|
|
"an integer in the range of 1 to 99");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2015-09-08 17:12:34 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_cpu_throttle_increment &&
|
|
|
|
(params->cpu_throttle_increment < 1 ||
|
|
|
|
params->cpu_throttle_increment > 99)) {
|
2015-09-08 17:12:34 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
2016-04-21 18:07:18 +00:00
|
|
|
"cpu_throttle_increment",
|
2015-09-08 17:12:34 +00:00
|
|
|
"an integer in the range of 1 to 99");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2015-09-08 17:12:34 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2017-12-01 12:08:38 +00:00
|
|
|
if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
|
2020-03-31 08:22:05 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"max_bandwidth",
|
|
|
|
"an integer in the range of 0 to "stringify(SIZE_MAX)
|
|
|
|
" bytes/second");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2016-09-15 16:20:28 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2016-09-15 16:20:28 +00:00
|
|
|
if (params->has_downtime_limit &&
|
2017-12-01 12:08:38 +00:00
|
|
|
(params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
|
2020-03-31 08:22:05 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"downtime_limit",
|
|
|
|
"an integer in the range of 0 to "
|
2020-03-31 08:22:07 +00:00
|
|
|
stringify(MAX_MIGRATE_DOWNTIME)" ms");
|
2017-07-18 03:39:04 +00:00
|
|
|
return false;
|
2016-09-15 16:20:28 +00:00
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2017-12-01 12:08:38 +00:00
|
|
|
/* x_checkpoint_delay is now always positive */
|
|
|
|
|
2019-02-06 12:54:06 +00:00
|
|
|
if (params->has_multifd_channels && (params->multifd_channels < 1)) {
|
2016-01-15 07:56:17 +00:00
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"multifd_channels",
|
|
|
|
"is invalid, it should be in the range of 1 to 255");
|
|
|
|
return false;
|
|
|
|
}
|
2017-07-18 03:39:04 +00:00
|
|
|
|
2020-01-23 16:08:52 +00:00
|
|
|
if (params->has_multifd_zlib_level &&
|
|
|
|
(params->multifd_zlib_level > 9)) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
|
|
|
|
"is invalid, it should be in the range of 0 to 9");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2020-01-23 16:41:36 +00:00
|
|
|
if (params->has_multifd_zstd_level &&
|
|
|
|
(params->multifd_zstd_level > 20)) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
|
|
|
|
"is invalid, it should be in the range of 0 to 20");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-10-05 19:30:10 +00:00
|
|
|
if (params->has_xbzrle_cache_size &&
|
|
|
|
(params->xbzrle_cache_size < qemu_target_page_size() ||
|
|
|
|
!is_power_of_2(params->xbzrle_cache_size))) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"xbzrle_cache_size",
|
|
|
|
"is invalid, it should be bigger than target page size"
|
2020-03-20 14:32:16 +00:00
|
|
|
" and a power of 2");
|
2017-10-05 19:30:10 +00:00
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2018-08-01 13:00:20 +00:00
|
|
|
if (params->has_max_cpu_throttle &&
|
|
|
|
(params->max_cpu_throttle < params->cpu_throttle_initial ||
|
|
|
|
params->max_cpu_throttle > 99)) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"max_cpu_throttle",
|
|
|
|
"an integer in the range of cpu_throttle_initial to 99");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2019-02-27 13:24:06 +00:00
|
|
|
if (params->has_announce_initial &&
|
|
|
|
params->announce_initial > 100000) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"announce_initial",
|
|
|
|
"is invalid, it must be less than 100000 ms");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (params->has_announce_max &&
|
|
|
|
params->announce_max > 100000) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"announce_max",
|
|
|
|
"is invalid, it must be less than 100000 ms");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (params->has_announce_rounds &&
|
|
|
|
params->announce_rounds > 1000) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"announce_rounds",
|
|
|
|
"is invalid, it must be in the range of 0 to 1000");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
if (params->has_announce_step &&
|
|
|
|
(params->announce_step < 1 ||
|
|
|
|
params->announce_step > 10000)) {
|
|
|
|
error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
|
|
|
|
"announce_step",
|
|
|
|
"is invalid, it must be in the range of 1 to 10000 ms");
|
|
|
|
return false;
|
|
|
|
}
|
2020-08-20 15:07:23 +00:00
|
|
|
|
|
|
|
if (params->has_block_bitmap_mapping &&
|
|
|
|
!check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
|
|
|
|
error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-07-18 03:39:04 +00:00
|
|
|
return true;
|
|
|
|
}
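A new parameter would add one more clause inside migrate_params_check() following the same pattern; a purely hypothetical example (the name "foo_interval" and its bounds are invented for illustration):
    if (params->has_foo_interval &&
        (params->foo_interval < 1 || params->foo_interval > 1000)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "foo_interval",
                   "an integer in the range of 1 to 1000 ms");
        return false;
    }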
|
|
|
|
|
migration: Unshare MigrationParameters struct for now
Commit de63ab6 "migrate: Share common MigrationParameters struct"
reused MigrationParameters for the arguments of
migrate-set-parameters, with the following rationale:
It is rather verbose, and slightly error-prone, to repeat
the same set of parameters for input (migrate-set-parameters)
as for output (query-migrate-parameters), where the only
difference is whether the members are optional. We can just
document that the optional members will always be present
on output, and then share a common struct between both
commands. The next patch can then reduce the amount of
code needed on input.
I need to unshare them to correct a design flaw in a stupid, but
minimally invasive way, in the next commit. We can restore the
sharing when we redo that patch in a less stupid way. Add a suitable
TODO comment.
Note that I revert only the sharing part of commit de63ab6, not the
part that made the members of query-migrate-parameters' result
optional. The schema (and thus introspection) remains inaccurate for
query-migrate-parameters. If we decide not to restore the sharing, we
should revert that part, too.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
2017-07-18 11:42:11 +00:00
|
|
|
static void migrate_params_test_apply(MigrateSetParameters *params,
|
|
|
|
MigrationParameters *dest)
|
|
|
|
{
|
|
|
|
*dest = migrate_get_current()->parameters;
|
|
|
|
|
|
|
|
/* TODO use QAPI_CLONE() instead of duplicating it inline */
|
|
|
|
|
|
|
|
if (params->has_compress_level) {
|
|
|
|
dest->compress_level = params->compress_level;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_compress_threads) {
|
|
|
|
dest->compress_threads = params->compress_threads;
|
|
|
|
}
|
|
|
|
|
2018-08-21 08:10:20 +00:00
|
|
|
if (params->has_compress_wait_thread) {
|
|
|
|
dest->compress_wait_thread = params->compress_wait_thread;
|
|
|
|
}
|
|
|
|
|
2017-07-18 11:42:11 +00:00
|
|
|
if (params->has_decompress_threads) {
|
|
|
|
dest->decompress_threads = params->decompress_threads;
|
|
|
|
}
|
|
|
|
|
2020-02-24 02:31:42 +00:00
|
|
|
if (params->has_throttle_trigger_threshold) {
|
|
|
|
dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
|
|
|
|
}
|
|
|
|
|
2017-07-18 11:42:11 +00:00
|
|
|
if (params->has_cpu_throttle_initial) {
|
|
|
|
dest->cpu_throttle_initial = params->cpu_throttle_initial;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_cpu_throttle_increment) {
|
|
|
|
dest->cpu_throttle_increment = params->cpu_throttle_increment;
|
|
|
|
}
|
|
|
|
|
2020-04-13 10:15:08 +00:00
|
|
|
if (params->has_cpu_throttle_tailslow) {
|
|
|
|
dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
|
|
|
|
}
|
|
|
|
|
2017-07-18 11:42:11 +00:00
|
|
|
if (params->has_tls_creds) {
|
migration: Use JSON null instead of "" to reset parameter to default
migrate-set-parameters sets migration parameters according to its
arguments like this:
* Present means "set the parameter to this value"
* Absent means "leave the parameter unchanged"
* Except for parameters tls_creds and tls_hostname, "" means "reset
the parameter to its default value"
The first two are perfectly normal: presence of the parameter makes
the command do something.
The third one overloads the parameter with a second meaning. The
overloading is *implicit*, i.e. it's not visible in the types. Works
here, because "" is neither a valid TLS credentials ID, nor a valid
host name.
Pressing argument values the schema accepts, but are semantically
invalid, into service to mean "reset to default" is not general, as
suitable invalid values need not exist. I also find it ugly.
To clean this up, we could add a separate flag argument to ask for
"reset to default", or add a distinct value to @tls_creds and
@tls_hostname. This commit implements the latter: add JSON null to
the values of @tls_creds and @tls_hostname, deprecate "".
Because we're so close to the 2.10 freeze, implement it in the
stupidest way possible: have qmp_migrate_set_parameters() rewrite null
to "" before anything else can see the null. The proper way to do it
would be rewriting "" to null, but that requires fixing up code to
work with null. Add TODO comments for that.
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Reviewed-by: Daniel P. Berrange <berrange@redhat.com>
Reviewed-by: Eric Blake <eblake@redhat.com>
2017-07-18 12:42:04 +00:00
|
|
|
assert(params->tls_creds->type == QTYPE_QSTRING);
|
2020-07-09 08:28:25 +00:00
|
|
|
dest->tls_creds = params->tls_creds->u.s;
|
2017-07-18 11:42:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_tls_hostname) {
|
2017-07-18 12:42:04 +00:00
|
|
|
assert(params->tls_hostname->type == QTYPE_QSTRING);
|
2020-07-09 08:28:25 +00:00
|
|
|
dest->tls_hostname = params->tls_hostname->u.s;
|
2017-07-18 11:42:11 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_max_bandwidth) {
|
|
|
|
dest->max_bandwidth = params->max_bandwidth;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_downtime_limit) {
|
|
|
|
dest->downtime_limit = params->downtime_limit;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_x_checkpoint_delay) {
|
|
|
|
dest->x_checkpoint_delay = params->x_checkpoint_delay;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (params->has_block_incremental) {
|
|
|
|
dest->block_incremental = params->block_incremental;
|
|
|
|
}
|
2019-02-06 12:54:06 +00:00
|
|
|
if (params->has_multifd_channels) {
|
|
|
|
dest->multifd_channels = params->multifd_channels;
|
2017-10-09 16:07:56 +00:00
|
|
|
}
|
2019-01-16 09:35:55 +00:00
|
|
|
if (params->has_multifd_compression) {
|
|
|
|
dest->multifd_compression = params->multifd_compression;
|
|
|
|
}
|
2017-10-05 19:30:10 +00:00
|
|
|
if (params->has_xbzrle_cache_size) {
|
|
|
|
dest->xbzrle_cache_size = params->xbzrle_cache_size;
|
|
|
|
}
|
2018-06-13 10:26:40 +00:00
|
|
|
if (params->has_max_postcopy_bandwidth) {
|
|
|
|
dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
|
|
|
|
}
|
2018-08-01 13:00:20 +00:00
|
|
|
if (params->has_max_cpu_throttle) {
|
|
|
|
dest->max_cpu_throttle = params->max_cpu_throttle;
|
|
|
|
}
|
2019-02-27 13:24:06 +00:00
|
|
|
if (params->has_announce_initial) {
|
|
|
|
dest->announce_initial = params->announce_initial;
|
|
|
|
}
|
|
|
|
if (params->has_announce_max) {
|
|
|
|
dest->announce_max = params->announce_max;
|
|
|
|
}
|
|
|
|
if (params->has_announce_rounds) {
|
|
|
|
dest->announce_rounds = params->announce_rounds;
|
|
|
|
}
|
|
|
|
if (params->has_announce_step) {
|
|
|
|
dest->announce_step = params->announce_step;
|
|
|
|
}
|
2020-08-20 15:07:23 +00:00
|
|
|
|
|
|
|
if (params->has_block_bitmap_mapping) {
|
|
|
|
dest->has_block_bitmap_mapping = true;
|
|
|
|
dest->block_bitmap_mapping = params->block_bitmap_mapping;
|
|
|
|
}
|
2017-07-18 11:42:11 +00:00
|
|
|
}
|
|
|
|
|
2017-10-05 19:30:10 +00:00
|
|
|
static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
|
2017-07-18 03:39:04 +00:00
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
|
2017-07-18 10:57:38 +00:00
|
|
|
/* TODO use QAPI_CLONE() instead of duplicating it inline */
|
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_compress_level) {
|
|
|
|
s->parameters.compress_level = params->compress_level;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_compress_threads) {
|
|
|
|
s->parameters.compress_threads = params->compress_threads;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2018-08-21 08:10:20 +00:00
|
|
|
if (params->has_compress_wait_thread) {
|
|
|
|
s->parameters.compress_wait_thread = params->compress_wait_thread;
|
|
|
|
}
|
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_decompress_threads) {
|
|
|
|
s->parameters.decompress_threads = params->decompress_threads;
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2020-02-24 02:31:42 +00:00
|
|
|
if (params->has_throttle_trigger_threshold) {
|
|
|
|
s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
|
|
|
|
}
|
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_cpu_throttle_initial) {
|
|
|
|
s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
|
2015-09-08 17:12:34 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_cpu_throttle_increment) {
|
|
|
|
s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
|
2015-09-08 17:12:34 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2020-04-13 10:15:08 +00:00
|
|
|
if (params->has_cpu_throttle_tailslow) {
|
|
|
|
s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
|
|
|
|
}
|
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_tls_creds) {
|
2016-04-27 10:05:15 +00:00
|
|
|
g_free(s->parameters.tls_creds);
|
2017-07-18 12:42:04 +00:00
|
|
|
assert(params->tls_creds->type == QTYPE_QSTRING);
|
|
|
|
s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
|
2016-04-27 10:05:15 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2016-09-09 03:14:16 +00:00
|
|
|
if (params->has_tls_hostname) {
|
2016-04-27 10:05:15 +00:00
|
|
|
g_free(s->parameters.tls_hostname);
|
2017-07-18 12:42:04 +00:00
|
|
|
assert(params->tls_hostname->type == QTYPE_QSTRING);
|
|
|
|
s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
|
2016-04-27 10:05:15 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
migration: add support for a "tls-authz" migration parameter
The QEMU instance that runs as the server for the migration data
transport (ie the target QEMU) needs to be able to configure access
control so it can prevent unauthorized clients initiating an incoming
migration. This adds a new 'tls-authz' migration parameter that is used
to provide the QOM ID of a QAuthZ subclass instance that provides the
access control check. This is checked against the x509 certificate
obtained during the TLS handshake.
For example, when starting a QEMU for incoming migration, it is
possible to give an example identity of the source QEMU that is
intended to be connecting later:
$QEMU \
-monitor stdio \
-incoming defer \
...other args...
(qemu) object_add tls-creds-x509,id=tls0,dir=/home/berrange/qemutls,\
endpoint=server,verify-peer=yes \
(qemu) object_add authz-simple,id=auth0,identity=CN=laptop.example.com,,\
O=Example Org,,L=London,,ST=London,,C=GB \
(qemu) migrate_incoming tcp:localhost:9000
Reviewed-by: Juan Quintela <quintela@redhat.com>
Signed-off-by: Daniel P. Berrange <berrange@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2019-02-27 14:53:24 +00:00
|
|
|
if (params->has_tls_authz) {
|
|
|
|
g_free(s->parameters.tls_authz);
|
|
|
|
assert(params->tls_authz->type == QTYPE_QSTRING);
|
|
|
|
s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
|
|
|
|
}
|
|
|
|
|
2016-09-15 16:20:28 +00:00
|
|
|
if (params->has_max_bandwidth) {
|
|
|
|
s->parameters.max_bandwidth = params->max_bandwidth;
|
2019-03-08 10:12:10 +00:00
|
|
|
if (s->to_dst_file && !migration_in_postcopy()) {
|
2016-09-15 16:20:28 +00:00
|
|
|
qemu_file_set_rate_limit(s->to_dst_file,
|
|
|
|
s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
|
|
|
|
}
|
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2016-09-15 16:20:28 +00:00
|
|
|
if (params->has_downtime_limit) {
|
|
|
|
s->parameters.downtime_limit = params->downtime_limit;
|
|
|
|
}
|
2016-10-27 06:43:01 +00:00
|
|
|
|
|
|
|
if (params->has_x_checkpoint_delay) {
|
|
|
|
s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
|
2017-01-17 12:57:42 +00:00
|
|
|
if (migration_in_colo_state()) {
|
|
|
|
colo_checkpoint_notify(s);
|
|
|
|
}
|
2016-10-27 06:43:01 +00:00
|
|
|
}
|
2017-07-18 03:39:05 +00:00
|
|
|
|
2017-04-05 16:32:37 +00:00
|
|
|
if (params->has_block_incremental) {
|
|
|
|
s->parameters.block_incremental = params->block_incremental;
|
|
|
|
}
|
2019-02-06 12:54:06 +00:00
|
|
|
if (params->has_multifd_channels) {
|
|
|
|
s->parameters.multifd_channels = params->multifd_channels;
|
2016-01-15 07:56:17 +00:00
|
|
|
}
|
2019-01-16 09:35:55 +00:00
|
|
|
if (params->has_multifd_compression) {
|
|
|
|
s->parameters.multifd_compression = params->multifd_compression;
|
|
|
|
}
|
2017-10-05 19:30:10 +00:00
|
|
|
if (params->has_xbzrle_cache_size) {
|
|
|
|
s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
|
|
|
|
xbzrle_cache_resize(params->xbzrle_cache_size, errp);
|
|
|
|
}
|
2018-06-13 10:26:40 +00:00
|
|
|
if (params->has_max_postcopy_bandwidth) {
|
|
|
|
s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
|
2019-03-08 10:12:10 +00:00
|
|
|
if (s->to_dst_file && migration_in_postcopy()) {
|
|
|
|
qemu_file_set_rate_limit(s->to_dst_file,
|
|
|
|
s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
|
|
|
|
}
|
2018-06-13 10:26:40 +00:00
|
|
|
}
|
2018-08-01 13:00:20 +00:00
|
|
|
if (params->has_max_cpu_throttle) {
|
|
|
|
s->parameters.max_cpu_throttle = params->max_cpu_throttle;
|
|
|
|
}
|
2019-02-27 13:24:06 +00:00
|
|
|
if (params->has_announce_initial) {
|
|
|
|
s->parameters.announce_initial = params->announce_initial;
|
|
|
|
}
|
|
|
|
if (params->has_announce_max) {
|
|
|
|
s->parameters.announce_max = params->announce_max;
|
|
|
|
}
|
|
|
|
if (params->has_announce_rounds) {
|
|
|
|
s->parameters.announce_rounds = params->announce_rounds;
|
|
|
|
}
|
|
|
|
if (params->has_announce_step) {
|
|
|
|
s->parameters.announce_step = params->announce_step;
|
|
|
|
}
|
2020-08-20 15:07:23 +00:00
|
|
|
|
|
|
|
if (params->has_block_bitmap_mapping) {
|
|
|
|
qapi_free_BitmapMigrationNodeAliasList(
|
|
|
|
s->parameters.block_bitmap_mapping);
|
|
|
|
|
|
|
|
s->parameters.has_block_bitmap_mapping = true;
|
|
|
|
s->parameters.block_bitmap_mapping =
|
|
|
|
QAPI_CLONE(BitmapMigrationNodeAliasList,
|
|
|
|
params->block_bitmap_mapping);
|
|
|
|
}
|
2015-03-23 08:32:28 +00:00
|
|
|
}
|
|
|
|
|
2017-07-18 11:42:11 +00:00
|
|
|
void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
|
2017-07-18 03:39:05 +00:00
|
|
|
{
|
2017-07-18 11:42:11 +00:00
|
|
|
MigrationParameters tmp;
|
|
|
|
|
2017-07-18 12:42:04 +00:00
|
|
|
/* TODO Rewrite "" to null instead */
|
|
|
|
if (params->has_tls_creds
|
|
|
|
&& params->tls_creds->type == QTYPE_QNULL) {
|
2018-04-19 15:01:43 +00:00
|
|
|
qobject_unref(params->tls_creds->u.n);
|
2017-07-18 12:42:04 +00:00
|
|
|
params->tls_creds->type = QTYPE_QSTRING;
|
|
|
|
params->tls_creds->u.s = strdup("");
|
|
|
|
}
|
|
|
|
/* TODO Rewrite "" to null instead */
|
|
|
|
if (params->has_tls_hostname
|
|
|
|
&& params->tls_hostname->type == QTYPE_QNULL) {
|
2018-04-19 15:01:43 +00:00
|
|
|
qobject_unref(params->tls_hostname->u.n);
|
2017-07-18 12:42:04 +00:00
|
|
|
params->tls_hostname->type = QTYPE_QSTRING;
|
|
|
|
params->tls_hostname->u.s = strdup("");
|
|
|
|
}
|
|
|
|
|
2017-07-18 11:42:11 +00:00
|
|
|
migrate_params_test_apply(params, &tmp);
|
|
|
|
|
|
|
|
if (!migrate_params_check(&tmp, errp)) {
|
2017-07-18 03:39:05 +00:00
|
|
|
/* Invalid parameter */
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2017-10-05 19:30:10 +00:00
|
|
|
migrate_params_apply(params, errp);
|
2017-07-18 03:39:05 +00:00
|
|
|
}
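As a caller-side sketch (assumed usage, with cleanup of the argument struct omitted for brevity), resetting tls-creds to its default goes through the QTYPE_QNULL path that is rewritten to "" above:
    MigrateSetParameters p = { 0 };

    p.has_tls_creds = true;
    p.tls_creds = g_new0(StrOrNull, 1);
    p.tls_creds->type = QTYPE_QNULL;
    p.tls_creds->u.n = qnull();
    qmp_migrate_set_parameters(&p, &error_abort);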
|
|
|
|
|
2016-04-27 10:05:14 +00:00
|
|
|
|
2015-11-05 18:10:56 +00:00
|
|
|
void qmp_migrate_start_postcopy(Error **errp)
|
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
|
2018-03-13 19:34:01 +00:00
|
|
|
if (!migrate_postcopy()) {
|
2015-11-12 11:34:44 +00:00
|
|
|
error_setg(errp, "Enable postcopy with migrate_set_capability before"
|
2015-11-05 18:10:56 +00:00
|
|
|
" the start of migration");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->state == MIGRATION_STATUS_NONE) {
|
|
|
|
error_setg(errp, "Postcopy must be started after migration has been"
|
|
|
|
" started");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
/*
|
|
|
|
* We don't error out if migration has already finished, since that
|
|
|
|
* would race with issuing this command.
|
|
|
|
*/
|
2020-09-23 10:56:46 +00:00
|
|
|
qatomic_set(&s->start_postcopy, true);
|
2015-11-05 18:10:56 +00:00
|
|
|
}
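Taken together with the capability check above, an illustrative HMP sequence would be (host and port are placeholders):
    (qemu) migrate_set_capability postcopy-ram on
    (qemu) migrate -d tcp:destination-host:4444
    (qemu) migrate_start_postcopy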
|
|
|
|
|
2008-11-11 16:46:33 +00:00
|
|
|
/* shared migration helpers */
|
|
|
|
|
2015-12-16 11:47:33 +00:00
|
|
|
void migrate_set_state(int *state, int old_state, int new_state)
|
2013-11-07 11:01:15 +00:00
|
|
|
{
|
2017-08-30 08:32:01 +00:00
|
|
|
assert(new_state < MIGRATION_STATUS__MAX);
|
2020-09-23 10:56:46 +00:00
|
|
|
if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
|
2017-08-30 08:32:01 +00:00
|
|
|
trace_migrate_set_state(MigrationStatus_str(new_state));
|
2015-07-07 12:44:05 +00:00
|
|
|
migrate_generate_event(new_state);
|
2013-11-07 11:01:15 +00:00
|
|
|
}
|
|
|
|
}
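Because the transition is a compare-and-swap, a call only takes effect if the state is still the expected old value; an illustrative call, as used elsewhere in this file:
    /* Moves to ACTIVE only if nothing (e.g. a concurrent cancel) has
     * already changed the state away from SETUP. */
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_ACTIVE);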
|
|
|
|
|
2017-07-18 03:39:09 +00:00
|
|
|
static MigrationCapabilityStatusList *migrate_cap_add(
|
|
|
|
MigrationCapabilityStatusList *list,
|
|
|
|
MigrationCapability index,
|
|
|
|
bool state)
|
2017-04-05 16:32:37 +00:00
|
|
|
{
|
|
|
|
MigrationCapabilityStatusList *cap;
|
|
|
|
|
|
|
|
cap = g_new0(MigrationCapabilityStatusList, 1);
|
|
|
|
cap->value = g_new0(MigrationCapabilityStatus, 1);
|
2017-07-18 03:39:09 +00:00
|
|
|
cap->value->capability = index;
|
|
|
|
cap->value->state = state;
|
|
|
|
cap->next = list;
|
|
|
|
|
|
|
|
return cap;
|
|
|
|
}
|
|
|
|
|
|
|
|
void migrate_set_block_enabled(bool value, Error **errp)
|
|
|
|
{
|
|
|
|
MigrationCapabilityStatusList *cap;
|
|
|
|
|
|
|
|
cap = migrate_cap_add(NULL, MIGRATION_CAPABILITY_BLOCK, value);
|
2017-04-05 16:32:37 +00:00
|
|
|
qmp_migrate_set_capabilities(cap, errp);
|
|
|
|
qapi_free_MigrationCapabilityStatusList(cap);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void migrate_set_block_incremental(MigrationState *s, bool value)
|
|
|
|
{
|
|
|
|
s->parameters.block_incremental = value;
|
|
|
|
}
|
|
|
|
|
|
|
|
static void block_cleanup_parameters(MigrationState *s)
|
|
|
|
{
|
|
|
|
if (s->must_remove_block_options) {
|
|
|
|
/* setting to false can never fail */
|
|
|
|
migrate_set_block_enabled(false, &error_abort);
|
|
|
|
migrate_set_block_incremental(s, false);
|
|
|
|
s->must_remove_block_options = false;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-04-08 11:33:43 +00:00
|
|
|
static void migrate_fd_cleanup(MigrationState *s)
|
2008-11-11 16:46:33 +00:00
|
|
|
{
|
2013-02-22 16:36:21 +00:00
|
|
|
qemu_bh_delete(s->cleanup_bh);
|
|
|
|
s->cleanup_bh = NULL;
|
|
|
|
|
2018-01-03 12:20:06 +00:00
|
|
|
qemu_savevm_state_cleanup();
|
|
|
|
|
2016-01-15 03:37:42 +00:00
|
|
|
if (s->to_dst_file) {
|
2018-05-02 10:47:38 +00:00
|
|
|
QEMUFile *tmp;
|
2016-01-14 15:52:55 +00:00
|
|
|
|
2014-03-10 23:42:29 +00:00
|
|
|
trace_migrate_fd_cleanup();
|
2013-02-22 16:36:46 +00:00
|
|
|
qemu_mutex_unlock_iothread();
|
2015-11-05 18:11:05 +00:00
|
|
|
if (s->migration_thread_running) {
|
|
|
|
qemu_thread_join(&s->thread);
|
|
|
|
s->migration_thread_running = false;
|
|
|
|
}
|
2013-02-22 16:36:46 +00:00
|
|
|
qemu_mutex_lock_iothread();
|
|
|
|
|
2019-01-13 14:08:47 +00:00
|
|
|
multifd_save_cleanup();
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_lock(&s->qemu_file_lock);
|
|
|
|
tmp = s->to_dst_file;
|
2016-01-15 03:37:42 +00:00
|
|
|
s->to_dst_file = NULL;
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_unlock(&s->qemu_file_lock);
|
|
|
|
/*
|
|
|
|
* Close the file handle without the lock to make sure the
|
|
|
|
* critical section won't block for long.
|
|
|
|
*/
|
|
|
|
qemu_fclose(tmp);
|
2008-11-11 16:46:33 +00:00
|
|
|
}
|
|
|
|
|
2019-07-17 00:53:41 +00:00
|
|
|
assert(!migration_is_active(s));
|
2013-02-22 16:36:09 +00:00
|
|
|
|
2015-11-02 07:37:00 +00:00
|
|
|
if (s->state == MIGRATION_STATUS_CANCELLING) {
|
2015-12-16 11:47:33 +00:00
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
|
2015-11-02 07:37:00 +00:00
|
|
|
MIGRATION_STATUS_CANCELLED);
|
2013-02-22 16:36:09 +00:00
|
|
|
}
|
2013-02-22 16:36:18 +00:00
|
|
|
|
2017-09-05 10:50:22 +00:00
|
|
|
if (s->error) {
|
|
|
|
/* The error is still referenced by 'info migrate', so we can't free it */
|
|
|
|
error_report_err(error_copy(s->error));
|
|
|
|
}
|
2013-02-22 16:36:18 +00:00
|
|
|
notifier_list_notify(&migration_state_notifiers, s);
|
2017-04-05 16:32:37 +00:00
|
|
|
block_cleanup_parameters(s);
|
2008-11-11 16:46:33 +00:00
|
|
|
}
|
|
|
|
|
2019-04-08 11:33:43 +00:00
|
|
|
static void migrate_fd_cleanup_schedule(MigrationState *s)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Take a reference on the state for the bottom half, because it may run when
|
|
|
|
* there are already no other references
|
|
|
|
*/
|
|
|
|
object_ref(OBJECT(s));
|
|
|
|
qemu_bh_schedule(s->cleanup_bh);
|
|
|
|
}
|
|
|
|
|
|
|
|
static void migrate_fd_cleanup_bh(void *opaque)
|
|
|
|
{
|
|
|
|
MigrationState *s = opaque;
|
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
object_unref(OBJECT(s));
|
|
|
|
}
|
|
|
|
|
2017-09-05 10:50:22 +00:00
|
|
|
void migrate_set_error(MigrationState *s, const Error *error)
|
|
|
|
{
|
2020-04-04 04:21:08 +00:00
|
|
|
QEMU_LOCK_GUARD(&s->error_mutex);
|
2017-09-05 10:50:22 +00:00
|
|
|
if (!s->error) {
|
|
|
|
s->error = error_copy(error);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
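/*
 * Record a fatal error for an outgoing migration that failed during setup.
 * The copy stored by migrate_set_error() stays in MigrationState.error and
 * is what query-migrate / "info migrate" later report as the failure reason.
 */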
void migrate_fd_error(MigrationState *s, const Error *error)
{
    trace_migrate_fd_error(error_get_pretty(error));
    assert(s->to_dst_file == NULL);
    migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                      MIGRATION_STATUS_FAILED);
    migrate_set_error(s, error);
}

static void migrate_fd_cancel(MigrationState *s)
{
    int old_state;
    QEMUFile *f = migrate_get_current()->to_dst_file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so that the rp thread shuts down */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    do {
        old_state = s->state;
        if (!migration_is_running(old_state)) {
            break;
        }
        /* If the migration is paused, kick it out of the pause */
        if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
            qemu_sem_post(&s->pause_sem);
        }
        migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
    if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
        Error *local_err = NULL;

        bdrv_invalidate_cache_all(&local_err);
        if (local_err) {
            error_report_err(local_err);
        } else {
            s->block_inactive = false;
        }
    }
}

void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}

void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}

bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}

bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}

bool migration_has_failed(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_CANCELLED ||
            s->state == MIGRATION_STATUS_FAILED);
}

bool migration_in_postcopy(void)
{
    MigrationState *s = migrate_get_current();

    switch (s->state) {
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_PAUSED:
    case MIGRATION_STATUS_POSTCOPY_RECOVER:
        return true;
    default:
        return false;
    }
}

bool migration_in_postcopy_after_devices(MigrationState *s)
{
    return migration_in_postcopy() && s->postcopy_after_devices;
}

bool migration_in_incoming_postcopy(void)
{
    PostcopyState ps = postcopy_state_get();

    return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
}

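/*
 * True when no migration object exists yet or the current one is in a
 * terminal state, i.e. it is safe to touch global migration configuration.
 * Used for instance by migrate_add_blocker() below.
 */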
bool migration_is_idle(void)
{
    MigrationState *s = current_migration;

    if (!s) {
        return true;
    }

    switch (s->state) {
    case MIGRATION_STATUS_NONE:
    case MIGRATION_STATUS_CANCELLED:
    case MIGRATION_STATUS_COMPLETED:
    case MIGRATION_STATUS_FAILED:
        return true;
    case MIGRATION_STATUS_SETUP:
    case MIGRATION_STATUS_CANCELLING:
    case MIGRATION_STATUS_ACTIVE:
    case MIGRATION_STATUS_POSTCOPY_ACTIVE:
    case MIGRATION_STATUS_COLO:
    case MIGRATION_STATUS_PRE_SWITCHOVER:
    case MIGRATION_STATUS_DEVICE:
    case MIGRATION_STATUS_WAIT_UNPLUG:
        return false;
    case MIGRATION_STATUS__MAX:
        g_assert_not_reached();
    }

    return false;
}

bool migration_is_active(MigrationState *s)
{
    return (s->state == MIGRATION_STATUS_ACTIVE ||
            s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
}

void migrate_init(MigrationState *s)
{
    /*
     * Reinitialise all migration state, except
     * parameters/capabilities that the user set, and
     * locks.
     */
    s->cleanup_bh = 0;
    s->to_dst_file = NULL;
    s->state = MIGRATION_STATUS_NONE;
    s->rp_state.from_dst_file = NULL;
    s->rp_state.error = false;
    s->mbps = 0.0;
    s->pages_per_second = 0.0;
    s->downtime = 0;
    s->expected_downtime = 0;
    s->setup_time = 0;
    s->start_postcopy = false;
    s->postcopy_after_devices = false;
    s->migration_thread_running = false;
    error_free(s->error);
    s->error = NULL;

    migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);

    s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    s->total_time = 0;
    s->vm_was_running = false;
    s->iteration_initial_bytes = 0;
    s->threshold_size = 0;
}

static GSList *migration_blockers;

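/*
 * Register @reason as a reason why this QEMU cannot be migrated.  Fails
 * when --only-migratable was given or when a migration is already running.
 *
 * For illustration only (the device and field names here are made up),
 * a device that cannot be migrated would typically do something like:
 *
 *     error_setg(&dev->migration_blocker, "mydev: state is not migratable");
 *     if (migrate_add_blocker(dev->migration_blocker, errp) < 0) {
 *         error_free(dev->migration_blocker);
 *         dev->migration_blocker = NULL;
 *         return;
 *     }
 *     ...
 *     migrate_del_blocker(dev->migration_blocker);
 *     error_free(dev->migration_blocker);
 */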
int migrate_add_blocker(Error *reason, Error **errp)
{
Revert "migration: move only_migratable to MigrationState"
This reverts commit 3df663e575f1876d7f3bc684f80e72fca0703d39.
This reverts commit b605c47b57b58e61a901a50a0762dccf43d94783.
Command line option --only-migratable is for disallowing any
configuration that can block migration.
Initially, --only-migratable set global variable @only_migratable.
Commit 3df663e575 "migration: move only_migratable to MigrationState"
replaced it by MigrationState member @only_migratable. That was a
mistake.
First, it doesn't make sense on the design level. MigrationState
captures the state of an individual migration, but --only-migratable
isn't a property of an individual migration, it's a restriction on
QEMU configuration. With fault tolerance, we could have several
migrations at once. --only-migratable would certainly protect all of
them. Storing it in MigrationState feels inappropriate.
Second, it contributes to a dependency cycle that manifests itself as
a bug now.
Putting @only_migratable into MigrationState means its available only
after migration_object_init().
We can't set it before migration_object_init(), so we delay setting it
with a global property (this is fixup commit b605c47b57 "migration:
fix handling for --only-migratable").
We can't get it before migration_object_init(), so anything that uses
it can only run afterwards.
Since migrate_add_blocker() needs to obey --only-migratable, any code
adding migration blockers can run only afterwards. This contributes
to the following dependency cycle:
* configure_blockdev() must run before machine_set_property()
so machine properties can refer to block backends
* machine_set_property() before configure_accelerator()
so machine properties like kvm-irqchip get applied
* configure_accelerator() before migration_object_init()
so that Xen's accelerator compat properties get applied.
* migration_object_init() before configure_blockdev()
so configure_blockdev() can add migration blockers
The cycle was closed when recent commit cda4aa9a5a0 "Create block
backends before setting machine properties" added the first
dependency, and satisfied it by violating the last one. Broke block
backends that add migration blockers.
Moving @only_migratable into MigrationState was a mistake. Revert it.
This doesn't quite break the "migration_object_init() before
configure_blockdev() dependency, since migrate_add_blocker() still has
another dependency on migration_object_init(). To be addressed the
next commit.
Note that the reverted commit made -only-migratable sugar for -global
migration.only-migratable=on below the hood. Documentation has only
ever mentioned -only-migratable. This commit removes the arcane &
undocumented alternative to -only-migratable again. Nobody should be
using it.
Conflicts:
include/migration/misc.h
migration/migration.c
migration/migration.h
vl.c
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20190401090827.20793-3-armbru@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
2019-04-01 09:08:24 +00:00
|
|
|
    if (only_migratable) {
        error_propagate_prepend(errp, error_copy(reason),
                                "disallowing migration blocker "
                                "(--only-migratable) for: ");
        return -EACCES;
    }

    if (migration_is_idle()) {
        migration_blockers = g_slist_prepend(migration_blockers, reason);
        return 0;
    }

    error_propagate_prepend(errp, error_copy(reason),
                            "disallowing migration blocker "
                            "(migration in progress) for: ");
    return -EBUSY;
}

void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}

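/*
 * Start an incoming migration on a QEMU that was launched with
 * "-incoming defer".  As a sketch of the expected flow (the destination
 * address and port are examples only):
 *
 *     qemu-system-x86_64 ... -incoming defer
 *     (qemu) migrate_incoming tcp:0:4444          HMP
 *     { "execute": "migrate-incoming",
 *       "arguments": { "uri": "tcp:0:4444" } }    QMP
 */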
void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}

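/*
 * Destination-side entry point for postcopy recovery: while the incoming
 * migration sits in the postcopy-paused state, listen again on @uri so the
 * source can reconnect.  The source side then resumes through the "resume"
 * flag of the migrate command (see migrate_prepare() below).
 */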
void qmp_migrate_recover(const char *uri, Error **errp)
{
    MigrationIncomingState *mis = migration_incoming_get_current();

    if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
        error_setg(errp, "Migrate recover can only be run "
                   "when postcopy is paused.");
        return;
    }

    if (qatomic_cmpxchg(&mis->postcopy_recover_triggered,
                        false, true) == true) {
        error_setg(errp, "Migrate recovery is triggered already");
        return;
    }

    /*
     * Note that this call will never start a real migration; it will
     * only re-setup the migration stream and poke existing migration
     * to continue using that newly established channel.
     */
    qemu_start_incoming_migration(uri, errp);
}

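/*
 * Pause a postcopy migration by shutting down the migration stream on
 * whichever side this QEMU happens to be.  Only the shutdown is done here;
 * the transition into the postcopy-paused state is presumably performed by
 * the code that notices the resulting stream error.
 */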
void qmp_migrate_pause(Error **errp)
{
    MigrationState *ms = migrate_get_current();
    MigrationIncomingState *mis = migration_incoming_get_current();
    int ret;

    if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        /* Source side, during postcopy */
        qemu_mutex_lock(&ms->qemu_file_lock);
        ret = qemu_file_shutdown(ms->to_dst_file);
        qemu_mutex_unlock(&ms->qemu_file_lock);
        if (ret) {
            error_setg(errp, "Failed to pause source migration");
        }
        return;
    }

    if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
        ret = qemu_file_shutdown(mis->from_src_file);
        if (ret) {
            error_setg(errp, "Failed to pause destination migration");
        }
        return;
    }

    error_setg(errp, "migrate-pause is currently only supported "
               "during postcopy-active state");
}

bool migration_is_blocked(Error **errp)
{
    if (qemu_savevm_state_blocked(errp)) {
        return true;
    }

    if (migration_blockers) {
        error_propagate(errp, error_copy(migration_blockers->data));
        return true;
    }

    return false;
}

/* Returns true if we should continue the migration, or false if an error
 * was detected */
static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
                            bool resume, Error **errp)
{
    Error *local_err = NULL;

    if (resume) {
        if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
            error_setg(errp, "Cannot resume if there is no "
                       "paused migration");
            return false;
        }

        /*
         * Postcopy recovery won't work well with release-ram
         * capability since release-ram will drop the page buffer as
         * long as the page is put into the send buffer.  So if a
         * network failure happens, any page buffers that have not
         * yet reached the destination VM but have already been sent
         * from the source VM will be lost forever.  Let's refuse to
         * let the client resume such a postcopy migration.
         * Luckily release-ram was designed to only be used when src
         * and destination VMs are on the same host, so it should be
         * fine.
         */
        if (migrate_release_ram()) {
            error_setg(errp, "Postcopy recovery cannot work "
                       "when release-ram capability is set");
            return false;
        }

        /* This is a resume, skip init status */
        return true;
    }

    if (migration_is_running(s->state)) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return false;
    }

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return false;
    }

    if (migration_is_blocked(errp)) {
        return false;
    }

    if (blk || blk_inc) {
        if (migrate_use_block() || migrate_use_block_incremental()) {
            error_setg(errp, "Command options are incompatible with "
                       "current migration capabilities");
            return false;
        }
        migrate_set_block_enabled(true, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            return false;
        }
        s->must_remove_block_options = true;
    }

    if (blk_inc) {
        migrate_set_block_incremental(s, true);
    }

    migrate_init(s);
    /*
     * set ram_counters memory to zero for a
     * new migration
     */
    memset(&ram_counters, 0, sizeof(ram_counters));

    return true;
}

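/*
 * Entry point for the "migrate" command.  The transport is picked from the
 * URI prefix: "tcp:", "unix:" and "vsock:" use the socket code, "rdma:" is
 * available when built with CONFIG_RDMA, and "exec:" / "fd:" hand the
 * stream to an external command or an already-open file descriptor.  A
 * typical HMP invocation might look like (host and port are examples only):
 *
 *     (qemu) migrate -d tcp:desthost:4444
 */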
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 bool has_resume, bool resume, Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    const char *p = NULL;

    if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
                         has_resume && resume, errp)) {
        /* Error detected, put into errp */
        return;
    }

    if (strstart(uri, "tcp:", &p) ||
        strstart(uri, "unix:", NULL) ||
        strstart(uri, "vsock:", NULL)) {
        socket_start_outgoing_migration(s, p ? p : uri, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
                          MIGRATION_STATUS_FAILED);
        block_cleanup_parameters(s);
        return;
    }

    if (local_err) {
        migrate_fd_error(s, local_err);
        error_propagate(errp, local_err);
        return;
    }
}

void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}

void qmp_migrate_continue(MigrationStatus state, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (s->state != state) {
        error_setg(errp, "Migration not in expected state: %s",
                   MigrationStatus_str(s->state));
        return;
    }
    qemu_sem_post(&s->pause_sem);
}

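/*
 * The setters below are thin compatibility wrappers: each one fills in a
 * single member of MigrateSetParameters and forwards it to
 * qmp_migrate_set_parameters(); the same knobs can be set directly through
 * migrate-set-parameters.
 */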
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrateSetParameters p = {
        .has_xbzrle_cache_size = true,
        .xbzrle_cache_size = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}

void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrateSetParameters p = {
        .has_max_bandwidth = true,
        .max_bandwidth = value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

void qmp_migrate_set_downtime(double value, Error **errp)
{
    if (value < 0 || value > MAX_MIGRATE_DOWNTIME_SECONDS) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "downtime_limit",
                   "an integer in the range of 0 to "
                    stringify(MAX_MIGRATE_DOWNTIME_SECONDS)" seconds");
        return;
    }

    value *= 1000; /* Convert to milliseconds */

    MigrateSetParameters p = {
        .has_downtime_limit = true,
        .downtime_limit = (int64_t)value,
    };

    qmp_migrate_set_parameters(&p, errp);
}

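/*
 * Capability and parameter accessors.  Each helper below reads one field
 * of the current MigrationState so that callers elsewhere in the migration
 * code do not have to reach into the structure directly.
 */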
bool migrate_release_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
}

bool migrate_postcopy_ram(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
}

bool migrate_postcopy(void)
{
    return migrate_postcopy_ram() || migrate_dirty_bitmaps();
}

bool migrate_auto_converge(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
}

bool migrate_zero_blocks(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
}

bool migrate_postcopy_blocktime(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
}

bool migrate_use_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
}

int migrate_compress_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_level;
}

int migrate_compress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_threads;
}

int migrate_compress_wait_thread(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.compress_wait_thread;
}

int migrate_decompress_threads(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.decompress_threads;
}

bool migrate_dirty_bitmaps(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
}

bool migrate_ignore_shared(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
}

bool migrate_validate_uuid(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
}

bool migrate_use_events(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
}

bool migrate_use_multifd(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD];
}

bool migrate_pause_before_switchover(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[
        MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
}

int migrate_multifd_channels(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_channels;
}

MultiFDCompression migrate_multifd_compression(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_compression;
}

int migrate_multifd_zlib_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_zlib_level;
}

int migrate_multifd_zstd_level(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.multifd_zstd_level;
}

int migrate_use_xbzrle(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
}

int64_t migrate_xbzrle_cache_size(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.xbzrle_cache_size;
}

static int64_t migrate_max_postcopy_bandwidth(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.max_postcopy_bandwidth;
}

bool migrate_use_block(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
}

bool migrate_use_return_path(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
}

bool migrate_use_block_incremental(void)
{
    MigrationState *s;

    s = migrate_get_current();

    return s->parameters.block_incremental;
}

/* migration thread support */
/*
 * Something bad happened to the RP stream, mark an error
 * The caller shall print or trace something to indicate why
 */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}

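/*
 * Return-path messages arrive as a 16-bit big-endian message type followed
 * by a 16-bit big-endian payload length and then the payload itself (see
 * source_return_path_thread() below).  The table maps each type to its
 * expected payload length, with -1 meaning variable length.
 */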
static struct rp_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID]        = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT]           = { .len =  4, .name = "SHUT" },
    [MIG_RP_MSG_PONG]           = { .len =  4, .name = "PONG" },
    [MIG_RP_MSG_REQ_PAGES]      = { .len = 12, .name = "REQ_PAGES" },
    [MIG_RP_MSG_REQ_PAGES_ID]   = { .len = -1, .name = "REQ_PAGES_ID" },
    [MIG_RP_MSG_RECV_BITMAP]    = { .len = -1, .name = "RECV_BITMAP" },
    [MIG_RP_MSG_RESUME_ACK]     = { .len =  4, .name = "RESUME_ACK" },
    [MIG_RP_MSG_MAX]            = { .len = -1, .name = "MAX" },
};

/*
 * Process a request for pages received on the return path.  We're allowed
 * to send more than requested (e.g. to round to our page size) and we
 * don't need to send pages that have already been sent.
 */
static void migrate_handle_rp_req_pages(MigrationState *ms, const char *rbname,
                                        ram_addr_t start, size_t len)
{
    long our_host_ps = qemu_real_host_page_size;

    trace_migrate_handle_rp_req_pages(rbname, start, len);

    /*
     * Since we currently insist on matching page sizes, just sanity check
     * we're being asked for whole host pages.
     */
    if (start & (our_host_ps - 1) ||
        (len & (our_host_ps - 1))) {
        error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
                     " len: %zd", __func__, start, len);
        mark_source_rp_bad(ms);
        return;
    }

    if (ram_save_queue_pages(rbname, start, len)) {
        mark_source_rp_bad(ms);
    }
}

/* Return true to retry, false to quit */
static bool postcopy_pause_return_path_thread(MigrationState *s)
{
    trace_postcopy_pause_return_path();

    qemu_sem_wait(&s->postcopy_pause_rp_sem);

    trace_postcopy_pause_return_path_continued();

    return true;
}

static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);

    if (!block) {
        error_report("%s: invalid block name '%s'", __func__, block_name);
        return -EINVAL;
    }

    /* Fetch the received bitmap and refresh the dirty bitmap */
    return ram_dirty_bitmap_reload(s, block);
}

static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
{
    trace_source_return_path_thread_resume_ack(value);

    if (value != MIGRATION_RESUME_ACK_VALUE) {
        error_report("%s: illegal resume_ack value %"PRIu32,
                     __func__, value);
        return -1;
    }

    /* Now both sides are active. */
    migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
                      MIGRATION_STATUS_POSTCOPY_ACTIVE);

    /* Notify the send thread that it's time to continue sending pages */
    qemu_sem_post(&s->rp_state.rp_sem);

    return 0;
}

/*
 * Handles messages sent on the return path towards the source VM
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    uint8_t buf[512];
    uint32_t tmp32, sibling_error;
    ram_addr_t start = 0; /* =0 to silence warning */
    size_t len = 0, expected_len;
    int res;

    trace_source_return_path_thread_entry();
    rcu_register_thread();

retry:
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (qemu_file_get_error(rp)) {
            mark_source_rp_bad(ms);
            goto out;
        }

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        if ((rp_cmd_args[header_type].len != -1 &&
             header_len != rp_cmd_args[header_type].len) ||
            header_len > sizeof(buf)) {
            error_report("RP: Received '%s' message (0x%04x) with "
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            sibling_error = ldl_be_p(buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = ldl_be_p(buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        case MIG_RP_MSG_REQ_PAGES:
            start = ldq_be_p(buf);
            len = ldl_be_p(buf + 8);
            migrate_handle_rp_req_pages(ms, NULL, start, len);
            break;

        case MIG_RP_MSG_REQ_PAGES_ID:
            expected_len = 12 + 1; /* header + termination */

            if (header_len >= expected_len) {
                start = ldq_be_p(buf);
                len = ldl_be_p(buf + 8);
                /* Now we expect an idstr */
                tmp32 = buf[12]; /* Length of the following idstr */
                buf[13 + tmp32] = '\0';
                expected_len += tmp32;
            }
            if (header_len != expected_len) {
                error_report("RP: Req_Page_id with length %d expecting %zd",
                             header_len, expected_len);
                mark_source_rp_bad(ms);
                goto out;
            }
            migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
            break;

        case MIG_RP_MSG_RECV_BITMAP:
            if (header_len < 1) {
                error_report("%s: missing block name", __func__);
                mark_source_rp_bad(ms);
                goto out;
            }
            /* Format: len (1B) + idstr (<255B). This ends the idstr. */
            buf[buf[0] + 1] = '\0';
            if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        case MIG_RP_MSG_RESUME_ACK:
            tmp32 = ldl_be_p(buf);
            if (migrate_handle_rp_resume_ack(ms, tmp32)) {
                mark_source_rp_bad(ms);
                goto out;
            }
            break;

        default:
            break;
        }
    }

out:
    res = qemu_file_get_error(rp);
    if (res) {
        if (res == -EIO && migration_in_postcopy()) {
            /*
             * Maybe there is something we can do: it looks like a
             * network down issue, and we pause for a recovery.
             */
            if (postcopy_pause_return_path_thread(ms)) {
                /* Reload rp, reset the rest */
                if (rp != ms->rp_state.from_dst_file) {
                    qemu_fclose(rp);
                    rp = ms->rp_state.from_dst_file;
                }
                ms->rp_state.error = false;
                goto retry;
            }
        }

        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    rcu_unregister_thread();
    return NULL;
}

static int open_return_path_on_source(MigrationState *ms,
                                      bool create_thread)
{
    ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
    if (!ms->rp_state.from_dst_file) {
        return -1;
    }

    trace_open_return_path_on_source();

    if (!create_thread) {
        /* We're done */
        return 0;
    }

    qemu_thread_create(&ms->rp_state.rp_thread, "return path",
                       source_return_path_thread, ms, QEMU_THREAD_JOINABLE);

    trace_open_return_path_on_source_continue();

    return 0;
}

/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    return ms->rp_state.error;
}

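/*
 * Rough sequence of the switchover below: stop the VM and inactivate block
 * devices, flush the final pass of the precopy devices, tell the
 * destination which already-received pages are dirty again, then package
 * the remaining device state into a buffered channel and send it as one
 * blob so the destination can size its read before replaying it.
 */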
/*
 * Switch from normal iteration to postcopy
 * Returns non-0 on error
 */
static int postcopy_start(MigrationState *ms)
{
    int ret;
    QIOChannelBuffer *bioc;
    QEMUFile *fb;
    int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t bandwidth = migrate_max_postcopy_bandwidth();
    bool restart_block = false;
    int cur_state = MIGRATION_STATUS_ACTIVE;
    if (!migrate_pause_before_switchover()) {
        migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
                          MIGRATION_STATUS_POSTCOPY_ACTIVE);
    }

    trace_postcopy_start();
    qemu_mutex_lock_iothread();
    trace_postcopy_start_set_run();

qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
|
2015-11-05 18:11:05 +00:00
|
|
|
global_state_store();
|
|
|
|
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
|
2015-12-22 13:07:08 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
goto fail;
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2017-10-20 09:05:56 +00:00
|
|
|
ret = migration_maybe_pause(ms, &cur_state,
|
|
|
|
MIGRATION_STATUS_POSTCOPY_ACTIVE);
|
|
|
|
if (ret < 0) {
|
|
|
|
goto fail;
|
|
|
|
}
|
|
|
|
|
2015-12-22 13:07:08 +00:00
|
|
|
ret = bdrv_inactivate_all();
|
2015-11-05 18:11:05 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
goto fail;
|
|
|
|
}
|
2017-02-02 15:59:09 +00:00
|
|
|
restart_block = true;
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2015-11-11 14:02:27 +00:00
|
|
|
/*
|
|
|
|
* Cause any non-postcopiable, but iterative devices to
|
|
|
|
* send out their final data.
|
|
|
|
*/
|
2017-06-16 16:06:58 +00:00
|
|
|
qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
|
2015-11-11 14:02:27 +00:00
|
|
|
|
2015-11-05 18:11:05 +00:00
|
|
|
/*
|
|
|
|
* in Finish migrate and with the io-lock held everything should
|
|
|
|
* be quiet, but we've potentially still got dirty pages and we
|
|
|
|
* need to tell the destination to throw any pages it's already received
|
|
|
|
* that are dirty
|
|
|
|
*/
|
2017-07-10 16:30:16 +00:00
|
|
|
if (migrate_postcopy_ram()) {
|
|
|
|
if (ram_postcopy_send_discard_bitmap(ms)) {
|
|
|
|
error_report("postcopy send discard bitmap failed");
|
|
|
|
goto fail;
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* send rest of state - note things that are doing postcopy
|
|
|
|
* will notice we're in POSTCOPY_ACTIVE and not actually
|
|
|
|
* wrap their state up here
|
|
|
|
*/
|
2018-06-13 10:26:40 +00:00
|
|
|
/* 0 max-postcopy-bandwidth means unlimited */
|
|
|
|
if (!bandwidth) {
|
|
|
|
qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
|
|
|
|
} else {
|
|
|
|
qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
|
|
|
|
}
|
2017-07-10 16:30:16 +00:00
|
|
|
if (migrate_postcopy_ram()) {
|
|
|
|
/* Ping just for debugging, helps line traces up */
|
|
|
|
qemu_savevm_send_ping(ms->to_dst_file, 2);
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
|
|
|
|
/*
|
|
|
|
* While loading the device state we may trigger page transfer
|
|
|
|
* requests and the fd must be free to process those, and thus
|
|
|
|
* the destination must read the whole device state off the fd before
|
|
|
|
* it starts processing it. Unfortunately the ad-hoc migration format
|
|
|
|
* doesn't allow the destination to know the size to read without fully
|
|
|
|
* parsing it through each devices load-state code (especially the open
|
|
|
|
* coded devices that use get/put).
|
|
|
|
* So we wrap the device state up in a package with a length at the start;
|
|
|
|
* to do this we use a qemu_buf to hold the whole of the device state.
|
|
|
|
*/
|
2016-04-27 10:05:01 +00:00
|
|
|
bioc = qio_channel_buffer_new(4096);
|
2016-09-30 10:57:14 +00:00
|
|
|
qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
|
2016-04-27 10:05:01 +00:00
|
|
|
fb = qemu_fopen_channel_output(QIO_CHANNEL(bioc));
|
|
|
|
object_unref(OBJECT(bioc));
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2015-11-05 18:11:18 +00:00
|
|
|
/*
|
|
|
|
* Make sure the receiver can get incoming pages before we send the rest
|
|
|
|
* of the state
|
|
|
|
*/
|
|
|
|
qemu_savevm_send_postcopy_listen(fb);
|
|
|
|
|
2017-06-16 16:06:58 +00:00
|
|
|
qemu_savevm_state_complete_precopy(fb, false, false);
|
2017-07-10 16:30:16 +00:00
|
|
|
if (migrate_postcopy_ram()) {
|
|
|
|
qemu_savevm_send_ping(fb, 3);
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
|
|
|
|
qemu_savevm_send_postcopy_run(fb);
|
|
|
|
|
|
|
|
/* <><> end of stuff going into the package */
|
|
|
|
|
2017-02-02 15:59:09 +00:00
|
|
|
/* Last point of recovery; as soon as we send the package the destination
|
|
|
|
* can open devices and potentially start running.
|
|
|
|
* Let's just check again that we've not got any errors.
|
|
|
|
*/
|
|
|
|
ret = qemu_file_get_error(ms->to_dst_file);
|
|
|
|
if (ret) {
|
|
|
|
error_report("postcopy_start: Migration stream errored (pre package)");
|
|
|
|
goto fail_closefb;
|
|
|
|
}
|
|
|
|
|
|
|
|
restart_block = false;
|
|
|
|
|
2015-11-05 18:11:05 +00:00
|
|
|
/* Now send that blob */
|
2016-04-27 10:05:01 +00:00
|
|
|
if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
|
2015-11-05 18:11:05 +00:00
|
|
|
goto fail_closefb;
|
|
|
|
}
|
|
|
|
qemu_fclose(fb);
|
2016-02-22 17:17:32 +00:00
|
|
|
|
|
|
|
/* Send a notify to give a chance for anything that needs to happen
|
|
|
|
* at the transition to postcopy and after the device state; in particular
|
|
|
|
* spice needs to trigger a transition now
|
|
|
|
*/
|
|
|
|
ms->postcopy_after_devices = true;
|
|
|
|
notifier_list_notify(&migration_state_notifiers, ms);
|
|
|
|
|
2015-11-05 18:11:05 +00:00
|
|
|
ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
|
|
|
|
|
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
|
2017-07-10 16:30:16 +00:00
|
|
|
if (migrate_postcopy_ram()) {
|
|
|
|
/*
|
|
|
|
* Although this ping is just for debug, it could potentially be
|
|
|
|
* used for getting a better measurement of downtime at the source.
|
|
|
|
*/
|
|
|
|
qemu_savevm_send_ping(ms->to_dst_file, 4);
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2017-02-03 15:23:21 +00:00
|
|
|
if (migrate_release_ram()) {
|
|
|
|
ram_postcopy_migrated_memory_release(ms);
|
|
|
|
}
|
|
|
|
|
2016-01-15 03:37:42 +00:00
|
|
|
ret = qemu_file_get_error(ms->to_dst_file);
|
2015-11-05 18:11:05 +00:00
|
|
|
if (ret) {
|
|
|
|
error_report("postcopy_start: Migration stream errored");
|
2015-12-16 11:47:33 +00:00
|
|
|
migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
|
2015-11-05 18:11:05 +00:00
|
|
|
MIGRATION_STATUS_FAILED);
|
|
|
|
}
|
|
|
|
|
|
|
|
return ret;
|
|
|
|
|
|
|
|
fail_closefb:
|
|
|
|
qemu_fclose(fb);
|
|
|
|
fail:
|
2015-12-16 11:47:33 +00:00
|
|
|
migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
|
2015-11-05 18:11:05 +00:00
|
|
|
MIGRATION_STATUS_FAILED);
|
2017-02-02 15:59:09 +00:00
|
|
|
if (restart_block) {
|
|
|
|
/* A failure happened early enough that we know the destination hasn't
|
|
|
|
* accessed block devices, so we're safe to recover.
|
|
|
|
*/
|
|
|
|
Error *local_err = NULL;
|
|
|
|
|
|
|
|
bdrv_invalidate_cache_all(&local_err);
|
|
|
|
if (local_err) {
|
|
|
|
error_report_err(local_err);
|
|
|
|
}
|
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2017-10-20 09:05:52 +00:00
|
|
|
/**
|
|
|
|
* migration_maybe_pause: Pause if required to by
|
|
|
|
* migrate_pause_before_switchover; called with the iothread locked.
|
|
|
|
* Returns: 0 on success
|
|
|
|
*/
|
2017-10-20 09:05:56 +00:00
|
|
|
static int migration_maybe_pause(MigrationState *s,
|
|
|
|
int *current_active_state,
|
|
|
|
int new_state)
|
2017-10-20 09:05:52 +00:00
|
|
|
{
|
|
|
|
if (!migrate_pause_before_switchover()) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Since leaving this state is not atomic with posting the semaphore
|
|
|
|
* it's possible that someone could have issued multiple migrate_continue
|
|
|
|
* and the semaphore is incorrectly positive at this point;
|
|
|
|
* the docs say it's undefined to reinit a semaphore that's already
|
|
|
|
* init'd, so use timedwait to eat up any existing posts.
|
|
|
|
*/
|
|
|
|
while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
|
|
|
|
/* This block intentionally left blank */
|
|
|
|
}
|
|
|
|
|
2020-01-14 09:43:09 +00:00
|
|
|
/*
|
|
|
|
* If the migration is cancelled when it is in the completion phase,
|
|
|
|
* the migration state is set to MIGRATION_STATUS_CANCELLING.
|
|
|
|
* So we don't need to wait on a semaphore, otherwise we would always
|
|
|
|
* wait for the 'pause_sem' semaphore.
|
|
|
|
*/
|
|
|
|
if (s->state != MIGRATION_STATUS_CANCELLING) {
|
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
migrate_set_state(&s->state, *current_active_state,
|
|
|
|
MIGRATION_STATUS_PRE_SWITCHOVER);
|
|
|
|
qemu_sem_wait(&s->pause_sem);
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
|
|
|
|
new_state);
|
|
|
|
*current_active_state = new_state;
|
|
|
|
qemu_mutex_lock_iothread();
|
|
|
|
}
|
2017-10-20 09:05:52 +00:00
|
|
|
|
2017-10-20 09:05:56 +00:00
|
|
|
return s->state == new_state ? 0 : -EINVAL;
|
2017-10-20 09:05:52 +00:00
|
|
|
}
|
|
|
|
|
2015-08-13 10:51:31 +00:00
|
|
|
/**
|
|
|
|
* migration_completion: Used by migration_thread when there's not much left.
|
|
|
|
* The caller 'breaks' the loop when this returns.
|
|
|
|
*
|
|
|
|
* @s: Current migration state
|
|
|
|
*/
|
2018-01-03 12:20:14 +00:00
|
|
|
static void migration_completion(MigrationState *s)
|
2015-08-13 10:51:31 +00:00
|
|
|
{
|
|
|
|
int ret;
|
2018-01-03 12:20:14 +00:00
|
|
|
int current_active_state = s->state;
|
2015-08-13 10:51:31 +00:00
|
|
|
|
2015-11-05 18:11:06 +00:00
|
|
|
if (s->state == MIGRATION_STATUS_ACTIVE) {
|
|
|
|
qemu_mutex_lock_iothread();
|
2018-01-03 12:20:10 +00:00
|
|
|
s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
qmp hmp: Make system_wakeup check wake-up support and run state
The qmp/hmp command 'system_wakeup' is simply a direct call to
'qemu_system_wakeup_request' from vl.c. This function verifies if
runstate is SUSPENDED and if the wake up reason is valid before
proceeding. However, no error or warning is thrown if any of those
prerequisites isn't met. There is no way for the caller to
differentiate between a successful wakeup and an error state caused
by trying to wake up a guest that wasn't suspended.
This means that system_wakeup is silently failing, which can be
considered a bug. Adding error handling isn't an API break in this
case - applications that didn't check the result will remain broken,
the ones that check it will have a chance to deal with it.
Adding to that, the commit before previous created a new QMP API called
query-current-machine, with a new flag called wakeup-suspend-support,
that indicates if the guest has the capability of waking up from suspended
state. Although such a guest will never reach the SUSPENDED state and erroring
it out in this scenario would suffice, it is more informative for the user
to differentiate between a failure because the guest isn't suspended versus
a failure because the guest does not have support for wake up at all.
All this considered, this patch changes qmp_system_wakeup to check if
the guest is capable of waking up from suspend, and if it is suspended.
After this patch, this is the output of system_wakeup in a guest that
does not have wake-up from suspend support (ppc64):
(qemu) system_wakeup
wake-up from suspend is not supported by this guest
(qemu)
And this is the output of system_wakeup in a x86 guest that has the
support but isn't suspended:
(qemu) system_wakeup
Unable to wake up: guest is not in suspended state
(qemu)
Reported-by: Balamuruhan S <bala24@linux.vnet.ibm.com>
Signed-off-by: Daniel Henrique Barboza <danielhb413@gmail.com>
Message-Id: <20181205194701.17836-4-danielhb413@gmail.com>
Reviewed-by: Markus Armbruster <armbru@redhat.com>
Acked-by: Eduardo Habkost <ehabkost@redhat.com>
Reviewed-by: Michael S. Tsirkin <mst@redhat.com>
Signed-off-by: Markus Armbruster <armbru@redhat.com>
2018-12-05 19:47:01 +00:00
|
|
|
qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
|
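For reference, the guards that the commit message above describes can be sketched as follows. This is a hedged outline, not necessarily the exact body of qmp_system_wakeup(); the helper names and headers (qemu_wakeup_suspend_enabled(), "sysemu/runstate.h") are assumptions in the spirit of QEMU's conventions.

/*
 * Hedged sketch: refuse the wake-up request if the guest cannot wake up
 * from suspend at all, or if it is not currently suspended.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/runstate.h"

void qmp_system_wakeup(Error **errp)
{
    if (!qemu_wakeup_suspend_enabled()) {
        error_setg(errp,
                   "wake-up from suspend is not supported by this guest");
        return;
    }
    if (!runstate_check(RUN_STATE_SUSPENDED)) {
        error_setg(errp,
                   "Unable to wake up: guest is not in suspended state");
        return;
    }
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, errp);
}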
2018-01-03 12:20:09 +00:00
|
|
|
s->vm_was_running = runstate_is_running();
|
2015-11-05 18:11:06 +00:00
|
|
|
ret = global_state_store();
|
|
|
|
|
|
|
|
if (!ret) {
|
2017-06-16 16:06:58 +00:00
|
|
|
bool inactivate = !migrate_colo_enabled();
|
2015-11-05 18:11:06 +00:00
|
|
|
ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
|
2017-10-20 09:05:52 +00:00
|
|
|
if (ret >= 0) {
|
2017-10-20 09:05:56 +00:00
|
|
|
ret = migration_maybe_pause(s, ¤t_active_state,
|
|
|
|
MIGRATION_STATUS_DEVICE);
|
2017-10-20 09:05:52 +00:00
|
|
|
}
|
2017-05-22 15:10:38 +00:00
|
|
|
if (ret >= 0) {
|
|
|
|
qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
|
2017-06-16 16:06:58 +00:00
|
|
|
ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
|
|
|
|
inactivate);
|
2017-05-22 15:10:38 +00:00
|
|
|
}
|
2017-06-16 16:06:58 +00:00
|
|
|
if (inactivate && ret >= 0) {
|
|
|
|
s->block_inactive = true;
|
2015-11-05 18:11:06 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
qemu_mutex_unlock_iothread();
|
2015-08-13 10:51:31 +00:00
|
|
|
|
2015-11-05 18:11:06 +00:00
|
|
|
if (ret < 0) {
|
|
|
|
goto fail;
|
2015-08-13 10:51:31 +00:00
|
|
|
}
|
2015-11-05 18:11:06 +00:00
|
|
|
} else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
|
|
|
|
trace_migration_completion_postcopy_end();
|
|
|
|
|
2016-01-15 03:37:42 +00:00
|
|
|
qemu_savevm_state_complete_postcopy(s->to_dst_file);
|
2015-11-05 18:11:06 +00:00
|
|
|
trace_migration_completion_postcopy_end_after_complete();
|
2015-08-13 10:51:31 +00:00
|
|
|
}
|
|
|
|
|
2015-11-05 18:11:06 +00:00
|
|
|
/*
|
|
|
|
* If rp was opened we must clean up the thread before
|
|
|
|
* cleaning everything else up (since if there are no failures
|
|
|
|
* it will wait for the destination to send its status in
|
|
|
|
* a SHUT command).
|
|
|
|
*/
|
2017-05-31 10:35:34 +00:00
|
|
|
if (s->rp_state.from_dst_file) {
|
2015-11-05 18:11:06 +00:00
|
|
|
int rp_error;
|
2017-05-31 10:35:34 +00:00
|
|
|
trace_migration_return_path_end_before();
|
2015-11-05 18:11:06 +00:00
|
|
|
rp_error = await_return_path_close_on_source(s);
|
2017-05-31 10:35:34 +00:00
|
|
|
trace_migration_return_path_end_after(rp_error);
|
2015-11-05 18:11:06 +00:00
|
|
|
if (rp_error) {
|
2016-05-18 13:44:36 +00:00
|
|
|
goto fail_invalidate;
|
2015-11-05 18:11:06 +00:00
|
|
|
}
|
2015-08-13 10:51:31 +00:00
|
|
|
}
|
|
|
|
|
2016-01-15 03:37:42 +00:00
|
|
|
if (qemu_file_get_error(s->to_dst_file)) {
|
2015-08-13 10:51:31 +00:00
|
|
|
trace_migration_completion_file_err();
|
2016-05-18 13:44:36 +00:00
|
|
|
goto fail_invalidate;
|
2015-08-13 10:51:31 +00:00
|
|
|
}
|
|
|
|
|
2016-10-27 06:42:54 +00:00
|
|
|
if (!migrate_colo_enabled()) {
|
|
|
|
migrate_set_state(&s->state, current_active_state,
|
|
|
|
MIGRATION_STATUS_COMPLETED);
|
|
|
|
}
|
|
|
|
|
2015-08-13 10:51:31 +00:00
|
|
|
return;
|
|
|
|
|
2016-05-18 13:44:36 +00:00
|
|
|
fail_invalidate:
|
|
|
|
/* If not doing postcopy, vm_start() will be called: let's regain
|
|
|
|
* control of the images.
|
|
|
|
*/
|
2018-02-05 09:13:37 +00:00
|
|
|
if (s->state == MIGRATION_STATUS_ACTIVE ||
|
|
|
|
s->state == MIGRATION_STATUS_DEVICE) {
|
2016-05-18 13:44:36 +00:00
|
|
|
Error *local_err = NULL;
|
|
|
|
|
2017-01-24 07:59:52 +00:00
|
|
|
qemu_mutex_lock_iothread();
|
2016-05-18 13:44:36 +00:00
|
|
|
bdrv_invalidate_cache_all(&local_err);
|
|
|
|
if (local_err) {
|
|
|
|
error_report_err(local_err);
|
2017-01-24 07:59:52 +00:00
|
|
|
} else {
|
|
|
|
s->block_inactive = false;
|
2016-05-18 13:44:36 +00:00
|
|
|
}
|
2017-01-24 07:59:52 +00:00
|
|
|
qemu_mutex_unlock_iothread();
|
2016-05-18 13:44:36 +00:00
|
|
|
}
|
|
|
|
|
2015-08-13 10:51:31 +00:00
|
|
|
fail:
|
2015-12-16 11:47:33 +00:00
|
|
|
migrate_set_state(&s->state, current_active_state,
|
|
|
|
MIGRATION_STATUS_FAILED);
|
2015-08-13 10:51:31 +00:00
|
|
|
}
|
|
|
|
|
2016-10-27 06:42:52 +00:00
|
|
|
bool migrate_colo_enabled(void)
|
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
|
|
|
|
}
|
|
|
|
|
2018-05-02 10:47:19 +00:00
|
|
|
typedef enum MigThrError {
|
|
|
|
/* No error detected */
|
|
|
|
MIG_THR_ERR_NONE = 0,
|
|
|
|
/* Detected error, but resumed successfully */
|
|
|
|
MIG_THR_ERR_RECOVERED = 1,
|
|
|
|
/* Detected fatal error, need to exit */
|
|
|
|
MIG_THR_ERR_FATAL = 2,
|
|
|
|
} MigThrError;
|
|
|
|
|
2018-05-02 10:47:34 +00:00
|
|
|
static int postcopy_resume_handshake(MigrationState *s)
|
|
|
|
{
|
|
|
|
qemu_savevm_send_postcopy_resume(s->to_dst_file);
|
|
|
|
|
|
|
|
while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
|
|
|
|
qemu_sem_wait(&s->rp_state.rp_sem);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
/* Return zero if success, or <0 for error */
|
|
|
|
static int postcopy_do_resume(MigrationState *s)
|
|
|
|
{
|
2018-05-02 10:47:31 +00:00
|
|
|
int ret;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Call all the resume_prepare() hooks, so that modules can be
|
|
|
|
* ready for the migration resume.
|
|
|
|
*/
|
|
|
|
ret = qemu_savevm_state_resume_prepare(s);
|
|
|
|
if (ret) {
|
|
|
|
error_report("%s: resume_prepare() failure detected: %d",
|
|
|
|
__func__, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
2018-05-02 10:47:34 +00:00
|
|
|
* Last handshake with destination on the resume (destination will
|
|
|
|
* switch to postcopy-active afterwards)
|
2018-05-02 10:47:31 +00:00
|
|
|
*/
|
2018-05-02 10:47:34 +00:00
|
|
|
ret = postcopy_resume_handshake(s);
|
|
|
|
if (ret) {
|
|
|
|
error_report("%s: handshake failed: %d", __func__, ret);
|
|
|
|
return ret;
|
|
|
|
}
|
2018-05-02 10:47:31 +00:00
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2018-05-02 10:47:19 +00:00
|
|
|
/*
|
|
|
|
* We don't return until we are in a safe state to continue current
|
|
|
|
* postcopy migration. Returns MIG_THR_ERR_RECOVERED if recovered, or
|
|
|
|
* MIG_THR_ERR_FATAL if an unrecoverable failure happened.
|
|
|
|
*/
|
|
|
|
static MigThrError postcopy_pause(MigrationState *s)
|
|
|
|
{
|
|
|
|
assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
|
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
while (true) {
|
2018-05-02 10:47:38 +00:00
|
|
|
QEMUFile *file;
|
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
migrate_set_state(&s->state, s->state,
|
|
|
|
MIGRATION_STATUS_POSTCOPY_PAUSED);
|
2018-05-02 10:47:19 +00:00
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
/* Current channel is possibly broken. Release it. */
|
|
|
|
assert(s->to_dst_file);
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_lock(&s->qemu_file_lock);
|
|
|
|
file = s->to_dst_file;
|
2018-05-02 10:47:25 +00:00
|
|
|
s->to_dst_file = NULL;
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_unlock(&s->qemu_file_lock);
|
|
|
|
|
|
|
|
qemu_file_shutdown(file);
|
|
|
|
qemu_fclose(file);
|
2018-05-02 10:47:19 +00:00
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
error_report("Detected IO failure for postcopy. "
|
|
|
|
"Migration paused.");
|
2018-05-02 10:47:19 +00:00
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
/*
|
|
|
|
* We wait until things are fixed up. Then someone will set the
|
|
|
|
* status back for us.
|
|
|
|
*/
|
|
|
|
while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
|
|
|
|
qemu_sem_wait(&s->postcopy_pause_sem);
|
|
|
|
}
|
|
|
|
|
|
|
|
if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
|
|
|
|
/* Woken up by a recovery procedure. Give it a shot */
|
2018-05-02 10:47:19 +00:00
|
|
|
|
2018-05-02 10:47:25 +00:00
|
|
|
/*
|
|
|
|
* Firstly, let's wake up the return path now, with a new
|
|
|
|
* return path channel.
|
|
|
|
*/
|
|
|
|
qemu_sem_post(&s->postcopy_pause_rp_sem);
|
|
|
|
|
|
|
|
/* Do the resume logic */
|
|
|
|
if (postcopy_do_resume(s) == 0) {
|
|
|
|
/* Let's continue! */
|
|
|
|
trace_postcopy_pause_continued();
|
|
|
|
return MIG_THR_ERR_RECOVERED;
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* Something wrong happened during the recovery, let's
|
|
|
|
* pause again. Pause is always better than throwing
|
|
|
|
* data away.
|
|
|
|
*/
|
|
|
|
continue;
|
|
|
|
}
|
|
|
|
} else {
|
|
|
|
/* This is not right... Time to quit. */
|
|
|
|
return MIG_THR_ERR_FATAL;
|
|
|
|
}
|
|
|
|
}
|
2018-05-02 10:47:19 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static MigThrError migration_detect_error(MigrationState *s)
|
|
|
|
{
|
|
|
|
int ret;
|
2019-02-19 19:59:28 +00:00
|
|
|
int state = s->state;
|
2019-04-22 10:34:20 +00:00
|
|
|
Error *local_error = NULL;
|
2019-02-19 19:59:28 +00:00
|
|
|
|
|
|
|
if (state == MIGRATION_STATUS_CANCELLING ||
|
|
|
|
state == MIGRATION_STATUS_CANCELLED) {
|
|
|
|
/* End the migration, but don't set the state to failed */
|
|
|
|
return MIG_THR_ERR_FATAL;
|
|
|
|
}
|
2018-05-02 10:47:19 +00:00
|
|
|
|
|
|
|
/* Try to detect any file errors */
|
2019-04-22 10:34:20 +00:00
|
|
|
ret = qemu_file_get_error_obj(s->to_dst_file, &local_error);
|
2018-05-02 10:47:19 +00:00
|
|
|
if (!ret) {
|
|
|
|
/* Everything is fine */
|
2019-04-22 10:34:20 +00:00
|
|
|
assert(!local_error);
|
2018-05-02 10:47:19 +00:00
|
|
|
return MIG_THR_ERR_NONE;
|
|
|
|
}
|
|
|
|
|
2019-04-22 10:34:20 +00:00
|
|
|
if (local_error) {
|
|
|
|
migrate_set_error(s, local_error);
|
|
|
|
error_free(local_error);
|
|
|
|
}
|
|
|
|
|
2019-02-19 19:59:28 +00:00
|
|
|
if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret == -EIO) {
|
2018-05-02 10:47:19 +00:00
|
|
|
/*
|
|
|
|
* For postcopy, we allow the network to be down for a
|
|
|
|
* while. After that, it can be continued by a
|
|
|
|
* recovery phase.
|
|
|
|
*/
|
|
|
|
return postcopy_pause(s);
|
|
|
|
} else {
|
|
|
|
/*
|
|
|
|
* For precopy (or postcopy with error outside IO), we fail
|
|
|
|
* immediately.
|
|
|
|
*/
|
2019-02-19 19:59:28 +00:00
|
|
|
migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
|
2018-05-02 10:47:19 +00:00
|
|
|
trace_migration_thread_file_err();
|
|
|
|
|
|
|
|
/* Time to stop the migration, now. */
|
|
|
|
return MIG_THR_ERR_FATAL;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-09-12 02:49:57 +00:00
|
|
|
/* How many bytes have we transferred since the beginning of the migration */
|
2018-06-26 13:38:00 +00:00
|
|
|
static uint64_t migration_total_bytes(MigrationState *s)
|
|
|
|
{
|
2016-01-15 10:40:13 +00:00
|
|
|
return qemu_ftell(s->to_dst_file) + ram_counters.multifd_bytes;
|
2018-06-26 13:38:00 +00:00
|
|
|
}
|
|
|
|
|
2018-01-03 12:20:11 +00:00
|
|
|
static void migration_calculate_complete(MigrationState *s)
|
|
|
|
{
|
2018-06-26 13:38:00 +00:00
|
|
|
uint64_t bytes = migration_total_bytes(s);
|
2018-01-03 12:20:11 +00:00
|
|
|
int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
2018-06-26 13:26:35 +00:00
|
|
|
int64_t transfer_time;
|
2018-01-03 12:20:11 +00:00
|
|
|
|
|
|
|
s->total_time = end_time - s->start_time;
|
|
|
|
if (!s->downtime) {
|
|
|
|
/*
|
|
|
|
* It's still not set, so we are precopy migration. For
|
|
|
|
* postcopy, downtime is calculated during postcopy_start().
|
|
|
|
*/
|
|
|
|
s->downtime = end_time - s->downtime_start;
|
|
|
|
}
|
|
|
|
|
2018-06-26 13:26:35 +00:00
|
|
|
transfer_time = s->total_time - s->setup_time;
|
|
|
|
if (transfer_time) {
|
|
|
|
s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
|
2018-01-03 12:20:11 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2019-08-02 10:18:41 +00:00
|
|
|
static void update_iteration_initial_status(MigrationState *s)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* Update these three fields at the same time to avoid mismatched info leading to
|
|
|
|
* wrong speed calculation.
|
|
|
|
*/
|
|
|
|
s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
|
|
|
s->iteration_initial_bytes = migration_total_bytes(s);
|
|
|
|
s->iteration_initial_pages = ram_get_total_transferred_pages();
|
|
|
|
}
|
|
|
|
|
2018-01-03 12:20:13 +00:00
|
|
|
static void migration_update_counters(MigrationState *s,
|
|
|
|
int64_t current_time)
|
|
|
|
{
|
2019-01-11 06:37:30 +00:00
|
|
|
uint64_t transferred, transferred_pages, time_spent;
|
2018-06-26 13:38:00 +00:00
|
|
|
uint64_t current_bytes; /* bytes transferred since the beginning */
|
2018-01-03 12:20:13 +00:00
|
|
|
double bandwidth;
|
|
|
|
|
|
|
|
if (current_time < s->iteration_start_time + BUFFER_DELAY) {
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2018-06-26 13:38:00 +00:00
|
|
|
current_bytes = migration_total_bytes(s);
|
|
|
|
transferred = current_bytes - s->iteration_initial_bytes;
|
2018-01-03 12:20:13 +00:00
|
|
|
time_spent = current_time - s->iteration_start_time;
|
|
|
|
bandwidth = (double)transferred / time_spent;
|
2018-01-22 11:36:39 +00:00
|
|
|
s->threshold_size = bandwidth * s->parameters.downtime_limit;
|
2018-01-03 12:20:13 +00:00
|
|
|
|
|
|
|
s->mbps = (((double) transferred * 8.0) /
|
|
|
|
((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
|
|
|
|
|
2019-01-11 06:37:30 +00:00
|
|
|
transferred_pages = ram_get_total_transferred_pages() -
|
|
|
|
s->iteration_initial_pages;
|
|
|
|
s->pages_per_second = (double) transferred_pages /
|
|
|
|
(((double) time_spent / 1000.0));
|
|
|
|
|
2018-01-03 12:20:13 +00:00
|
|
|
/*
|
|
|
|
* if we haven't sent anything, we don't want to
|
|
|
|
* recalculate. 10000 is a small enough number for our purposes
|
|
|
|
*/
|
|
|
|
if (ram_counters.dirty_pages_rate && transferred > 10000) {
|
2018-06-12 08:50:09 +00:00
|
|
|
s->expected_downtime = ram_counters.remaining / bandwidth;
|
2018-01-03 12:20:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
qemu_file_reset_rate_limit(s->to_dst_file);
|
|
|
|
|
2019-08-02 10:18:41 +00:00
|
|
|
update_iteration_initial_status(s);
|
2018-01-03 12:20:13 +00:00
|
|
|
|
|
|
|
trace_migrate_transferred(transferred, time_spent,
|
2018-01-22 11:36:39 +00:00
|
|
|
bandwidth, s->threshold_size);
|
2018-01-03 12:20:13 +00:00
|
|
|
}
|
|
|
|
|
2018-01-03 12:20:14 +00:00
|
|
|
/* Migration thread iteration status */
|
|
|
|
typedef enum {
|
|
|
|
MIG_ITERATE_RESUME, /* Resume current iteration */
|
|
|
|
MIG_ITERATE_SKIP, /* Skip current iteration */
|
|
|
|
MIG_ITERATE_BREAK, /* Break the loop */
|
|
|
|
} MigIterateState;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if continue to the next iteration directly, false
|
|
|
|
* otherwise.
|
|
|
|
*/
|
|
|
|
static MigIterateState migration_iteration_run(MigrationState *s)
|
|
|
|
{
|
2018-03-13 19:34:00 +00:00
|
|
|
uint64_t pending_size, pend_pre, pend_compat, pend_post;
|
2018-01-03 12:20:14 +00:00
|
|
|
bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
|
|
|
|
|
2018-03-13 19:34:00 +00:00
|
|
|
qemu_savevm_state_pending(s->to_dst_file, s->threshold_size, &pend_pre,
|
|
|
|
&pend_compat, &pend_post);
|
|
|
|
pending_size = pend_pre + pend_compat + pend_post;
|
2018-01-03 12:20:14 +00:00
|
|
|
|
|
|
|
trace_migrate_pending(pending_size, s->threshold_size,
|
2018-03-13 19:34:00 +00:00
|
|
|
pend_pre, pend_compat, pend_post);
|
2018-01-03 12:20:14 +00:00
|
|
|
|
|
|
|
if (pending_size && pending_size >= s->threshold_size) {
|
|
|
|
/* Still a significant amount to transfer */
|
2019-07-18 08:37:47 +00:00
|
|
|
if (!in_postcopy && pend_pre <= s->threshold_size &&
|
2020-09-23 10:56:46 +00:00
|
|
|
qatomic_read(&s->start_postcopy)) {
|
2018-01-03 12:20:14 +00:00
|
|
|
if (postcopy_start(s)) {
|
|
|
|
error_report("%s: postcopy failed to start", __func__);
|
|
|
|
}
|
|
|
|
return MIG_ITERATE_SKIP;
|
|
|
|
}
|
|
|
|
/* Just another iteration step */
|
2019-10-05 22:05:16 +00:00
|
|
|
qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
|
2018-01-03 12:20:14 +00:00
|
|
|
} else {
|
|
|
|
trace_migration_thread_low_pending(pending_size);
|
|
|
|
migration_completion(s);
|
|
|
|
return MIG_ITERATE_BREAK;
|
|
|
|
}
|
|
|
|
|
|
|
|
return MIG_ITERATE_RESUME;
|
|
|
|
}
|
|
|
|
|
2018-01-03 12:20:15 +00:00
|
|
|
static void migration_iteration_finish(MigrationState *s)
|
|
|
|
{
|
|
|
|
/* If we enabled cpu throttling for auto-converge, turn it off. */
|
|
|
|
cpu_throttle_stop();
|
|
|
|
|
|
|
|
qemu_mutex_lock_iothread();
|
|
|
|
switch (s->state) {
|
|
|
|
case MIGRATION_STATUS_COMPLETED:
|
|
|
|
migration_calculate_complete(s);
|
|
|
|
runstate_set(RUN_STATE_POSTMIGRATE);
|
|
|
|
break;
|
|
|
|
|
|
|
|
case MIGRATION_STATUS_ACTIVE:
|
|
|
|
/*
|
|
|
|
* We should really assert here, but since it's during
|
|
|
|
* migration, let's try to reduce the usage of assertions.
|
|
|
|
*/
|
|
|
|
if (!migrate_colo_enabled()) {
|
|
|
|
error_report("%s: critical error: calling COLO code without "
|
|
|
|
"COLO enabled", __func__);
|
|
|
|
}
|
|
|
|
migrate_start_colo_process(s);
|
|
|
|
/*
|
|
|
|
* Fixme: we will run the VM in COLO no matter what its old running state was.
|
|
|
|
* After exiting COLO, we will keep running.
|
|
|
|
*/
|
|
|
|
s->vm_was_running = true;
|
|
|
|
/* Fallthrough */
|
|
|
|
case MIGRATION_STATUS_FAILED:
|
|
|
|
case MIGRATION_STATUS_CANCELLED:
|
2018-07-19 09:22:57 +00:00
|
|
|
case MIGRATION_STATUS_CANCELLING:
|
2018-01-03 12:20:15 +00:00
|
|
|
if (s->vm_was_running) {
|
|
|
|
vm_start();
|
|
|
|
} else {
|
|
|
|
if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
|
|
|
|
runstate_set(RUN_STATE_POSTMIGRATE);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
break;
|
|
|
|
|
|
|
|
default:
|
|
|
|
/* Should not reach here, but if so, forgive the VM. */
|
|
|
|
error_report("%s: Unknown ending state %d", __func__, s->state);
|
|
|
|
break;
|
|
|
|
}
|
2019-04-08 11:33:43 +00:00
|
|
|
migrate_fd_cleanup_schedule(s);
|
2018-01-03 12:20:15 +00:00
|
|
|
qemu_mutex_unlock_iothread();
|
|
|
|
}
|
|
|
|
|
2018-06-13 10:26:41 +00:00
|
|
|
void migration_make_urgent_request(void)
|
|
|
|
{
|
|
|
|
qemu_sem_post(&migrate_get_current()->rate_limit_sem);
|
|
|
|
}
|
|
|
|
|
|
|
|
void migration_consume_urgent_request(void)
|
|
|
|
{
|
|
|
|
qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
|
|
|
|
}
|
|
|
|
|
2019-12-05 10:29:18 +00:00
|
|
|
/* Returns true if the rate limiting was broken by an urgent request */
|
|
|
|
bool migration_rate_limit(void)
|
|
|
|
{
|
|
|
|
int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
|
|
|
|
bool urgent = false;
|
|
|
|
migration_update_counters(s, now);
|
|
|
|
if (qemu_file_rate_limit(s->to_dst_file)) {
|
2020-05-20 20:42:32 +00:00
|
|
|
|
|
|
|
if (qemu_file_get_error(s->to_dst_file)) {
|
|
|
|
return false;
|
|
|
|
}
|
2019-12-05 10:29:18 +00:00
|
|
|
/*
|
|
|
|
* Wait for a delay to do rate limiting OR
|
|
|
|
* something urgent to post the semaphore.
|
|
|
|
*/
|
|
|
|
int ms = s->iteration_start_time + BUFFER_DELAY - now;
|
|
|
|
trace_migration_rate_limit_pre(ms);
|
|
|
|
if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
|
|
|
|
/*
|
|
|
|
* We were woken by one or more urgent things but
|
|
|
|
* the timedwait will have consumed one of them.
|
|
|
|
* The service routine for the urgent wake will dec
|
|
|
|
* the semaphore itself for each item it consumes,
|
|
|
|
* so add back the one we just consumed.
|
|
|
|
*/
|
|
|
|
qemu_sem_post(&s->rate_limit_sem);
|
|
|
|
urgent = true;
|
|
|
|
}
|
|
|
|
trace_migration_rate_limit_post(urgent);
|
|
|
|
}
|
|
|
|
return urgent;
|
|
|
|
}
|
|
|
|
|
2015-11-05 18:10:49 +00:00
|
|
|
/*
|
|
|
|
* Master migration thread on the source VM.
|
|
|
|
* It drives the migration and pumps the data down the outgoing channel.
|
|
|
|
*/
|
2013-02-22 16:36:30 +00:00
|
|
|
static void *migration_thread(void *opaque)
|
2012-10-03 12:18:33 +00:00
|
|
|
{
|
2012-12-19 08:55:50 +00:00
|
|
|
MigrationState *s = opaque;
|
2013-08-21 15:03:08 +00:00
|
|
|
int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
|
2018-05-02 10:47:19 +00:00
|
|
|
MigThrError thr_error;
|
2018-06-13 10:26:41 +00:00
|
|
|
bool urgent = false;
|
2012-10-03 18:16:24 +00:00
|
|
|
|
2015-07-09 06:55:38 +00:00
|
|
|
rcu_register_thread();
|
|
|
|
|
2019-02-27 16:49:00 +00:00
|
|
|
object_ref(OBJECT(s));
|
2019-08-02 10:18:41 +00:00
|
|
|
update_iteration_initial_status(s);
|
2018-01-03 12:20:13 +00:00
|
|
|
|
2016-01-15 03:37:42 +00:00
|
|
|
qemu_savevm_state_header(s->to_dst_file);
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2017-06-14 07:55:58 +00:00
|
|
|
/*
|
|
|
|
* If we opened the return path, we need to make sure dst has it
|
|
|
|
* opened as well.
|
|
|
|
*/
|
|
|
|
if (s->rp_state.from_dst_file) {
|
2015-11-05 18:11:05 +00:00
|
|
|
/* Now tell the dest that it should open its end so it can reply */
|
2016-01-15 03:37:42 +00:00
|
|
|
qemu_savevm_send_open_return_path(s->to_dst_file);
|
2015-11-05 18:11:05 +00:00
|
|
|
|
|
|
|
/* And do a ping that will make stuff easier to debug */
|
2016-01-15 03:37:42 +00:00
|
|
|
qemu_savevm_send_ping(s->to_dst_file, 1);
|
2017-05-31 10:35:34 +00:00
|
|
|
}
|
2015-11-05 18:11:05 +00:00
|
|
|
|
2017-07-10 16:30:16 +00:00
|
|
|
if (migrate_postcopy()) {
|
2015-11-05 18:11:05 +00:00
|
|
|
/*
|
|
|
|
* Tell the destination that we *might* want to do postcopy later;
|
|
|
|
* if the other end can't do postcopy it should fail now, nice and
|
|
|
|
* early.
|
|
|
|
*/
|
2016-01-15 03:37:42 +00:00
|
|
|
qemu_savevm_send_postcopy_advise(s->to_dst_file);
|
2015-11-05 18:11:05 +00:00
|
|
|
}
|
|
|
|
|
2018-09-03 04:38:47 +00:00
|
|
|
if (migrate_colo_enabled()) {
|
|
|
|
/* Notify migration destination that we enable COLO */
|
|
|
|
qemu_savevm_send_colo_enable(s->to_dst_file);
|
|
|
|
}
|
|
|
|
|
2017-06-28 09:52:24 +00:00
|
|
|
qemu_savevm_state_setup(s->to_dst_file);
|
2012-10-03 12:18:33 +00:00
|
|
|
|
2020-02-04 05:08:41 +00:00
|
|
|
if (qemu_savevm_state_guest_unplug_pending()) {
|
2019-10-29 11:49:02 +00:00
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_WAIT_UNPLUG);
|
|
|
|
|
|
|
|
while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
|
|
|
|
qemu_savevm_state_guest_unplug_pending()) {
|
|
|
|
qemu_sem_timedwait(&s->wait_unplug_sem, 250);
|
|
|
|
}
|
|
|
|
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG,
|
|
|
|
MIGRATION_STATUS_ACTIVE);
|
|
|
|
}
|
|
|
|
|
2013-08-21 15:03:08 +00:00
|
|
|
s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
|
2015-12-16 11:47:33 +00:00
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_ACTIVE);
|
rdma: introduce MIG_STATE_NONE and change MIG_STATE_SETUP state transition
As described in the previous patch, until now, the MIG_STATE_SETUP
state was not really a 'formal' state. It has been used as a 'zero' state
(what we're calling 'NONE' here) and QEMU has been unconditionally transitioning
into this state when the QMP migration command was called. Instead we want to
introduce MIG_STATE_NONE, which is our starting state in the state machine, and
then immediately transition into the MIG_STATE_SETUP state when the QMP migrate
command is issued.
In order to do this, we must delay the transition into MIG_STATE_ACTIVE until
later in the migration_thread(). This is done to be able to timestamp the amount of
time spent in the SETUP state for proper accounting to the user during
an RDMA migration.
Furthermore, the management software, until now, has never been aware of the
existence of the SETUP state whatsoever. This must change, because timing of this
state implies that the state actually exists.
These two patches cannot be separated because the 'query_migrate' QMP
switch statement needs to know how to handle this new state transition.
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2013-07-22 14:01:57 +00:00
|
|
|
|
2015-11-05 18:10:58 +00:00
|
|
|
trace_migration_thread_setup_complete();
|
|
|
|
|
2019-07-17 00:53:41 +00:00
|
|
|
while (migration_is_active(s)) {
|
2018-06-13 10:26:41 +00:00
|
|
|
if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
|
2018-01-03 12:20:14 +00:00
|
|
|
MigIterateState iter_state = migration_iteration_run(s);
|
|
|
|
if (iter_state == MIG_ITERATE_SKIP) {
|
|
|
|
continue;
|
|
|
|
} else if (iter_state == MIG_ITERATE_BREAK) {
|
2015-08-13 10:51:31 +00:00
|
|
|
break;
|
2012-10-03 18:33:34 +00:00
|
|
|
}
|
|
|
|
}
|
2013-02-22 16:36:20 +00:00
|
|
|
|
2018-05-02 10:47:19 +00:00
|
|
|
/*
|
|
|
|
* Try to detect any kind of failures, and see whether we
|
|
|
|
* should stop the migration now.
|
|
|
|
*/
|
|
|
|
thr_error = migration_detect_error(s);
|
|
|
|
if (thr_error == MIG_THR_ERR_FATAL) {
|
|
|
|
/* Stop migration */
|
2013-02-22 16:36:33 +00:00
|
|
|
break;
|
2018-05-02 10:47:19 +00:00
|
|
|
} else if (thr_error == MIG_THR_ERR_RECOVERED) {
|
|
|
|
/*
|
|
|
|
* Just recovered from, e.g., a network failure; reset all
|
|
|
|
* the local variables. This is important to avoid
|
|
|
|
* breaking transferred_bytes and bandwidth calculation
|
|
|
|
*/
|
2019-08-02 10:18:41 +00:00
|
|
|
update_iteration_initial_status(s);
|
2013-02-22 16:36:33 +00:00
|
|
|
}
|
2018-01-03 12:20:13 +00:00
|
|
|
|
2019-12-05 10:29:18 +00:00
|
|
|
urgent = migration_rate_limit();
|
2013-02-22 16:36:18 +00:00
|
|
|
}
|
|
|
|
|
2015-11-05 18:11:05 +00:00
|
|
|
trace_migration_thread_after_loop();
|
2018-01-03 12:20:15 +00:00
|
|
|
migration_iteration_finish(s);
|
2019-02-27 16:49:00 +00:00
|
|
|
object_unref(OBJECT(s));
|
2015-07-09 06:55:38 +00:00
|
|
|
rcu_unregister_thread();
|
2012-10-03 12:18:33 +00:00
|
|
|
return NULL;
|
|
|
|
}
|
|
|
|
|
2017-12-15 17:16:54 +00:00
|
|
|
void migrate_fd_connect(MigrationState *s, Error *error_in)
|
2012-10-03 12:18:33 +00:00
|
|
|
{
|
2019-06-12 09:33:27 +00:00
|
|
|
Error *local_err = NULL;
|
2018-05-02 10:47:24 +00:00
|
|
|
int64_t rate_limit;
|
|
|
|
bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
|
|
|
|
|
2016-09-15 16:20:28 +00:00
|
|
|
s->expected_downtime = s->parameters.downtime_limit;
|
2020-03-25 18:47:21 +00:00
|
|
|
if (resume) {
|
|
|
|
assert(s->cleanup_bh);
|
|
|
|
} else {
|
|
|
|
assert(!s->cleanup_bh);
|
|
|
|
s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
|
|
|
|
}
|
2017-12-15 17:16:54 +00:00
|
|
|
if (error_in) {
|
|
|
|
migrate_fd_error(s, error_in);
|
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
return;
|
|
|
|
}
|
2012-10-03 12:18:33 +00:00
|
|
|
|
2018-05-02 10:47:24 +00:00
|
|
|
if (resume) {
|
|
|
|
/* This is a resumed migration */
|
2019-09-06 13:01:03 +00:00
|
|
|
rate_limit = s->parameters.max_postcopy_bandwidth /
|
|
|
|
XFER_LIMIT_RATIO;
|
2018-05-02 10:47:24 +00:00
|
|
|
} else {
|
|
|
|
/* This is a fresh new migration */
|
|
|
|
rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;
|
2013-02-22 16:36:44 +00:00
|
|
|
|
2018-05-02 10:47:24 +00:00
|
|
|
/* Notify before starting migration thread */
|
|
|
|
notifier_list_notify(&migration_state_notifiers, s);
|
|
|
|
}
|
|
|
|
|
|
|
|
qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
|
|
|
|
qemu_file_set_blocking(s->to_dst_file, true);
|
2013-07-29 13:01:57 +00:00
|
|
|
|
2015-11-05 18:11:05 +00:00
|
|
|
/*
|
2017-06-26 10:28:55 +00:00
|
|
|
* Open the return path. For postcopy, it is used exclusively. For
|
|
|
|
* precopy, only if user specified "return-path" capability would
|
|
|
|
* QEMU use the return path.
|
2015-11-05 18:11:05 +00:00
|
|
|
*/
|
2017-06-26 10:28:55 +00:00
|
|
|
if (migrate_postcopy_ram() || migrate_use_return_path()) {
|
2018-05-02 10:47:24 +00:00
|
|
|
if (open_return_path_on_source(s, !resume)) {
|
2015-11-05 18:11:05 +00:00
|
|
|
error_report("Unable to open return-path for postcopy");
|
2018-05-02 10:47:24 +00:00
|
|
|
migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
|
2015-11-05 18:11:05 +00:00
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2018-05-02 10:47:24 +00:00
|
|
|
if (resume) {
|
2018-05-02 10:47:25 +00:00
|
|
|
/* Wakeup the main migration thread to do the recovery */
|
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
|
|
|
|
MIGRATION_STATUS_POSTCOPY_RECOVER);
|
|
|
|
qemu_sem_post(&s->postcopy_pause_sem);
|
2018-05-02 10:47:24 +00:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2019-06-12 09:33:27 +00:00
|
|
|
if (multifd_save_setup(&local_err) != 0) {
|
|
|
|
error_report_err(local_err);
|
2016-01-14 15:52:55 +00:00
|
|
|
migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
|
|
|
|
MIGRATION_STATUS_FAILED);
|
|
|
|
migrate_fd_cleanup(s);
|
|
|
|
return;
|
|
|
|
}
|
2017-01-23 13:42:56 +00:00
|
|
|
qemu_thread_create(&s->thread, "live_migration", migration_thread, s,
|
2013-02-22 16:36:21 +00:00
|
|
|
QEMU_THREAD_JOINABLE);
|
2015-11-05 18:11:05 +00:00
|
|
|
s->migration_thread_running = true;
|
2012-10-03 12:18:33 +00:00
|
|
|
}
|
2015-11-05 18:10:52 +00:00
|
|
|
|
2017-06-27 04:10:19 +00:00
|
|
|
void migration_global_dump(Monitor *mon)
|
|
|
|
{
|
|
|
|
MigrationState *ms = migrate_get_current();
|
|
|
|
|
2017-10-26 09:49:57 +00:00
|
|
|
monitor_printf(mon, "globals:\n");
|
|
|
|
monitor_printf(mon, "store-global-state: %s\n",
|
|
|
|
ms->store_global_state ? "on" : "off");
|
|
|
|
monitor_printf(mon, "only-migratable: %s\n",
|
Revert "migration: move only_migratable to MigrationState"
This reverts commit 3df663e575f1876d7f3bc684f80e72fca0703d39.
This reverts commit b605c47b57b58e61a901a50a0762dccf43d94783.
Command line option --only-migratable is for disallowing any
configuration that can block migration.
Initially, --only-migratable set global variable @only_migratable.
Commit 3df663e575 "migration: move only_migratable to MigrationState"
replaced it by MigrationState member @only_migratable. That was a
mistake.
First, it doesn't make sense on the design level. MigrationState
captures the state of an individual migration, but --only-migratable
isn't a property of an individual migration, it's a restriction on
QEMU configuration. With fault tolerance, we could have several
migrations at once. --only-migratable would certainly protect all of
them. Storing it in MigrationState feels inappropriate.
Second, it contributes to a dependency cycle that manifests itself as
a bug now.
Putting @only_migratable into MigrationState means its available only
after migration_object_init().
We can't set it before migration_object_init(), so we delay setting it
with a global property (this is fixup commit b605c47b57 "migration:
fix handling for --only-migratable").
We can't get it before migration_object_init(), so anything that uses
it can only run afterwards.
Since migrate_add_blocker() needs to obey --only-migratable, any code
adding migration blockers can run only afterwards. This contributes
to the following dependency cycle:
* configure_blockdev() must run before machine_set_property()
so machine properties can refer to block backends
* machine_set_property() before configure_accelerator()
so machine properties like kvm-irqchip get applied
* configure_accelerator() before migration_object_init()
so that Xen's accelerator compat properties get applied.
* migration_object_init() before configure_blockdev()
so configure_blockdev() can add migration blockers
The cycle was closed when recent commit cda4aa9a5a0 "Create block
backends before setting machine properties" added the first
dependency, and satisfied it by violating the last one. Broke block
backends that add migration blockers.
Moving @only_migratable into MigrationState was a mistake. Revert it.
This doesn't quite break the "migration_object_init() before
configure_blockdev()" dependency, since migrate_add_blocker() still has
another dependency on migration_object_init(). To be addressed in the
next commit.
Note that the reverted commit made -only-migratable sugar for -global
migration.only-migratable=on below the hood. Documentation has only
ever mentioned -only-migratable. This commit removes the arcane &
undocumented alternative to -only-migratable again. Nobody should be
using it.
Conflicts:
include/migration/misc.h
migration/migration.c
migration/migration.h
vl.c
Signed-off-by: Markus Armbruster <armbru@redhat.com>
Message-Id: <20190401090827.20793-3-armbru@redhat.com>
Reviewed-by: Igor Mammedov <imammedo@redhat.com>
2019-04-01 09:08:24 +00:00
|
|
|
only_migratable ? "on" : "off");
|
2017-10-26 09:49:57 +00:00
|
|
|
monitor_printf(mon, "send-configuration: %s\n",
|
|
|
|
ms->send_configuration ? "on" : "off");
|
|
|
|
monitor_printf(mon, "send-section-footer: %s\n",
|
|
|
|
ms->send_section_footer ? "on" : "off");
|
2018-05-03 08:06:11 +00:00
|
|
|
monitor_printf(mon, "decompress-error-check: %s\n",
|
|
|
|
ms->decompress_error_check ? "on" : "off");
|
migration: Split log_clear() into smaller chunks
Currently we are doing log_clear() right after log_sync() which mostly
keeps the old behavior when log_clear() was still part of log_sync().
This patch tries to further optimize the migration log_clear() code
path to split huge log_clear()s into smaller chunks.
We do this by splitting the whole guest memory region into memory
chunks, whose size is decided by MigrationState.clear_bitmap_shift (an
example will be given below). With that, we don't do the dirty bitmap
clear operation on the remote node (e.g., KVM) when we fetch the dirty
bitmap; instead we explicitly clear the dirty bitmap for a memory
chunk the first time we send a page from that chunk.
Here comes an example.
Assuming the guest has 64G memory, then before this patch the KVM
ioctl KVM_CLEAR_DIRTY_LOG will be a single one covering 64G memory.
After the patch, assuming the clear bitmap shift is 18, the memory
chunk size on x86_64 will be 1UL<<18 * 4K = 1GB. Then instead of
sending a big 64G ioctl, we'll send 64 small ioctls, each of which
will cover 1G of the guest memory. For each of the 64 small ioctls,
we'll only send it if any of the pages in that small chunk was going
to be sent right away.
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Juan Quintela <quintela@redhat.com>
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Message-Id: <20190603065056.25211-12-peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2019-06-03 06:50:56 +00:00
|
|
|
monitor_printf(mon, "clear-bitmap-shift: %u\n",
|
|
|
|
ms->clear_bitmap_shift);
|
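To make the arithmetic in the commit message above concrete, here is a small, self-contained calculation. It is illustrative only, not the memory.c implementation, and it assumes 4 KiB target pages, 64 GiB of guest RAM and a clear-bitmap-shift of 18.

/* Illustrative only: chunk size and ioctl count implied by the shift. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const uint64_t page_size = 4096;          /* assumed target page size */
    const uint64_t guest_ram = 64ULL << 30;   /* 64 GiB of guest memory   */
    const unsigned shift = 18;                /* clear-bitmap-shift value */

    uint64_t chunk_bytes = (UINT64_C(1) << shift) * page_size;  /* 1 GiB */
    uint64_t nr_ioctls = guest_ram / chunk_bytes;               /* 64    */

    printf("chunk: %" PRIu64 " MiB, KVM_CLEAR_DIRTY_LOG calls: %" PRIu64 "\n",
           chunk_bytes >> 20, nr_ioctls);
    return 0;
}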
2017-06-27 04:10:19 +00:00
|
|
|
}
|
|
|
|
|
2017-07-18 03:39:03 +00:00
|
|
|
#define DEFINE_PROP_MIG_CAP(name, x) \
|
|
|
|
DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)
|
|
|
|
|
2017-06-27 04:10:14 +00:00
|
|
|
static Property migration_properties[] = {
|
|
|
|
DEFINE_PROP_BOOL("store-global-state", MigrationState,
|
|
|
|
store_global_state, true),
|
2017-06-27 04:10:16 +00:00
|
|
|
DEFINE_PROP_BOOL("send-configuration", MigrationState,
|
|
|
|
send_configuration, true),
|
2017-06-27 04:10:17 +00:00
|
|
|
DEFINE_PROP_BOOL("send-section-footer", MigrationState,
|
|
|
|
send_section_footer, true),
|
2018-05-03 08:06:11 +00:00
|
|
|
DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
|
|
|
|
decompress_error_check, true),
|
2019-06-03 06:50:56 +00:00
|
|
|
DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
|
|
|
|
clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
|
2017-07-18 03:39:02 +00:00
|
|
|
|
|
|
|
/* Migration parameters */
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT8("x-compress-level", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.compress_level,
|
|
|
|
DEFAULT_MIGRATE_COMPRESS_LEVEL),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.compress_threads,
|
|
|
|
DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
|
2018-08-21 08:10:20 +00:00
|
|
|
DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
|
|
|
|
parameters.compress_wait_thread, true),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.decompress_threads,
|
|
|
|
DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
|
2020-02-24 02:31:42 +00:00
|
|
|
DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
|
|
|
|
parameters.throttle_trigger_threshold,
|
|
|
|
DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.cpu_throttle_initial,
|
|
|
|
DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.cpu_throttle_increment,
|
|
|
|
DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
|
2020-04-13 10:15:08 +00:00
|
|
|
DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
|
|
|
|
parameters.cpu_throttle_tailslow, false),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.max_bandwidth, MAX_THROTTLE),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.downtime_limit,
|
|
|
|
DEFAULT_MIGRATE_SET_DOWNTIME),
|
2017-12-01 12:08:38 +00:00
|
|
|
DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
|
2017-07-18 03:39:02 +00:00
|
|
|
parameters.x_checkpoint_delay,
|
|
|
|
DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
|
2019-02-06 12:54:06 +00:00
|
|
|
DEFINE_PROP_UINT8("multifd-channels", MigrationState,
|
|
|
|
parameters.multifd_channels,
|
2016-01-15 07:56:17 +00:00
|
|
|
DEFAULT_MIGRATE_MULTIFD_CHANNELS),
|
2019-01-16 09:35:55 +00:00
|
|
|
DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState,
|
|
|
|
parameters.multifd_compression,
|
|
|
|
DEFAULT_MIGRATE_MULTIFD_COMPRESSION),
|
2020-01-23 16:08:52 +00:00
|
|
|
DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
|
|
|
|
parameters.multifd_zlib_level,
|
|
|
|
DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
|
2020-01-23 16:41:36 +00:00
|
|
|
DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
|
|
|
|
parameters.multifd_zstd_level,
|
|
|
|
DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
|
2017-10-05 19:30:10 +00:00
|
|
|
DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
|
|
|
|
parameters.xbzrle_cache_size,
|
|
|
|
DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
|
2018-06-13 10:26:40 +00:00
|
|
|
DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
|
|
|
|
parameters.max_postcopy_bandwidth,
|
|
|
|
DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
|
2018-08-01 13:00:20 +00:00
|
|
|
DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
|
|
|
|
parameters.max_cpu_throttle,
|
|
|
|
DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
|
2019-02-27 13:24:06 +00:00
|
|
|
DEFINE_PROP_SIZE("announce-initial", MigrationState,
|
|
|
|
parameters.announce_initial,
|
|
|
|
DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
|
|
|
|
DEFINE_PROP_SIZE("announce-max", MigrationState,
|
|
|
|
parameters.announce_max,
|
|
|
|
DEFAULT_MIGRATE_ANNOUNCE_MAX),
|
|
|
|
DEFINE_PROP_SIZE("announce-rounds", MigrationState,
|
|
|
|
parameters.announce_rounds,
|
|
|
|
DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
|
|
|
|
DEFINE_PROP_SIZE("announce-step", MigrationState,
|
|
|
|
parameters.announce_step,
|
|
|
|
DEFAULT_MIGRATE_ANNOUNCE_STEP),
|
2017-07-18 03:39:03 +00:00
|
|
|
|
|
|
|
/* Migration capabilities */
|
|
|
|
DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
|
|
|
|
DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
|
2019-02-06 12:54:06 +00:00
|
|
|
DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
|
2017-07-18 03:39:03 +00:00
|
|
|
|
2017-06-27 04:10:14 +00:00
|
|
|
DEFINE_PROP_END_OF_LIST(),
|
|
|
|
};
|
|
|
|
|
2017-06-27 04:10:13 +00:00
|
|
|
static void migration_class_init(ObjectClass *klass, void *data)
|
|
|
|
{
|
|
|
|
DeviceClass *dc = DEVICE_CLASS(klass);
|
|
|
|
|
|
|
|
dc->user_creatable = false;
|
2020-01-10 15:30:32 +00:00
|
|
|
device_class_set_props(dc, migration_properties);
|
2017-06-27 04:10:13 +00:00
|
|
|
}
|
|
|
|
|
2017-08-01 16:04:18 +00:00
|
|
|
static void migration_instance_finalize(Object *obj)
|
|
|
|
{
|
|
|
|
MigrationState *ms = MIGRATION_OBJ(obj);
|
|
|
|
MigrationParameters *params = &ms->parameters;
|
|
|
|
|
2017-09-05 10:50:22 +00:00
|
|
|
qemu_mutex_destroy(&ms->error_mutex);
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_destroy(&ms->qemu_file_lock);
|
2017-08-01 16:04:18 +00:00
|
|
|
g_free(params->tls_hostname);
|
|
|
|
g_free(params->tls_creds);
|
2019-10-29 11:49:02 +00:00
|
|
|
qemu_sem_destroy(&ms->wait_unplug_sem);
|
2018-06-13 10:26:41 +00:00
|
|
|
qemu_sem_destroy(&ms->rate_limit_sem);
|
2017-10-20 09:05:52 +00:00
|
|
|
qemu_sem_destroy(&ms->pause_sem);
|
2018-05-02 10:47:19 +00:00
|
|
|
qemu_sem_destroy(&ms->postcopy_pause_sem);
|
2018-05-02 10:47:21 +00:00
|
|
|
qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
|
migration: synchronize dirty bitmap for resume
This patch implements the first part of core RAM resume logic for
postcopy. ram_resume_prepare() is provided for the work.
When the migration is interrupted by network failure, the dirty bitmap
on the source side will be meaningless, because even if the dirty bit is
cleared, it is still possible that the sent page was lost along the way
to the destination. Here, instead of continuing the migration with the old
dirty bitmap on the source, we ask the destination side to send back its
received bitmap, then invert it to be our initial dirty bitmap.
The source side send thread will issue the MIG_CMD_RECV_BITMAP requests,
once per ramblock, to ask for the received bitmap. On destination side,
MIG_RP_MSG_RECV_BITMAP will be issued, along with the requested bitmap.
Data will be received on the return-path thread of source, and the main
migration thread will be notified when all the ramblock bitmaps are
synchronized.
Reviewed-by: Dr. David Alan Gilbert <dgilbert@redhat.com>
Signed-off-by: Peter Xu <peterx@redhat.com>
Message-Id: <20180502104740.12123-17-peterx@redhat.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2018-05-02 10:47:32 +00:00
|
|
|
qemu_sem_destroy(&ms->rp_state.rp_sem);
|
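The bitmap inversion that the commit message above describes amounts to taking the complement of the received bitmap: pages the destination has not confirmed must be treated as dirty again. A stand-alone sketch follows; the function name is illustrative, not the real ram.c helper, which uses QEMU's bitmap utilities.

#include <stddef.h>

/*
 * Illustrative sketch: build the initial dirty bitmap of a resumed
 * postcopy migration as the complement of the destination's received
 * bitmap (received -> clean, missing -> dirty).
 */
static void resume_init_dirty_bitmap(unsigned long *dirty,
                                     const unsigned long *received,
                                     size_t nr_words)
{
    size_t i;

    for (i = 0; i < nr_words; i++) {
        dirty[i] = ~received[i];
    }
}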
2018-03-06 17:09:59 +00:00
|
|
|
error_free(ms->error);
|
2017-08-01 16:04:18 +00:00
|
|
|
}
|
|
|
|
|
2017-06-27 04:10:13 +00:00
|
|
|
static void migration_instance_init(Object *obj)
|
|
|
|
{
|
|
|
|
MigrationState *ms = MIGRATION_OBJ(obj);
|
2017-07-18 03:39:06 +00:00
|
|
|
MigrationParameters *params = &ms->parameters;
|
2017-06-27 04:10:13 +00:00
|
|
|
|
|
|
|
ms->state = MIGRATION_STATUS_NONE;
|
|
|
|
ms->mbps = -1;
|
2019-01-11 06:37:30 +00:00
|
|
|
ms->pages_per_second = -1;
|
2017-10-20 09:05:52 +00:00
|
|
|
qemu_sem_init(&ms->pause_sem, 0);
|
2017-09-05 10:50:22 +00:00
|
|
|
qemu_mutex_init(&ms->error_mutex);
|
2017-07-18 03:39:06 +00:00
|
|
|
|
|
|
|
params->tls_hostname = g_strdup("");
|
|
|
|
params->tls_creds = g_strdup("");
|
|
|
|
|
|
|
|
/* Set has_* up only for parameter checks */
|
|
|
|
params->has_compress_level = true;
|
|
|
|
params->has_compress_threads = true;
|
|
|
|
params->has_decompress_threads = true;
|
2020-02-24 02:31:42 +00:00
|
|
|
params->has_throttle_trigger_threshold = true;
|
2017-07-18 03:39:06 +00:00
|
|
|
params->has_cpu_throttle_initial = true;
|
|
|
|
params->has_cpu_throttle_increment = true;
|
2020-04-13 10:15:08 +00:00
|
|
|
params->has_cpu_throttle_tailslow = true;
|
2017-07-18 03:39:06 +00:00
|
|
|
params->has_max_bandwidth = true;
|
|
|
|
params->has_downtime_limit = true;
|
|
|
|
params->has_x_checkpoint_delay = true;
|
|
|
|
params->has_block_incremental = true;
|
2019-02-06 12:54:06 +00:00
|
|
|
params->has_multifd_channels = true;
|
2019-01-16 09:35:55 +00:00
|
|
|
params->has_multifd_compression = true;
|
2020-01-23 16:08:52 +00:00
|
|
|
params->has_multifd_zlib_level = true;
|
2020-01-23 16:41:36 +00:00
|
|
|
params->has_multifd_zstd_level = true;
|
2017-10-05 19:30:10 +00:00
|
|
|
params->has_xbzrle_cache_size = true;
|
2018-06-13 10:26:40 +00:00
|
|
|
params->has_max_postcopy_bandwidth = true;
|
2018-08-01 13:00:20 +00:00
|
|
|
params->has_max_cpu_throttle = true;
|
2019-02-27 13:24:06 +00:00
|
|
|
params->has_announce_initial = true;
|
|
|
|
params->has_announce_max = true;
|
|
|
|
params->has_announce_rounds = true;
|
|
|
|
params->has_announce_step = true;
|
2018-05-02 10:47:19 +00:00
|
|
|
|
|
|
|
qemu_sem_init(&ms->postcopy_pause_sem, 0);
|
2018-05-02 10:47:21 +00:00
|
|
|
qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
|
2018-05-02 10:47:32 +00:00
|
|
|
qemu_sem_init(&ms->rp_state.rp_sem, 0);
|
2018-06-13 10:26:41 +00:00
|
|
|
qemu_sem_init(&ms->rate_limit_sem, 0);
|
2019-10-29 11:49:02 +00:00
|
|
|
qemu_sem_init(&ms->wait_unplug_sem, 0);
|
2018-05-02 10:47:38 +00:00
|
|
|
qemu_mutex_init(&ms->qemu_file_lock);
|
2017-07-18 03:39:06 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Return true if check pass, false otherwise. Error will be put
|
|
|
|
* inside errp if provided.
|
|
|
|
*/
|
|
|
|
static bool migration_object_check(MigrationState *ms, Error **errp)
|
|
|
|
{
|
2017-07-18 03:39:10 +00:00
|
|
|
MigrationCapabilityStatusList *head = NULL;
|
|
|
|
/* Assuming all off */
|
|
|
|
bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret;
|
|
|
|
int i;
|
|
|
|
|
2017-07-18 03:39:06 +00:00
|
|
|
if (!migrate_params_check(&ms->parameters, errp)) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
2017-07-18 03:39:10 +00:00
|
|
|
for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
|
|
|
|
if (ms->enabled_capabilities[i]) {
|
|
|
|
head = migrate_cap_add(head, i, true);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
ret = migrate_caps_check(cap_list, head, errp);
|
|
|
|
|
|
|
|
/* It works with head == NULL */
|
|
|
|
qapi_free_MigrationCapabilityStatusList(head);
|
|
|
|
|
|
|
|
return ret;
|
2017-06-27 04:10:13 +00:00
|
|
|
}
|
|
|
|
|
|
|
|
static const TypeInfo migration_type = {
|
|
|
|
.name = TYPE_MIGRATION,
|
2017-06-28 07:15:44 +00:00
|
|
|
/*
|
2017-07-05 08:21:23 +00:00
|
|
|
* NOTE: TYPE_MIGRATION is not really a device, as the object is
|
2020-06-10 05:32:19 +00:00
|
|
|
* not created using qdev_new(), it is not attached to the qdev
|
2017-07-05 08:21:23 +00:00
|
|
|
* device tree, and it is never realized.
|
|
|
|
*
|
|
|
|
* TODO: Make this TYPE_OBJECT once QOM provides something like
|
|
|
|
* TYPE_DEVICE's "-global" properties.
|
2017-06-28 07:15:44 +00:00
|
|
|
*/
|
2017-06-27 04:10:13 +00:00
|
|
|
.parent = TYPE_DEVICE,
|
|
|
|
.class_init = migration_class_init,
|
|
|
|
.class_size = sizeof(MigrationClass),
|
|
|
|
.instance_size = sizeof(MigrationState),
|
|
|
|
.instance_init = migration_instance_init,
|
2017-08-01 16:04:18 +00:00
|
|
|
.instance_finalize = migration_instance_finalize,
|
2017-06-27 04:10:13 +00:00
|
|
|
};
|
|
|
|
|
|
|
|
static void register_migration_types(void)
|
|
|
|
{
|
|
|
|
type_register_static(&migration_type);
|
|
|
|
}
|
|
|
|
|
|
|
|
type_init(register_migration_types);
|