2008-10-13 11:12:02 +08:00
|
|
|
/*
|
|
|
|
* QEMU live migration
|
|
|
|
*
|
|
|
|
* Copyright IBM, Corp. 2008
|
|
|
|
*
|
|
|
|
* Authors:
|
|
|
|
* Anthony Liguori <aliguori@us.ibm.com>
|
|
|
|
*
|
|
|
|
* This work is licensed under the terms of the GNU GPL, version 2. See
|
|
|
|
* the COPYING file in the top-level directory.
|
|
|
|
*
|
2012-01-14 00:44:23 +08:00
|
|
|
* Contributions after 2012-01-13 are licensed under the terms of the
|
|
|
|
* GNU GPL, version 2 or (at your option) any later version.
|
2008-10-13 11:12:02 +08:00
|
|
|
*/
|
|
|
|
|
|
|
|
#include "qemu-common.h"
|
2015-03-18 01:29:20 +08:00
|
|
|
#include "qemu/error-report.h"
|
2013-08-21 23:02:47 +08:00
|
|
|
#include "qemu/main-loop.h"
|
2012-12-18 01:19:50 +08:00
|
|
|
#include "migration/migration.h"
|
2012-10-03 20:18:33 +08:00
|
|
|
#include "migration/qemu-file.h"
|
2012-12-18 01:20:04 +08:00
|
|
|
#include "sysemu/sysemu.h"
|
2012-12-18 01:19:44 +08:00
|
|
|
#include "block/block.h"
|
2015-03-18 00:22:46 +08:00
|
|
|
#include "qapi/qmp/qerror.h"
|
2012-12-18 01:20:00 +08:00
|
|
|
#include "qemu/sockets.h"
|
2015-07-09 14:55:38 +08:00
|
|
|
#include "qemu/rcu.h"
|
2012-12-18 01:19:50 +08:00
|
|
|
#include "migration/block.h"
|
2012-07-23 11:45:29 +08:00
|
|
|
#include "qemu/thread.h"
|
2011-09-14 04:37:16 +08:00
|
|
|
#include "qmp-commands.h"
|
2013-02-23 00:36:19 +08:00
|
|
|
#include "trace.h"
|
2014-10-08 16:58:10 +08:00
|
|
|
#include "qapi/util.h"
|
2015-05-20 18:16:15 +08:00
|
|
|
#include "qapi-event.h"
|
2015-09-09 01:12:35 +08:00
|
|
|
#include "qom/cpu.h"
|
2008-11-12 00:46:33 +08:00
|
|
|
|
2015-09-09 01:12:37 +08:00
|
|
|
#define MAX_THROTTLE (32 << 20) /* Migration transfer speed throttling */
|
2008-10-13 11:12:02 +08:00
|
|
|
|
2012-12-19 17:40:48 +08:00
|
|
|
/* Amount of time to allocate to each "chunk" of bandwidth-throttled
|
|
|
|
* data. */
|
|
|
|
#define BUFFER_DELAY 100
|
|
|
|
#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
|
|
|
|
|
2015-03-23 16:32:17 +08:00
|
|
|
/* Default compression thread count */
|
|
|
|
#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
|
2015-03-23 16:32:18 +08:00
|
|
|
/* Default decompression thread count, usually decompression is at
|
|
|
|
* least 4 times as fast as compression.*/
|
|
|
|
#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
|
2015-03-23 16:32:17 +08:00
|
|
|
/*0: means nocompress, 1: best speed, ... 9: best compress ratio */
|
|
|
|
#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
|
2015-09-09 01:12:34 +08:00
|
|
|
/* Define default autoconverge cpu throttle migration parameters */
|
|
|
|
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL 20
|
|
|
|
#define DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT 10
|
2015-03-23 16:32:17 +08:00
|
|
|
|
2012-08-07 02:42:53 +08:00
|
|
|
/* Migration XBZRLE default cache size */
|
|
|
|
#define DEFAULT_MIGRATE_CACHE_SIZE (64 * 1024 * 1024)
|
|
|
|
|
2010-12-14 00:30:12 +08:00
|
|
|
static NotifierList migration_state_notifiers =
|
|
|
|
NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
|
|
|
|
|
2015-02-19 19:40:27 +08:00
|
|
|
static bool deferred_incoming;
|
|
|
|
|
2011-10-05 19:50:43 +08:00
|
|
|
/* When we add fault tolerance, we could have several
|
|
|
|
migrations at once. For now we don't need to add
|
|
|
|
dynamic creation of migration */
|
|
|
|
|
2015-05-21 20:24:14 +08:00
|
|
|
/* For outgoing */
|
2012-08-13 15:42:49 +08:00
|
|
|
MigrationState *migrate_get_current(void)
|
2011-10-05 19:50:43 +08:00
|
|
|
{
|
|
|
|
static MigrationState current_migration = {
|
2015-03-13 16:08:38 +08:00
|
|
|
.state = MIGRATION_STATUS_NONE,
|
2011-02-23 07:33:19 +08:00
|
|
|
.bandwidth_limit = MAX_THROTTLE,
|
2012-08-07 02:42:53 +08:00
|
|
|
.xbzrle_cache_size = DEFAULT_MIGRATE_CACHE_SIZE,
|
2013-06-26 09:35:30 +08:00
|
|
|
.mbps = -1,
|
2015-03-23 16:32:27 +08:00
|
|
|
.parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] =
|
|
|
|
DEFAULT_MIGRATE_COMPRESS_LEVEL,
|
|
|
|
.parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
|
|
|
|
DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT,
|
|
|
|
.parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
|
|
|
|
DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT,
|
2015-09-09 01:12:34 +08:00
|
|
|
.parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
|
|
|
|
DEFAULT_MIGRATE_X_CPU_THROTTLE_INITIAL,
|
|
|
|
.parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
|
|
|
|
DEFAULT_MIGRATE_X_CPU_THROTTLE_INCREMENT,
|
2011-10-05 19:50:43 +08:00
|
|
|
};
|
|
|
|
|
|
|
|
return ¤t_migration;
|
|
|
|
}
|
|
|
|
|
2015-05-21 20:24:14 +08:00
|
|
|
/* For incoming */
|
|
|
|
static MigrationIncomingState *mis_current;
|
|
|
|
|
|
|
|
MigrationIncomingState *migration_incoming_get_current(void)
|
|
|
|
{
|
|
|
|
return mis_current;
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
 * Allocate and initialise the incoming-migration singleton around the
 * stream @f.  Caller owns the lifetime via migration_incoming_state_destroy().
 */
MigrationIncomingState *migration_incoming_state_new(QEMUFile* f)
{
    mis_current = g_new0(MigrationIncomingState, 1);
    mis_current->from_src_file = f;
    /* Per-device loadvm handlers registered during qemu_loadvm_state(). */
    QLIST_INIT(&mis_current->loadvm_handlers);
    /* Serialises writes on the return path (migrate_send_rp_message). */
    qemu_mutex_init(&mis_current->rp_mutex);
    qemu_event_init(&mis_current->main_thread_load_event, false);

    return mis_current;
}
|
|
|
|
|
|
|
|
void migration_incoming_state_destroy(void)
|
|
|
|
{
|
2015-11-06 02:10:50 +08:00
|
|
|
qemu_event_destroy(&mis_current->main_thread_load_event);
|
2015-05-21 20:24:16 +08:00
|
|
|
loadvm_free_handlers(mis_current);
|
2015-05-21 20:24:14 +08:00
|
|
|
g_free(mis_current);
|
|
|
|
mis_current = NULL;
|
|
|
|
}
|
|
|
|
|
2014-10-08 16:58:10 +08:00
|
|
|
|
|
|
|
typedef struct {
|
2014-10-08 19:58:24 +08:00
|
|
|
bool optional;
|
2014-10-08 16:58:10 +08:00
|
|
|
uint32_t size;
|
|
|
|
uint8_t runstate[100];
|
2015-07-08 19:56:26 +08:00
|
|
|
RunState state;
|
|
|
|
bool received;
|
2014-10-08 16:58:10 +08:00
|
|
|
} GlobalState;
|
|
|
|
|
|
|
|
static GlobalState global_state;
|
|
|
|
|
2015-07-15 15:53:46 +08:00
|
|
|
int global_state_store(void)
|
2014-10-08 16:58:10 +08:00
|
|
|
{
|
|
|
|
if (!runstate_store((char *)global_state.runstate,
|
|
|
|
sizeof(global_state.runstate))) {
|
|
|
|
error_report("runstate name too big: %s", global_state.runstate);
|
|
|
|
trace_migrate_state_too_big();
|
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
2015-08-03 22:29:19 +08:00
|
|
|
void global_state_store_running(void)
|
|
|
|
{
|
|
|
|
const char *state = RunState_lookup[RUN_STATE_RUNNING];
|
|
|
|
strncpy((char *)global_state.runstate,
|
|
|
|
state, sizeof(global_state.runstate));
|
|
|
|
}
|
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
static bool global_state_received(void)
|
2014-10-08 16:58:10 +08:00
|
|
|
{
|
2015-07-08 19:56:26 +08:00
|
|
|
return global_state.received;
|
|
|
|
}
|
|
|
|
|
|
|
|
static RunState global_state_get_runstate(void)
|
|
|
|
{
|
|
|
|
return global_state.state;
|
2014-10-08 16:58:10 +08:00
|
|
|
}
|
|
|
|
|
2014-10-08 19:58:24 +08:00
|
|
|
void global_state_set_optional(void)
|
|
|
|
{
|
|
|
|
global_state.optional = true;
|
|
|
|
}
|
|
|
|
|
|
|
|
static bool global_state_needed(void *opaque)
|
|
|
|
{
|
|
|
|
GlobalState *s = opaque;
|
|
|
|
char *runstate = (char *)s->runstate;
|
|
|
|
|
|
|
|
/* If it is not optional, it is mandatory */
|
|
|
|
|
|
|
|
if (s->optional == false) {
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* If state is running or paused, it is not needed */
|
|
|
|
|
|
|
|
if (strcmp(runstate, "running") == 0 ||
|
|
|
|
strcmp(runstate, "paused") == 0) {
|
|
|
|
return false;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* for any other state it is needed */
|
|
|
|
return true;
|
|
|
|
}
|
|
|
|
|
2014-10-08 16:58:10 +08:00
|
|
|
static int global_state_post_load(void *opaque, int version_id)
|
|
|
|
{
|
|
|
|
GlobalState *s = opaque;
|
2015-07-08 19:56:26 +08:00
|
|
|
Error *local_err = NULL;
|
|
|
|
int r;
|
2014-10-08 16:58:10 +08:00
|
|
|
char *runstate = (char *)s->runstate;
|
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
s->received = true;
|
2014-10-08 16:58:10 +08:00
|
|
|
trace_migrate_global_state_post_load(runstate);
|
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
r = qapi_enum_parse(RunState_lookup, runstate, RUN_STATE_MAX,
|
2014-10-08 16:58:10 +08:00
|
|
|
-1, &local_err);
|
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
if (r == -1) {
|
|
|
|
if (local_err) {
|
|
|
|
error_report_err(local_err);
|
2014-10-08 16:58:10 +08:00
|
|
|
}
|
2015-07-08 19:56:26 +08:00
|
|
|
return -EINVAL;
|
2014-10-08 16:58:10 +08:00
|
|
|
}
|
2015-07-08 19:56:26 +08:00
|
|
|
s->state = r;
|
2014-10-08 16:58:10 +08:00
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
return 0;
|
2014-10-08 16:58:10 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
static void global_state_pre_save(void *opaque)
|
|
|
|
{
|
|
|
|
GlobalState *s = opaque;
|
|
|
|
|
|
|
|
trace_migrate_global_state_pre_save((char *)s->runstate);
|
|
|
|
s->size = strlen((char *)s->runstate) + 1;
|
|
|
|
}
|
|
|
|
|
|
|
|
static const VMStateDescription vmstate_globalstate = {
|
|
|
|
.name = "globalstate",
|
|
|
|
.version_id = 1,
|
|
|
|
.minimum_version_id = 1,
|
|
|
|
.post_load = global_state_post_load,
|
|
|
|
.pre_save = global_state_pre_save,
|
2014-10-08 19:58:24 +08:00
|
|
|
.needed = global_state_needed,
|
2014-10-08 16:58:10 +08:00
|
|
|
.fields = (VMStateField[]) {
|
|
|
|
VMSTATE_UINT32(size, GlobalState),
|
|
|
|
VMSTATE_BUFFER(runstate, GlobalState),
|
|
|
|
VMSTATE_END_OF_LIST()
|
|
|
|
},
|
|
|
|
};
|
|
|
|
|
|
|
|
void register_global_state(void)
|
|
|
|
{
|
|
|
|
/* We would use it independently that we receive it */
|
|
|
|
strcpy((char *)&global_state.runstate, "");
|
2015-07-08 19:56:26 +08:00
|
|
|
global_state.received = false;
|
2014-10-08 16:58:10 +08:00
|
|
|
vmstate_register(NULL, 0, &vmstate_globalstate, &global_state);
|
|
|
|
}
|
|
|
|
|
2015-07-07 20:44:05 +08:00
|
|
|
static void migrate_generate_event(int new_state)
|
|
|
|
{
|
|
|
|
if (migrate_use_events()) {
|
|
|
|
qapi_event_send_migration(new_state, &error_abort);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-02-19 19:40:27 +08:00
|
|
|
/*
|
|
|
|
* Called on -incoming with a defer: uri.
|
|
|
|
* The migration can be started later after any parameters have been
|
|
|
|
* changed.
|
|
|
|
*/
|
|
|
|
static void deferred_incoming_migration(Error **errp)
|
|
|
|
{
|
|
|
|
if (deferred_incoming) {
|
|
|
|
error_setg(errp, "Incoming migration already deferred");
|
|
|
|
}
|
|
|
|
deferred_incoming = true;
|
|
|
|
}
|
|
|
|
|
2012-10-03 00:21:18 +08:00
|
|
|
/*
 * Start listening for an incoming migration on @uri.  Dispatches on the
 * URI scheme (defer/tcp/rdma/exec/unix/fd); errors land in @errp.
 */
void qemu_start_incoming_migration(const char *uri, Error **errp)
{
    const char *p;

    qapi_event_send_migration(MIGRATION_STATUS_SETUP, &error_abort);
    if (!strcmp(uri, "defer")) {
        deferred_incoming_migration(errp);
    } else if (strstart(uri, "tcp:", &p)) {
        tcp_start_incoming_migration(p, errp);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_incoming_migration(p, errp);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_incoming_migration(p, errp);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_incoming_migration(p, errp);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_incoming_migration(p, errp);
#endif
    } else {
        error_setg(errp, "unknown migration protocol: %s", uri);
    }
}
|
|
|
|
|
2012-08-07 16:57:43 +08:00
|
|
|
static void process_incoming_migration_co(void *opaque)
|
2010-06-09 20:10:55 +08:00
|
|
|
{
|
2012-08-07 16:57:43 +08:00
|
|
|
QEMUFile *f = opaque;
|
2014-03-12 22:59:16 +08:00
|
|
|
Error *local_err = NULL;
|
2012-08-07 16:51:51 +08:00
|
|
|
int ret;
|
|
|
|
|
2015-05-21 20:24:14 +08:00
|
|
|
migration_incoming_state_new(f);
|
2015-05-20 23:15:42 +08:00
|
|
|
migrate_generate_event(MIGRATION_STATUS_ACTIVE);
|
2012-08-07 16:51:51 +08:00
|
|
|
ret = qemu_loadvm_state(f);
|
2015-05-21 20:24:14 +08:00
|
|
|
|
2012-08-07 16:51:51 +08:00
|
|
|
qemu_fclose(f);
|
2014-01-31 02:08:35 +08:00
|
|
|
free_xbzrle_decoded_buf();
|
2015-05-21 20:24:14 +08:00
|
|
|
migration_incoming_state_destroy();
|
|
|
|
|
2012-08-07 16:51:51 +08:00
|
|
|
if (ret < 0) {
|
2015-05-20 23:15:42 +08:00
|
|
|
migrate_generate_event(MIGRATION_STATUS_FAILED);
|
2014-06-10 17:29:16 +08:00
|
|
|
error_report("load of migration failed: %s", strerror(-ret));
|
2015-03-23 16:32:18 +08:00
|
|
|
migrate_decompress_threads_join();
|
2013-04-17 05:50:41 +08:00
|
|
|
exit(EXIT_FAILURE);
|
2010-06-09 20:10:55 +08:00
|
|
|
}
|
|
|
|
|
2011-11-15 05:09:45 +08:00
|
|
|
/* Make sure all file formats flush their mutable metadata */
|
2014-03-12 22:59:16 +08:00
|
|
|
bdrv_invalidate_cache_all(&local_err);
|
|
|
|
if (local_err) {
|
2015-10-13 19:21:27 +08:00
|
|
|
migrate_generate_event(MIGRATION_STATUS_FAILED);
|
2015-02-19 02:21:52 +08:00
|
|
|
error_report_err(local_err);
|
2015-03-23 16:32:18 +08:00
|
|
|
migrate_decompress_threads_join();
|
2014-03-12 22:59:16 +08:00
|
|
|
exit(EXIT_FAILURE);
|
|
|
|
}
|
2011-11-15 05:09:45 +08:00
|
|
|
|
2015-10-14 20:07:19 +08:00
|
|
|
/*
|
|
|
|
* This must happen after all error conditions are dealt with and
|
|
|
|
* we're sure the VM is going to be running on this host.
|
|
|
|
*/
|
|
|
|
qemu_announce_self();
|
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
/* If global state section was not received or we are in running
|
|
|
|
state, we need to obey autostart. Any other state is set with
|
|
|
|
runstate_set. */
|
2014-10-08 16:58:10 +08:00
|
|
|
|
2015-07-08 19:56:26 +08:00
|
|
|
if (!global_state_received() ||
|
|
|
|
global_state_get_runstate() == RUN_STATE_RUNNING) {
|
2014-10-08 16:58:10 +08:00
|
|
|
if (autostart) {
|
|
|
|
vm_start();
|
|
|
|
} else {
|
|
|
|
runstate_set(RUN_STATE_PAUSED);
|
|
|
|
}
|
2015-07-08 19:56:26 +08:00
|
|
|
} else {
|
|
|
|
runstate_set(global_state_get_runstate());
|
2011-07-30 02:04:45 +08:00
|
|
|
}
|
2015-03-23 16:32:18 +08:00
|
|
|
migrate_decompress_threads_join();
|
2015-10-13 19:21:27 +08:00
|
|
|
/*
|
|
|
|
* This must happen after any state changes since as soon as an external
|
|
|
|
* observer sees this event they might start to prod at the VM assuming
|
|
|
|
* it's ready to use.
|
|
|
|
*/
|
|
|
|
migrate_generate_event(MIGRATION_STATUS_COMPLETED);
|
2010-06-09 20:10:55 +08:00
|
|
|
}
|
|
|
|
|
2012-08-07 16:57:43 +08:00
|
|
|
/*
 * Entry point for incoming migration: spin up decompression threads and
 * hand the stream to the loading coroutine.
 */
void process_incoming_migration(QEMUFile *f)
{
    Coroutine *co = qemu_coroutine_create(process_incoming_migration_co);
    int fd = qemu_get_fd(f);

    assert(fd != -1);
    migrate_decompress_threads_create();
    /* The coroutine reads the stream non-blockingly, yielding on EAGAIN. */
    qemu_set_nonblock(fd);
    qemu_coroutine_enter(co, f);
}
|
|
|
|
|
2015-11-06 02:10:47 +08:00
|
|
|
/*
|
|
|
|
* Send a message on the return channel back to the source
|
|
|
|
* of the migration.
|
|
|
|
*/
|
|
|
|
void migrate_send_rp_message(MigrationIncomingState *mis,
|
|
|
|
enum mig_rp_message_type message_type,
|
|
|
|
uint16_t len, void *data)
|
|
|
|
{
|
|
|
|
trace_migrate_send_rp_message((int)message_type, len);
|
|
|
|
qemu_mutex_lock(&mis->rp_mutex);
|
|
|
|
qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
|
|
|
|
qemu_put_be16(mis->to_src_file, len);
|
|
|
|
qemu_put_buffer(mis->to_src_file, data, len);
|
|
|
|
qemu_fflush(mis->to_src_file);
|
|
|
|
qemu_mutex_unlock(&mis->rp_mutex);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send a 'SHUT' message on the return channel with the given value
|
|
|
|
* to indicate that we've finished with the RP. Non-0 value indicates
|
|
|
|
* error.
|
|
|
|
*/
|
|
|
|
void migrate_send_rp_shut(MigrationIncomingState *mis,
|
|
|
|
uint32_t value)
|
|
|
|
{
|
|
|
|
uint32_t buf;
|
|
|
|
|
|
|
|
buf = cpu_to_be32(value);
|
|
|
|
migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
|
|
|
|
}
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Send a 'PONG' message on the return channel with the given value
|
|
|
|
* (normally in response to a 'PING')
|
|
|
|
*/
|
|
|
|
void migrate_send_rp_pong(MigrationIncomingState *mis,
|
|
|
|
uint32_t value)
|
|
|
|
{
|
|
|
|
uint32_t buf;
|
|
|
|
|
|
|
|
buf = cpu_to_be32(value);
|
|
|
|
migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
|
|
|
|
}
|
|
|
|
|
2009-05-29 03:22:57 +08:00
|
|
|
/* amount of nanoseconds we are willing to wait for migration to be down.
 * the choice of nanoseconds is because it is the maximum resolution that
 * get_clock() can achieve. It is an internal measure. All user-visible
 * units must be in seconds */
static uint64_t max_downtime = 300000000;

/* Maximum tolerated migration downtime, in nanoseconds. */
uint64_t migrate_max_downtime(void)
{
    return max_downtime;
}
|
|
|
|
|
2012-08-07 02:42:47 +08:00
|
|
|
/*
 * QMP query-migrate-capabilities: build a linked list with the current
 * enabled/disabled state of every migration capability.  Caller frees.
 */
MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
    MigrationCapabilityStatusList *caps;
    MigrationState *s = migrate_get_current();
    int i;

    caps = NULL; /* silence compiler warning */
    for (i = 0; i < MIGRATION_CAPABILITY_MAX; i++) {
        /* Append to the tail, keeping 'caps' at the last node. */
        if (head == NULL) {
            head = g_malloc0(sizeof(*caps));
            caps = head;
        } else {
            caps->next = g_malloc0(sizeof(*caps));
            caps = caps->next;
        }
        caps->value =
            g_malloc(sizeof(*caps->value));
        caps->value->capability = i;
        caps->value->state = s->enabled_capabilities[i];
    }

    return head;
}
|
|
|
|
|
2015-03-23 16:32:28 +08:00
|
|
|
/*
 * QMP query-migrate-parameters: snapshot the current tunable parameters.
 * Caller frees the returned structure.
 */
MigrationParameters *qmp_query_migrate_parameters(Error **errp)
{
    MigrationState *s = migrate_get_current();
    MigrationParameters *params = g_malloc0(sizeof(*params));

    params->compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
    params->compress_threads =
            s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
    params->decompress_threads =
            s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
    params->x_cpu_throttle_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    params->x_cpu_throttle_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    return params;
}
|
|
|
|
|
2015-11-06 02:10:48 +08:00
|
|
|
/*
|
|
|
|
* Return true if we're already in the middle of a migration
|
|
|
|
* (i.e. any of the active or setup states)
|
|
|
|
*/
|
|
|
|
static bool migration_is_setup_or_active(int state)
|
|
|
|
{
|
|
|
|
switch (state) {
|
|
|
|
case MIGRATION_STATUS_ACTIVE:
|
|
|
|
case MIGRATION_STATUS_SETUP:
|
|
|
|
return true;
|
|
|
|
|
|
|
|
default:
|
|
|
|
return false;
|
|
|
|
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2012-08-07 02:42:57 +08:00
|
|
|
/* Attach XBZRLE cache statistics to @info when XBZRLE is in use. */
static void get_xbzrle_cache_stats(MigrationInfo *info)
{
    if (!migrate_use_xbzrle()) {
        return;
    }

    info->has_xbzrle_cache = true;
    info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
    info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
    info->xbzrle_cache->bytes = xbzrle_mig_bytes_transferred();
    info->xbzrle_cache->pages = xbzrle_mig_pages_transferred();
    info->xbzrle_cache->cache_miss = xbzrle_mig_pages_cache_miss();
    info->xbzrle_cache->cache_miss_rate = xbzrle_mig_cache_miss_rate();
    info->xbzrle_cache->overflow = xbzrle_mig_pages_overflow();
}
|
|
|
|
|
2011-09-14 04:37:16 +08:00
|
|
|
MigrationInfo *qmp_query_migrate(Error **errp)
|
2008-10-13 11:12:02 +08:00
|
|
|
{
|
2011-09-14 04:37:16 +08:00
|
|
|
MigrationInfo *info = g_malloc0(sizeof(*info));
|
2011-10-05 19:50:43 +08:00
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
|
|
|
|
switch (s->state) {
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_NONE:
|
2011-10-05 19:50:43 +08:00
|
|
|
/* no migration has happened ever */
|
|
|
|
break;
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_SETUP:
|
rdma: introduce MIG_STATE_NONE and change MIG_STATE_SETUP state transition
As described in the previous patch, until now, the MIG_STATE_SETUP
state was not really a 'formal' state. It has been used as a 'zero' state
(what we're calling 'NONE' here) and QEMU has been unconditionally transitioning
into this state when the QMP migration command was called. Instead we want to
introduce MIG_STATE_NONE, which is our starting state in the state machine, and
then immediately transition into the MIG_STATE_SETUP state when the QMP migrate
command is issued.
In order to do this, we must delay the transition into MIG_STATE_ACTIVE until
later in the migration_thread(). This is done to be able to timestamp the amount of
time spent in the SETUP state for proper accounting to the user during
an RDMA migration.
Furthermore, the management software, until now, has never been aware of the
existence of the SETUP state whatsoever. This must change, because, timing of this
state implies that the state actually exists.
These two patches cannot be separated because the 'query_migrate' QMP
switch statement needs to know how to handle this new state transition.
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2013-07-22 22:01:57 +08:00
|
|
|
info->has_status = true;
|
2013-07-22 22:01:58 +08:00
|
|
|
info->has_total_time = false;
|
rdma: introduce MIG_STATE_NONE and change MIG_STATE_SETUP state transition
As described in the previous patch, until now, the MIG_STATE_SETUP
state was not really a 'formal' state. It has been used as a 'zero' state
(what we're calling 'NONE' here) and QEMU has been unconditionally transitioning
into this state when the QMP migration command was called. Instead we want to
introduce MIG_STATE_NONE, which is our starting state in the state machine, and
then immediately transition into the MIG_STATE_SETUP state when the QMP migrate
command is issued.
In order to do this, we must delay the transition into MIG_STATE_ACTIVE until
later in the migration_thread(). This is done to be able to timestamp the amount of
time spent in the SETUP state for proper accounting to the user during
an RDMA migration.
Furthermore, the management software, until now, has never been aware of the
existence of the SETUP state whatsoever. This must change, because, timing of this
state implies that the state actually exists.
These two patches cannot be separated because the 'query_migrate' QMP
switch statement needs to know how to handle this new state transition.
Reviewed-by: Juan Quintela <quintela@redhat.com>
Tested-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Michael R. Hines <mrhines@us.ibm.com>
Signed-off-by: Juan Quintela <quintela@redhat.com>
2013-07-22 22:01:57 +08:00
|
|
|
break;
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_ACTIVE:
|
|
|
|
case MIGRATION_STATUS_CANCELLING:
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_status = true;
|
2012-08-18 19:17:10 +08:00
|
|
|
info->has_total_time = true;
|
2013-08-21 23:03:08 +08:00
|
|
|
info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME)
|
2012-08-18 19:17:10 +08:00
|
|
|
- s->total_time;
|
2012-08-13 15:53:12 +08:00
|
|
|
info->has_expected_downtime = true;
|
|
|
|
info->expected_downtime = s->expected_downtime;
|
2013-07-22 22:01:58 +08:00
|
|
|
info->has_setup_time = true;
|
|
|
|
info->setup_time = s->setup_time;
|
2011-10-05 19:50:43 +08:00
|
|
|
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_ram = true;
|
|
|
|
info->ram = g_malloc0(sizeof(*info->ram));
|
|
|
|
info->ram->transferred = ram_bytes_transferred();
|
|
|
|
info->ram->remaining = ram_bytes_remaining();
|
|
|
|
info->ram->total = ram_bytes_total();
|
2012-08-07 02:42:56 +08:00
|
|
|
info->ram->duplicate = dup_mig_pages_transferred();
|
2013-03-26 17:58:37 +08:00
|
|
|
info->ram->skipped = skipped_mig_pages_transferred();
|
2012-08-07 02:42:56 +08:00
|
|
|
info->ram->normal = norm_mig_pages_transferred();
|
|
|
|
info->ram->normal_bytes = norm_mig_bytes_transferred();
|
2012-08-13 18:31:25 +08:00
|
|
|
info->ram->dirty_pages_rate = s->dirty_pages_rate;
|
2013-06-26 09:35:30 +08:00
|
|
|
info->ram->mbps = s->mbps;
|
2014-04-04 17:57:55 +08:00
|
|
|
info->ram->dirty_sync_count = s->dirty_sync_count;
|
2012-08-13 18:31:25 +08:00
|
|
|
|
2011-10-05 19:50:43 +08:00
|
|
|
if (blk_mig_active()) {
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_disk = true;
|
|
|
|
info->disk = g_malloc0(sizeof(*info->disk));
|
|
|
|
info->disk->transferred = blk_mig_bytes_transferred();
|
|
|
|
info->disk->remaining = blk_mig_bytes_remaining();
|
|
|
|
info->disk->total = blk_mig_bytes_total();
|
2008-10-25 06:10:31 +08:00
|
|
|
}
|
2012-08-07 02:42:57 +08:00
|
|
|
|
2015-09-09 01:12:36 +08:00
|
|
|
if (cpu_throttle_active()) {
|
|
|
|
info->has_x_cpu_throttle_percentage = true;
|
|
|
|
info->x_cpu_throttle_percentage = cpu_throttle_get_percentage();
|
|
|
|
}
|
|
|
|
|
2012-08-07 02:42:57 +08:00
|
|
|
get_xbzrle_cache_stats(info);
|
2011-10-05 19:50:43 +08:00
|
|
|
break;
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_COMPLETED:
|
2012-08-07 02:42:57 +08:00
|
|
|
get_xbzrle_cache_stats(info);
|
|
|
|
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_status = true;
|
2013-07-19 10:23:45 +08:00
|
|
|
info->has_total_time = true;
|
2012-08-18 19:17:10 +08:00
|
|
|
info->total_time = s->total_time;
|
2012-08-13 15:35:16 +08:00
|
|
|
info->has_downtime = true;
|
|
|
|
info->downtime = s->downtime;
|
2013-07-22 22:01:58 +08:00
|
|
|
info->has_setup_time = true;
|
|
|
|
info->setup_time = s->setup_time;
|
2012-05-22 04:01:07 +08:00
|
|
|
|
|
|
|
info->has_ram = true;
|
|
|
|
info->ram = g_malloc0(sizeof(*info->ram));
|
|
|
|
info->ram->transferred = ram_bytes_transferred();
|
|
|
|
info->ram->remaining = 0;
|
|
|
|
info->ram->total = ram_bytes_total();
|
2012-08-07 02:42:56 +08:00
|
|
|
info->ram->duplicate = dup_mig_pages_transferred();
|
2013-03-26 17:58:37 +08:00
|
|
|
info->ram->skipped = skipped_mig_pages_transferred();
|
2012-08-07 02:42:56 +08:00
|
|
|
info->ram->normal = norm_mig_pages_transferred();
|
|
|
|
info->ram->normal_bytes = norm_mig_bytes_transferred();
|
2013-06-26 09:35:30 +08:00
|
|
|
info->ram->mbps = s->mbps;
|
2014-04-04 17:57:55 +08:00
|
|
|
info->ram->dirty_sync_count = s->dirty_sync_count;
|
2011-10-05 19:50:43 +08:00
|
|
|
break;
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_FAILED:
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_status = true;
|
2011-10-05 19:50:43 +08:00
|
|
|
break;
|
2015-03-13 16:08:38 +08:00
|
|
|
case MIGRATION_STATUS_CANCELLED:
|
2011-09-14 04:37:16 +08:00
|
|
|
info->has_status = true;
|
2011-10-05 19:50:43 +08:00
|
|
|
break;
|
2008-10-13 11:12:02 +08:00
|
|
|
}
|
2015-03-13 16:08:41 +08:00
|
|
|
info->status = s->state;
|
2011-09-14 04:37:16 +08:00
|
|
|
|
|
|
|
return info;
|
2008-10-13 11:12:02 +08:00
|
|
|
}
|
|
|
|
|
2012-08-07 02:42:48 +08:00
|
|
|
void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
|
|
|
|
Error **errp)
|
|
|
|
{
|
|
|
|
MigrationState *s = migrate_get_current();
|
|
|
|
MigrationCapabilityStatusList *cap;
|
|
|
|
|
2015-11-06 02:10:48 +08:00
|
|
|
if (migration_is_setup_or_active(s->state)) {
|
2015-03-17 18:54:50 +08:00
|
|
|
error_setg(errp, QERR_MIGRATION_ACTIVE);
|
2012-08-07 02:42:48 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
for (cap = params; cap; cap = cap->next) {
|
|
|
|
s->enabled_capabilities[cap->value->capability] = cap->value->state;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2015-03-23 16:32:28 +08:00
|
|
|
/*
 * QMP migrate-set-parameters: validate and apply the given tunables.
 *
 * Bug fix: the x_cpu_throttle_initial / x_cpu_throttle_increment range
 * checks called error_setg() but then fell through, so an out-of-range
 * value was still stored below.  Each check now returns on error, like
 * the compress_* checks do.
 */
void qmp_migrate_set_parameters(bool has_compress_level,
                                int64_t compress_level,
                                bool has_compress_threads,
                                int64_t compress_threads,
                                bool has_decompress_threads,
                                int64_t decompress_threads,
                                bool has_x_cpu_throttle_initial,
                                int64_t x_cpu_throttle_initial,
                                bool has_x_cpu_throttle_increment,
                                int64_t x_cpu_throttle_increment, Error **errp)
{
    MigrationState *s = migrate_get_current();

    if (has_compress_level && (compress_level < 0 || compress_level > 9)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
                   "is invalid, it should be in the range of 0 to 9");
        return;
    }
    if (has_compress_threads &&
            (compress_threads < 1 || compress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "compress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_decompress_threads &&
            (decompress_threads < 1 || decompress_threads > 255)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "decompress_threads",
                   "is invalid, it should be in the range of 1 to 255");
        return;
    }
    if (has_x_cpu_throttle_initial &&
            (x_cpu_throttle_initial < 1 || x_cpu_throttle_initial > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_initial",
                   "an integer in the range of 1 to 99");
        return;
    }
    if (has_x_cpu_throttle_increment &&
            (x_cpu_throttle_increment < 1 || x_cpu_throttle_increment > 99)) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
                   "x_cpu_throttle_increment",
                   "an integer in the range of 1 to 99");
        return;
    }

    if (has_compress_level) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
    }
    if (has_compress_threads) {
        s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] = compress_threads;
    }
    if (has_decompress_threads) {
        s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
                                                    decompress_threads;
    }
    if (has_x_cpu_throttle_initial) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
                                                    x_cpu_throttle_initial;
    }
    if (has_x_cpu_throttle_increment) {
        s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
                                                    x_cpu_throttle_increment;
    }
}
|
|
|
|
|
2008-11-12 00:46:33 +08:00
|
|
|
/* shared migration helpers */
|
|
|
|
|
2013-11-07 19:01:15 +08:00
|
|
|
/*
 * Atomically advance the migration state machine from old_state to
 * new_state.  The transition happens only if the state is still
 * old_state; if something else (e.g. a concurrent cancel) changed it
 * first, this is a no-op.  A successful transition is traced and a
 * MIGRATION QMP event is emitted.
 */
static void migrate_set_state(MigrationState *s, int old_state, int new_state)
{
    if (atomic_cmpxchg(&s->state, old_state, new_state) != old_state) {
        /* Lost the race against another state change; do nothing. */
        return;
    }
    trace_migrate_set_state(new_state);
    migrate_generate_event(new_state);
}
|
|
|
|
|
2013-02-23 00:36:21 +08:00
|
|
|
/*
 * Bottom half run on the main loop to tear down an outgoing migration.
 * Scheduled by migration_thread() when it finishes; joins the thread,
 * closes the outgoing QEMUFile and notifies state-change listeners.
 */
static void migrate_fd_cleanup(void *opaque)
{
    MigrationState *s = opaque;

    qemu_bh_delete(s->cleanup_bh);
    s->cleanup_bh = NULL;

    if (s->file) {
        trace_migrate_fd_cleanup();
        /*
         * Drop the iothread lock while joining: the migration thread
         * takes it itself (e.g. in migration_completion), so holding it
         * here could deadlock.
         */
        qemu_mutex_unlock_iothread();
        qemu_thread_join(&s->thread);
        qemu_mutex_lock_iothread();

        migrate_compress_threads_join();
        qemu_fclose(s->file);
        s->file = NULL;
    }

    /* The thread has exited, so the state must have left ACTIVE. */
    assert(s->state != MIGRATION_STATUS_ACTIVE);

    if (s->state == MIGRATION_STATUS_CANCELLING) {
        /* Complete the CANCELLING -> CANCELLED transition begun by
         * migrate_fd_cancel(). */
        migrate_set_state(s, MIGRATION_STATUS_CANCELLING,
                          MIGRATION_STATUS_CANCELLED);
    }

    notifier_list_notify(&migration_state_notifiers, s);
}
|
|
|
|
|
2011-09-12 02:28:22 +08:00
|
|
|
/*
 * Mark an outgoing migration as failed before its QEMUFile was ever
 * created (connection setup failed).  Moves SETUP -> FAILED and tells
 * listeners; the caller still owns error reporting.
 */
void migrate_fd_error(MigrationState *s)
{
    trace_migrate_fd_error();
    /* Only valid before a file is attached; cleanup handles the rest. */
    assert(s->file == NULL);
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
    notifier_list_notify(&migration_state_notifiers, s);
}
|
|
|
|
|
2010-05-11 22:28:39 +08:00
|
|
|
/*
 * Request cancellation of the current outgoing migration.  Moves any
 * setup/active state to CANCELLING (the final CANCELLED transition is
 * done by migrate_fd_cleanup) and force-unblocks the file descriptors
 * the migration thread may be stuck on.
 */
static void migrate_fd_cancel(MigrationState *s)
{
    int old_state ;
    QEMUFile *f = migrate_get_current()->file;
    trace_migrate_fd_cancel();

    if (s->rp_state.from_dst_file) {
        /* shutdown the rp socket, so causing the rp thread to shutdown */
        qemu_file_shutdown(s->rp_state.from_dst_file);
    }

    /* CAS loop: retry until we either observe a non-cancellable state
     * or we win the transition into CANCELLING. */
    do {
        old_state = s->state;
        if (!migration_is_setup_or_active(old_state)) {
            break;
        }
        migrate_set_state(s, old_state, MIGRATION_STATUS_CANCELLING);
    } while (s->state != MIGRATION_STATUS_CANCELLING);

    /*
     * If we're unlucky the migration code might be stuck somewhere in a
     * send/write while the network has failed and is waiting to timeout;
     * if we've got shutdown(2) available then we can force it to quit.
     * The outgoing qemu file gets closed in migrate_fd_cleanup that is
     * called in a bh, so there is no race against this cancel.
     */
    if (s->state == MIGRATION_STATUS_CANCELLING && f) {
        qemu_file_shutdown(f);
    }
}
|
|
|
|
|
2010-12-14 00:30:12 +08:00
|
|
|
/* Subscribe @notify to migration state-change notifications. */
void add_migration_state_change_notifier(Notifier *notify)
{
    notifier_list_add(&migration_state_notifiers, notify);
}
|
|
|
|
|
|
|
|
/* Unsubscribe @notify from migration state-change notifications. */
void remove_migration_state_change_notifier(Notifier *notify)
{
    notifier_remove(notify);
}
|
|
|
|
|
2013-07-29 21:01:58 +08:00
|
|
|
/* True while the migration is still in the SETUP state. */
bool migration_in_setup(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_SETUP;
}
|
|
|
|
|
2011-02-23 07:43:59 +08:00
|
|
|
/* True once the migration completed successfully. */
bool migration_has_finished(MigrationState *s)
{
    return s->state == MIGRATION_STATUS_COMPLETED;
}
|
2010-05-11 22:28:39 +08:00
|
|
|
|
2011-10-25 19:50:11 +08:00
|
|
|
/* True if the migration ended without success: cancelled or failed. */
bool migration_has_failed(MigrationState *s)
{
    int state = s->state;

    return state == MIGRATION_STATUS_CANCELLED ||
           state == MIGRATION_STATUS_FAILED;
}
|
|
|
|
|
2015-11-06 02:10:40 +08:00
|
|
|
MigrationState *migrate_init(const MigrationParams *params)
|
2010-05-11 22:28:39 +08:00
|
|
|
{
|
2011-10-05 19:50:43 +08:00
|
|
|
MigrationState *s = migrate_get_current();
|
2011-02-23 07:33:19 +08:00
|
|
|
int64_t bandwidth_limit = s->bandwidth_limit;
|
2012-08-07 02:42:47 +08:00
|
|
|
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
|
2012-08-07 02:42:53 +08:00
|
|
|
int64_t xbzrle_cache_size = s->xbzrle_cache_size;
|
2015-03-23 16:32:27 +08:00
|
|
|
int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
|
|
|
|
int compress_thread_count =
|
|
|
|
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
|
|
|
|
int decompress_thread_count =
|
|
|
|
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
|
2015-09-09 01:12:34 +08:00
|
|
|
int x_cpu_throttle_initial =
|
|
|
|
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
|
|
|
|
int x_cpu_throttle_increment =
|
|
|
|
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
|
2012-08-07 02:42:47 +08:00
|
|
|
|
|
|
|
memcpy(enabled_capabilities, s->enabled_capabilities,
|
|
|
|
sizeof(enabled_capabilities));
|
2010-05-11 22:28:39 +08:00
|
|
|
|
2011-10-05 19:50:43 +08:00
|
|
|
memset(s, 0, sizeof(*s));
|
2012-06-19 23:43:09 +08:00
|
|
|
s->params = *params;
|
2012-08-07 02:42:47 +08:00
|
|
|
memcpy(s->enabled_capabilities, enabled_capabilities,
|
|
|
|
sizeof(enabled_capabilities));
|
2012-08-07 02:42:53 +08:00
|
|
|
s->xbzrle_cache_size = xbzrle_cache_size;
|
2011-11-10 04:29:01 +08:00
|
|
|
|
2015-03-23 16:32:27 +08:00
|
|
|
s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
|
|
|
|
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
|
|
|
|
compress_thread_count;
|
|
|
|
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
|
|
|
|
decompress_thread_count;
|
2015-09-09 01:12:34 +08:00
|
|
|
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
|
|
|
|
x_cpu_throttle_initial;
|
|
|
|
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
|
|
|
|
x_cpu_throttle_increment;
|
2010-05-11 22:28:39 +08:00
|
|
|
s->bandwidth_limit = bandwidth_limit;
|
2015-06-17 07:36:40 +08:00
|
|
|
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
|
2010-05-11 22:28:39 +08:00
|
|
|
|
2013-08-21 23:03:08 +08:00
|
|
|
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
|
2010-05-11 22:28:39 +08:00
|
|
|
return s;
|
|
|
|
}
|
2011-02-23 06:54:21 +08:00
|
|
|
|
2011-11-15 05:09:43 +08:00
|
|
|
/* Errors registered via migrate_add_blocker(); any entry in this list
 * makes qmp_migrate() refuse to start an outgoing migration. */
static GSList *migration_blockers;
|
|
|
|
|
|
|
|
/* Register @reason as a migration blocker; qmp_migrate() copies the
 * first blocker's Error when rejecting a migration attempt. */
void migrate_add_blocker(Error *reason)
{
    migration_blockers = g_slist_prepend(migration_blockers, reason);
}
|
|
|
|
|
|
|
|
/* Remove a previously registered migration blocker.  The Error itself
 * is not freed here; the caller retains ownership. */
void migrate_del_blocker(Error *reason)
{
    migration_blockers = g_slist_remove(migration_blockers, reason);
}
|
|
|
|
|
2015-02-19 19:40:28 +08:00
|
|
|
/*
 * QMP handler: start an incoming migration on @uri.  Only valid when
 * QEMU was started with '-incoming defer', and only once per run.
 *
 * Bug fix: the "already been started" branch previously fell through
 * and still called qemu_start_incoming_migration(), which could try to
 * error_propagate() into an errp that was already set (an Error-API
 * violation) and start a second incoming migration.  Add the missing
 * early return.
 */
void qmp_migrate_incoming(const char *uri, Error **errp)
{
    Error *local_err = NULL;
    static bool once = true;    /* flips to false after the first start */

    if (!deferred_incoming) {
        error_setg(errp, "For use with '-incoming defer'");
        return;
    }
    if (!once) {
        error_setg(errp, "The incoming migration has already been started");
        return;
    }

    qemu_start_incoming_migration(uri, &local_err);

    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    once = false;
}
|
|
|
|
|
2011-12-06 00:48:01 +08:00
|
|
|
/*
 * QMP handler: start an outgoing migration to @uri.
 * Validates that no migration is already running, that the guest is not
 * itself an incoming target, and that no savevm/migration blockers are
 * registered; then resets the migration state and dispatches on the URI
 * scheme (tcp/rdma/exec/unix/fd).
 */
void qmp_migrate(const char *uri, bool has_blk, bool blk,
                 bool has_inc, bool inc, bool has_detach, bool detach,
                 Error **errp)
{
    Error *local_err = NULL;
    MigrationState *s = migrate_get_current();
    MigrationParams params;
    const char *p;

    /* QAPI optional booleans: absent means false. */
    params.blk = has_blk && blk;
    params.shared = has_inc && inc;

    if (migration_is_setup_or_active(s->state) ||
        s->state == MIGRATION_STATUS_CANCELLING) {
        error_setg(errp, QERR_MIGRATION_ACTIVE);
        return;
    }
    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Guest is waiting for an incoming migration");
        return;
    }

    if (qemu_savevm_state_blocked(errp)) {
        return;
    }

    if (migration_blockers) {
        /* Report the first registered blocker as the reason. */
        *errp = error_copy(migration_blockers->data);
        return;
    }

    /* We are starting a new migration, so we want to start in a clean
       state. This change is only needed if previous migration
       failed/was cancelled. We don't use migrate_set_state() because
       we are setting the initial state, not changing it. */
    s->state = MIGRATION_STATUS_NONE;

    s = migrate_init(&params);

    /* Dispatch on URI scheme; each helper reports errors via local_err. */
    if (strstart(uri, "tcp:", &p)) {
        tcp_start_outgoing_migration(s, p, &local_err);
#ifdef CONFIG_RDMA
    } else if (strstart(uri, "rdma:", &p)) {
        rdma_start_outgoing_migration(s, p, &local_err);
#endif
#if !defined(WIN32)
    } else if (strstart(uri, "exec:", &p)) {
        exec_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "unix:", &p)) {
        unix_start_outgoing_migration(s, p, &local_err);
    } else if (strstart(uri, "fd:", &p)) {
        fd_start_outgoing_migration(s, p, &local_err);
#endif
    } else {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
                   "a valid migration protocol");
        migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_FAILED);
        return;
    }

    if (local_err) {
        migrate_fd_error(s);
        error_propagate(errp, local_err);
        return;
    }
}
|
|
|
|
|
2011-11-28 08:54:09 +08:00
|
|
|
/* QMP handler: cancel the current outgoing migration (if any). */
void qmp_migrate_cancel(Error **errp)
{
    migrate_fd_cancel(migrate_get_current());
}
|
|
|
|
|
2012-08-07 02:42:54 +08:00
|
|
|
/*
 * QMP handler: resize the XBZRLE page cache to @value bytes.
 * Rejects values that would truncate on this host's size_t, exceed
 * guest RAM, or are smaller than the page size.
 *
 * Fix: the "exceeds guest ram size" error message carried a stray
 * trailing space.
 */
void qmp_migrate_set_cache_size(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();
    int64_t new_size;

    /* Check for truncation */
    if (value != (size_t)value) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return;
    }

    /* Cache should not be larger than guest ram size */
    if (value > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return;
    }

    /* xbzrle_cache_resize() rounds to a valid size or returns < 0. */
    new_size = xbzrle_cache_resize(value);
    if (new_size < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "is smaller than page size");
        return;
    }

    s->xbzrle_cache_size = new_size;
}
|
|
|
|
|
|
|
|
/* QMP handler: report the current XBZRLE cache size in bytes. */
int64_t qmp_query_migrate_cache_size(Error **errp)
{
    return migrate_xbzrle_cache_size();
}
|
|
|
|
|
2011-11-28 21:59:37 +08:00
|
|
|
/*
 * QMP handler: set the migration bandwidth limit in bytes/sec.
 * The value is clamped to [0, SIZE_MAX] and, if a migration is in
 * flight, applied to its QEMUFile immediately (scaled per time slice).
 */
void qmp_migrate_set_speed(int64_t value, Error **errp)
{
    MigrationState *s = migrate_get_current();

    /* Clamp to a sane range. */
    if (value < 0) {
        value = 0;
    } else if (value > SIZE_MAX) {
        value = SIZE_MAX;
    }

    s->bandwidth_limit = value;
    if (s->file) {
        qemu_file_set_rate_limit(s->file, s->bandwidth_limit / XFER_LIMIT_RATIO);
    }
}
|
|
|
|
|
2011-11-28 09:18:01 +08:00
|
|
|
/*
 * QMP handler: set the maximum tolerated downtime, given in seconds,
 * stored internally in nanoseconds.
 */
void qmp_migrate_set_downtime(double value, Error **errp)
{
    /* seconds -> nanoseconds */
    value *= 1e9;
    /* NOTE(review): UINT64_MAX is not exactly representable as a double,
     * so the upper clamp is approximate at the extreme of the range. */
    value = MAX(0, MIN(UINT64_MAX, value));
    max_downtime = (uint64_t)value;
}
|
2012-08-07 02:42:53 +08:00
|
|
|
|
2013-06-24 17:49:42 +08:00
|
|
|
bool migrate_auto_converge(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
|
|
|
|
}
|
|
|
|
|
2013-07-18 15:48:50 +08:00
|
|
|
bool migrate_zero_blocks(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
|
|
|
|
}
|
|
|
|
|
2015-03-23 16:32:17 +08:00
|
|
|
bool migrate_use_compression(void)
|
|
|
|
{
|
2015-03-23 16:32:26 +08:00
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
|
2015-03-23 16:32:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int migrate_compress_level(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
2015-03-23 16:32:27 +08:00
|
|
|
return s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
|
2015-03-23 16:32:17 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
int migrate_compress_threads(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
2015-03-23 16:32:27 +08:00
|
|
|
return s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
|
2015-03-23 16:32:17 +08:00
|
|
|
}
|
|
|
|
|
2015-03-23 16:32:18 +08:00
|
|
|
int migrate_decompress_threads(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
2015-03-23 16:32:27 +08:00
|
|
|
return s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
|
2015-03-23 16:32:18 +08:00
|
|
|
}
|
|
|
|
|
2015-07-07 20:44:05 +08:00
|
|
|
bool migrate_use_events(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
|
|
|
|
}
|
|
|
|
|
2012-08-07 02:42:53 +08:00
|
|
|
int migrate_use_xbzrle(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
|
|
|
|
}
|
|
|
|
|
|
|
|
int64_t migrate_xbzrle_cache_size(void)
|
|
|
|
{
|
|
|
|
MigrationState *s;
|
|
|
|
|
|
|
|
s = migrate_get_current();
|
|
|
|
|
|
|
|
return s->xbzrle_cache_size;
|
|
|
|
}
|
2012-10-03 20:18:33 +08:00
|
|
|
|
2015-11-06 02:10:49 +08:00
|
|
|
/* migration thread support */
|
|
|
|
/*
|
|
|
|
* Something bad happened to the RP stream, mark an error
|
|
|
|
* The caller shall print or trace something to indicate why
|
|
|
|
*/
|
|
|
|
/* Flag an error on the return path; read by the rp thread's loop
 * condition and by await_return_path_close_on_source(). */
static void mark_source_rp_bad(MigrationState *s)
{
    s->rp_state.error = true;
}
|
|
|
|
|
|
|
|
/* Per-message payload-length table for return-path messages, indexed
 * by MIG_RP_MSG_* value; len == -1 means variable length. */
static struct rp_cmd_args {
    ssize_t len; /* -1 = variable */
    const char *name;
} rp_cmd_args[] = {
    [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
    [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
    [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
    [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
};
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Handles messages sent on the return path towards the source VM
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
/*
 * Thread body: read and dispatch messages arriving on the return path
 * from the destination.  Each message is a (type, len) header followed
 * by a payload; headers are validated against rp_cmd_args[] before the
 * payload is read.  Exits on error, on SHUT, or when the migration
 * leaves the setup/active states.
 */
static void *source_return_path_thread(void *opaque)
{
    MigrationState *ms = opaque;
    QEMUFile *rp = ms->rp_state.from_dst_file;
    uint16_t header_len, header_type;
    const int max_len = 512;
    uint8_t buf[max_len];
    uint32_t tmp32, sibling_error;
    int res;

    trace_source_return_path_thread_entry();
    while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
           migration_is_setup_or_active(ms->state)) {
        trace_source_return_path_thread_loop_top();
        /* Wire format: big-endian 16-bit type then 16-bit length. */
        header_type = qemu_get_be16(rp);
        header_len = qemu_get_be16(rp);

        if (header_type >= MIG_RP_MSG_MAX ||
            header_type == MIG_RP_MSG_INVALID) {
            error_report("RP: Received invalid message 0x%04x length 0x%04x",
                         header_type, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* Length must match the table (unless variable) and fit buf. */
        if ((rp_cmd_args[header_type].len != -1 &&
            header_len != rp_cmd_args[header_type].len) ||
            header_len > max_len) {
            error_report("RP: Received '%s' message (0x%04x) with"
                         "incorrect length %d expecting %zu",
                         rp_cmd_args[header_type].name, header_type, header_len,
                         (size_t)rp_cmd_args[header_type].len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* We know we've got a valid header by this point */
        res = qemu_get_buffer(rp, buf, header_len);
        if (res != header_len) {
            error_report("RP: Failed reading data for message 0x%04x"
                         " read %d expected %d",
                         header_type, res, header_len);
            mark_source_rp_bad(ms);
            goto out;
        }

        /* OK, we have the message and the data */
        switch (header_type) {
        case MIG_RP_MSG_SHUT:
            /* Destination is shutting down the RP; payload is its
             * error status. */
            sibling_error = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_shut(sibling_error);
            if (sibling_error) {
                error_report("RP: Sibling indicated error %d", sibling_error);
                mark_source_rp_bad(ms);
            }
            /*
             * We'll let the main thread deal with closing the RP
             * we could do a shutdown(2) on it, but we're the only user
             * anyway, so there's nothing gained.
             */
            goto out;

        case MIG_RP_MSG_PONG:
            tmp32 = be32_to_cpup((uint32_t *)buf);
            trace_source_return_path_thread_pong(tmp32);
            break;

        default:
            break;
        }
    }
    if (rp && qemu_file_get_error(rp)) {
        trace_source_return_path_thread_bad_end();
        mark_source_rp_bad(ms);
    }

    trace_source_return_path_thread_end();
    /* Note: the goto paths above skip the end trace and land here. */
out:
    ms->rp_state.from_dst_file = NULL;
    qemu_fclose(rp);
    return NULL;
}
|
|
|
|
|
|
|
|
__attribute__ (( unused )) /* Until later in patch series */
|
|
|
|
static int open_return_path_on_source(MigrationState *ms)
|
|
|
|
{
|
|
|
|
|
|
|
|
ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->file);
|
|
|
|
if (!ms->rp_state.from_dst_file) {
|
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
|
|
|
|
trace_open_return_path_on_source();
|
|
|
|
qemu_thread_create(&ms->rp_state.rp_thread, "return path",
|
|
|
|
source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
|
|
|
|
|
|
|
|
trace_open_return_path_on_source_continue();
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
|
|
|
__attribute__ (( unused )) /* Until later in patch series */
/* Returns 0 if the RP was ok, otherwise there was an error on the RP */
static int await_return_path_close_on_source(MigrationState *ms)
{
    /*
     * If this is a normal exit then the destination will send a SHUT and the
     * rp_thread will exit, however if there's an error we need to cause
     * it to exit.
     */
    if (qemu_file_get_error(ms->file) && ms->rp_state.from_dst_file) {
        /*
         * shutdown(2), if we have it, will cause it to unblock if it's stuck
         * waiting for the destination.
         */
        qemu_file_shutdown(ms->rp_state.from_dst_file);
        mark_source_rp_bad(ms);
    }
    trace_await_return_path_close_on_source_joining();
    qemu_thread_join(&ms->rp_state.rp_thread);
    trace_await_return_path_close_on_source_close();
    /* rp_state.error is a bool: non-zero means the RP saw an error. */
    return ms->rp_state.error;
}
|
|
|
|
|
2015-08-13 18:51:31 +08:00
|
|
|
/**
|
|
|
|
* migration_completion: Used by migration_thread when there's not much left.
|
|
|
|
* The caller 'breaks' the loop when this returns.
|
|
|
|
*
|
|
|
|
* @s: Current migration state
|
|
|
|
* @*old_vm_running: Pointer to old_vm_running flag
|
|
|
|
* @*start_time: Pointer to time to update
|
|
|
|
*/
|
|
|
|
static void migration_completion(MigrationState *s, bool *old_vm_running,
                                 int64_t *start_time)
{
    int ret;

    /* Everything up to the unlock runs under the iothread lock: we stop
     * the VM and stream the final device state with the guest paused. */
    qemu_mutex_lock_iothread();
    *start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER);
    /* Remember whether the guest was running so the caller can restart
     * it if the migration fails. */
    *old_vm_running = runstate_is_running();

    ret = global_state_store();
    if (!ret) {
        ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
        if (ret >= 0) {
            /* Guest is stopped: no reason to throttle the last stage. */
            qemu_file_set_rate_limit(s->file, INT64_MAX);
            qemu_savevm_state_complete_precopy(s->file);
        }
    }
    qemu_mutex_unlock_iothread();

    if (ret < 0) {
        goto fail;
    }

    if (qemu_file_get_error(s->file)) {
        trace_migration_completion_file_err();
        goto fail;
    }

    migrate_set_state(s, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_COMPLETED);
    return;

fail:
    migrate_set_state(s, MIGRATION_STATUS_ACTIVE, MIGRATION_STATUS_FAILED);
}
|
|
|
|
|
2015-11-06 02:10:49 +08:00
|
|
|
/*
|
|
|
|
* Master migration thread on the source VM.
|
|
|
|
* It drives the migration and pumps the data down the outgoing channel.
|
|
|
|
*/
|
2013-02-23 00:36:30 +08:00
|
|
|
static void *migration_thread(void *opaque)
{
    MigrationState *s = opaque;
    /* REALTIME clock drives the per-BUFFER_DELAY accounting window. */
    int64_t initial_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
    int64_t initial_bytes = 0;
    /* Max bytes the final stage may still have pending; derived from
     * measured bandwidth and the allowed downtime. */
    int64_t max_size = 0;
    int64_t start_time = initial_time;
    int64_t end_time;
    bool old_vm_running = false;

    rcu_register_thread();

    qemu_savevm_state_header(s->file);
    qemu_savevm_state_begin(s->file, &s->params);

    s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
    migrate_set_state(s, MIGRATION_STATUS_SETUP, MIGRATION_STATUS_ACTIVE);

    /* Main pump loop: iterate while dirty data outruns the downtime
     * budget; complete (or fail/cancel) otherwise. */
    while (s->state == MIGRATION_STATUS_ACTIVE) {
        int64_t current_time;
        uint64_t pending_size;

        if (!qemu_file_rate_limit(s->file)) {
            pending_size = qemu_savevm_state_pending(s->file, max_size);
            trace_migrate_pending(pending_size, max_size);
            if (pending_size && pending_size >= max_size) {
                qemu_savevm_state_iterate(s->file);
            } else {
                /* Small enough to finish within the downtime budget. */
                trace_migration_thread_low_pending(pending_size);
                migration_completion(s, &old_vm_running, &start_time);
                break;
            }
        }

        if (qemu_file_get_error(s->file)) {
            migrate_set_state(s, MIGRATION_STATUS_ACTIVE,
                              MIGRATION_STATUS_FAILED);
            break;
        }
        current_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
        if (current_time >= initial_time + BUFFER_DELAY) {
            /* Window elapsed: recompute bandwidth, throughput stats and
             * the max_size budget for the next window. */
            uint64_t transferred_bytes = qemu_ftell(s->file) - initial_bytes;
            uint64_t time_spent = current_time - initial_time;
            double bandwidth = transferred_bytes / time_spent;
            max_size = bandwidth * migrate_max_downtime() / 1000000;

            s->mbps = time_spent ? (((double) transferred_bytes * 8.0) /
                    ((double) time_spent / 1000.0)) / 1000.0 / 1000.0 : -1;

            trace_migrate_transferred(transferred_bytes, time_spent,
                                      bandwidth, max_size);
            /* if we haven't sent anything, we don't want to recalculate
               10000 is a small enough number for our purposes */
            if (s->dirty_bytes_rate && transferred_bytes > 10000) {
                s->expected_downtime = s->dirty_bytes_rate / bandwidth;
            }

            qemu_file_reset_rate_limit(s->file);
            initial_time = current_time;
            initial_bytes = qemu_ftell(s->file);
        }
        if (qemu_file_rate_limit(s->file)) {
            /* usleep expects microseconds */
            g_usleep((initial_time + BUFFER_DELAY - current_time)*1000);
        }
    }

    /* If we enabled cpu throttling for auto-converge, turn it off. */
    cpu_throttle_stop();
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    qemu_mutex_lock_iothread();
    qemu_savevm_state_cleanup();
    if (s->state == MIGRATION_STATUS_COMPLETED) {
        /* Final statistics for query-migrate. */
        uint64_t transferred_bytes = qemu_ftell(s->file);
        s->total_time = end_time - s->total_time;
        s->downtime = end_time - start_time;
        if (s->total_time) {
            s->mbps = (((double) transferred_bytes * 8.0) /
                       ((double) s->total_time)) / 1000;
        }
        runstate_set(RUN_STATE_POSTMIGRATE);
    } else {
        /* Failed or cancelled: resume the guest if it was running. */
        if (old_vm_running) {
            vm_start();
        }
    }
    /* Hand teardown back to the main loop (migrate_fd_cleanup). */
    qemu_bh_schedule(s->cleanup_bh);
    qemu_mutex_unlock_iothread();

    rcu_unregister_thread();
    return NULL;
}
|
|
|
|
|
2012-12-19 16:55:50 +08:00
|
|
|
/*
 * Called once the outgoing transport is connected: seed the downtime
 * estimate, arm the cleanup bottom half, apply the bandwidth limit,
 * notify listeners and finally launch the migration thread.
 */
void migrate_fd_connect(MigrationState *s)
{
    const int64_t rate_per_tick = s->bandwidth_limit / XFER_LIMIT_RATIO;

    /* Rough first estimate: max_downtime is in ns, we want ms. */
    s->expected_downtime = max_downtime/1000000;
    s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup, s);

    qemu_file_set_rate_limit(s->file, rate_per_tick);

    /* Listeners must be told before the thread can make progress. */
    notifier_list_notify(&migration_state_notifiers, s);

    migrate_compress_threads_create();
    qemu_thread_create(&s->thread, "migration", migration_thread, s,
                       QEMU_THREAD_JOINABLE);
}
|