/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "config-host.h"
#include "qemu-common.h"
#include "hw/boards.h"
#include "hw/hw.h"
#include "hw/qdev.h"
#include "net/net.h"
#include "monitor/monitor.h"
#include "sysemu/sysemu.h"
#include "qemu/timer.h"
#include "audio/audio.h"
#include "migration/migration.h"
#include "qemu/sockets.h"
#include "qemu/queue.h"
#include "sysemu/cpus.h"
#include "exec/memory.h"
#include "qmp-commands.h"
#include "trace.h"
#include "qemu/iov.h"
#include "block/snapshot.h"
#include "block/qapi.h"

#ifndef ETH_P_RARP
#define ETH_P_RARP 0x8035
#endif
#define ARP_HTYPE_ETH 0x0001
#define ARP_PTYPE_IP 0x0800
#define ARP_OP_REQUEST_REV 0x3

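/*
 * Self-announcement: after an incoming migration finishes, QEMU broadcasts a
 * few gratuitous RARP frames per NIC so that network switches relearn the
 * guest's MAC addresses on the new host. announce_self_create() builds one
 * such frame into the caller-supplied buffer.
 */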
static int announce_self_create(uint8_t *buf,
                                uint8_t *mac_addr)
{
    /* Ethernet header. */
    memset(buf, 0xff, 6);         /* destination MAC addr */
    memcpy(buf + 6, mac_addr, 6); /* source MAC addr */
    *(uint16_t *)(buf + 12) = htons(ETH_P_RARP); /* ethertype */

    /* RARP header. */
    *(uint16_t *)(buf + 14) = htons(ARP_HTYPE_ETH); /* hardware addr space */
    *(uint16_t *)(buf + 16) = htons(ARP_PTYPE_IP); /* protocol addr space */
    *(buf + 18) = 6; /* hardware addr length (ethernet) */
    *(buf + 19) = 4; /* protocol addr length (IPv4) */
    *(uint16_t *)(buf + 20) = htons(ARP_OP_REQUEST_REV); /* opcode */
    memcpy(buf + 22, mac_addr, 6); /* source hw addr */
    memset(buf + 28, 0x00, 4);     /* source protocol addr */
    memcpy(buf + 32, mac_addr, 6); /* target hw addr */
    memset(buf + 38, 0x00, 4);     /* target protocol addr */

    /* Padding to get up to 60 bytes (ethernet min packet size, minus FCS). */
    memset(buf + 42, 0x00, 18);

    return 60; /* len (FCS will be added by hardware) */
}

static void qemu_announce_self_iter(NICState *nic, void *opaque)
{
    uint8_t buf[60];
    int len;

    trace_qemu_announce_self_iter(qemu_ether_ntoa(&nic->conf->macaddr));
    len = announce_self_create(buf, nic->conf->macaddr.a);

    qemu_send_packet_raw(qemu_get_queue(nic), buf, len);
}

static void qemu_announce_self_once(void *opaque)
{
    static int count = SELF_ANNOUNCE_ROUNDS;
    QEMUTimer *timer = *(QEMUTimer **)opaque;

    qemu_foreach_nic(qemu_announce_self_iter, NULL);

    if (--count) {
        /* delay 50ms, 150ms, 250ms, ... */
        timer_mod(timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) +
                  self_announce_delay(count));
    } else {
        timer_del(timer);
        timer_free(timer);
    }
}

void qemu_announce_self(void)
{
    static QEMUTimer *timer;
    timer = timer_new_ms(QEMU_CLOCK_REALTIME, qemu_announce_self_once, &timer);
    qemu_announce_self_once(&timer);
}

/***********************************************************/
/* savevm/loadvm support */

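/*
 * QEMUFile over a block device: these callbacks let the generic save/load
 * code read and write the VM state area of a snapshot-capable image, which is
 * how the savevm/loadvm monitor commands below store the machine state.
 */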
static ssize_t block_writev_buffer(void *opaque, struct iovec *iov, int iovcnt,
                                   int64_t pos)
{
    int ret;
    QEMUIOVector qiov;

    qemu_iovec_init_external(&qiov, iov, iovcnt);
    ret = bdrv_writev_vmstate(opaque, &qiov, pos);
    if (ret < 0) {
        return ret;
    }

    return qiov.size;
}

static int block_put_buffer(void *opaque, const uint8_t *buf,
                            int64_t pos, int size)
{
    bdrv_save_vmstate(opaque, buf, pos, size);
    return size;
}

static int block_get_buffer(void *opaque, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(opaque, buf, pos, size);
}

static int bdrv_fclose(void *opaque)
{
    return bdrv_flush(opaque);
}

static const QEMUFileOps bdrv_read_ops = {
    .get_buffer = block_get_buffer,
    .close = bdrv_fclose
};

static const QEMUFileOps bdrv_write_ops = {
    .put_buffer = block_put_buffer,
    .writev_buffer = block_writev_buffer,
    .close = bdrv_fclose
};

static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int is_writable)
{
    if (is_writable) {
        return qemu_fopen_ops(bs, &bdrv_write_ops);
    }
    return qemu_fopen_ops(bs, &bdrv_read_ops);
}

/* QEMUFile timer support.
 * Not in qemu-file.c to not add qemu-timer.c as dependency to qemu-file.c
 */

void timer_put(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = timer_expire_time_ns(ts);
    qemu_put_be64(f, expire_time);
}

void timer_get(QEMUFile *f, QEMUTimer *ts)
{
    uint64_t expire_time;

    expire_time = qemu_get_be64(f);
    if (expire_time != -1) {
        timer_mod_ns(ts, expire_time);
    } else {
        timer_del(ts);
    }
}


/* VMState timer support.
 * Not in vmstate.c to not add qemu-timer.c as dependency to vmstate.c
 */

static int get_timer(QEMUFile *f, void *pv, size_t size)
{
    QEMUTimer *v = pv;
    timer_get(f, v);
    return 0;
}

static void put_timer(QEMUFile *f, void *pv, size_t size)
{
    QEMUTimer *v = pv;
    timer_put(f, v);
}

const VMStateInfo vmstate_info_timer = {
    .name = "timer",
    .get = get_timer,
    .put = put_timer,
};

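/*
 * Every registered device, whether it uses old-style save/load handlers or a
 * VMStateDescription, gets a SaveStateEntry queued on the global
 * savevm_handlers list. CompatEntry keeps the id string without the qdev
 * path prefix so that streams produced by older QEMU versions still match in
 * find_se().
 */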
typedef struct CompatEntry {
    char idstr[256];
    int instance_id;
} CompatEntry;

typedef struct SaveStateEntry {
    QTAILQ_ENTRY(SaveStateEntry) entry;
    char idstr[256];
    int instance_id;
    int alias_id;
    int version_id;
    int section_id;
    SaveVMHandlers *ops;
    const VMStateDescription *vmsd;
    void *opaque;
    CompatEntry *compat;
    int no_migrate;
    int is_ram;
} SaveStateEntry;

static QTAILQ_HEAD(savevm_handlers, SaveStateEntry) savevm_handlers =
    QTAILQ_HEAD_INITIALIZER(savevm_handlers);
static int global_section_id;

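/*
 * Machine-readable dump of the migration format: dump_vmstate_json_to_file()
 * (reached via the -dump-vmstate command line option) walks every device
 * class that carries a VMStateDescription and writes its fields and
 * subsections as JSON, so streams from different QEMU builds can be compared.
 */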
static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection);

static void dump_vmstate_vmsf(FILE *out_file, const VMStateField *field,
                              int indent)
{
    fprintf(out_file, "%*s{\n", indent, "");
    indent += 2;
    fprintf(out_file, "%*s\"field\": \"%s\",\n", indent, "", field->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            field->version_id);
    fprintf(out_file, "%*s\"field_exists\": %s,\n", indent, "",
            field->field_exists ? "true" : "false");
    fprintf(out_file, "%*s\"size\": %zu", indent, "", field->size);
    if (field->vmsd != NULL) {
        fprintf(out_file, ",\n");
        dump_vmstate_vmsd(out_file, field->vmsd, indent, false);
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_vmstate_vmss(FILE *out_file,
                              const VMStateSubsection *subsection,
                              int indent)
{
    if (subsection->vmsd != NULL) {
        dump_vmstate_vmsd(out_file, subsection->vmsd, indent, true);
    }
}

static void dump_vmstate_vmsd(FILE *out_file,
                              const VMStateDescription *vmsd, int indent,
                              bool is_subsection)
{
    if (is_subsection) {
        fprintf(out_file, "%*s{\n", indent, "");
    } else {
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", "Description");
    }
    indent += 2;
    fprintf(out_file, "%*s\"name\": \"%s\",\n", indent, "", vmsd->name);
    fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
            vmsd->version_id);
    fprintf(out_file, "%*s\"minimum_version_id\": %d", indent, "",
            vmsd->minimum_version_id);
    if (vmsd->fields != NULL) {
        const VMStateField *field = vmsd->fields;
        bool first;

        fprintf(out_file, ",\n%*s\"Fields\": [\n", indent, "");
        first = true;
        while (field->name != NULL) {
            if (field->flags & VMS_MUST_EXIST) {
                /* Ignore VMSTATE_VALIDATE bits; these don't get migrated */
                field++;
                continue;
            }
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmsf(out_file, field, indent + 2);
            field++;
            first = false;
        }
        fprintf(out_file, "\n%*s]", indent, "");
    }
    if (vmsd->subsections != NULL) {
        const VMStateSubsection *subsection = vmsd->subsections;
        bool first;

        fprintf(out_file, ",\n%*s\"Subsections\": [\n", indent, "");
        first = true;
        while (subsection->vmsd != NULL) {
            if (!first) {
                fprintf(out_file, ",\n");
            }
            dump_vmstate_vmss(out_file, subsection, indent + 2);
            subsection++;
            first = false;
        }
        fprintf(out_file, "\n%*s]", indent, "");
    }
    fprintf(out_file, "\n%*s}", indent - 2, "");
}

static void dump_machine_type(FILE *out_file)
{
    MachineClass *mc;

    mc = MACHINE_GET_CLASS(current_machine);

    fprintf(out_file, "  \"vmschkmachine\": {\n");
    fprintf(out_file, "    \"Name\": \"%s\"\n", mc->name);
    fprintf(out_file, "  },\n");
}

void dump_vmstate_json_to_file(FILE *out_file)
{
    GSList *list, *elt;
    bool first;

    fprintf(out_file, "{\n");
    dump_machine_type(out_file);

    first = true;
    list = object_class_get_list(TYPE_DEVICE, true);
    for (elt = list; elt; elt = elt->next) {
        DeviceClass *dc = OBJECT_CLASS_CHECK(DeviceClass, elt->data,
                                             TYPE_DEVICE);
        const char *name;
        int indent = 2;

        if (!dc->vmsd) {
            continue;
        }

        if (!first) {
            fprintf(out_file, ",\n");
        }
        name = object_class_get_name(OBJECT_CLASS(dc));
        fprintf(out_file, "%*s\"%s\": {\n", indent, "", name);
        indent += 2;
        fprintf(out_file, "%*s\"Name\": \"%s\",\n", indent, "", name);
        fprintf(out_file, "%*s\"version_id\": %d,\n", indent, "",
                dc->vmsd->version_id);
        fprintf(out_file, "%*s\"minimum_version_id\": %d,\n", indent, "",
                dc->vmsd->minimum_version_id);

        dump_vmstate_vmsd(out_file, dc->vmsd, indent, false);

        fprintf(out_file, "\n%*s}", indent - 2, "");
        first = false;
    }
    fprintf(out_file, "\n}\n");
    fclose(out_file);
}

static int calculate_new_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (strcmp(idstr, se->idstr) == 0
            && instance_id <= se->instance_id) {
            instance_id = se->instance_id + 1;
        }
    }
    return instance_id;
}

static int calculate_compat_instance_id(const char *idstr)
{
    SaveStateEntry *se;
    int instance_id = 0;

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!se->compat) {
            continue;
        }

        if (strcmp(idstr, se->compat->idstr) == 0
            && instance_id <= se->compat->instance_id) {
            instance_id = se->compat->instance_id + 1;
        }
    }
    return instance_id;
}

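/*
 * Registration API: register_savevm_live() and register_savevm() add
 * old-style save/load handlers, while vmstate_register_with_alias_id() adds
 * VMState-described devices; all of them end up as SaveStateEntry records on
 * the savevm_handlers list above.
 */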
/* TODO: Individual devices generally have very little idea about the rest
   of the system, so instance_id should be removed/replaced.
   Meanwhile pass -1 as instance_id if you do not already have a clearly
   distinguishing id for all instances of your device class. */
int register_savevm_live(DeviceState *dev,
                         const char *idstr,
                         int instance_id,
                         int version_id,
                         SaveVMHandlers *ops,
                         void *opaque)
{
    SaveStateEntry *se;

    se = g_malloc0(sizeof(SaveStateEntry));
    se->version_id = version_id;
    se->section_id = global_section_id++;
    se->ops = ops;
    se->opaque = opaque;
    se->vmsd = NULL;
    se->no_migrate = 0;
    /* if this is a live savevm handler then set is_ram */
    if (ops->save_live_setup != NULL) {
        se->is_ram = 1;
    }

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            pstrcpy(se->idstr, sizeof(se->idstr), id);
            pstrcat(se->idstr, sizeof(se->idstr), "/");
            g_free(id);

            se->compat = g_malloc0(sizeof(CompatEntry));
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), idstr);
            se->compat->instance_id = instance_id == -1 ?
                         calculate_compat_instance_id(idstr) : instance_id;
            instance_id = -1;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), idstr);

    if (instance_id == -1) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    /* add at the end of list */
    QTAILQ_INSERT_TAIL(&savevm_handlers, se, entry);
    return 0;
}

int register_savevm(DeviceState *dev,
                    const char *idstr,
                    int instance_id,
                    int version_id,
                    SaveStateHandler *save_state,
                    LoadStateHandler *load_state,
                    void *opaque)
{
    SaveVMHandlers *ops = g_malloc0(sizeof(SaveVMHandlers));
    ops->save_state = save_state;
    ops->load_state = load_state;
    return register_savevm_live(dev, idstr, instance_id, version_id,
                                ops, opaque);
}

void unregister_savevm(DeviceState *dev, const char *idstr, void *opaque)
{
    SaveStateEntry *se, *new_se;
    char id[256] = "";

    if (dev) {
        char *path = qdev_get_dev_path(dev);
        if (path) {
            pstrcpy(id, sizeof(id), path);
            pstrcat(id, sizeof(id), "/");
            g_free(path);
        }
    }
    pstrcat(id, sizeof(id), idstr);

    QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) {
        if (strcmp(se->idstr, id) == 0 && se->opaque == opaque) {
            QTAILQ_REMOVE(&savevm_handlers, se, entry);
            if (se->compat) {
                g_free(se->compat);
            }
            g_free(se->ops);
            g_free(se);
        }
    }
}

int vmstate_register_with_alias_id(DeviceState *dev, int instance_id,
                                   const VMStateDescription *vmsd,
                                   void *opaque, int alias_id,
                                   int required_for_version)
{
    SaveStateEntry *se;

    /* If this triggers, alias support can be dropped for the vmsd. */
    assert(alias_id == -1 || required_for_version >= vmsd->minimum_version_id);

    se = g_malloc0(sizeof(SaveStateEntry));
    se->version_id = vmsd->version_id;
    se->section_id = global_section_id++;
    se->opaque = opaque;
    se->vmsd = vmsd;
    se->alias_id = alias_id;
    se->no_migrate = vmsd->unmigratable;

    if (dev) {
        char *id = qdev_get_dev_path(dev);
        if (id) {
            pstrcpy(se->idstr, sizeof(se->idstr), id);
            pstrcat(se->idstr, sizeof(se->idstr), "/");
            g_free(id);

            se->compat = g_malloc0(sizeof(CompatEntry));
            pstrcpy(se->compat->idstr, sizeof(se->compat->idstr), vmsd->name);
            se->compat->instance_id = instance_id == -1 ?
                         calculate_compat_instance_id(vmsd->name) : instance_id;
            instance_id = -1;
        }
    }
    pstrcat(se->idstr, sizeof(se->idstr), vmsd->name);

    if (instance_id == -1) {
        se->instance_id = calculate_new_instance_id(se->idstr);
    } else {
        se->instance_id = instance_id;
    }
    assert(!se->compat || se->instance_id == 0);
    /* add at the end of list */
    QTAILQ_INSERT_TAIL(&savevm_handlers, se, entry);
    return 0;
}

void vmstate_unregister(DeviceState *dev, const VMStateDescription *vmsd,
                        void *opaque)
{
    SaveStateEntry *se, *new_se;

    QTAILQ_FOREACH_SAFE(se, &savevm_handlers, entry, new_se) {
        if (se->vmsd == vmsd && se->opaque == opaque) {
            QTAILQ_REMOVE(&savevm_handlers, se, entry);
            if (se->compat) {
                g_free(se->compat);
            }
            g_free(se);
        }
    }
}

static int vmstate_load(QEMUFile *f, SaveStateEntry *se, int version_id)
{
    trace_vmstate_load(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {         /* Old style */
        return se->ops->load_state(f, se->opaque, version_id);
    }
    return vmstate_load_state(f, se->vmsd, se->opaque, version_id);
}

static void vmstate_save(QEMUFile *f, SaveStateEntry *se)
{
    trace_vmstate_save(se->idstr, se->vmsd ? se->vmsd->name : "(old)");
    if (!se->vmsd) {         /* Old style */
        se->ops->save_state(f, se->opaque);
        return;
    }
    vmstate_save_state(f, se->vmsd, se->opaque);
}

bool qemu_savevm_state_blocked(Error **errp)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (se->no_migrate) {
            error_setg(errp, "State blocked by non-migratable device '%s'",
                       se->idstr);
            return true;
        }
    }
    return false;
}

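/*
 * Live-save phases: qemu_savevm_state_begin() writes the stream header plus a
 * SECTION_START record per live handler, qemu_savevm_state_iterate() streams
 * SECTION_PART chunks while respecting the bandwidth limit, and
 * qemu_savevm_state_complete() emits the final SECTION_END records followed
 * by a SECTION_FULL pass over the remaining (non-live) device state.
 */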
void qemu_savevm_state_begin(QEMUFile *f,
                             const MigrationParams *params)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_begin();
    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!se->ops || !se->ops->set_params) {
            continue;
        }
        se->ops->set_params(params, se->opaque);
    }

    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        int len;

        if (!se->ops || !se->ops->save_live_setup) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_START);
        qemu_put_be32(f, se->section_id);

        /* ID string */
        len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);

        ret = se->ops->save_live_setup(f, se->opaque);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            break;
        }
    }
}

/*
 * this function has three return values:
 *   negative: there was an error, and we have -errno.
 *   0 : We haven't finished, the caller has to go again
 *   1 : We have finished, we can go to complete phase
 */
int qemu_savevm_state_iterate(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret = 1;

    trace_savevm_state_iterate();
    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!se->ops || !se->ops->save_live_iterate) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        if (qemu_file_rate_limit(f)) {
            return 0;
        }
        trace_savevm_section_start(se->idstr, se->section_id);
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_PART);
        qemu_put_be32(f, se->section_id);

        ret = se->ops->save_live_iterate(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id);

        if (ret < 0) {
            qemu_file_set_error(f, ret);
        }
        if (ret <= 0) {
            /* Do not proceed to the next vmstate before this one reported
               completion of the current stage. This serializes the migration
               and reduces the probability that a faster changing state is
               synchronized over and over again. */
            break;
        }
    }
    return ret;
}

void qemu_savevm_state_complete(QEMUFile *f)
{
    SaveStateEntry *se;
    int ret;

    trace_savevm_state_complete();

    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!se->ops || !se->ops->save_live_complete) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        trace_savevm_section_start(se->idstr, se->section_id);
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_END);
        qemu_put_be32(f, se->section_id);

        ret = se->ops->save_live_complete(f, se->opaque);
        trace_savevm_section_end(se->idstr, se->section_id);
        if (ret < 0) {
            qemu_file_set_error(f, ret);
            return;
        }
    }

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        int len;

        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
            continue;
        }
        trace_savevm_section_start(se->idstr, se->section_id);
        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_FULL);
        qemu_put_be32(f, se->section_id);

        /* ID string */
        len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);

        vmstate_save(f, se);
        trace_savevm_section_end(se->idstr, se->section_id);
    }

    qemu_put_byte(f, QEMU_VM_EOF);
    qemu_fflush(f);
}

uint64_t qemu_savevm_state_pending(QEMUFile *f, uint64_t max_size)
{
    SaveStateEntry *se;
    uint64_t ret = 0;

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!se->ops || !se->ops->save_live_pending) {
            continue;
        }
        if (se->ops && se->ops->is_active) {
            if (!se->ops->is_active(se->opaque)) {
                continue;
            }
        }
        ret += se->ops->save_live_pending(f, se->opaque, max_size);
    }
    return ret;
}

void qemu_savevm_state_cancel(void)
{
    SaveStateEntry *se;

    trace_savevm_state_cancel();
    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (se->ops && se->ops->cancel) {
            se->ops->cancel(se->opaque);
        }
    }
}

static int qemu_savevm_state(QEMUFile *f)
{
    int ret;
    MigrationParams params = {
        .blk = 0,
        .shared = 0
    };

    if (qemu_savevm_state_blocked(NULL)) {
        return -EINVAL;
    }

    qemu_mutex_unlock_iothread();
    qemu_savevm_state_begin(f, &params);
    qemu_mutex_lock_iothread();

    while (qemu_file_get_error(f) == 0) {
        if (qemu_savevm_state_iterate(f) > 0) {
            break;
        }
    }

    ret = qemu_file_get_error(f);
    if (ret == 0) {
        qemu_savevm_state_complete(f);
        ret = qemu_file_get_error(f);
    }
    if (ret != 0) {
        qemu_savevm_state_cancel();
    }
    return ret;
}

static int qemu_save_device_state(QEMUFile *f)
{
    SaveStateEntry *se;

    qemu_put_be32(f, QEMU_VM_FILE_MAGIC);
    qemu_put_be32(f, QEMU_VM_FILE_VERSION);

    cpu_synchronize_all_states();

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        int len;

        if (se->is_ram) {
            continue;
        }
        if ((!se->ops || !se->ops->save_state) && !se->vmsd) {
            continue;
        }

        /* Section type */
        qemu_put_byte(f, QEMU_VM_SECTION_FULL);
        qemu_put_be32(f, se->section_id);

        /* ID string */
        len = strlen(se->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)se->idstr, len);

        qemu_put_be32(f, se->instance_id);
        qemu_put_be32(f, se->version_id);

        vmstate_save(f, se);
    }

    qemu_put_byte(f, QEMU_VM_EOF);

    return qemu_file_get_error(f);
}

static SaveStateEntry *find_se(const char *idstr, int instance_id)
{
    SaveStateEntry *se;

    QTAILQ_FOREACH(se, &savevm_handlers, entry) {
        if (!strcmp(se->idstr, idstr) &&
            (instance_id == se->instance_id ||
             instance_id == se->alias_id))
            return se;
        /* Migrating from an older version? */
        if (strstr(se->idstr, idstr) && se->compat) {
            if (!strcmp(se->compat->idstr, idstr) &&
                (instance_id == se->compat->instance_id ||
                 instance_id == se->alias_id))
                return se;
        }
    }
    return NULL;
}

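/*
 * Load side: qemu_loadvm_state() walks the stream section by section. Each
 * SECTION_START/SECTION_FULL record is matched against a registered
 * SaveStateEntry via find_se() and remembered in the temporary
 * loadvm_handlers list, so later SECTION_PART/SECTION_END records can be
 * dispatched by section id alone.
 */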
typedef struct LoadStateEntry {
    QLIST_ENTRY(LoadStateEntry) entry;
    SaveStateEntry *se;
    int section_id;
    int version_id;
} LoadStateEntry;

int qemu_loadvm_state(QEMUFile *f)
{
    QLIST_HEAD(, LoadStateEntry) loadvm_handlers =
        QLIST_HEAD_INITIALIZER(loadvm_handlers);
    LoadStateEntry *le, *new_le;
    uint8_t section_type;
    unsigned int v;
    int ret;

    if (qemu_savevm_state_blocked(NULL)) {
        return -EINVAL;
    }

    v = qemu_get_be32(f);
    if (v != QEMU_VM_FILE_MAGIC) {
        return -EINVAL;
    }

    v = qemu_get_be32(f);
    if (v == QEMU_VM_FILE_VERSION_COMPAT) {
        fprintf(stderr, "SaveVM v2 format is obsolete and doesn't work anymore\n");
        return -ENOTSUP;
    }
    if (v != QEMU_VM_FILE_VERSION) {
        return -ENOTSUP;
    }

    while ((section_type = qemu_get_byte(f)) != QEMU_VM_EOF) {
        uint32_t instance_id, version_id, section_id;
        SaveStateEntry *se;
        char idstr[257];
        int len;

        switch (section_type) {
        case QEMU_VM_SECTION_START:
        case QEMU_VM_SECTION_FULL:
            /* Read section start */
            section_id = qemu_get_be32(f);
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)idstr, len);
            idstr[len] = 0;
            instance_id = qemu_get_be32(f);
            version_id = qemu_get_be32(f);

            /* Find savevm section */
            se = find_se(idstr, instance_id);
            if (se == NULL) {
                fprintf(stderr, "Unknown savevm section or instance '%s' %d\n",
                        idstr, instance_id);
                ret = -EINVAL;
                goto out;
            }

            /* Validate version */
            if (version_id > se->version_id) {
                fprintf(stderr, "savevm: unsupported version %d for '%s' v%d\n",
                        version_id, idstr, se->version_id);
                ret = -EINVAL;
                goto out;
            }

            /* Add entry */
            le = g_malloc0(sizeof(*le));

            le->se = se;
            le->section_id = section_id;
            le->version_id = version_id;
            QLIST_INSERT_HEAD(&loadvm_handlers, le, entry);

            ret = vmstate_load(f, le->se, le->version_id);
            if (ret < 0) {
                fprintf(stderr, "qemu: warning: error while loading state for instance 0x%x of device '%s'\n",
                        instance_id, idstr);
                goto out;
            }
            break;
        case QEMU_VM_SECTION_PART:
        case QEMU_VM_SECTION_END:
            section_id = qemu_get_be32(f);

            QLIST_FOREACH(le, &loadvm_handlers, entry) {
                if (le->section_id == section_id) {
                    break;
                }
            }
            if (le == NULL) {
                fprintf(stderr, "Unknown savevm section %d\n", section_id);
                ret = -EINVAL;
                goto out;
            }

            ret = vmstate_load(f, le->se, le->version_id);
            if (ret < 0) {
                fprintf(stderr, "qemu: warning: error while loading state section id %d\n",
                        section_id);
                goto out;
            }
            break;
        default:
            fprintf(stderr, "Unknown savevm section type %d\n", section_type);
            ret = -EINVAL;
            goto out;
        }
    }

    cpu_synchronize_all_post_init();

    ret = 0;

out:
    QLIST_FOREACH_SAFE(le, &loadvm_handlers, entry, new_le) {
        QLIST_REMOVE(le, entry);
        g_free(le);
    }

    if (ret == 0) {
        ret = qemu_file_get_error(f);
    }

    return ret;
}

static BlockDriverState *find_vmstate_bs(void)
{
    BlockDriverState *bs = NULL;
    while ((bs = bdrv_next(bs))) {
        if (bdrv_can_snapshot(bs)) {
            return bs;
        }
    }
    return NULL;
}

/*
 * Deletes snapshots of a given name in all opened images.
 */
static int del_existing_snapshots(Monitor *mon, const char *name)
{
    BlockDriverState *bs;
    QEMUSnapshotInfo sn1, *snapshot = &sn1;
    Error *err = NULL;

    bs = NULL;
    while ((bs = bdrv_next(bs))) {
        if (bdrv_can_snapshot(bs) &&
            bdrv_snapshot_find(bs, snapshot, name) >= 0) {
            bdrv_snapshot_delete_by_id_or_name(bs, name, &err);
            if (err) {
                monitor_printf(mon,
"Error while deleting snapshot on device '%s':"
|
|
|
|
" %s\n",
|
|
|
|
bdrv_get_device_name(bs),
|
|
|
|
error_get_pretty(err));
|
|
|
|
error_free(err);
|
2009-11-04 00:34:37 +08:00
|
|
|
return -1;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return 0;
|
|
|
|
}
|
|
|
|
|
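/*
 * Handler for the HMP "savevm" command: stops the VM, writes the machine
 * state through a block-backed QEMUFile, then creates an internal snapshot of
 * the same name on every snapshot-capable image.
 */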
void do_savevm(Monitor *mon, const QDict *qdict)
{
    BlockDriverState *bs, *bs1;
    QEMUSnapshotInfo sn1, *sn = &sn1, old_sn1, *old_sn = &old_sn1;
    int ret;
    QEMUFile *f;
    int saved_vm_running;
    uint64_t vm_state_size;
    qemu_timeval tv;
    struct tm tm;
    const char *name = qdict_get_try_str(qdict, "name");

    /* Verify if there is a device that doesn't support snapshots and is writable */
    bs = NULL;
    while ((bs = bdrv_next(bs))) {

        if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
            continue;
        }

        if (!bdrv_can_snapshot(bs)) {
            monitor_printf(mon, "Device '%s' is writable but does not support snapshots.\n",
                               bdrv_get_device_name(bs));
            return;
        }
    }

    bs = find_vmstate_bs();
    if (!bs) {
        monitor_printf(mon, "No block device can accept snapshots\n");
        return;
    }

    saved_vm_running = runstate_is_running();
    vm_stop(RUN_STATE_SAVE_VM);

    memset(sn, 0, sizeof(*sn));

    /* fill auxiliary fields */
    qemu_gettimeofday(&tv);
    sn->date_sec = tv.tv_sec;
    sn->date_nsec = tv.tv_usec * 1000;
    sn->vm_clock_nsec = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL);

    if (name) {
        ret = bdrv_snapshot_find(bs, old_sn, name);
        if (ret >= 0) {
            pstrcpy(sn->name, sizeof(sn->name), old_sn->name);
            pstrcpy(sn->id_str, sizeof(sn->id_str), old_sn->id_str);
        } else {
            pstrcpy(sn->name, sizeof(sn->name), name);
        }
    } else {
        /* cast below needed for OpenBSD where tv_sec is still 'long' */
        localtime_r((const time_t *)&tv.tv_sec, &tm);
        strftime(sn->name, sizeof(sn->name), "vm-%Y%m%d%H%M%S", &tm);
    }

    /* Delete old snapshots of the same name */
    if (name && del_existing_snapshots(mon, name) < 0) {
        goto the_end;
    }

    /* save the VM state */
    f = qemu_fopen_bdrv(bs, 1);
    if (!f) {
        monitor_printf(mon, "Could not open VM state file\n");
        goto the_end;
    }
    ret = qemu_savevm_state(f);
    vm_state_size = qemu_ftell(f);
    qemu_fclose(f);
    if (ret < 0) {
        monitor_printf(mon, "Error %d while writing VM\n", ret);
        goto the_end;
    }

    /* create the snapshots */

    bs1 = NULL;
    while ((bs1 = bdrv_next(bs1))) {
|
|
|
if (bdrv_can_snapshot(bs1)) {
|
2008-12-12 05:06:49 +08:00
|
|
|
/* Write VM state size only to the image that contains the state */
|
|
|
|
sn->vm_state_size = (bs == bs1 ? vm_state_size : 0);
|
2008-11-12 05:33:36 +08:00
|
|
|
ret = bdrv_snapshot_create(bs1, sn);
|
|
|
|
if (ret < 0) {
|
2009-03-06 07:01:23 +08:00
|
|
|
monitor_printf(mon, "Error while creating snapshot on '%s'\n",
|
|
|
|
bdrv_get_device_name(bs1));
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
the_end:
|
2013-11-28 22:01:13 +08:00
|
|
|
if (saved_vm_running) {
|
2008-11-12 05:33:36 +08:00
|
|
|
vm_start();
|
2013-11-28 22:01:13 +08:00
|
|
|
}
|
2008-11-12 05:33:36 +08:00
|
|
|
}
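As a usage note (not part of this file), the save path above is normally driven from the HMP monitor; an illustrative session might be:
/* Illustrative HMP session (the tag name is an example):
 *   (qemu) savevm snap1          <- runs do_savevm() above
 *   (qemu) info snapshots        <- lists snapshots on the vmstate device
 *   (qemu) loadvm snap1          <- reverts via load_vmstate()
 *   (qemu) delvm snap1           <- deletes via do_delvm()
 */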
|
|
|
|
|
2012-01-25 20:24:51 +08:00
|
|
|
void qmp_xen_save_devices_state(const char *filename, Error **errp)
|
|
|
|
{
|
|
|
|
QEMUFile *f;
|
|
|
|
int saved_vm_running;
|
|
|
|
int ret;
|
|
|
|
|
|
|
|
saved_vm_running = runstate_is_running();
|
|
|
|
vm_stop(RUN_STATE_SAVE_VM);
|
|
|
|
|
|
|
|
f = qemu_fopen(filename, "wb");
|
|
|
|
if (!f) {
|
2013-06-08 02:36:58 +08:00
|
|
|
error_setg_file_open(errp, errno, filename);
|
2012-01-25 20:24:51 +08:00
|
|
|
goto the_end;
|
|
|
|
}
|
|
|
|
ret = qemu_save_device_state(f);
|
|
|
|
qemu_fclose(f);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_set(errp, QERR_IO_ERROR);
|
|
|
|
}
|
|
|
|
|
|
|
|
the_end:
|
2013-11-28 22:01:13 +08:00
|
|
|
if (saved_vm_running) {
|
2012-01-25 20:24:51 +08:00
|
|
|
vm_start();
|
2013-11-28 22:01:13 +08:00
|
|
|
}
|
2012-01-25 20:24:51 +08:00
|
|
|
}
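The QMP counterpart above is normally invoked by the Xen toolstack; as a rough illustration of the wire format (the filename is an example):
/* Illustrative QMP invocation:
 *   { "execute": "xen-save-devices-state",
 *     "arguments": { "filename": "/tmp/devstate" } }
 */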
|
|
|
|
|
2010-02-17 23:24:10 +08:00
|
|
|
int load_vmstate(const char *name)
|
2008-11-12 05:33:36 +08:00
|
|
|
{
|
2010-07-20 02:25:01 +08:00
|
|
|
BlockDriverState *bs, *bs_vm_state;
|
2008-12-12 05:06:49 +08:00
|
|
|
QEMUSnapshotInfo sn;
|
2008-11-12 05:33:36 +08:00
|
|
|
QEMUFile *f;
|
2009-07-22 22:42:57 +08:00
|
|
|
int ret;
|
2008-11-12 05:33:36 +08:00
|
|
|
|
2013-05-25 11:09:42 +08:00
|
|
|
bs_vm_state = find_vmstate_bs();
|
2010-07-20 02:25:01 +08:00
|
|
|
if (!bs_vm_state) {
|
|
|
|
error_report("No block device supports snapshots");
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Don't even try to load empty VM states */
|
|
|
|
ret = bdrv_snapshot_find(bs_vm_state, &sn, name);
|
|
|
|
if (ret < 0) {
|
|
|
|
return ret;
|
|
|
|
} else if (sn.vm_state_size == 0) {
|
2011-03-01 17:48:12 +08:00
|
|
|
error_report("This is a disk-only snapshot. Revert to it offline "
|
|
|
|
"using qemu-img.");
|
2010-07-20 02:25:01 +08:00
|
|
|
return -EINVAL;
|
|
|
|
}
|
|
|
|
|
|
|
|
/* Verify if there is any device that doesn't support snapshots and is
|
|
|
|
writable and check if the requested snapshot is available too. */
|
2010-06-03 00:55:21 +08:00
|
|
|
bs = NULL;
|
|
|
|
while ((bs = bdrv_next(bs))) {
|
2010-06-08 21:40:55 +08:00
|
|
|
|
2011-08-03 21:08:11 +08:00
|
|
|
if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs)) {
|
2010-06-08 21:40:55 +08:00
|
|
|
continue;
|
|
|
|
}
|
|
|
|
|
|
|
|
if (!bdrv_can_snapshot(bs)) {
|
|
|
|
error_report("Device '%s' is writable but does not support snapshots.",
|
|
|
|
bdrv_get_device_name(bs));
|
|
|
|
return -ENOTSUP;
|
|
|
|
}
|
|
|
|
|
2010-07-20 02:25:01 +08:00
|
|
|
ret = bdrv_snapshot_find(bs, &sn, name);
|
|
|
|
if (ret < 0) {
|
|
|
|
error_report("Device '%s' does not have the requested snapshot '%s'",
|
|
|
|
bdrv_get_device_name(bs), name);
|
|
|
|
return ret;
|
|
|
|
}
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/* Flush all IO requests so they don't interfere with the new state. */
|
2011-11-30 20:23:43 +08:00
|
|
|
bdrv_drain_all();
|
2008-11-12 05:33:36 +08:00
|
|
|
|
2010-07-20 02:25:01 +08:00
|
|
|
bs = NULL;
|
|
|
|
while ((bs = bdrv_next(bs))) {
|
|
|
|
if (bdrv_can_snapshot(bs)) {
|
|
|
|
ret = bdrv_snapshot_goto(bs, name);
|
2008-11-12 05:33:36 +08:00
|
|
|
if (ret < 0) {
|
2010-07-20 02:25:01 +08:00
|
|
|
error_report("Error %d while activating snapshot '%s' on '%s'",
|
|
|
|
ret, name, bdrv_get_device_name(bs));
|
|
|
|
return ret;
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/* restore the VM state */
|
2010-07-20 02:25:01 +08:00
|
|
|
f = qemu_fopen_bdrv(bs_vm_state, 0);
|
2008-11-12 05:33:36 +08:00
|
|
|
if (!f) {
|
2010-02-19 00:25:24 +08:00
|
|
|
error_report("Could not open VM state file");
|
2009-08-21 01:42:22 +08:00
|
|
|
return -EINVAL;
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
2010-07-20 02:25:01 +08:00
|
|
|
|
2011-06-15 00:29:45 +08:00
|
|
|
qemu_system_reset(VMRESET_SILENT);
|
2008-11-12 05:33:36 +08:00
|
|
|
ret = qemu_loadvm_state(f);
|
2010-07-20 02:25:01 +08:00
|
|
|
|
2008-11-12 05:33:36 +08:00
|
|
|
qemu_fclose(f);
|
|
|
|
if (ret < 0) {
|
2010-02-19 00:25:24 +08:00
|
|
|
error_report("Error %d while loading VM state", ret);
|
2009-08-21 01:42:22 +08:00
|
|
|
return ret;
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
2010-07-20 02:25:01 +08:00
|
|
|
|
2009-08-21 01:42:22 +08:00
|
|
|
return 0;
|
2009-08-21 01:42:20 +08:00
|
|
|
}
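For context, a minimal sketch of a caller, modelled on the HMP loadvm handler (the real handler in this tree may differ slightly), stops the VM, loads the state, and only resumes on success:
/* Sketch of an HMP-style caller of load_vmstate(). */
void do_loadvm(Monitor *mon, const QDict *qdict)
{
    int saved_vm_running = runstate_is_running();
    const char *name = qdict_get_str(qdict, "name");

    vm_stop(RUN_STATE_RESTORE_VM);

    if (load_vmstate(name) == 0 && saved_vm_running) {
        vm_start();
    }
}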
|
|
|
|
|
2009-08-29 02:27:13 +08:00
|
|
|
void do_delvm(Monitor *mon, const QDict *qdict)
|
2008-11-12 05:33:36 +08:00
|
|
|
{
|
|
|
|
BlockDriverState *bs, *bs1;
|
snapshot: distinguish id and name in snapshot delete
Snapshot creation already distinguishes between id and name, since it takes
a structured parameter *sn, but delete cannot. An exact delete is later needed
by qmp_transaction abort and blockdev-snapshot-delete-sync, so change its
prototype. *errp is also added to report errors, while the return value is
kept so callers can check what kind of error happened. The existing callers,
savevm, delvm and qemu-img, are not impacted: a new helper
bdrv_snapshot_delete_by_id_or_name() checks the return value and retries the
operation (a sketch of the helper follows do_delvm() below).
Before this patch:
For qcow2, the id is searched first, then the name, to find the snapshot to delete.
For rbd, the name is searched.
For sheepdog, nothing is done.
After this patch:
For qcow2, the logic is unchanged, by calling the function twice in the caller.
For rbd, deleting by id always fails, but the second try still searches by
name, so there is no change for the user.
Some of the *errp handling is based on Pavel's patch.
Signed-off-by: Wenchao Xia <xiawenc@linux.vnet.ibm.com>
Signed-off-by: Pavel Hrdina <phrdina@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
2013-09-11 14:04:33 +08:00
|
|
|
Error *err = NULL;
|
2009-08-29 02:27:13 +08:00
|
|
|
const char *name = qdict_get_str(qdict, "name");
|
2008-11-12 05:33:36 +08:00
|
|
|
|
2013-05-25 11:09:42 +08:00
|
|
|
bs = find_vmstate_bs();
|
2008-11-12 05:33:36 +08:00
|
|
|
if (!bs) {
|
2009-03-06 07:01:23 +08:00
|
|
|
monitor_printf(mon, "No block device supports snapshots\n");
|
2008-11-12 05:33:36 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2010-06-03 00:55:21 +08:00
|
|
|
bs1 = NULL;
|
|
|
|
while ((bs1 = bdrv_next(bs1))) {
|
2010-06-08 21:40:55 +08:00
|
|
|
if (bdrv_can_snapshot(bs1)) {
|
2013-09-11 14:04:33 +08:00
|
|
|
bdrv_snapshot_delete_by_id_or_name(bs, name, &err);
|
2014-01-30 22:07:28 +08:00
|
|
|
if (err) {
|
2013-09-11 14:04:33 +08:00
|
|
|
monitor_printf(mon,
|
|
|
|
"Error while deleting snapshot on device '%s':"
|
|
|
|
" %s\n",
|
|
|
|
bdrv_get_device_name(bs),
|
|
|
|
error_get_pretty(err));
|
|
|
|
error_free(err);
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
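The helper referenced in the commit message above can be sketched as follows; this illustrates the "try id, then name" retry described there, assuming the four-argument bdrv_snapshot_delete() introduced by that series, and is not necessarily the exact code in block/snapshot.c:
/* Sketch: delete a snapshot identified either by id or by name. */
int bdrv_snapshot_delete_by_id_or_name(BlockDriverState *bs,
                                       const char *id_or_name,
                                       Error **errp)
{
    int ret;
    Error *local_err = NULL;

    /* First interpret the string as a snapshot id ... */
    ret = bdrv_snapshot_delete(bs, id_or_name, NULL, &local_err);
    if (ret < 0) {
        /* ... and if that fails, retry treating it as a name. */
        error_free(local_err);
        local_err = NULL;
        ret = bdrv_snapshot_delete(bs, NULL, id_or_name, &local_err);
    }

    if (ret < 0) {
        error_propagate(errp, local_err);
    }
    return ret;
}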
|
|
|
|
|
2013-01-14 14:06:25 +08:00
|
|
|
void do_info_snapshots(Monitor *mon, const QDict *qdict)
|
2008-11-12 05:33:36 +08:00
|
|
|
{
|
|
|
|
BlockDriverState *bs, *bs1;
|
2010-08-05 01:55:48 +08:00
|
|
|
QEMUSnapshotInfo *sn_tab, *sn, s, *sn_info = &s;
|
|
|
|
int nb_sns, i, ret, available;
|
|
|
|
int total;
|
|
|
|
int *available_snapshots;
|
2008-11-12 05:33:36 +08:00
|
|
|
|
2013-05-25 11:09:42 +08:00
|
|
|
bs = find_vmstate_bs();
|
2008-11-12 05:33:36 +08:00
|
|
|
if (!bs) {
|
2009-03-06 07:01:23 +08:00
|
|
|
monitor_printf(mon, "No available block device supports snapshots\n");
|
2008-11-12 05:33:36 +08:00
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
|
|
|
nb_sns = bdrv_snapshot_list(bs, &sn_tab);
|
|
|
|
if (nb_sns < 0) {
|
2009-03-06 07:01:23 +08:00
|
|
|
monitor_printf(mon, "bdrv_snapshot_list: error %d\n", nb_sns);
|
2008-11-12 05:33:36 +08:00
|
|
|
return;
|
|
|
|
}
|
2010-08-05 01:55:48 +08:00
|
|
|
|
|
|
|
if (nb_sns == 0) {
|
|
|
|
monitor_printf(mon, "There is no snapshot available.\n");
|
|
|
|
return;
|
|
|
|
}
|
|
|
|
|
2011-08-21 11:09:37 +08:00
|
|
|
available_snapshots = g_malloc0(sizeof(int) * nb_sns);
|
2010-08-05 01:55:48 +08:00
|
|
|
total = 0;
|
|
|
|
for (i = 0; i < nb_sns; i++) {
|
2008-11-12 05:33:36 +08:00
|
|
|
sn = &sn_tab[i];
|
2010-08-05 01:55:48 +08:00
|
|
|
available = 1;
|
|
|
|
bs1 = NULL;
|
|
|
|
|
|
|
|
while ((bs1 = bdrv_next(bs1))) {
|
|
|
|
if (bdrv_can_snapshot(bs1) && bs1 != bs) {
|
|
|
|
ret = bdrv_snapshot_find(bs1, sn_info, sn->id_str);
|
|
|
|
if (ret < 0) {
|
|
|
|
available = 0;
|
|
|
|
break;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
if (available) {
|
|
|
|
available_snapshots[total] = i;
|
|
|
|
total++;
|
|
|
|
}
|
2008-11-12 05:33:36 +08:00
|
|
|
}
|
2010-08-05 01:55:48 +08:00
|
|
|
|
|
|
|
if (total > 0) {
|
2013-05-25 11:09:45 +08:00
|
|
|
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, NULL);
|
|
|
|
monitor_printf(mon, "\n");
|
2010-08-05 01:55:48 +08:00
|
|
|
for (i = 0; i < total; i++) {
|
|
|
|
sn = &sn_tab[available_snapshots[i]];
|
2013-05-25 11:09:45 +08:00
|
|
|
bdrv_snapshot_dump((fprintf_function)monitor_printf, mon, sn);
|
|
|
|
monitor_printf(mon, "\n");
|
2010-08-05 01:55:48 +08:00
|
|
|
}
|
|
|
|
} else {
|
|
|
|
monitor_printf(mon, "There is no suitable snapshot available\n");
|
|
|
|
}
|
|
|
|
|
2011-08-21 11:09:37 +08:00
|
|
|
g_free(sn_tab);
|
|
|
|
g_free(available_snapshots);
|
2010-08-05 01:55:48 +08:00
|
|
|
|
2008-11-12 05:33:36 +08:00
|
|
|
}
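The dump calls above produce a tabular listing on the monitor; the output looks roughly like this (columns and spacing are illustrative, taken from bdrv_snapshot_dump()'s header):
/* Illustrative "info snapshots" output:
 *   ID        TAG                 VM SIZE                DATE       VM CLOCK
 *   1         snap1                  220M 2014-06-20 12:00:00   00:00:12.345
 */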
|
2011-12-20 21:59:12 +08:00
|
|
|
|
|
|
|
void vmstate_register_ram(MemoryRegion *mr, DeviceState *dev)
|
|
|
|
{
|
2012-01-08 19:18:19 +08:00
|
|
|
qemu_ram_set_idstr(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK,
|
2011-12-20 21:59:12 +08:00
|
|
|
memory_region_name(mr), dev);
|
|
|
|
}
|
|
|
|
|
|
|
|
void vmstate_unregister_ram(MemoryRegion *mr, DeviceState *dev)
|
|
|
|
{
|
2014-04-02 15:13:27 +08:00
|
|
|
qemu_ram_unset_idstr(memory_region_get_ram_addr(mr) & TARGET_PAGE_MASK);
|
2011-12-20 21:59:12 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
void vmstate_register_ram_global(MemoryRegion *mr)
|
|
|
|
{
|
|
|
|
vmstate_register_ram(mr, NULL);
|
|
|
|
}
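As a hedged usage sketch (not from this file), a device typically registers its RAM-backed region during realize so its contents are captured in the VM state; MyDevState, MYDEV and the vram field are hypothetical names:
static void mydev_realize(DeviceState *dev, Error **errp)
{
    MyDevState *s = MYDEV(dev);          /* hypothetical QOM cast */

    /* The region is assumed to have been initialised as RAM elsewhere,
     * e.g. with memory_region_init_ram() during instance init. */
    vmstate_register_ram(&s->vram, dev);
}

/* Boards that own the main system RAM use the global variant instead:
 *     vmstate_register_ram_global(ram_memory);
 */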
|